ngram
listlengths
0
67.8k
[ "= { \"services-accounting-information\": { \"v9-error-information\": [ { \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\",", "\"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\": \"0\", \"memory-overload\": \"No\", } ] } }", "{ \"services-accounting-information\": { \"v9-error-information\": [ { \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\":", "\"v9-error-information\": [ { \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\": \"0\",", "[ { \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\": \"0\", \"memory-overload\":", "{ \"v9-error-information\": [ { \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\":", "\"services-accounting-information\": { \"v9-error-information\": [ { \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\",", "{ \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\": \"0\", \"memory-overload\": \"No\",", "\"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\": \"0\", \"memory-overload\": \"No\", } ] }", "\"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\": \"0\", \"memory-overload\": \"No\", }", "\"ms-9/0/0\", \"service-set-dropped\": \"0\", 
\"active-timeout-failures\": \"0\", \"export-packet-failures\": \"0\", \"flow-creation-failures\": \"0\", \"memory-overload\": \"No\", } ]", "expected_output = { \"services-accounting-information\": { \"v9-error-information\": [ { \"interface-name\": \"ms-9/0/0\", \"service-set-dropped\": \"0\", \"active-timeout-failures\":" ]
[ "assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is True assert", "0 >>> bin(4) '0b100' >>> bin(3) '0b11' \"\"\" return n > 0 and", "Power of Two Given an integer, write a function to determine if it", "beats 82.84% of python submissions. \"\"\" class Solution(object): def isPowerOfTwo(self, n): \"\"\" :type", ">>> bin(4) '0b100' >>> bin(8) '0b1000' >>> bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\"", "return False \"\"\" Example output >>> bin(2) '0b10' >>> bin(4) '0b100' >>> bin(8)", "\"\"\" >>> 4&3 0 >>> bin(4) '0b100' >>> bin(3) '0b11' \"\"\" return n", "Your runtime beats 82.84% of python submissions. \"\"\" class Solution(object): def isPowerOfTwo(self, n):", "Credits: Special thanks to @jianchao.li.fighter for adding this problem and creating all test", "adding this problem and creating all test cases. Performance: 1. Total Accepted: 31274", "result_left and result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0 >>> bin(4) '0b100'", "Difficulty: Easy 2. Your runtime beats 82.84% of python submissions. \"\"\" class Solution(object):", "power of two. Credits: Special thanks to @jianchao.li.fighter for adding this problem and", "'0b11' \"\"\" return n > 0 and (n & (n - 1)) ==", "\"\"\" if n <= 0: return False \"\"\" Example output >>> bin(2) '0b10'", "bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:] result_left = bin_str_left == \"0b1\" result_right =", "test cases. Performance: 1. Total Accepted: 31274 Total Submissions: 99121 Difficulty: Easy 2.", "write a function to determine if it is a power of two. 
Credits:", "<= 0: return False \"\"\" Example output >>> bin(2) '0b10' >>> bin(4) '0b100'", "n > 0 and (n & (n - 1)) == 0 assert Solution().isPowerOfTwo(0)", "and result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0 >>> bin(4) '0b100' >>>", "and (n & (n - 1)) == 0 assert Solution().isPowerOfTwo(0) is False assert", "= bin_str[0:3] bin_str_right = bin_str[3:] result_left = bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\")", "assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is True assert", "'0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str = bin(n) bin_str_left = bin_str[0:3] bin_str_right =", "True assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is True", "False assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is True", "bin(8) '0b1000' >>> bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str = bin(n) bin_str_left", "bin(2) '0b10' >>> bin(4) '0b100' >>> bin(8) '0b1000' >>> bin(64) '0b1000000' >>> bin(512)", "n): \"\"\" :type n: int :rtype: bool \"\"\" if n <= 0: return", "@jianchao.li.fighter for adding this problem and creating all test cases. Performance: 1. Total", "problem and creating all test cases. Performance: 1. Total Accepted: 31274 Total Submissions:", "Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3)", "of two. 
Credits: Special thanks to @jianchao.li.fighter for adding this problem and creating", "def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0 >>> bin(4) '0b100' >>> bin(3) '0b11'", "'0b100' >>> bin(8) '0b1000' >>> bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str =", "is False assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is", ":rtype: bool \"\"\" if n <= 0: return False \"\"\" Example output >>>", "n: int :rtype: bool \"\"\" if n <= 0: return False \"\"\" Example", ">>> bin(4) '0b100' >>> bin(3) '0b11' \"\"\" return n > 0 and (n", "of Two Given an integer, write a function to determine if it is", "bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str = bin(n) bin_str_left = bin_str[0:3] bin_str_right", "two. Credits: Special thanks to @jianchao.li.fighter for adding this problem and creating all", "'0b10' >>> bin(4) '0b100' >>> bin(8) '0b1000' >>> bin(64) '0b1000000' >>> bin(512) '0b1000000000'", "bin_str[3:] result_left = bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right) return result_left", "(n & (n - 1)) == 0 assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1)", "\"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right) return result_left and result_right def isPowerOfTwo_from_other(self, n):", "it is a power of two. Credits: Special thanks to @jianchao.li.fighter for adding", "int :rtype: bool \"\"\" if n <= 0: return False \"\"\" Example output", "a function to determine if it is a power of two. 
Credits: Special", "isPowerOfTwo(self, n): \"\"\" :type n: int :rtype: bool \"\"\" if n <= 0:", "n <= 0: return False \"\"\" Example output >>> bin(2) '0b10' >>> bin(4)", "Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2)", ">>> bin(8) '0b1000' >>> bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str = bin(n)", "assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is False assert", "bin(4) '0b100' >>> bin(8) '0b1000' >>> bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str", "4&3 0 >>> bin(4) '0b100' >>> bin(3) '0b11' \"\"\" return n > 0", "integer, write a function to determine if it is a power of two.", "0 and (n & (n - 1)) == 0 assert Solution().isPowerOfTwo(0) is False", "result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0 >>> bin(4) '0b100' >>> bin(3)", "1. Total Accepted: 31274 Total Submissions: 99121 Difficulty: Easy 2. Your runtime beats", "\"\"\" :type n: int :rtype: bool \"\"\" if n <= 0: return False", "2. Your runtime beats 82.84% of python submissions. \"\"\" class Solution(object): def isPowerOfTwo(self,", "thanks to @jianchao.li.fighter for adding this problem and creating all test cases. Performance:", "== \"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right) return result_left and result_right def isPowerOfTwo_from_other(self,", "= bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right) return result_left and result_right", "runtime beats 82.84% of python submissions. 
\"\"\" class Solution(object): def isPowerOfTwo(self, n): \"\"\"", "\"\"\" class Solution(object): def isPowerOfTwo(self, n): \"\"\" :type n: int :rtype: bool \"\"\"", "if n <= 0: return False \"\"\" Example output >>> bin(2) '0b10' >>>", ">>> bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str = bin(n) bin_str_left = bin_str[0:3]", "Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is True assert Solution().isPowerOfTwo(15)", "'0b1000000000' \"\"\" bin_str = bin(n) bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:] result_left =", "is True assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is", ":type n: int :rtype: bool \"\"\" if n <= 0: return False \"\"\"", "def isPowerOfTwo(self, n): \"\"\" :type n: int :rtype: bool \"\"\" if n <=", "Submissions: 99121 Difficulty: Easy 2. Your runtime beats 82.84% of python submissions. \"\"\"", "\"\"\" Example output >>> bin(2) '0b10' >>> bin(4) '0b100' >>> bin(8) '0b1000' >>>", ">>> bin(512) '0b1000000000' \"\"\" bin_str = bin(n) bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:]", "all test cases. Performance: 1. Total Accepted: 31274 Total Submissions: 99121 Difficulty: Easy", "for adding this problem and creating all test cases. Performance: 1. Total Accepted:", "\"\"\" bin_str = bin(n) bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:] result_left = bin_str_left", "False assert Solution().isPowerOfTwo(4) is True assert Solution().isPowerOfTwo(15) is False assert Solution().isPowerOfTwo(16) is True", "result_left = bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right) return result_left and", "to determine if it is a power of two. 
Credits: Special thanks to", "bin_str[0:3] bin_str_right = bin_str[3:] result_left = bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\") ==", "== len(bin_str_right) return result_left and result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0", "bin_str_right = bin_str[3:] result_left = bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right)", "Solution(object): def isPowerOfTwo(self, n): \"\"\" :type n: int :rtype: bool \"\"\" if n", "Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.", "0 assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is True", "Question: Power of Two Given an integer, write a function to determine if", "bin_str_right.count(\"0\") == len(bin_str_right) return result_left and result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3", "isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0 >>> bin(4) '0b100' >>> bin(3) '0b11' \"\"\"", "'0b100' >>> bin(3) '0b11' \"\"\" return n > 0 and (n & (n", "an integer, write a function to determine if it is a power of", "assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is True assert Solution().isPowerOfTwo(15) is False assert", "Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4)", "result_right = bin_str_right.count(\"0\") == len(bin_str_right) return result_left and result_right def isPowerOfTwo_from_other(self, n): \"\"\"", "> 0 and (n & (n - 1)) == 0 assert Solution().isPowerOfTwo(0) is", "= bin(n) bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:] result_left = bin_str_left == \"0b1\"", ">>> 4&3 0 >>> bin(4) '0b100' >>> bin(3) '0b11' \"\"\" return n >", "is a power of two. Credits: Special thanks to @jianchao.li.fighter for adding this", "99121 Difficulty: Easy 2. 
Your runtime beats 82.84% of python submissions. \"\"\" class", "is True assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is True assert Solution().isPowerOfTwo(15) is", "a power of two. Credits: Special thanks to @jianchao.li.fighter for adding this problem", "return n > 0 and (n & (n - 1)) == 0 assert", "and creating all test cases. Performance: 1. Total Accepted: 31274 Total Submissions: 99121", ">>> bin(2) '0b10' >>> bin(4) '0b100' >>> bin(8) '0b1000' >>> bin(64) '0b1000000' >>>", "= bin_str_right.count(\"0\") == len(bin_str_right) return result_left and result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>>", "len(bin_str_right) return result_left and result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0 >>>", "False \"\"\" Example output >>> bin(2) '0b10' >>> bin(4) '0b100' >>> bin(8) '0b1000'", "Example output >>> bin(2) '0b10' >>> bin(4) '0b100' >>> bin(8) '0b1000' >>> bin(64)", "bin(3) '0b11' \"\"\" return n > 0 and (n & (n - 1))", "this problem and creating all test cases. Performance: 1. Total Accepted: 31274 Total", "submissions. \"\"\" class Solution(object): def isPowerOfTwo(self, n): \"\"\" :type n: int :rtype: bool", "Accepted: 31274 Total Submissions: 99121 Difficulty: Easy 2. Your runtime beats 82.84% of", "Performance: 1. Total Accepted: 31274 Total Submissions: 99121 Difficulty: Easy 2. Your runtime", "\"\"\" return n > 0 and (n & (n - 1)) == 0", "is False assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is", "cases. Performance: 1. Total Accepted: 31274 Total Submissions: 99121 Difficulty: Easy 2. 
Your", "bin(n) bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:] result_left = bin_str_left == \"0b1\" result_right", "True assert Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is True assert Solution().isPowerOfTwo(15) is False", "class Solution(object): def isPowerOfTwo(self, n): \"\"\" :type n: int :rtype: bool \"\"\" if", "creating all test cases. Performance: 1. Total Accepted: 31274 Total Submissions: 99121 Difficulty:", "Easy 2. Your runtime beats 82.84% of python submissions. \"\"\" class Solution(object): def", "82.84% of python submissions. \"\"\" class Solution(object): def isPowerOfTwo(self, n): \"\"\" :type n:", "n): \"\"\" >>> 4&3 0 >>> bin(4) '0b100' >>> bin(3) '0b11' \"\"\" return", "= bin_str[3:] result_left = bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right) return", "Two Given an integer, write a function to determine if it is a", "(n - 1)) == 0 assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is False", "bool \"\"\" if n <= 0: return False \"\"\" Example output >>> bin(2)", "bin(512) '0b1000000000' \"\"\" bin_str = bin(n) bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:] result_left", "determine if it is a power of two. Credits: Special thanks to @jianchao.li.fighter", "to @jianchao.li.fighter for adding this problem and creating all test cases. Performance: 1.", "0: return False \"\"\" Example output >>> bin(2) '0b10' >>> bin(4) '0b100' >>>", "output >>> bin(2) '0b10' >>> bin(4) '0b100' >>> bin(8) '0b1000' >>> bin(64) '0b1000000'", ">>> bin(3) '0b11' \"\"\" return n > 0 and (n & (n -", "python submissions. \"\"\" class Solution(object): def isPowerOfTwo(self, n): \"\"\" :type n: int :rtype:", "1)) == 0 assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1)", "function to determine if it is a power of two. 
Credits: Special thanks", "False assert Solution().isPowerOfTwo(1) is True assert Solution().isPowerOfTwo(2) is True assert Solution().isPowerOfTwo(3) is False", "assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is True assert", "of python submissions. \"\"\" class Solution(object): def isPowerOfTwo(self, n): \"\"\" :type n: int", "'0b1000' >>> bin(64) '0b1000000' >>> bin(512) '0b1000000000' \"\"\" bin_str = bin(n) bin_str_left =", "31274 Total Submissions: 99121 Difficulty: Easy 2. Your runtime beats 82.84% of python", "return result_left and result_right def isPowerOfTwo_from_other(self, n): \"\"\" >>> 4&3 0 >>> bin(4)", "& (n - 1)) == 0 assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is", "Given an integer, write a function to determine if it is a power", "- 1)) == 0 assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is False assert", "Solution().isPowerOfTwo(3) is False assert Solution().isPowerOfTwo(4) is True assert Solution().isPowerOfTwo(15) is False assert Solution().isPowerOfTwo(16)", "is False assert Solution().isPowerOfTwo(4) is True assert Solution().isPowerOfTwo(15) is False assert Solution().isPowerOfTwo(16) is", "bin_str_left == \"0b1\" result_right = bin_str_right.count(\"0\") == len(bin_str_right) return result_left and result_right def", "if it is a power of two. Credits: Special thanks to @jianchao.li.fighter for", "\"\"\" Question: Power of Two Given an integer, write a function to determine", "bin_str = bin(n) bin_str_left = bin_str[0:3] bin_str_right = bin_str[3:] result_left = bin_str_left ==", "bin(4) '0b100' >>> bin(3) '0b11' \"\"\" return n > 0 and (n &", "Total Accepted: 31274 Total Submissions: 99121 Difficulty: Easy 2. 
Your runtime beats 82.84%", "== 0 assert Solution().isPowerOfTwo(0) is False assert Solution().isPowerOfTwo(-1) is False assert Solution().isPowerOfTwo(1) is", "Total Submissions: 99121 Difficulty: Easy 2. Your runtime beats 82.84% of python submissions." ]
[ "module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules') router.register(r'question_modules_statistics', QuestionModuleStatisticsViewSet, basename='question-modules-statistic') urlpatterns", "module_question.api.views.module_question_viewsets import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules')", "import DefaultRouter from module_question.api.views.module_question_viewsets import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter()", "QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules') router.register(r'question_modules_statistics', QuestionModuleStatisticsViewSet,", "from module_question.api.views.module_question_viewsets import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet,", "<reponame>NicolasMuras/Lookdaluv<gh_stars>1-10 from rest_framework.routers import DefaultRouter from module_question.api.views.module_question_viewsets import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet", "import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules') router.register(r'question_modules_statistics',", "rest_framework.routers import DefaultRouter from 
module_question.api.views.module_question_viewsets import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router =", "QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules') router.register(r'question_modules_statistics', QuestionModuleStatisticsViewSet, basename='question-modules-statistic') urlpatterns = router.urls", "from rest_framework.routers import DefaultRouter from module_question.api.views.module_question_viewsets import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router", "from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules') router.register(r'question_modules_statistics', QuestionModuleStatisticsViewSet, basename='question-modules-statistic')", "DefaultRouter from module_question.api.views.module_question_viewsets import QuestionModuleViewSet from module_question.api.views.general_views import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules',", "import QuestionModuleStatisticsViewSet router = DefaultRouter() router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules') router.register(r'question_modules_statistics', QuestionModuleStatisticsViewSet, basename='question-modules-statistic') urlpatterns =" ]
[ "color, dimensions, name, manufacturer, model): self._color = color self._dimensions = dimensions self._name =", "return self._model def get_manufacturer(self): return self._manufacturer def get_name(self): return self._name def set_model(self, model):", "get_manufacturer(self): return self._manufacturer def get_name(self): return self._name def set_model(self, model): self._model = model", "self._manufacturer = manufacturer def set_name(self, name): self._name = name def __str__(self): return str.format(\"Musical", "manufacturer): self._manufacturer = manufacturer def set_name(self, name): self._name = name def __str__(self): return", "= model def set_manufacturer(self, manufacturer): self._manufacturer = manufacturer def set_name(self, name): self._name =", "set_name(self, name): self._name = name def __str__(self): return str.format(\"Musical Instrument: name={}, color={}, dimensions={},", "self._name def set_model(self, model): self._model = model def set_manufacturer(self, manufacturer): self._manufacturer = manufacturer", "self._model = model def play(self): pass def get_model(self): return self._model def get_manufacturer(self): return", "dimensions self._name = name self._manufacturer = manufacturer self._model = model def play(self): pass", "return self._manufacturer def get_name(self): return self._name def set_model(self, model): self._model = model def", "name): self._name = name def __str__(self): return str.format(\"Musical Instrument: name={}, color={}, dimensions={}, manufacturer={},", "def get_name(self): return self._name def set_model(self, model): self._model = model def set_manufacturer(self, manufacturer):", "= manufacturer self._model = model def play(self): pass def get_model(self): return self._model def", "self._dimensions = dimensions self._name = name self._manufacturer = manufacturer self._model = model def", "model): self._color = color self._dimensions = dimensions self._name = name self._manufacturer = manufacturer", "manufacturer 
def set_name(self, name): self._name = name def __str__(self): return str.format(\"Musical Instrument: name={},", "name, manufacturer, model): self._color = color self._dimensions = dimensions self._name = name self._manufacturer", "MusicalInstrument: def __init__(self, color, dimensions, name, manufacturer, model): self._color = color self._dimensions =", "set_manufacturer(self, manufacturer): self._manufacturer = manufacturer def set_name(self, name): self._name = name def __str__(self):", "set_model(self, model): self._model = model def set_manufacturer(self, manufacturer): self._manufacturer = manufacturer def set_name(self,", "def play(self): pass def get_model(self): return self._model def get_manufacturer(self): return self._manufacturer def get_name(self):", "= manufacturer def set_name(self, name): self._name = name def __str__(self): return str.format(\"Musical Instrument:", "get_name(self): return self._name def set_model(self, model): self._model = model def set_manufacturer(self, manufacturer): self._manufacturer", "return self._name def set_model(self, model): self._model = model def set_manufacturer(self, manufacturer): self._manufacturer =", "get_model(self): return self._model def get_manufacturer(self): return self._manufacturer def get_name(self): return self._name def set_model(self,", "def __init__(self, color, dimensions, name, manufacturer, model): self._color = color self._dimensions = dimensions", "color self._dimensions = dimensions self._name = name self._manufacturer = manufacturer self._model = model", "self._model = model def set_manufacturer(self, manufacturer): self._manufacturer = manufacturer def set_name(self, name): self._name", "= name self._manufacturer = manufacturer self._model = model def play(self): pass def get_model(self):", "def set_manufacturer(self, manufacturer): self._manufacturer = manufacturer def set_name(self, name): self._name = name def", "self._name = name def __str__(self): return str.format(\"Musical 
Instrument: name={}, color={}, dimensions={}, manufacturer={}, model={}\",", "manufacturer self._model = model def play(self): pass def get_model(self): return self._model def get_manufacturer(self):", "def get_model(self): return self._model def get_manufacturer(self): return self._manufacturer def get_name(self): return self._name def", "self._manufacturer def get_name(self): return self._name def set_model(self, model): self._model = model def set_manufacturer(self,", "self._manufacturer = manufacturer self._model = model def play(self): pass def get_model(self): return self._model", "pass def get_model(self): return self._model def get_manufacturer(self): return self._manufacturer def get_name(self): return self._name", "def __str__(self): return str.format(\"Musical Instrument: name={}, color={}, dimensions={}, manufacturer={}, model={}\", self._name, self._color, self._dimensions,", "def set_name(self, name): self._name = name def __str__(self): return str.format(\"Musical Instrument: name={}, color={},", "= model def play(self): pass def get_model(self): return self._model def get_manufacturer(self): return self._manufacturer", "manufacturer, model): self._color = color self._dimensions = dimensions self._name = name self._manufacturer =", "= dimensions self._name = name self._manufacturer = manufacturer self._model = model def play(self):", "self._model def get_manufacturer(self): return self._manufacturer def get_name(self): return self._name def set_model(self, model): self._model", "def get_manufacturer(self): return self._manufacturer def get_name(self): return self._name def set_model(self, model): self._model =", "class MusicalInstrument: def __init__(self, color, dimensions, name, manufacturer, model): self._color = color self._dimensions", "model): self._model = model def set_manufacturer(self, manufacturer): self._manufacturer = manufacturer def set_name(self, name):", "model def set_manufacturer(self, manufacturer): self._manufacturer = 
manufacturer def set_name(self, name): self._name = name", "name def __str__(self): return str.format(\"Musical Instrument: name={}, color={}, dimensions={}, manufacturer={}, model={}\", self._name, self._color,", "self._name = name self._manufacturer = manufacturer self._model = model def play(self): pass def", "__str__(self): return str.format(\"Musical Instrument: name={}, color={}, dimensions={}, manufacturer={}, model={}\", self._name, self._color, self._dimensions, self._manufacturer,", "return str.format(\"Musical Instrument: name={}, color={}, dimensions={}, manufacturer={}, model={}\", self._name, self._color, self._dimensions, self._manufacturer, self._model)", "__init__(self, color, dimensions, name, manufacturer, model): self._color = color self._dimensions = dimensions self._name", "self._color = color self._dimensions = dimensions self._name = name self._manufacturer = manufacturer self._model", "dimensions, name, manufacturer, model): self._color = color self._dimensions = dimensions self._name = name", "play(self): pass def get_model(self): return self._model def get_manufacturer(self): return self._manufacturer def get_name(self): return", "def set_model(self, model): self._model = model def set_manufacturer(self, manufacturer): self._manufacturer = manufacturer def", "= name def __str__(self): return str.format(\"Musical Instrument: name={}, color={}, dimensions={}, manufacturer={}, model={}\", self._name,", "= color self._dimensions = dimensions self._name = name self._manufacturer = manufacturer self._model =", "model def play(self): pass def get_model(self): return self._model def get_manufacturer(self): return self._manufacturer def", "name self._manufacturer = manufacturer self._model = model def play(self): pass def get_model(self): return" ]
[ "def login(self, username): password = input(\"Enter your password: \") if password == \"***\":", "self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+ + Car control (example application) + +", "color to \" + color) def get_car_color(self): return self.car.get_color() def get_car_properties(self): return self.car.get_all()", "incorrect\") def logout(self): if self.logged_in == False: print(\"Please login first!\") return 0 self.nav.clean()", "== False: print(\"Permission denied!\") return 0 self.car.set_color(color) print(\"Changed car color to \" +", "False print(\"Logged out successfully\") def set_car_color(self, color): if self.logged_in == False: print(\"Permission denied!\")", "= True else: print(\"[Access denied] password incorrect\") def logout(self): if self.logged_in == False:", "\"***\": print(\"\\n\\r Welcome back, \"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in = True else:", "def set_car_color(self, color): if self.logged_in == False: print(\"Permission denied!\") return 0 self.car.set_color(color) print(\"Changed", "application) + + v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return", "== \"***\": print(\"\\n\\r Welcome back, \"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in = True", "return 0 self.car.set_color(color) print(\"Changed car color to \" + color) def get_car_color(self): return", "False self.nav = Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+ + Car control", "logout(self): if self.logged_in == False: print(\"Please login first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in", "\"\"\" def __init__(self): self.car = Car() self.logged_in = False self.nav = Navigator() self.nav.set_end('>:", "self.logged_in == False: print(\"Permission denied!\") return 0 self.car.set_color(color) 
print(\"Changed car color to \"", "input(\"Enter your password: \") if password == \"***\": print(\"\\n\\r Welcome back, \"+ username", "your password: \") if password == \"***\": print(\"\\n\\r Welcome back, \"+ username +", "return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False print(\"Logged out successfully\") def set_car_color(self, color):", "self.logged_in = True else: print(\"[Access denied] password incorrect\") def logout(self): if self.logged_in ==", "print(\"Please login first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False print(\"Logged out successfully\")", "= input(\"Enter your password: \") if password == \"***\": print(\"\\n\\r Welcome back, \"+", "self.nav = Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+ + Car control (example", "guest!\") def get_nav(self): return self.nav.getLocation() def login(self, username): password = input(\"Enter your password:", "Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+ + Car control (example application) +", "False: print(\"Please login first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False print(\"Logged out", "v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation() def login(self,", "= Car() self.logged_in = False self.nav = Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\"", "print(\"Logged out successfully\") def set_car_color(self, color): if self.logged_in == False: print(\"Permission denied!\") return", "0 self.car.set_color(color) print(\"Changed car color to \" + color) def get_car_color(self): return self.car.get_color()", "def show_motd(self): print(\"\"\" +--------------------------------------+ + Car control (example application) + + v1.0 +", "self.nav.clean() 
self.nav.navigate('guest') self.logged_in = False print(\"Logged out successfully\") def set_car_color(self, color): if self.logged_in", "tests.car import Car class CarController: \"\"\" Example class \"\"\" def __init__(self): self.car =", "get_nav(self): return self.nav.getLocation() def login(self, username): password = input(\"Enter your password: \") if", "def logout(self): if self.logged_in == False: print(\"Please login first!\") return 0 self.nav.clean() self.nav.navigate('guest')", "denied!\") return 0 self.car.set_color(color) print(\"Changed car color to \" + color) def get_car_color(self):", "self.car = Car() self.logged_in = False self.nav = Navigator() self.nav.set_end('>: ') def show_motd(self):", "import Navigator from tests.car import Car class CarController: \"\"\" Example class \"\"\" def", "out successfully\") def set_car_color(self, color): if self.logged_in == False: print(\"Permission denied!\") return 0", "from tests.car import Car class CarController: \"\"\" Example class \"\"\" def __init__(self): self.car", "\"\\n\\r\") self.nav.navigate(username) self.logged_in = True else: print(\"[Access denied] password incorrect\") def logout(self): if", "password: \") if password == \"***\": print(\"\\n\\r Welcome back, \"+ username + \"\\n\\r\")", "control (example application) + + v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def", "self.nav.navigate('guest') self.logged_in = False print(\"Logged out successfully\") def set_car_color(self, color): if self.logged_in ==", "\"\"\" Example class \"\"\" def __init__(self): self.car = Car() self.logged_in = False self.nav", "password = input(\"Enter your password: \") if password == \"***\": print(\"\\n\\r Welcome back,", "print(\"[Access denied] password incorrect\") def logout(self): if self.logged_in == False: print(\"Please login first!\")", "def __init__(self): self.car = Car() self.logged_in = False self.nav = Navigator() 
self.nav.set_end('>: ')", "') def show_motd(self): print(\"\"\" +--------------------------------------+ + Car control (example application) + + v1.0", "def get_nav(self): return self.nav.getLocation() def login(self, username): password = input(\"Enter your password: \")", "class \"\"\" def __init__(self): self.car = Car() self.logged_in = False self.nav = Navigator()", "+ + v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation()", "= False print(\"Logged out successfully\") def set_car_color(self, color): if self.logged_in == False: print(\"Permission", "print(\"\"\" +--------------------------------------+ + Car control (example application) + + v1.0 + +--------------------------------------+ \"\"\")", "password incorrect\") def logout(self): if self.logged_in == False: print(\"Please login first!\") return 0", "Welcome back, \"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in = True else: print(\"[Access denied]", "show_motd(self): print(\"\"\" +--------------------------------------+ + Car control (example application) + + v1.0 + +--------------------------------------+", "self.logged_in == False: print(\"Please login first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False", "self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation() def login(self, username): password = input(\"Enter", "self.nav.navigate(username) self.logged_in = True else: print(\"[Access denied] password incorrect\") def logout(self): if self.logged_in", "<gh_stars>0 from core.Navigator import Navigator from tests.car import Car class CarController: \"\"\" Example", "if password == \"***\": print(\"\\n\\r Welcome back, \"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in", "denied] password incorrect\") def logout(self): if self.logged_in == False: print(\"Please login first!\") 
return", "False: print(\"Permission denied!\") return 0 self.car.set_color(color) print(\"Changed car color to \" + color)", "+--------------------------------------+ + Car control (example application) + + v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest')", "class CarController: \"\"\" Example class \"\"\" def __init__(self): self.car = Car() self.logged_in =", "Navigator from tests.car import Car class CarController: \"\"\" Example class \"\"\" def __init__(self):", "True else: print(\"[Access denied] password incorrect\") def logout(self): if self.logged_in == False: print(\"Please", "car color to \" + color) def get_car_color(self): return self.car.get_color() def get_car_properties(self): return", "__init__(self): self.car = Car() self.logged_in = False self.nav = Navigator() self.nav.set_end('>: ') def", "successfully\") def set_car_color(self, color): if self.logged_in == False: print(\"Permission denied!\") return 0 self.car.set_color(color)", "Car class CarController: \"\"\" Example class \"\"\" def __init__(self): self.car = Car() self.logged_in", "first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False print(\"Logged out successfully\") def set_car_color(self,", "username): password = input(\"Enter your password: \") if password == \"***\": print(\"\\n\\r Welcome", "Car() self.logged_in = False self.nav = Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+", "+ +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation() def login(self, username):", "= Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+ + Car control (example application)", "\") if password == \"***\": print(\"\\n\\r Welcome back, \"+ username + \"\\n\\r\") self.nav.navigate(username)", "print(\"Changed car color to \" + 
color) def get_car_color(self): return self.car.get_color() def get_car_properties(self):", "core.Navigator import Navigator from tests.car import Car class CarController: \"\"\" Example class \"\"\"", "self.car.set_color(color) print(\"Changed car color to \" + color) def get_car_color(self): return self.car.get_color() def", "+ \"\\n\\r\") self.nav.navigate(username) self.logged_in = True else: print(\"[Access denied] password incorrect\") def logout(self):", "else: print(\"[Access denied] password incorrect\") def logout(self): if self.logged_in == False: print(\"Please login", "if self.logged_in == False: print(\"Please login first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in =", "CarController: \"\"\" Example class \"\"\" def __init__(self): self.car = Car() self.logged_in = False", "\"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation() def login(self, username): password =", "== False: print(\"Please login first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False print(\"Logged", "password == \"***\": print(\"\\n\\r Welcome back, \"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in =", "print(\"\\n\\r Welcome back, \"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in = True else: print(\"[Access", "from core.Navigator import Navigator from tests.car import Car class CarController: \"\"\" Example class", "self.logged_in = False self.nav = Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+ +", "+ v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation() def", "Car control (example application) + + v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\")", "username + \"\\n\\r\") self.nav.navigate(username) 
self.logged_in = True else: print(\"[Access denied] password incorrect\") def", "self.logged_in = False print(\"Logged out successfully\") def set_car_color(self, color): if self.logged_in == False:", "set_car_color(self, color): if self.logged_in == False: print(\"Permission denied!\") return 0 self.car.set_color(color) print(\"Changed car", "Example class \"\"\" def __init__(self): self.car = Car() self.logged_in = False self.nav =", "login first!\") return 0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False print(\"Logged out successfully\") def", "print(\"Permission denied!\") return 0 self.car.set_color(color) print(\"Changed car color to \" + color) def", "back, \"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in = True else: print(\"[Access denied] password", "self.nav.getLocation() def login(self, username): password = input(\"Enter your password: \") if password ==", "import Car class CarController: \"\"\" Example class \"\"\" def __init__(self): self.car = Car()", "print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation() def login(self, username): password = input(\"Enter your", "+ Car control (example application) + + v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome,", "(example application) + + v1.0 + +--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self):", "+--------------------------------------+ \"\"\") self.nav.navigate('guest') print(\"Welcome, guest!\") def get_nav(self): return self.nav.getLocation() def login(self, username): password", "return self.nav.getLocation() def login(self, username): password = input(\"Enter your password: \") if password", "\"+ username + \"\\n\\r\") self.nav.navigate(username) self.logged_in = True else: print(\"[Access denied] password incorrect\")", "login(self, username): password = input(\"Enter your password: \") if password == \"***\": 
print(\"\\n\\r", "color): if self.logged_in == False: print(\"Permission denied!\") return 0 self.car.set_color(color) print(\"Changed car color", "if self.logged_in == False: print(\"Permission denied!\") return 0 self.car.set_color(color) print(\"Changed car color to", "= False self.nav = Navigator() self.nav.set_end('>: ') def show_motd(self): print(\"\"\" +--------------------------------------+ + Car", "0 self.nav.clean() self.nav.navigate('guest') self.logged_in = False print(\"Logged out successfully\") def set_car_color(self, color): if" ]
[]
[ "= c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the", "subject_id : str the id of the subject sub_list : list of dict", "subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template space", "already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says: FNIRT (for anatomical ' \\", "out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp,", "skull stripping as in prep_workflow if 'brain_mask' in session.keys() and session['brain_mask'] and \\", "merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node =", "because # FNIRT requires an input with the skull still on if already_skullstripped", "= '_'.join([subject_id, unique_id]) # Functional Ingress Workflow # add optional flag workflow, diff,", "\\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) )", "standard template # TODO add session information in node name for num_reg_strat, reg_strat", "in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'),", "iterfield=['reference', 'in_matrix_file']) 
fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file')", "'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list,", "function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag", "= 'The selected ANTS interpolation method may be in the list of values:", "create the input for the longitudinal algorithm for session in sub_list: unique_id =", "= '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'),", "fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat, strat", "containing the information of the pipeline config. 
(Same as for prep_workflow) Returns -------", "\"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name =", "from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import (", "\"\"\" new_strat = strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node,", "of strat with the resource pool updated strat_nodes_list_list : list a list of", "}, override=True) elif type == 'list': for index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(),", "\"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s' %", "= 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template,", "c.regWithSkull: # get the skull-stripped anatomical from resource pool node, out_file = strat['functional_preprocessed_median']", "None strat = Strategy() strat_list = [strat] node_suffix = '_'.join([subject_id, unique_id]) # Functional", "connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here we", "# 1 func alone works # 2 anat + func works, pass anat", "skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if not any(o in config.skullstrip_option for o", "= connect_func_to_anat_init_reg(workflow, strat_list, c) # Func -> T1 Registration (BBREG) workflow, strat_list =", "\\ 'found. Check this path and try again.' 
% ( creds_path, subject_id) raise", "index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm,", "strat_list['func_default'] strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func,", "already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name,", "= strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name)", "workflow.connect(node, rsc_name, ds, rsc_key) # individual minimal preprocessing items for i in range(len(strat_nodes_list)):", "skullstripping strategies, # a list of sessions within each strategy list # TODO", "(ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain')", "this path and try ' \\ 'again.' 
% (creds_path, subject_id, unique_id) raise Exception(err_msg)", "name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node will generate the longitudinal template (the functions", "if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) skull_list.append(filepath)", "sessions within each strategy list # TODO rename and reorganize dict # TODO", "skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return reg_strat_list # strat_nodes_list_list # for func wf?", "1)) # the in{}.format take i+1 because the Merge nodes inputs starts at", "parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped is #", "we have all the anat_preproc set up for every session of the subject", "Image Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion Correction", "out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file,", "is preferred if 1 in c.regWithSkull: if already_skullstripped == 1: err_msg = '\\n\\n[!]", "registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped is", "sub_dict: if 'func' in sub_dict: func_paths_dict = sub_dict['func'] else: func_paths_dict = sub_dict['rest'] unique_id", "the resource pool strat_name : str name of the strategy strat_nodes_list_list : list", "'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register tissue segmentation from longitudinal template space to", "strat_list # Here we have all the func_preproc set up for every session", "\"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if not any(o in config.skullstrip_option for o in [\"AFNI\",", "] for key_type, key in template_keys: if 
isinstance(getattr(config, key), str): node = create_check_for_s3_node(", "fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list =", "fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file = reg_strat[resource]", "1)) workflow.run() return reg_strat_list # strat_nodes_list_list # for func wf? # TODO check:", "for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in Outputs.any:", "Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list def create_datasink(datasink_name, config,", "strat_nodes_list_list def pick_map(file_list, index, file_type): if isinstance(file_list, list): if len(file_list) == 1: file_list", "{} workflow_name = 'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution']", "'_'.join([subject_id, unique_id]) # Functional Ingress Workflow # add optional flag workflow, diff, blip,", "list ses_list_strat_list # a list of skullstripping strategies, # a list of sessions", "of dict this is a list of sessions for one subject and each", "key, node) strat = Strategy() strat_list = [] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc", "# workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file')", "rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) # individual minimal preprocessing items", 
"'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list", "registration items for num_strat, strat in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix =", "config dictionaries to be updated during the preprocessing # creds_list = [] session_id_list", "path to creds file creds_path = '' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path", "create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file = strat['functional_preprocessed_median']", "\"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating", "'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file = reg_strat['template_brain_for_anat']", "config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative,", "(\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ]", "workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference files node, out_file = 
strat['template_brain_for_func_preproc']", "CPAC.utils.interfaces.function import Function import CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat,", "skullstripping is imprecise # registration with skull is preferred if 1 in c.regWithSkull:", "'cm') }) # Here we have the same strategies for the skull stripping", "seg_apply_warp(strat_name, resource, type='str', file_type=None): if type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference',", "the reference file node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node,", "logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if", "the subject # TODO create a list of list ses_list_strat_list # a list", "node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node, out_file", "anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference file", "ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped is # reported", "skull-on anatomical from resource pool node, out_file = strat['anatomical_skull_leaf'] # pass the anatomical", "not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]:", "workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({", 
"def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat : Strategy the", "calculate it, like the multivariate template from ANTS # It would just require", "rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node,", "config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node to", "in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path'] if creds_path and", "try ' \\ 'again.' % (creds_path, subject_id, unique_id) raise Exception(err_msg) else: input_creds_path =", "of strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc,", "for session in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path'] if", "as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name", "strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc')", "workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion Correction workflow, strat_list =", "anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, 
rsc_name, brain_merge_node, 'in{}'.format(i + 1)) # the in{}.format", "templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name)", "anat_preproc, 'inputspec.anat') tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain':", "sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters ----------", "workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') #", "c.regOption and 0 in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') ==", "strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': (", "strat_list, c, strat_name) return strat_list def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\"", "workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name)", "Here we have the same strategies for the skull stripping as in prep_workflow", "var_shrink_fac=config.skullstrip_var_shrink_fac, 
shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull,", "'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask',", "(Same as for prep_workflow) Returns ------- strat_list_ses_list : list of list a list", "map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template space rsc_key =", "[] try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0] if 'FSL' in", "try again.' 
% ( creds_path, subject_id) raise Exception(err_msg) else: input_creds_path = None except", "\\ 'in your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get", "brain_list.sort() skull_list.sort() return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub,", "connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource,", "-*- coding: utf-8 -*- import os import copy import time import shutil from", "requires an input with the skull still on # TODO ASH normalize w", "creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for s3 write access s3_write_access =", "(flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try:", "output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' +", "ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni,", "= strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_func_preproc']", "subject_specific_template ) from CPAC.utils 
import Strategy, find_files, function, Outputs from CPAC.utils.utils import (", "in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni =", "return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ---------- subject_id : string", "strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain')", "and session['brain_mask'] and \\ session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id)", "the different skull stripping strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name,", "node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm']", "= [] for dirpath, dirnames, filenames in os.walk(working_directory): for f in filenames: if", "thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list =", "in their pipe config, # sinc will be default option if not hasattr(c,", "strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node,", "it requires very high # quality skullstripping. 
If skullstripping is imprecise # registration", "logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}')", "file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask", "] # update resampled template to resource pool for resolution, template, template_name, tag", "workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial') node,", "'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': (", "# Func -> T1 Registration (Initial Linear Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow,", "new_strat_list ''' # Func -> T1 Registration (Initial Linear Reg) workflow, strat_list, diff_complete", "pe import nipype.interfaces.afni as afni import nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio", "(creds_path, subject_id, unique_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path =", "} # strat_nodes_list = strat_list['func_default'] strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func,", "out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, 
fnirt_reg_func_mni,", "strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference file node, out_file =", "configuration object containing the information of the pipeline config. (Same as for prep_workflow)", "if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp(", "selected ' \\ 'to run anatomical registration with ' \\ 'the skull, but", "out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file,", "out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif", "err_msg = 'Credentials path: \"%s\" for subject \"%s\" session \"%s\" ' \\ 'was", "= strat_list # Here we have all the func_preproc set up for every", "session of the subject # TODO create a list of list ses_list_strat_list #", "new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc,", "---------- subject_id : string the id of the subject sub_list : list of", "creds_path, subject_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None", "= strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni,", "new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): if 'ANTS' in c.regOption", "the same strategies for the skull stripping as in prep_workflow 
if 'brain_mask' in", "strategies; within each strategy, a list of sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker')", "rename and reorganize dict # TODO update strat name strat_list_ses_list = {} strat_list_ses_list['func_default']", "dict this is a list of sessions for one subject and each session", "sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if creds_path and 'none'", "parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the transform with the skullstripped is #", "out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'),", "This node will generate the longitudinal template (the functions are in longitudinal_preproc) #", "config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask,", "pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file,", "path: \"%s\" for subject \"%s\" session \"%s\" ' \\ 'was not found. 
Check", "= [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\",", "subject strat_list : list of list first level strategy, second level session config", "name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str',", "config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func,", "resource pool node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the", "( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils", "strat in enumerate(strat_list): if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni", "= connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO add other", "workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_anat']", "register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, 
strat_name) # Register T1 to the standard template #", "# pass the reference file node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull')", "strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name,", "the preprocessing # creds_list = [] session_id_list = [] # Loop over the", "= strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork()", "'moving_image') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm']", "first level strategy, second level session config : configuration a configuration object containing", "id of the subject strat_list : list of list first level strategy, second", "num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation", "strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat)", "\\ 'ANTS for registration or provide input ' \\ 'images that have not", "node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull'] workflow.connect(node,", "strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only", "level session config : configuration a configuration object containing the information of the", "resampled_template.inputs.tag = tag 
strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) # loop over the different", "strat_nodes_list_list # for func wf? # TODO check: # 1 func alone works", "the multivariate template from ANTS # It would just require to change it", "config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template", "strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') #", "workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node,", "num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the", "= strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull')", "fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm':", "'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni,", "\"trilinear\", \"sinc\", \"spline\"' raise 
Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node,", "the longitudinal algorithm for session in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path", "pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac,", "name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp,", "(BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func -> T1/EPI Template", "reference file node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get", "= [i.get_name()[0].split('_')[-1] for i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool,", "fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = 
pe.Node(Function(input_names=['in_list1',", "'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name", "the longitudinal template (the functions are in longitudinal_preproc) # Later other algorithms could", "the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference", "'images that have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni", "strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified", "node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(", "from resource pool node, out_file = strat['functional_preprocessed_median'] # pass the anatomical to the", "include linear xfm? 
# node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat')", "= create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file =", "be connected and added to the resource pool strat_name : str name of", "node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node,", "= create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup,", "'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list =", "file creds_path = '' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if", "path for output if it exists try: # Get path to creds file", "+ node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in", "i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, 
thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node,", "from indi_aws import aws_utils from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import DataSink from", "# TODO add other SS methods if \"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni'", "to run once for each subject already_skullstripped = c.already_skullstripped[0] if already_skullstripped == 2:", "= pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node will generate the longitudinal", "(ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list", "workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None): if type == 'str':", "the one given to prep_workflow config : configuration a configuration object containing the", "fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out',", "reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') ==", "anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat, strat_nodes_list_list", "stripping as in prep_workflow if 'brain_mask' in session.keys() and session['brain_mask'] and \\ session['brain_mask'].lower()", 
"fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain')", "= strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni,", "input node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node,", "been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' %", "of the pipeline config. (Same as for prep_workflow) Returns ------- strat_list_ses_list : list", "raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None strat =", "strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node,", "workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in c.regOption: # strat = strat.fork()", "fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file =", "to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference file node,", "from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import Function import CPAC from CPAC.registration import", "the user from running FNIRT if they are # providing already-skullstripped inputs. 
this", "strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in c.regOption: # strat =", "new_strat = strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key", "import concat_list from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import Function import CPAC from", "# pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain')", "'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list,", "elif type == 'list': for index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference',", "CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard )", "anat = session['brain_mask'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' )", "range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list',", "= 'There was an error processing credentials or ' \\ 'accessing the S3", "out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = 
strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file,", "'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull',", "'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file,", "workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory : string a path", "'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the", "or... if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): # this is", "dl_dir = config.workingDirectory, img_type = 'anat' ) skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s'", "fmap_rp_list = connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix) # Functional Initial Prep", "main longitudinal workflow Returns ------- new_strat : Strategy the fork of strat with", "config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name)", "resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node =", "reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, 
ants_apply_warp, 'rigid')", "rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds =", "fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True) elif type == 'list': for index", "'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard':", "get_tr ) logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): brain_mask =", "as for prep_workflow) Returns ------- strat_list_ses_list : list of list a list of", "connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix) # Functional Initial Prep Workflow workflow,", "not s3_write_access: raise Exception('Not able to write to bucket!') except Exception as e:", "'found. Check this path and try again.' % ( creds_path, subject_id) raise Exception(err_msg)", "session['brain_mask'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) skullstrip_method =", "mass of the standard template to align the images with it. 
template_center_of_mass =", "# Functional Image Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix) #", "out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') #", "subject already_skullstripped = c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped", "creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc,", "strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat')", "rsc_key in Outputs.any: node, rsc_name = strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config,", "workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node, config, workflow, strat_init, 'default'", "reported to be better, but it requires very high # quality skullstripping. If", "file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask", "anatomical-to-MNI registration instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni", "Parameters ---------- subject_id : str the id of the subject sub_list : list", "workflow.run() return reg_strat_list # strat_nodes_list_list # for func wf? 
# TODO check: #", "None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm':", "bucket. Check and try again.\\n' \\ 'Error: %s' % e raise Exception(err_msg) if", "in{}.format take i+1 because the Merge nodes inputs starts at 1 rsc_key =", "doesn't have anatRegFSLinterpolation in their pipe config, # sinc will be default option", "node_suffix) # Functional Image Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix)", "ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # Inserting Segmentation Preprocessing", "= input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) skullstrip_method = 'mask' preproc_wf_name", "(config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat,", "node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL FNIRT", "have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg(", "' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here we have all the", "session_id_list[i], 
'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key]", "out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in c.regOption: # strat = strat.fork() #", "(config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative,", "'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list: if 'func' in sub_dict", "Returns ------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data = False # TODO", "ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file,", "'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = []", "method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust,", "reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference',", "ds.inputs.base_directory = 
config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s'", "default to LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not", "= 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') })", "config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped", "strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'),", "Returns ------- brain_list : list a list of func preprocessed brain skull_list :", "= reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map,", "fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file')", "out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({", "quality skullstripping. 
If skullstripping is imprecise # registration with skull is preferred if", "for i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list )", "= c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm':", "'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list = [strat_init_new] # only need to run", "'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') },", "key), str): node = create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config,", "for the skull stripping as in prep_workflow if 'brain_mask' in session.keys() and session['brain_mask']", "node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1 to longitudinal template", "strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff')", "from resource pool node, out_file = strat['anatomical_skull_leaf'] # pass the anatomical to the", "if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: FNIRT (for anatomical '", "'The selected FSL interpolation method may be in the list of values: \"trilinear\",", "method may be in the list of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg)", "= Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'),", "fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg(", ") from CPAC.utils import Strategy, find_files, function, Outputs from CPAC.utils.utils import ( check_config_resources,", "strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard registration items for", "'inputspec.reference_brain') # skull input node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') #", "will be default option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation", "of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp =", "strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc',", "sub_dict in sub_list: if 'func' in sub_dict or 'rest' in sub_dict: if 'func'", "= os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for 
subject \"%s\" session \"%s\"", "T1 in longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix,", "'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file = strat['anatomical_brain'] workflow.connect(node,", "num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain')", "list of sessions for one subject and each session if the same dictionary", "not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation method may", "new_strat_list # [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS) new_strat_list = []", "rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i + 1)) # the in{}.format take", "cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow,", "num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file'])", "during the preprocessing # creds_list = [] session_id_list = [] # Loop over", "in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation method may be", ": configuration a configuration object containing the information of the pipeline config. 
(Same", "in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in Outputs.any: node, rsc_name", "list first level strategy, second level session config : configuration a configuration object", "for output if it exists try: # Get path to creds file creds_path", "\"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method':", "}) strat_list += new_strat_list ''' # Func -> T1 Registration (Initial Linear Reg)", "c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm':", "with the skull still on # TODO ASH normalize w schema validation to", "the information of the pipeline config. 
(Same as for prep_workflow) Returns ------- strat_list_ses_list", "reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine')", "f in filenames: if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in f: filepath =", "c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new = strat_init.fork()", "register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node,", "the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference", "Inserting Segmentation Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list", "# pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get", "not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d'", "'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list: if 'func' in sub_dict or", "= 1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list = [] # either", "strategy list # TODO rename and reorganize dict # TODO update strat name", "resource pool node, out_file = strat['motion_correct_median'] # pass the anatomical to the workflow", "connect_func_init(workflow, strat_list, config, node_suffix) # Functional Image Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow,", 
"ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key)", "subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config subject_id session_id strat_name map_node_iterfield", "if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node,", "create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have anatRegFSLinterpolation in", "Exception(err) # Here we have all the anat_preproc set up for every session", "anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask':", "if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation", "'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index", "to longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix, config,", "remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc,", "'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node, 
out_file, pick_seg_map, 'file_list')", ") # if someone doesn't have anatRegANTSinterpolation in their pipe config, # it", "= {} # list of the data config dictionaries to be updated during", "interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list')", "reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm']", "You selected ' \\ 'to run anatomical registration with ' \\ 'the skull,", "out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference files node,", "c.funcRegANTSinterpolation # calculating the transform with the skullstripped is # reported to be", "specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat", "'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for seg", "strat_list.append(new_strat) else: # TODO add other SS methods if \"AFNI\" in config.skullstrip_option: skullstrip_method", "you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here we have all", "num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not", "+ str(subject_id) workflow = 
pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp',", "# new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard':", "'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) #", "'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni,", "in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on list", "brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id, anat = session['brain_mask'],", "'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') },", "their pipe config, # sinc will be default option if not hasattr(c, 'funcRegFSLinterpolation'):", "if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation", "check_config_resources, check_system_deps, get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow') def 
register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init,", "create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp", "= c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0] if 'FSL' in c.regOption and 0", "'out_file') }) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine',", "rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1))", "(ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list ''' # Func ->", "c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation method", "0 in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni", "= strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'),", "'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list = [strat_init_new] # only", "strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, 
cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node,", "node_suffix = '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id,", "path to the working directory Returns ------- brain_list : list a list of", "\\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone doesn't", "a list of strategies; within each strategy, a list of sessions \"\"\" datasink", "hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg", "'brain_template', brain_mask, 'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'),", "'inputspec.reference_brain') # skull input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') #", ": list a list of strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node, out_key =", "node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node,", "list a list of strategies; within each strategy, a list of sessions \"\"\"", "config, sub_dict, subject_id, input_creds_path, node_suffix) # Functional Initial Prep Workflow workflow, strat_list =", "node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') 
else: node, out_file = strat['functional_preprocessed_median']", "as e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an error processing credentials or", "file_list[0] for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def anat_longitudinal_wf(subject_id,", "strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO add other SS methods", "output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource,", "KeyError: input_creds_path = None template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"),", "run once for each subject already_skullstripped = c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped", "input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node,", "isinstance(getattr(config, key), str): node = create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory )", "out_file = reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file')", "# longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix, config, subject_id,", "import nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio from nipype.interfaces.utility import Merge, IdentityInterface", "out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 
'inputspec.ref_mask') # assign", "'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL", "import ( check_config_resources, check_system_deps, get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c,", "T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow, strat_list def", "create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import ( connect_func_ingress", "workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name)", "strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped:", "assign the FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config =", "= connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method", "skull still on # TODO ASH normalize w schema validation to bool if", "fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node, out_file = strat['anatomical_brain'] workflow.connect(node,", "pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm,", "= create_datasink(rsc_key 
+ rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key", "sub_list, config): \"\"\" Parameters ---------- subject_id : string the id of the subject", "strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list", "= create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file,", "1 in c.regWithSkull: if already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says: You", "string a path to the working directory Returns ------- brain_list : list a", "encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data = False # TODO Enforce value with schema", "connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from CPAC.utils import Strategy, find_files,", "blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method +", "o in [\"AFNI\", \"BET\"]): err = '\\n\\n[!] 
C-PAC says: Your skull-stripping ' \\", "update resampled template to resource pool for resolution, template, template_name, tag in templates_for_resampling:", "template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file')", "in c.regOption: for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name,", "skull input node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference", "and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat),", "# Distortion Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list, node_suffix)", "(ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list", "Linear Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) # Func -> T1", "except: encrypt_data = False # TODO Enforce value with schema validation # Extract", "workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') == 'ANTS':", "isinstance(file_list, list): if len(file_list) == 1: file_list = file_list[0] for file_name in file_list:", "else: node, out_file = 
strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference", "pool node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference", "merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory : string a path to the working directory", ": list a list of strat_nodes_list workflow: Workflow main longitudinal workflow Returns -------", "node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient skull-on", "'anatomical_to_longitudinal_template') # longitudinal to standard registration items for num_strat, strat in enumerate(reg_strat_list): for", "'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'),", "override=True) elif type == 'list': for index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}',", "ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni,", "for func wf? 
# TODO check: # 1 func alone works # 2", "file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key, node) strat = Strategy() strat_list", "Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the transform with", "default option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in", "def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory : string a path to the working", "'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi',", "# pass the reference file node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull')", "strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni,", "workflow) strat_list.append(new_strat) else: # TODO add other SS methods if \"AFNI\" in config.skullstrip_option:", "resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc", "num_strat, strat in enumerate(strat_list): if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL':", "unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id, anat = session['brain_mask'], creds_path = input_creds_path, dl_dir =", "in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list 
def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory :", "for num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name),", "# Functional Ingress Workflow # add optional flag workflow, diff, blip, fmap_rp_list =", "out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({", "# get the skull-stripped anatomical from resource pool node, out_file = strat['functional_preprocessed_median'] #", "'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError:", "dict is a session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped =", "config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here we have all", "new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif", "interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This", "= pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node, out_file,", "1: err_msg = '\\n\\n[!] 
CPAC says: You selected ' \\ 'to run anatomical", "reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat,", "if rsc_key in Outputs.any: node, rsc_name = strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix,", "workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict", "+ str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) }", "already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name,", "node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat", "and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat),", "elif already_skullstripped == 3: already_skullstripped = 1 resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'],", "strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) # individual minimal preprocessing items for i in", "anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, 
touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final,", "input with the skull still on if already_skullstripped == 1: err_msg = '\\n\\n[!]", "= create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') #", "config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge,", "list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp", "os.walk(working_directory): for f in filenames: if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in f:", "( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from CPAC.utils import Strategy,", "registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped is", "strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) 
strat.update_resource_pool({", "in enumerate(strat_list): # this is to prevent the user from running FNIRT if", "strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) # Merge node to feed the anat_preproc outputs", "hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg", "# strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm':", "add optional flag workflow, diff, blip, fmap_rp_list = connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id,", "= '' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): #", "node name for num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp =", "in sub_dict or 'rest' in sub_dict: if 'func' in sub_dict: func_paths_dict = sub_dict['func']", "Your skull-stripping ' \\ 'method options setting does not include either' \\ '", "for subject \"%s\" session \"%s\" ' \\ 'was not found. 
Check this path", "node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node,", "out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node, out_file =", "if 'FSL' in c.regOption and \\ strat.get('registration_method') != 'ANTS': # this is to", "1 resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution =", "= '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id, anat", "enumerate(strat_list): # this is to prevent the user from running FNIRT if they", "workflow Returns ------- new_strat : Strategy the fork of strat with the resource", "as the one given to prep_workflow config : configuration a configuration object containing", "seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on", "aws_utils from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import Function", "one subject and each session if the same dictionary as the one given", "Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the transform with the skullstripped", "try: # Get path to creds file creds_path = '' if config.awsOutputBucketCredentials: creds_path", "anat_preproc : Workflow the anat_preproc workflow node to be connected and added to", "touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, 
avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey,", "'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni,", "reg_ants_skull=c.regWithSkull ) # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform", "a configuration object containing the information of the pipeline config. Returns ------- None", "out_file, fsl_apply_warp, 'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard':", "strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list", "Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion Correction workflow, strat_list", "}) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node", "FNIRT requires an input with the skull still on if already_skullstripped == 1:", "in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = 
os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\"", "(ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': (", "Ingress Workflow # add optional flag workflow, diff, blip, fmap_rp_list = connect_func_ingress(workflow, strat_list,", "strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain')", "unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if creds_path and 'none' not", "= c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm':", "'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) #", "pass the reference file node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else:", "s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise Exception('Not able to write", "strat : Strategy the strategy object you want to fork anat_preproc : Workflow", "have anatRegANTSinterpolation in their pipe config, # it will default to LanczosWindowedSinc if", "'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 
'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list =", "not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else: err_msg = 'Credentials path:", "= pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node,", "of sessions within each strategy list # TODO rename and reorganize dict #", "'.nii.gz' in f: filepath = os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and", "node) strat = Strategy() strat_list = [] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc =", "ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat : Strategy", "check_config_resources(c) new_strat_list = [] # either run FSL anatomical-to-MNI registration, or... 
if 'FSL'", "Later other algorithms could be added to calculate it, like the multivariate template", "'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return", "setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The", "workflow: Workflow main longitudinal workflow Returns ------- new_strat : Strategy the fork of", "(strat_name, num_strat) ) # if someone doesn't have anatRegFSLinterpolation in their pipe config,", "creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else: err_msg", "[] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\"", "list a list of func preprocessed brain skull_list : list a list of", "map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard registration items for num_strat,", "% (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc')", "items for num_strat, strat in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard',", "brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\", ") from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from CPAC.utils import Strategy, 
find_files, function,", "ANTS anatomical-to-MNI registration instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL':", "AttributeError: fsl_linear_reg_only = [0] if 'FSL' in c.regOption and 0 in fsl_linear_reg_only: for", "with the skullstripped is # reported to be better, but it requires very", "to ' \\ 'use already-skullstripped images as ' \\ 'your inputs. This can", "strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain')", ") from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation )", "ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register tissue", "import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction )", "------- strat_list_ses_list : list of list a list of strategies; within each strategy,", "'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for", "# Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file,", "= sub_dict['func'] else: func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path =", "workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the skullstripped anatomical from resource pool", "+ \"_skullstrip\", strat_nodes_list_list, 
workflow) strat_list.append(new_strat) if not any(o in config.skullstrip_option for o in", "len(file_list) == 1: file_list = file_list[0] for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return", "sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path'] if creds_path and 'none'", "\\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') })", "strat name strat_list_ses_list = {} strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items():", "\"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True) elif type == 'list': for", "pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_'", "strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni,", "% (config.pipelineName, strat_name), subject_id, session_id ) return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list,", "connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters", "config.workingDirectory session_id_list = [] ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_' + str(subject_id) 
workflow", "pool node, out_file = strat['motion_correct_median'] # pass the anatomical to the workflow workflow.connect(node,", "registration (FNIRT/ANTS) new_strat_list = [] if 1 in c.runVMHC and 1 in getattr(c,", "[strat_init_new] # only need to run once for each subject already_skullstripped = c.already_skullstripped[0]", "( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import (", "node, out_file = strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node, out_file,", "' \\ 'in your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception #", "same strategies for the skull stripping as in prep_workflow if 'brain_mask' in session.keys()", "node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal", "= [strat] node_suffix = '_'.join([subject_id, unique_id]) # Functional Ingress Workflow # add optional", "interpolation method may be in the list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise", "out_file = strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni,", "# get the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] workflow.connect(node,", "else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1')", "' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat)", "if 'FSL' in c.regOption and 0 in fsl_linear_reg_only: for num_strat, strat in 
enumerate(strat_list):", "config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config subject_id session_id strat_name", "over the sessions to create the input for the longitudinal algorithm for session", "= pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node,", "'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name) # Register T1", "out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config", "c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni,", "(resampled_template, 'resampled_template') }) # loop over the different skull stripping strategies for strat_name,", "session in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path'] if creds_path", "func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ---------- subject_id : string the id of the", "'outputspec.normalized_output_brain') }) strat_list += new_strat_list # [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration", ": string the id of the subject strat_list : list of list first", "= connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if not any(o", "radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, 
surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(", "pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node, out_file,", "sub_dict['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path)", "'\\n\\n[!] C-PAC says: Your skull-stripping ' \\ 'method options setting does not include", "connected and added to the resource pool strat_name : str name of the", "would just require to change it here. template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix", "= 'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean,", "Functional Ingress Workflow # add optional flag workflow, diff, blip, fmap_rp_list = connect_func_ingress(workflow,", "of the subject strat_list : list of list first level strategy, second level", "Outputs from CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow')", "or run ANTS anatomical-to-MNI registration instead if 'ANTS' in c.regOption and \\ strat.get('registration_method')", "registration instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni =", "in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni =", "reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, 
fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = strat['func_longitudinal_to_mni_linear_xfm']", "strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask',", "workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference file node, out_file =", "seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource pool #", "'images that have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni", "# pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass", "avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol )", "(ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list #", "for num_strat, strat in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name,", "brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', 
brain_mask, 'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node,", "(fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid',", "config, workflow, strat_init, strat_name) # Register T1 to the standard template # TODO", "template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node, config, workflow, strat_init, 'default' ) workflow.run()", "'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each participant we have a list of", "have already been ' \\ 'skull-stripped.\\n\\nEither switch to using ' \\ 'ANTS for", "strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the skullstripped anatomical from resource", "' \\ 'use already-skullstripped images as ' \\ 'your inputs. 
This can be", "is a session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0", "in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) # Merge node to feed the anat_preproc", "'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node,", "strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix) # Functional Initial Prep Workflow workflow, strat_list", "strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni,", "with the skull still on if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC", "connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution,", "name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp,", "'Error: %s' % e raise Exception(err_msg) if map_node_iterfield is not None: ds =", "util from indi_aws import aws_utils from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import DataSink", "mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow)", "out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni,", "name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory", "Workflow the anat_preproc workflow node to be connected and added to the resource", "'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update", "to resource pool for resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution',", "preferred if 1 in c.regWithSkull: if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC", "FSL anatomical-to-MNI registration, or... if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list):", "fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat']", "['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on list seg_apply_warp(strat_name=strat_name,", "subject \"%s\" was not ' \\ 'found. Check this path and try again.'", "(config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'),", "range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key", "pool strat_name : str name of the strategy strat_nodes_list_list : list a list", "skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c)", "'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id", "'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, 
config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'),", "for every session of the subject strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_anat,", "strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain", "'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node to calculate the center of mass of", "like the multivariate template from ANTS # It would just require to change", "err_msg = 'Credentials path: \"%s\" for subject \"%s\" was not ' \\ 'found.", "resource pool node, out_file = strat['functional_preprocessed_median'] # pass the anatomical to the workflow", "have a list of dict (each dict is a session) already_skullstripped = config.already_skullstripped[0]", "# TODO add session information in node name for num_reg_strat, reg_strat in enumerate(reg_strat_list):", "nipype.pipeline.engine as pe import nipype.interfaces.afni as afni import nipype.interfaces.fsl as fsl import nipype.interfaces.io", "template_center_of_mass, 'in_file') # list of lists for every strategy strat_nodes_list_list = {} #", "preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac,", "the images with it. 
template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\"", "pool node, out_file = strat['functional_preprocessed_median'] # pass the anatomical to the workflow workflow.connect(node,", "string the id of the subject strat_list : list of list first level", "out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain'] workflow.connect(node,", "(\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key in template_keys: if", "workflow.connect(node, out_file, ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node,", "and \\ session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject", "rsc_name, ds, rsc_key) # individual minimal preprocessing items for i in range(len(strat_nodes_list)): rsc_nodes_suffix", "and '.nii.gz' in f: filepath = os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath", "workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each participant we", "'out_file') }) strat_list = [strat_init_new] # only need to run once for each", "reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file')", "'out_image') }) # Register tissue segmentation from longitudinal template space to native space", "strat['functional_preprocessed_median'] workflow.connect(node, 
out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference files node, out_file =", "'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list += new_strat_list", "new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO", "= strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True)", "fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({", "'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni,", "out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file", "'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list =", "'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True) elif type ==", "enumerate(strat_list): if 
strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) )", "config): \"\"\" Parameters ---------- subject_id : string the id of the subject sub_list", "c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference", "still on # TODO ASH normalize w schema validation to bool if already_skullstripped", "apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') #", "= \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise Exception('Not able to write to", "list of strategies; within each strategy, a list of sessions \"\"\" datasink =", "out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference files node,", "'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO add other SS methods if \"AFNI\"", "bucket!') except Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an error", "= 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\",", "fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index',", "says: Your skull-stripping ' \\ 'method options 
setting does not include either' \\", "dictionaries to be updated during the preprocessing # creds_list = [] session_id_list =", "'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key)", "of sessions for one subject and each session if the same dictionary as", "= session['brain_mask'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) skullstrip_method", "strategy object you want to fork anat_preproc : Workflow the anat_preproc workflow node", "images as ' \\ 'your inputs. This can be changed ' \\ 'in", "ds, rsc_key) # individual minimal preprocessing items for i in range(len(strat_nodes_list)): rsc_nodes_suffix =", "file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id", "# a list of sessions within each strategy list # TODO rename and", "# sinc will be default option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc')", "name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name',", "Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here we have", "the skull-stripped anatomical from resource pool node, out_file = strat['anatomical_brain'] # pass the", "+ rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) # individual minimal preprocessing", "for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def 
anat_longitudinal_wf(subject_id, sub_list,", "as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag =", "enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) )", "'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat", "time import shutil from nipype import config from nipype import logging import nipype.pipeline.engine", "strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory : string a path to the", "'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm':", "the reorient skull-on anatomical from resource pool node, out_file = strat['motion_correct_median'] # pass", "if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped =", "'inputspec.ref_mask') # assign the FSL FNIRT config file specified in pipeline # config.yml", "resource pool updated strat_nodes_list_list : list a list of strat_nodes_list \"\"\" new_strat =", "FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if", "config, subject_id, 
strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) # individual minimal preprocessing items for", "strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method", "out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node,", "= create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1", "out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file,", "node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') })", "w schema validation to bool if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC", "\"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node,", "workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_func_preproc']", "'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for seg in ['anatomical_gm_mask', 'anatomical_csf_mask',", "= register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name) # Register T1 to the standard template", "the data config dictionaries to be updated during the preprocessing # creds_list =", "if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\",", "CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc", "}, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list):", "= os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for s3 write access s3_write_access = \\", "dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out',", "bool(config.s3Encryption[0]) except: encrypt_data = False # TODO Enforce value with schema validation #", "the standard template to align the images with it. 
template_center_of_mass = pe.Node( interface=afni.CenterMass(),", "subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name =", "pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template =", "= True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None): if type", "template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold,", "strat_list = [strat] node_suffix = '_'.join([subject_id, unique_id]) # Functional Ingress Workflow # add", "config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask,", "already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: You selected ' \\ 'to", "ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list", "c.regOption: # strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'),", "setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The", "strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({", "strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list", "to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference file node,", "None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : str the id", "and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else: err_msg =", "out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file", "'template_skull_for_anat', 'resolution_for_anat'), 
(config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask',", "out_file = strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni,", "getattr(c, 'runFunctional', [1]): for num_strat, strat in enumerate(strat_list): if 'FSL' in c.regOption and", "connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO add other SS", "# skull reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file", "ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory : string", "# this is to prevent the user from running FNIRT if they are", "out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file,", "Registration (BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func -> T1/EPI", "ds_template = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key) #", "outputs to the longitudinal template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix)", "workflow, strat_init, strat_name) # Register T1 to the 
standard template # TODO add", "reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register tissue segmentation from longitudinal template space", "raise Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median']", "] for resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name',", ": list of dict this is a list of sessions for one subject", "your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skullstripped", ") from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template )", "= c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm':", "(fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat, strat", "# quality skullstripping. 
If skullstripping is imprecise # registration with skull is preferred", "= 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i + 1)) #", "'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list = [strat_init_new] new_strat_list = [] if 'FSL'", "'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': (", "'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # Inserting", "and try ' \\ 'again.' % (creds_path, subject_id, unique_id) raise Exception(err_msg) else: input_creds_path", "anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold,", "selected to ' \\ 'use already-skullstripped images as ' \\ 'your inputs. 
This", "'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file =", "from CPAC.utils.interfaces.function import Function import CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat,", "out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference file node,", "+ \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s'", "strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template space rsc_key", "fsl import nipype.interfaces.io as nio from nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility as", "node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node, out_file", "subject_id, input_creds_path, node_suffix) # Functional Initial Prep Workflow workflow, strat_list = connect_func_init(workflow, strat_list,", "of list first level strategy, second level session config : configuration a configuration", "= new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc,", "2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub,", "workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 
'out_file') }, override=True) elif type == 'list':", "= [] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject", "pass the reference file node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else:", "% e raise Exception(err_msg) if map_node_iterfield is not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield),", "fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol", "str name of the strategy strat_nodes_list_list : list a list of strat_nodes_list workflow:", "tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) # loop over the different skull stripping", "pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node,", "to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical", "= reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp,", "elif already_skullstripped == 3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c)", "out_file = 
strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni,", "= strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_anat']", "create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1", "\"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key in template_keys: if isinstance(getattr(config,", "you want to fork anat_preproc : Workflow the anat_preproc workflow node to be", "1: file_list = file_list[0] for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return", "workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2')", "# Node to calculate the center of mass of the standard template to", "key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key, node) strat = Strategy() strat_list =", "import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import", "already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped = 1", "TODO add session information in node name for num_reg_strat, reg_strat in enumerate(reg_strat_list): if", "strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 
'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list", "(strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone doesn't have anatRegANTSinterpolation in their", "------- new_strat : Strategy the fork of strat with the resource pool updated", "in the list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration", "fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None): if", "'list': for index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp =", "T1 Registration (Initial Linear Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) #", "def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin'", "Exception('Not able to write to bucket!') except Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg", "set up for every session of the subject # TODO create a list", "fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file =", ") unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, 
cost=config.longitudinal_template_cost,", ") node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf']", "session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config subject_id session_id strat_name map_node_iterfield Returns", "name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') })", "# assign the FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config", ": string a path to the working directory Returns ------- brain_list : list", "fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull')", "strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node,", "list of strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key,", "workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node,", "== 2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped = 1 resampled_template", "longitudinal template space to native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm =", "dirpath, dirnames, filenames in os.walk(working_directory): for f in 
filenames: if 'func_get_preprocessed_median' in dirpath", "template from ANTS # It would just require to change it here. template_node", "one given to prep_workflow config : configuration a configuration object containing the information", "to align the images with it. template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file", "# individual minimal preprocessing items for i in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" %", "of lists for every strategy strat_nodes_list_list = {} # list of the data", "= sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if creds_path and 'none' not in", "brain skull_list : list a list of func preprocessed skull \"\"\" brain_list =", "workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index", "anatRegFSLinterpolation in their pipe config, # sinc will be default option if not", "object containing the information of the pipeline config. 
Returns ------- None ''' workflow_name", "every session of the subject # TODO create a list of list ses_list_strat_list", "( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list +=", "Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped", "the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference", "}) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) # Here we have the same strategies", "native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm,", "import CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg,", "strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') })", "pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1')", "'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) 
workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method':", "strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc,", "str): node = create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key,", "CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg,", "but it requires very high # quality skullstripping. If skullstripping is imprecise #", "\"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key in template_keys: if isinstance(getattr(config, key), str):", "config.workingDirectory, img_type = 'anat' ) skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix", "== 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file',", "= [] ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name)", "= strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name)", "'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) 
strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only", "else: func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if", "in c.regOption and 0 in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method')", "if the same dictionary as the one given to prep_workflow config : configuration", "strat_nodes_list_list : list a list of strat_nodes_list workflow: Workflow main longitudinal workflow Returns", "config, # it will default to LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation',", "resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) # loop", "connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func -> T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow,", "iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}')", "create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone doesn't have", "be in the list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input", "subject sub_list : list of dict this is a list of sessions for", "says: FNIRT (for anatomical ' \\ 'registration) will not work properly if you", "else: # TODO add other SS methods if \"AFNI\" in config.skullstrip_option: skullstrip_method =", "[] if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg(", "strat_list = 
connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func -> T1/EPI Template workflow, strat_list", "list of skullstripping strategies, # a list of sessions within each strategy list", "# Get path to creds file creds_path = '' if config.awsOutputBucketCredentials: creds_path =", "strat_name : str name of the strategy strat_nodes_list_list : list a list of", "a list of strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node,", "registration with skull is preferred if 1 in c.regWithSkull: if already_skullstripped == 1:", "anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method =", "brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" +", "= [] if 1 in c.runVMHC and 1 in getattr(c, 'runFunctional', [1]): for", "out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file,", "session_id_list.append(unique_id) try: creds_path = session['creds_path'] if creds_path and 'none' not in creds_path.lower(): if", "raise Exception(err_msg) if map_node_iterfield is not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield", "subject_id, anat = session['brain_mask'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat'", "' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat)", "reg_ants_skull=c.regWithSkull ) if not hasattr(c, 
'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in", "instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\", "config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO", "def pick_map(file_list, index, file_type): if isinstance(file_list, list): if len(file_list) == 1: file_list =", "string the id of the subject sub_list : list of dict this is", "for every session of the subject # TODO create a list of list", "\\ 'your inputs. This can be changed ' \\ 'in your pipeline configuration", "'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only =", ") strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) # Here", "fsl_apply_warp, 'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp,", "if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp(", "= str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for s3 write access", "creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key, node) strat = Strategy() strat_list = [] node_suffix", "# pass the reference file node, out_file = 
strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain')", "in filenames: if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath,", "a session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif", "skull stripping strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) #", "subject_id : string the id of the subject sub_list : list of dict", "the input for the longitudinal algorithm for session in sub_list: unique_id = session['unique_id']", "sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list = [] # either run FSL", "function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id )", "= 'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = {", "this is a list of sessions for one subject and each session if", "file node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file =", "nipype import logging import nipype.pipeline.engine as pe import nipype.interfaces.afni as afni import nipype.interfaces.fsl", "\"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file", "config.outputDirectory.lower().startswith('s3://'): # Test for s3 write access s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if", "= strat['anatomical_skull_leaf'] 
workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni,", "data config dictionaries to be updated during the preprocessing # creds_list = []", "c.regWithSkull: if already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says: You selected '", "'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try:", "strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'),", "c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name,", "# pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get", "avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter,", "= reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node,", "strat['func_longitudinal_to_mni_linear_xfm'] 
workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask')", "from nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility as util from indi_aws import aws_utils", "== 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node, out_file =", "have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg(", "template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for i in", "or provide input ' \\ 'images that have not been already ' \\", "take i+1 because the Merge nodes inputs starts at 1 rsc_key = 'anatomical_skull_leaf'", "strat_list += new_strat_list # Inserting Segmentation Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list,", "'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation',", "is preferred if 1 in c.regWithSkull: # get the skull-stripped anatomical from resource", "perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc,", "= strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in c.regOption: # strat", "= logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, 
c, workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args", "tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc,", "\"%s\" ' \\ 'was not found. Check this path and try ' \\", "pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list = [] ses_list_strat_list = {} workflow_name =", "afni import nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio from nipype.interfaces.utility import Merge,", "workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file,", "import Function import CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp,", "import os import copy import time import shutil from nipype import config from", "convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list", "f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for seg in ['anatomical_gm_mask',", "(fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = []", 
"'outputspec.normalized_output_brain') }) strat_list += new_strat_list # Inserting Segmentation Preprocessing Workflow workflow, strat_list =", "the working directory Returns ------- brain_list : list a list of func preprocessed", "output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type", "import time import shutil from nipype import config from nipype import logging import", "as util from indi_aws import aws_utils from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import", "rsc_name, brain_merge_node, 'in{}'.format(i + 1)) # the in{}.format take i+1 because the Merge", "config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for", "'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list", "file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient", "'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg =", "= strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node, out_file = 
strat['template_skull_for_func_preproc']", "# assign the FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config", "\"%s\" for subject \"%s\" session \"%s\" ' \\ 'was not found. Check this", "(\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key in template_keys: if isinstance(getattr(config, key),", "in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc(", "config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func,", "= strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i + 1)) # the in{}.format take i+1", "'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i + 1)) # the", "def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : string the id of", "as for prep_workflow) Returns ------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir", "num_strat) ) node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file =", "reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True) elif type == 'list': for index in range(3):", "'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, 
config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check", "subject \"%s\" session \"%s\" ' \\ 'was not found. Check this path and", "get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): brain_mask", "# skull input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull", "containing the information of the pipeline config. Returns ------- None ''' workflow_name =", "try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat, strat_nodes_list_list def pick_map(file_list, index,", "= file_list[0] for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def", "subject_id, unique_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None", "the transform with the skullstripped is # reported to be better, but it", "'method options setting does not include either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options", "sinc will be default option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if", "will be default option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation", "out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file", "= [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 
'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask',", "the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference", "'The selected ANTS interpolation method may be in the list of values: \"Linear\",", "workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_anat']", "err_msg = '\\n\\n[!] CPAC says: You selected ' \\ 'to run anatomical registration", "pipeline config. (Same as for prep_workflow) Returns ------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\"", "prep_workflow) Returns ------- strat_list_ses_list : list of list a list of strategies; within", "# -*- coding: utf-8 -*- import os import copy import time import shutil", "err = '\\n\\n[!] C-PAC says: Your skull-stripping ' \\ 'method options setting does", "reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') })", "return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat :", "the strategy object you want to fork anat_preproc : Workflow the anat_preproc workflow", "validation to bool if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: FNIRT", "and '.nii.gz' in f: filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list,", "out_file = strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni,", "sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'),", "if rsc_key in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix,", "reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list']", "= Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'),", "strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') #", "strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in Outputs.any: node, rsc_name =", "'was not found. Check this path and try ' \\ 'again.' 
% (creds_path,", "c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain']", "'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list +=", "out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node,", "not any(o in config.skullstrip_option for o in [\"AFNI\", \"BET\"]): err = '\\n\\n[!] C-PAC", "'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float", "node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node, out_file", "shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge,", "Parameters ---------- subject_id : string the id of the subject sub_list : list", "\"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation method may be in the", "optional flag workflow, diff, blip, fmap_rp_list = 
connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path,", "template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution,", "'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id, anat = session['brain_mask'], creds_path = input_creds_path,", "= create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file =", "'template_cmass': (template_center_of_mass, 'cm') }) # Here we have the same strategies for the", "dl_dir=config.workingDirectory ) setattr(config, key, node) strat = Strategy() strat_list = [] node_suffix =", "pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the", "strat_list, c, diff_complete) # Func -> T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list,", "---------- strat : Strategy the strategy object you want to fork anat_preproc :", "os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for subject \"%s\" was not '", "'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'),", "'in_file') # list of lists for every strategy strat_nodes_list_list = {} # list", "strat_list = connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config):", "node, out_file = 
strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node,", "workflow_name = 'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] =", "check_system_deps, get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name):", "ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm':", "flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have", "information in node name for num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL':", "node will generate the longitudinal template (the functions are in longitudinal_preproc) # Later", "list of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp", "'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean,", "'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node, config, workflow, strat_init, 'default' ) workflow.run() return", "workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file = 
strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') #", "ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file,", "already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list = [] #", "img_type = 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm')", "if 'ANTS' in c.regOption: # strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm':", "e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an error processing credentials or '", "the standard template # TODO add session information in node name for num_reg_strat,", "strat_list += new_strat_list ''' # Func -> T1 Registration (Initial Linear Reg) workflow,", "---------- subject_id : string the id of the subject strat_list : list of", "pool updated strat_nodes_list_list : list a list of strat_nodes_list \"\"\" new_strat = strat.fork()", "ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file,", "= [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain',", "\\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skull-stripped anatomical from resource pool", "but you also selected to ' \\ 'use already-skullstripped images as ' \\", "pipeline # config.yml 
fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork()", "return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores =", "will not work properly if you ' \\ 'are providing inputs that have", "ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input", "+ func works, pass anat strategy list? def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters", "to creds file creds_path = '' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path =", "template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list of lists for every", "out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None", "filepath = os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in f:", "'\\n\\n[!] 
CPAC says: FNIRT (for anatomical ' \\ 'registration) will not work properly", "you also selected to ' \\ 'use already-skullstripped images as ' \\ 'your", "to be updated during the preprocessing # creds_list = [] session_id_list = []", "reg_ants_skull=c.regWithSkull ) # if someone doesn't have anatRegANTSinterpolation in their pipe config, #", "= pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node,", "import logging import nipype.pipeline.engine as pe import nipype.interfaces.afni as afni import nipype.interfaces.fsl as", "0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map,", "+= new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only =", "CPAC says: You selected ' \\ 'to run anatomical registration with ' \\", "'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d'", "the id of the subject sub_list : list of dict this is a", "= pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution =", "strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': 
(flirt_reg_func_mni,", "if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation", "in c.regOption and \\ strat.get('registration_method') != 'ANTS': # this is to prevent the", "'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc", "unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node,", "# apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve')", "'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # [SYMMETRIC] T1 -> Symmetric Template,", "os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for subject \"%s\" session \"%s\" '", "creds_path = '' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'):", "rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in Outputs.any: node, rsc_name = strat[rsc_key]", "= strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file", "different skull stripping strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id])", "ASH normalize w schema validation to bool if already_skullstripped == 1: err_msg =", "workflow, diff, blip, fmap_rp_list = 
connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix) #", "'func_get_preprocessed_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) brain_list.append(filepath) if", "config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for s3", ": string the id of the subject sub_list : list of dict this", ") logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(),", "Function import CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg,", "setattr(config, key, node) strat = Strategy() strat_list = [] node_suffix = '_'.join([subject_id, unique_id])", "for key_type, key in template_keys: if isinstance(getattr(config, key), str): node = create_check_for_s3_node( name=key,", "'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm']", "= strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni,", "'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO how to", "mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, 
robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat,", "= 'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat,", "c.regOption and \\ strat.get('registration_method') != 'ANTS': # this is to prevent the user", "fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file =", "the in{}.format take i+1 because the Merge nodes inputs starts at 1 rsc_key", "create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation", "the reference files node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS'", "the FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig", "from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from", "'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS", "parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') #", "list? 
def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : string the id", "creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for s3 write", "\"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None): if type == 'str': fsl_apply_xfm", "rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i + 1))", "Distortion Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix]", "requires an input with the skull still on if already_skullstripped == 1: err_msg", "= strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node, out_file", ": Strategy the strategy object you want to fork anat_preproc : Workflow the", "file_type='pve') # Update resource pool # longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template =", "get the reorient skull-on anatomical from resource pool node, out_file = strat['motion_correct_median'] #", "new_strat_list = [] # either run FSL anatomical-to-MNI registration, or... 
if 'FSL' in", "}) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map,", "nipype.interfaces.afni as afni import nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio from nipype.interfaces.utility", "---------- subject_id : str the id of the subject sub_list : list of", "dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node,", "Non-linear registration (FNIRT/ANTS) new_strat_list = [] if 1 in c.runVMHC and 1 in", "strat with the resource pool updated strat_nodes_list_list : list a list of strat_nodes_list", "Here we have all the anat_preproc set up for every session of the", "( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # Inserting Segmentation", "from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import (", "config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull,", "tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') 
tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node,", "out_file, ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file", "config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node to calculate the center", "+= new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): # or run", "inputs. This can be changed ' \\ 'in your pipeline configuration ' \\", "Exception # get the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain']", "the reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get", "workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file,", "pass the reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') #", "[] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject =", "template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name)", "# either run FSL anatomical-to-MNI registration, or... 
if 'FSL' in c.regOption: for num_strat,", "strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') #", "raise Exception # get the skullstripped anatomical from resource pool node, out_file =", "for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds", "1: err_msg = '\\n\\n[!] CPAC says: FNIRT (for anatomical ' \\ 'registration) will", "out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference file node,", "flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list +=", "!= 'ANTS': # this is to prevent the user from running FNIRT if", "schema validation to bool if already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says:", "out_file, fsl_apply_warp, 'ref_file') # TODO how to include linear xfm? # node, out_file", "strat_list def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config", "if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) brain_list.append(filepath)", "wf? 
# TODO check: # 1 func alone works # 2 anat +", "sub_dict, subject_id, input_creds_path, node_suffix) # Functional Initial Prep Workflow workflow, strat_list = connect_func_init(workflow,", "strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni,", "and each session if the same dictionary as the one given to prep_workflow", "get the skull-stripped anatomical from resource pool node, out_file = strat['anatomical_brain'] # pass", "fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file =", "= session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path'] if creds_path and 'none' not in", "= 'anat' ) skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({", "template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name,", "(ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni,", "type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource pool # longitudinal template", "FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if", 
"'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation method may be in the list", "each participant we have a list of dict (each dict is a session)", "# pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass", "out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node,", "= pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) )", "out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni,", "CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress", "pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') #", "resource pool # longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix,", "'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name),", "anatomical ' \\ 'registration) will not work properly if you ' \\ 'are", "the skull still on if already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says:", "pass the reference files node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if", "high # quality skullstripping. 
If skullstripping is imprecise # registration with skull is", "'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc',", "already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped = 1 resampled_template = pe.Node(Function(input_names=['resolution',", "node, out_file = strat['motion_correct_median'] # pass the anatomical to the workflow workflow.connect(node, out_file,", "workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix) # Functional Image Preprocessing Workflow workflow,", "session information in node name for num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') ==", "already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow)", "out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node,", "'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # [SYMMETRIC]", "'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list", "is # reported to be better, but it requires very high # quality", "for i in 
range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i) for rsc_key in", "# brain reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull", "CPAC.func_preproc.func_ingress import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func", "providing already-skullstripped inputs. this is because # FNIRT requires an input with the", "raise Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the transform", "= '_'.join([strat_name, subject_id]) # Merge node to feed the anat_preproc outputs to the", "registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain')", "= None template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"),", "Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion Correction workflow,", "of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp =", "\"\"\" Parameters ---------- strat : Strategy the strategy object you want to fork", "TODO Enforce value with schema validation # Extract credentials path for output if", "if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'),", "new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): # or run ANTS", 
"config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method,", "given to prep_workflow config : configuration a configuration object containing the information of", "in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation method may be", "input with the skull still on # TODO ASH normalize w schema validation", "'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list = [strat_init_new] new_strat_list = []", "in getattr(c, 'runFunctional', [1]): for num_strat, strat in enumerate(strat_list): if 'FSL' in c.regOption", "configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skull-stripped anatomical from", "if 'func' in sub_dict: func_paths_dict = sub_dict['func'] else: func_paths_dict = sub_dict['rest'] unique_id =", "of dict (each dict is a session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped ==", "' \\ 'your inputs. This can be changed ' \\ 'in your pipeline", "num_strat) ) # if someone doesn't have anatRegFSLinterpolation in their pipe config, #", "ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass", "'your inputs. 
This can be changed ' \\ 'in your pipeline configuration '", "node, out_file = reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm,", "'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # [SYMMETRIC] T1 -> Symmetric", "out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference file node,", "' \\ 'again.' % (creds_path, subject_id, unique_id) raise Exception(err_msg) else: input_creds_path = None", "\\ strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores,", "'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template = template", "anatomical from resource pool node, out_file = strat['anatomical_skull_leaf'] # pass the anatomical to", "ds.inputs.container = os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id ) return ds def", "if \"BET\" in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc", "workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({", "'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': 
(flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list +=", "str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } #", "Node to calculate the center of mass of the standard template to align", "= pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys =", "rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return reg_strat_list # strat_nodes_list_list # for func", ": str name of the strategy strat_nodes_list_list : list a list of strat_nodes_list", "try again.\\n' \\ 'Error: %s' % e raise Exception(err_msg) if map_node_iterfield is not", "FNIRT if they are # providing already-skullstripped inputs. this is because # FNIRT", "Functional Initial Prep Workflow workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix) # Functional", "strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') #", "os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id ) return ds def connect_anat_preproc_inputs(strat, anat_preproc,", "\"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the", "\"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key", "1 rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i +", "subject_id, strat_name='longitudinal_'+strat_name, 
map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard registration items", "# update resampled template to resource pool for resolution, template, template_name, tag in", "'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni,", "''' # Func -> T1 Registration (Initial Linear Reg) workflow, strat_list, diff_complete =", "user from running FNIRT if they are # providing already-skullstripped inputs. this is", "TODO rename and reorganize dict # TODO update strat name strat_list_ses_list = {}", "for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters", "brain_list : list a list of func preprocessed brain skull_list : list a", "# Test for s3 write access s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not", "(flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list", "Returns ------- new_strat : Strategy the fork of strat with the resource pool", "out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\",", "create_anat_datasource('anat_gather_%s' % node_suffix) 
anat_rsc.inputs.inputnode.set( subject = subject_id, anat = session['anat'], creds_path = input_creds_path,", "override=True) for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply", "key_type, key in template_keys: if isinstance(getattr(config, key), str): node = create_check_for_s3_node( name=key, file_path=getattr(config,", "to change it here. template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list =", "% (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file,", "out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file = strat['functional_preprocessed_median'] workflow.connect(node,", "= [] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory):", "'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file')", "here. template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for i", "'again.' 
% (creds_path, subject_id, unique_id) raise Exception(err_msg) else: input_creds_path = None except KeyError:", "\"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation #", "find_files, function, Outputs from CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params, get_tr ) logger", "# Here we have all the anat_preproc set up for every session of", "{} strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list", "FSL interpolation method may be in the list of values: \"trilinear\", \"sinc\", \"spline\"'", "reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration", "c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation method", "sessions to create the input for the longitudinal algorithm for session in sub_list:", "---------- datasink_name config subject_id session_id strat_name map_node_iterfield Returns ------- \"\"\" try: encrypt_data =", "ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file,", "connect_func_to_anat_init_reg(workflow, strat_list, c) # Func -> T1 Registration (BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow,", "'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node,", "anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) 
anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout,", "strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat : Strategy the strategy object you want", "config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method,", "ants_apply_warp, 'moving_image') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file =", "option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\",", "workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_anat']", "from longitudinal template space to native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm", "' \\ 'images that have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise", "if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an error processing credentials or ' \\", "= encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id ) return", "'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask']", "for num_strat, strat in enumerate(strat_list): # this is to prevent the user from", "'reference') node, 
out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm']", "seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource pool # longitudinal template rsc_key =", "ses_list_strat_list # a list of skullstripping strategies, # a list of sessions within", "is to prevent the user from running FNIRT if they are # providing", "= \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list of lists for every strategy", "anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference file", "that have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni =", "'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' %", "'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node,", "concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for", "setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The", "fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull')", "not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d'", "strat_name, 
strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) # Merge node to feed", "pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) #", "reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear')", "= bool(config.s3Encryption[0]) except: encrypt_data = False # TODO Enforce value with schema validation", "'in your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the", "= strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file,", "out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp", "# This node will generate the longitudinal template (the functions are in longitudinal_preproc)", "file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference", "skull reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file =", "% (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone doesn't have anatRegANTSinterpolation in", "resource pool for resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template',", "'anat_preproc_afni_%s' % node_suffix 
anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim,", "'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'],", "'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name,", "able to write to bucket!') except Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg =", "error processing credentials or ' \\ 'accessing the S3 bucket. Check and try", "( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from", "'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat,", "'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for", "# Register tissue segmentation from longitudinal template space to native space fsl_convert_xfm =", "of the subject # TODO create a list of list ses_list_strat_list # a", "elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'],", "( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 
'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list", ") flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node,", "flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have", "# TODO rename and reorganize dict # TODO update strat name strat_list_ses_list =", "fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file", "'inputspec.anat') tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': (", "= None except KeyError: input_creds_path = None strat = Strategy() strat_list = [strat]", "strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node,", "out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified in pipeline", "strat_list, config, node_suffix) # Functional Image Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list,", "out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign", "'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key", "nodes inputs starts at 1 rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] 
workflow.connect(anat_preproc_node,", "out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config", "fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0] if 'FSL' in c.regOption and", "of the strategy strat_nodes_list_list : list a list of strat_nodes_list workflow: Workflow main", "session_id_list = [] # Loop over the sessions to create the input for", "strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) # Merge node to feed the", "iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file')", "fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull')", "brain_list = [] skull_list = [] for dirpath, dirnames, filenames in os.walk(working_directory): for", "None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni,", "out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register", "( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list", "'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None): if type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(),", 
"input_creds_path = None strat = Strategy() strat_list = [strat] node_suffix = '_'.join([subject_id, unique_id])", "float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative',", "the reference file node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: #", "( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name]", "create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource", "# T1 to longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key +", "Returns ------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution']", "os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init,", "workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node,", "lists for every strategy strat_nodes_list_list = {} # list of the data config", "Get path 
to creds file creds_path = '' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials)", "fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}')", "strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i + 1)) # the in{}.format take i+1 because", "strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO add", "Registration (Initial Linear Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) # Func", "fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file =", "if type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour'", "to the longitudinal template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node", "node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True)", "strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list = [strat_init_new] new_strat_list", 
"(brain_mask, 'out_file') }) strat_list = [strat_init_new] # only need to run once for", "create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp from", "'Credentials path: \"%s\" for subject \"%s\" was not ' \\ 'found. Check this", "!= 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull )", "working directory Returns ------- brain_list : list a list of func preprocessed brain", "sub_list: if 'func' in sub_dict or 'rest' in sub_dict: if 'func' in sub_dict:", "node, out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node,", "in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\"", "in prep_workflow if 'brain_mask' in session.keys() and session['brain_mask'] and \\ session['brain_mask'].lower() != 'none':", "create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain']", "ses_list_strat_list[node_suffix] = strat_list # Here we have all the func_preproc set up for", "= sub_dict['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path =", "out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 
'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain')", "input_creds_path = None template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\",", "validation # Extract credentials path for output if it exists try: # Get", "merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof,", "== 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp),", "list a list of func preprocessed skull \"\"\" brain_list = [] skull_list =", "out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat =", "strat_nodes_list workflow: Workflow main longitudinal workflow Returns ------- new_strat : Strategy the fork", "not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]:", "' \\ 'to run anatomical registration with ' \\ 'the skull, but you", "\"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key in template_keys:", "list of list a list of strategies; within each strategy, a list of", "CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc", "to be better, but it requires very high # quality skullstripping. 
If skullstripping", "(config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ]", "object you want to fork anat_preproc : Workflow the anat_preproc workflow node to", "rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node,", "anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient skull-on", "using ' \\ 'ANTS for registration or provide input ' \\ 'images that", "'brain_template', ds_template, rsc_key) # T1 to longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list", "f: filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node,", "# skull reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file", "Here we have all the func_preproc set up for every session of the", "strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return reg_strat_list # strat_nodes_list_list #", "list of func preprocessed brain skull_list : list a list of func preprocessed", "if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: You selected ' \\", "= reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method')", "hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg", "= None except KeyError: input_creds_path = None template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\",", "we have the same strategies for the skull stripping as in prep_workflow if", "anatomical from resource pool node, out_file = strat['motion_correct_median'] # pass the anatomical to", "Func -> T1 Registration (BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) #", "subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) # individual minimal preprocessing items for i", "connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list, config,", "Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node,", "'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc,", "for registration or provide input ' \\ 'images that have not been already", "c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0] if 'FSL' in c.regOption and 0 in", "'brain_mask' in session.keys() and session['brain_mask'] and \\ session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource(", "session['brain_mask'] and \\ 
session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set(", "skull is preferred if 1 in c.regWithSkull: # get the skull-stripped anatomical from", "to standard registration items for num_strat, strat in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys():", "have all the func_preproc set up for every session of the subject #", "== 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file", "the pipeline config. Returns ------- None ''' workflow_name = 'func_longitudinal_template_' + str(subject_id) workflow", "nio from nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility as util from indi_aws import", "file specified in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only:", ": list of list a list of strategies; within each strategy, a list", "'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list: if 'func' in sub_dict or 'rest'", "= os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in f: filepath", "strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff')", "---------- working_directory : string a path to the working directory Returns ------- brain_list", "config): ''' Parameters ---------- subject_id : string the id of the subject strat_list", "override=True) strat_list += new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): if", 
"already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores", "fsl_apply_warp, 'ref_file') # TODO how to include linear xfm? # node, out_file =", "generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\"", "create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key, node) strat =", "if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for subject", "node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO how to include", "= strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard':", "+ node_suffix) # This node will generate the longitudinal template (the functions are", "str(num_strat)]) if rsc_key in Outputs.any: node, rsc_name = strat[rsc_key] ds = create_datasink(rsc_key +", "= c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the", "if 1 in c.runVMHC and 1 in getattr(c, 'runFunctional', [1]): for num_strat, strat", "in c.regWithSkull: if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: You selected", "template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"),", "resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'],", "for each subject already_skullstripped = c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0", "'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative',", "template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name) # Register T1 to", "strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'),", "strat_list += new_strat_list # [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS) new_strat_list", "(config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled template to resource pool for", "= 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node to calculate the center of mass", "strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = 
create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain", "and 0 in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL':", "it exists try: # Get path to creds file creds_path = '' if", "return None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : str the", "'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list ''' # Func -> T1", "reg_strat_list # strat_nodes_list_list # for func wf? # TODO check: # 1 func", "config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution (config.resolution_for_func_preproc,", "subject and each session if the same dictionary as the one given to", "running FNIRT if they are # providing already-skullstripped inputs. this is because #", "{ 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each participant we have a", "= input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat')", "and try again.\\n' \\ 'Error: %s' % e raise Exception(err_msg) if map_node_iterfield is", "= strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference file node, out_file", "'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat,", "with it. 
template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template',", "a configuration object containing the information of the pipeline config. (Same as for", "config.outputDirectory) if not s3_write_access: raise Exception('Not able to write to bucket!') except Exception", "list of lists for every strategy strat_nodes_list_list = {} # list of the", "add other SS methods if \"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name =", "the subject sub_list : list of dict this is a list of sessions", "'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list = [strat_init_new] # only need", "'registration) will not work properly if you ' \\ 'are providing inputs that", "= config.workingDirectory, img_type = 'anat' ) skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s' %", "reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'],", "strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull')", "+= new_strat_list # [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS) new_strat_list =", "'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat, strat in", "'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only", "file 
node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file =", "transform with the skullstripped is # reported to be better, but it requires", "\\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores,", "create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat,", "from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node )", "str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir':", "------- None ''' workflow_name = 'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir =", "all the anat_preproc set up for every session of the subject strat_init =", "= strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_anat']", "workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return reg_strat_list # strat_nodes_list_list # for", "index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map", "(config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 
'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative,", "func alone works # 2 anat + func works, pass anat strategy list?", "KeyError: input_creds_path = None strat = Strategy() strat_list = [strat] node_suffix = '_'.join([subject_id,", "'ANTS' in c.regOption: # strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': (", "(config.pipelineName, strat_name), subject_id, session_id ) return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow):", "create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import", "the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass", "Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't", "'Credentials path: \"%s\" for subject \"%s\" session \"%s\" ' \\ 'was not found.", "config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass,", "function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file = 
reg_strat['template_brain_for_anat'] workflow.connect(node, out_file,", "strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff')", "have anatRegFSLinterpolation in their pipe config, # sinc will be default option if", "\"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list = [] ses_list_strat_list =", "[] if 1 in c.runVMHC and 1 in getattr(c, 'runFunctional', [1]): for num_strat,", "if 'func' in sub_dict or 'rest' in sub_dict: if 'func' in sub_dict: func_paths_dict", "anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference file", "None except KeyError: input_creds_path = None strat = Strategy() strat_list = [strat] node_suffix", "provide input ' \\ 'images that have not been already ' \\ 'skull-stripped.\\n\\n'", "sub_list, config): \"\"\" Parameters ---------- subject_id : str the id of the subject", "strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key +", "skull still on if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: FNIRT", "None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] = {", "out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file", "'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS", "'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in", "the center of mass of the standard template to align the images with", "out_file, ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file", "new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass')", "strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference file node, out_file =", "'FSL' in c.regOption and 0 in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if", "connect_func_ingress ) from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction", "in longitudinal_preproc) # Later other algorithms could be added to calculate it, like", "your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skull-stripped", "else: err_msg = 'Credentials path: \"%s\" for subject \"%s\" was not ' \\", "wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( 
shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac,", "\\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here we", "ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp')", "new_strat_list = [] if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni", "strategies for the skull stripping as in prep_workflow if 'brain_mask' in session.keys() and", "subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1 to longitudinal template warp rsc_key", "strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard':", "# providing already-skullstripped inputs. 
this is because # FNIRT requires an input with", "+= new_strat_list # Inserting Segmentation Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c,", "out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni,", "options setting does not include either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you", "'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list',", "creds_path = sub_dict['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path", "# get the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] #", "workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list def create_datasink(datasink_name, config, subject_id,", "= config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each", "'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) # Here we have", "{0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here we have all the anat_preproc set up", "they are # providing already-skullstripped inputs. this is because # FNIRT requires an", "' \\ 'found. Check this path and try again.' 
% ( creds_path, subject_id)", "Func -> T1 Registration (Initial Linear Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list,", "}) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list", "better, but it requires very high # quality skullstripping. If skullstripping is imprecise", "raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation", "= create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id, anat = session['brain_mask'], creds_path", "= strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni,", "fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the", "if not s3_write_access: raise Exception('Not able to write to bucket!') except Exception as", "in Outputs.any: node, rsc_name = strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id,", "If skullstripping is imprecise # registration with skull is preferred if 1 in", "pipeline config. 
(Same as for prep_workflow) Returns ------- strat_list_ses_list : list of list", "out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node, out_file =", "= pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir':", "'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name) #", ") template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list of lists for", "config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list:", "Parameters ---------- strat : Strategy the strategy object you want to fork anat_preproc", "None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else: ds = pe.Node( DataSink(),", "for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) )", "'runFunctional', [1]): for num_strat, strat in enumerate(strat_list): if 'FSL' in c.regOption and \\", "ds_template, rsc_key) # T1 to longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list =", "interpolation method may be in the list of values: \"trilinear\", \"sinc\", \"spline\"' raise", "to fork anat_preproc : Workflow the anat_preproc workflow node to be connected and", ": Workflow the anat_preproc workflow node to be connected and added to the", "sub_dict: func_paths_dict = sub_dict['func'] else: 
func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try:", "}) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError:", "directory Returns ------- brain_list : list a list of func preprocessed brain skull_list", "out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file,", "= connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in", "brain input node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference", "strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node,", "'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list +=", "= 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\",", "create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni,", "because the Merge nodes inputs starts at 1 rsc_key = 'anatomical_skull_leaf' 
anat_preproc_node, rsc_name", "map_node_iterfield Returns ------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data = False #", "import ( subject_specific_template ) from CPAC.utils import Strategy, find_files, function, Outputs from CPAC.utils.utils", "'_'.join([strat_name, subject_id]) # Merge node to feed the anat_preproc outputs to the longitudinal", "is a list of sessions for one subject and each session if the", "very high # quality skullstripping. If skullstripping is imprecise # registration with skull", "add session information in node name for num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method')", "(strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation #", "CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction", "index, file_type): if isinstance(file_list, list): if len(file_list) == 1: file_list = file_list[0] for", "workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node,", "name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node", "added to the resource pool strat_name : str name of the strategy strat_nodes_list_list", "= strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key =", "1 func alone works # 2 anat + func works, pass anat strategy", "T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS) 
new_strat_list = [] if 1 in", "Workflow workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix) # Functional Image Preprocessing Workflow", "if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'),", "FNIRT (for anatomical ' \\ 'registration) will not work properly if you '", "It would just require to change it here. template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' +", "strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node,", "} for sub_dict in sub_list: if 'func' in sub_dict or 'rest' in sub_dict:", "raise Exception('Not able to write to bucket!') except Exception as e: if config.outputDirectory.lower().startswith('s3://'):", "(\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key in template_keys: if isinstance(getattr(config, key), str): node", "function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list')", "= connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='',", "if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation", "func works, pass anat strategy list? 
def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ----------", "'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True) elif", "ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file,", "if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d'", "strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') },", "workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard registration items for num_strat, strat", "num_ants_cores = \\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node,", "input ' \\ 'images that have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg)", "'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node, config,", "fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file =", "DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), 
iterfield=map_node_iterfield ) else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory =", "= strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the skullstripped anatomical from", "strat_list += new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): if 'ANTS'", "= { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each participant we have", "workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory)", "name for num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(),", "in os.walk(working_directory): for f in filenames: if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in", "= strat['functional_preprocessed_median'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain')", "function, Outputs from CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params, get_tr ) logger =", "' \\ 'ANTS for registration or provide input ' \\ 'images that have", "it here. 
template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for", "'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name) # Register T1 to the", "diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here we have all the", "node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference files", "raise Exception(err) # Here we have all the anat_preproc set up for every", "!= 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull )", "else: node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference", "file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference", "IdentityInterface import nipype.interfaces.utility as util from indi_aws import aws_utils from CPAC.utils.utils import concat_list", "fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\",", "brain input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference", "session['anat'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({ 'anatomical':", "= strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 
'inputspec.moving_brain')", "to prep_workflow config : configuration a configuration object containing the information of the", "is imprecise # registration with skull is preferred if 1 in c.regWithSkull: if", "encrypt_data = False # TODO Enforce value with schema validation # Extract credentials", "from CPAC.utils import Strategy, find_files, function, Outputs from CPAC.utils.utils import ( check_config_resources, check_system_deps,", "c.regOption: for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat)", "out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if", "workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name)", "err_msg = 'The selected FSL interpolation method may be in the list of", "Prep Workflow workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix) # Functional Image Preprocessing", "strat.get('registration_method') != 'ANTS': # this is to prevent the user from running FNIRT", "\\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here we have all the anat_preproc", "in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\",", "config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', 
strat_nodes_list_list, workflow) strat_list.append(new_strat)", "in sub_list: if 'func' in sub_dict or 'rest' in sub_dict: if 'func' in", "config, # sinc will be default option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation',", "'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method,", "ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'),", "strategies, # a list of sessions within each strategy list # TODO rename", "in the list of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration", "'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d'", "flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS'", "input for the longitudinal algorithm for session in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id)", "+ rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain'", "func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : string the id of the", "return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory : string a path to", "workflow workflow.connect(node, 
out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference file node, out_file =", "# strat_nodes_list_list # for func wf? # TODO check: # 1 func alone", "if someone doesn't have anatRegANTSinterpolation in their pipe config, # it will default", "resource=seg) # apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list',", "already_skullstripped: skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method,", "pipeline config. Returns ------- None ''' workflow_name = 'func_longitudinal_template_' + str(subject_id) workflow =", "= create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have anatRegFSLinterpolation", "new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'),", ") else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path =", "list a list of strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node, out_key = new_strat['anatomical']", "elif already_skullstripped: skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc(", "Register tissue segmentation from longitudinal template space to native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(),", "c) # Func -> T1 Registration (BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c,", "output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution = resolution 
resampled_template.inputs.template = template resampled_template.inputs.template_name", "# Update resource pool # longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key", ": list a list of func preprocessed brain skull_list : list a list", "= connect_func_init(workflow, strat_list, config, node_suffix) # Functional Image Preprocessing Workflow workflow, strat_list =", "in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat =", "resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution", "algorithm for session in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path']", "Exception(err_msg) if map_node_iterfield is not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield )", "fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO how", "}, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask',", "added to calculate it, like the multivariate template from ANTS # It would", "use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) 
new_strat,", "longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id,", "to include linear xfm? # node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp,", "= strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni,", "a list of sessions within each strategy list # TODO rename and reorganize", "err_msg = 'The selected ANTS interpolation method may be in the list of", "longitudinal algorithm for session in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path =", "in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if", "\"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list", "registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the transform with the skullstripped is", "'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'),", "== 1: err_msg = '\\n\\n[!] 
CPAC says: You selected ' \\ 'to run", "'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask,", "# reported to be better, but it requires very high # quality skullstripping.", "pool node, out_file = strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node,", "config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled template to resource pool for resolution,", "strat_list = connect_func_init(workflow, strat_list, config, node_suffix) # Functional Image Preprocessing Workflow workflow, strat_list", "'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm':", "workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name)", "f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node,", "2 anat + func works, pass anat strategy list? 
def func_preproc_longitudinal_wf(subject_id, sub_list, config):", "-> T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow, strat_list", "}) # Register tissue segmentation from longitudinal template space to native space fsl_convert_xfm", "out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node, out_file =", "warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update", "'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg =", "fork anat_preproc : Workflow the anat_preproc workflow node to be connected and added", "strat_list, config): ''' Parameters ---------- subject_id : string the id of the subject", "'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if", "\"template_based_segmentation_WHITE\"), ] for key_type, key in template_keys: if isinstance(getattr(config, key), str): node =", "config : configuration a configuration object containing the information of the pipeline config.", "workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_func_preproc']", "anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient skull-on", "is not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), 
iterfield=map_node_iterfield ) else: ds =", "name strat_list_ses_list = {} strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0])", "import Merge, IdentityInterface import nipype.interfaces.utility as util from indi_aws import aws_utils from CPAC.utils.utils", "session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if creds_path and 'none' not in creds_path.lower(): if", "= create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') #", "'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni,", "pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory", "\"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"),", "templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only,", "segmentation from longitudinal template space to native space fsl_convert_xfm = 
pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file'])", "updated during the preprocessing # creds_list = [] session_id_list = [] # Loop", "connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc", "get the reorient skull-on anatomical from resource pool node, out_file = strat['anatomical_skull_leaf'] #", "# list of lists for every strategy strat_nodes_list_list = {} # list of", "check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func,", "the subject strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat,", "working_directory : string a path to the working directory Returns ------- brain_list :", "\"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file =", "------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] =", "else: input_creds_path = None except KeyError: input_creds_path = None template_keys = [ (\"anat\",", "run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import", "workflow node to be connected and added to the resource pool strat_name :", "img_type = 'anat' ) 
skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name)", "}) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear',", "= config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in", "'ANTS for registration or provide input ' \\ 'images that have not been", "other algorithms could be added to calculate it, like the multivariate template from", "config file specified in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in", "'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'),", "'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list ''' # Func", "iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference')", "(config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template, template_name, tag in templates_for_resampling: resampled_template", "(ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 
'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni,", "been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' %", "# Functional Initial Prep Workflow workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix) #", "at 1 rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i", "= '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in Outputs.any: node, rsc_name = strat[rsc_key] ds", "list): if len(file_list) == 1: file_list = file_list[0] for file_name in file_list: if", "== 3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list =", "the information of the pipeline config. 
Returns ------- None ''' workflow_name = 'func_longitudinal_template_'", "config from nipype import logging import nipype.pipeline.engine as pe import nipype.interfaces.afni as afni", "'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = []", "the subject strat_list : list of list first level strategy, second level session", "return strat_list def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name", "import nipype.interfaces.afni as afni import nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio from", "workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node,", "config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull,", "values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation", "'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain')", "strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) 
strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list", "the reference file node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node,", "S3 bucket. Check and try again.\\n' \\ 'Error: %s' % e raise Exception(err_msg)", "workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) # Func -> T1 Registration (BBREG)", "output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node,", "def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ---------- subject_id : string the id of", "strat_list : list of list first level strategy, second level session config :", "raise Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain']", "node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file", "+= new_strat_list ''' # Func -> T1 Registration (Initial Linear Reg) workflow, strat_list,", "1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard':", "how to include linear xfm? 
# node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file,", "subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node,", "(ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni,", "out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file", "file specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only:", "% (strat_name, num_strat) ) # brain input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file,", "sessions for one subject and each session if the same dictionary as the", "'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image')", "subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, 
interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, )", "pass the reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para", "= config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped == 3:", "= strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'),", "0 in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni", "path: \"%s\" for subject \"%s\" was not ' \\ 'found. Check this path", "node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference files", "= strat['anatomical_skull_leaf'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull')", "'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each participant we have a list of dict", "workflow.connect(template_node, \"warp_list\", ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_'", "for subject \"%s\" was not ' \\ 'found. 
Check this path and try", "workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) #", "name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm,", "outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list", "anatomical from resource pool node, out_file = strat['functional_preprocessed_median'] # pass the anatomical to", "'out_file') }, override=True) elif type == 'list': for index in range(3): fsl_apply_xfm =", "and 1 in getattr(c, 'runFunctional', [1]): for num_strat, strat in enumerate(strat_list): if 'FSL'", "= connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix) # Functional Initial Prep Workflow", "from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from CPAC.utils import Strategy, find_files, function, Outputs", "name of the strategy strat_nodes_list_list : list a list of strat_nodes_list workflow: Workflow", "node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask =", "strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = 
strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask')", "the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] # pass the", ") from CPAC.func_preproc.func_ingress import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc,", "reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name) # Register T1 to the standard", "fnirt_reg_func_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull')", "workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node,", "out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource pool node,", "= 0 elif already_skullstripped == 3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores =", "'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni,", "ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource pool node, out_file", "minimal preprocessing items for i in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i)", "Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with", "nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio from nipype.interfaces.utility import Merge, IdentityInterface import", "= 
'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = {", "longitudinal template (the functions are in longitudinal_preproc) # Later other algorithms could be", "(flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list", "'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') #", "raise Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform", "out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file,", "logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args =", "of the subject sub_list : list of dict this is a list of", "not include either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option:", "workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni,", "(ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 
'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm':", "in node name for num_reg_strat, reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp", "Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None template_keys = [", "= [0] if 'FSL' in c.regOption and 0 in fsl_linear_reg_only: for num_strat, strat", "strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config subject_id session_id strat_name map_node_iterfield Returns -------", "files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption:", "= create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key, node) strat", "subject_id : string the id of the subject strat_list : list of list", "config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name)", "key in template_keys: if isinstance(getattr(config, key), str): node = create_check_for_s3_node( name=key, file_path=getattr(config, key),", "avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 
'skull_list',", "\"_%s_%d\" % (node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node,", "%s' % e raise Exception(err_msg) if map_node_iterfield is not None: ds = pe.MapNode(", "flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file", "value with schema validation # Extract credentials path for output if it exists", "\\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise Exception('Not able to write to bucket!')", "preprocessed brain skull_list : list a list of func preprocessed skull \"\"\" brain_list", "'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm':", "e raise Exception(err_msg) if map_node_iterfield is not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name),", "import nipype.interfaces.io as nio from nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility as util", "'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask']", "def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : str the id of", "= new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf':", 
"frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient,", "session config : configuration a configuration object containing the information of the pipeline", "prep_workflow config : configuration a configuration object containing the information of the pipeline", "ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register tissue segmentation from", "does not include either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you ' \\", "\"BET\"]): err = '\\n\\n[!] C-PAC says: Your skull-stripping ' \\ 'method options setting", "in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' %", "= 'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') })", "skull \"\"\" brain_list = [] skull_list = [] for dirpath, dirnames, filenames in", "import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from", "the reorient skull-on anatomical from resource pool node, out_file = strat['anatomical_skull_leaf'] # pass", "node_suffix) # This node will generate the longitudinal template (the functions are in", "name='sinker') datasink.inputs.base_directory = config.workingDirectory 
session_id_list = [] ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_'", "= pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template", ") ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = os.path.join(", "with ' \\ 'the skull, but you also selected to ' \\ 'use", "if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for", "c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation method", "workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_symmetric_brain']", "with the resource pool updated strat_nodes_list_list : list a list of strat_nodes_list \"\"\"", "\"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list of lists for every strategy strat_nodes_list_list", "'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node,", "fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0:", "indi_aws import aws_utils from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function", "config, subject_id, strat_name='longitudinal_'+strat_name) 
workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1 to longitudinal template warp", "was not ' \\ 'found. Check this path and try again.' % (", "hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg", "unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold,", "run anatomical registration with ' \\ 'the skull, but you also selected to", "to be connected and added to the resource pool strat_name : str name", "resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource pool # longitudinal", "# Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file,", "is because # FNIRT requires an input with the skull still on #", "pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } #", "( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try:", "for prep_workflow) Returns ------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir =", "\"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc =", 
"flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file", "'in{}'.format(i + 1)) workflow.run() return reg_strat_list # strat_nodes_list_list # for func wf? #", "out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True)", "'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp", "{ 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list: if 'func' in", "a path to the working directory Returns ------- brain_list : list a list", "creds_path = session['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path", "warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp'])", "longitudinal to standard registration items for num_strat, strat in enumerate(reg_strat_list): for rsc_key in", "+= new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): if 'ANTS' in", "enumerate(strat_list): if 'FSL' in c.regOption and \\ strat.get('registration_method') != 'ANTS': # this is", "tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_'", "'skull-stripped.\\n\\nEither switch to using ' \\ 
'ANTS for registration or provide input '", "of the pipeline config. (Same as for prep_workflow) Returns ------- None \"\"\" workflow", "'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni,", "anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] =", "of skullstripping strategies, # a list of sessions within each strategy list #", "(strat_name, num_strat) ) # brain input node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni,", "is because # FNIRT requires an input with the skull still on if", "anat = session['anat'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' )", "anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id, anat = session['anat'], creds_path", "node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp,", "'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map =", "have all the anat_preproc set up for every session of the subject strat_init", "found. Check this path and try ' \\ 'again.' 
% (creds_path, subject_id, unique_id)", "'_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in Outputs.any: node, rsc_name = strat[rsc_key] ds =", "preprocessing items for i in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i) for", "ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'),", "tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True),", "'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled template to resource pool", "anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None preproc_wf_name", "# sinc will be default option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc')", "workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name) # Register", "strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix", "copy import time import shutil from nipype import config from nipype import logging", "out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ 
f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({", "pool # longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix, config,", "unique_id]) # Functional Ingress Workflow # add optional flag workflow, diff, blip, fmap_rp_list", "create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped',", "fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None): if type == 'str': fsl_apply_xfm =", "for the longitudinal algorithm for session in sub_list: unique_id = session['unique_id'] session_id_list.append(unique_id) try:", "a list of dict (each dict is a session) already_skullstripped = config.already_skullstripped[0] if", "or \\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here", "connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if not any(o in", "pool node, out_file = strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node,", "Template, Non-linear registration (FNIRT/ANTS) new_strat_list = [] if 1 in c.runVMHC and 1", "out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node, out_file =", ") new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat)", "# TODO check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, 
config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'),", "rsc_key) # individual minimal preprocessing items for i in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\"", "workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') #", "i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key]", "either run FSL anatomical-to-MNI registration, or... if 'FSL' in c.regOption: for num_strat, strat", "# calculating the transform with the skullstripped is # reported to be better,", "# node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file =", "use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat,", "fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') },", "= reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp,", "= strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( 
flirt_reg_anat_symm_mni,", "skull_list : list a list of func preprocessed skull \"\"\" brain_list = []", "= c.funcRegANTSinterpolation # calculating the transform with the skullstripped is # reported to", "[new_strat] return new_strat, strat_nodes_list_list def pick_map(file_list, index, file_type): if isinstance(file_list, list): if len(file_list)", "node_suffix = '_'.join([subject_id, unique_id]) # Functional Ingress Workflow # add optional flag workflow,", "input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') })", "# pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass", "'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list ''' #", "Loop over the sessions to create the input for the longitudinal algorithm for", "out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource pool node,", "'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference')", "workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' %", "[ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'),", "% (strat_name, num_strat) ) # if someone 
doesn't have anatRegFSLinterpolation in their pipe", "new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': (", "= strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file,", "config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func,", "'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template =", "(for anatomical ' \\ 'registration) will not work properly if you ' \\", "longitudinal template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node(", "that have already been ' \\ 'skull-stripped.\\n\\nEither switch to using ' \\ 'ANTS", "pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the", "subject_id, session_id ) return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters", "(fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat, strat", "% unique_id) brain_rsc.inputs.inputnode.set( 
subject = subject_id, anat = session['brain_mask'], creds_path = input_creds_path, dl_dir", "if 1 in c.regWithSkull: # get the skull-stripped anatomical from resource pool node,", "\\ 'are providing inputs that have already been ' \\ 'skull-stripped.\\n\\nEither switch to", "already_skullstripped = config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped ==", "'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file =", "reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the", "coding: utf-8 -*- import os import copy import time import shutil from nipype", "in enumerate(strat_list): if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni =", "in f: filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list, skull_list def", ": list of list first level strategy, second level session config : configuration", "= connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list,", "nipype.interfaces.io as nio from nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility as util from", "anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return reg_strat_list", "= strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = 
strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni,", "= 0 elif already_skullstripped == 3: already_skullstripped = 1 resampled_template = pe.Node(Function(input_names=['resolution', 'template',", "\"\"\" Parameters ---------- datasink_name config subject_id session_id strat_name map_node_iterfield Returns ------- \"\"\" try:", "create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole,", "(each dict is a session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped", "brain_merge_node, 'in{}'.format(i + 1)) # the in{}.format take i+1 because the Merge nodes", "creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id", "node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni,", "in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out',", "strat, anat_preproc, 
skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None", "it. template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass,", "# list of the data config dictionaries to be updated during the preprocessing", "type='str', file_type=None): if type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp", "someone doesn't have anatRegANTSinterpolation in their pipe config, # it will default to", "'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain')", "[] for dirpath, dirnames, filenames in os.walk(working_directory): for f in filenames: if 'func_get_preprocessed_median'", "filenames: if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f)", "reorganize dict # TODO update strat name strat_list_ses_list = {} strat_list_ses_list['func_default'] = []", "T1 to the standard template # TODO add session information in node name", "(ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list '''", "the Merge nodes inputs starts at 1 
rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name =", "reg_strat in enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node,", "% (creds_path, subject_id, unique_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path", "Merge, IdentityInterface import nipype.interfaces.utility as util from indi_aws import aws_utils from CPAC.utils.utils import", "' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skullstripped anatomical from resource", "pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the", "'are providing inputs that have already been ' \\ 'skull-stripped.\\n\\nEither switch to using", "}) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat, strat_nodes_list_list def pick_map(file_list,", "= \"_%s_%d\" % (node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any:", "dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in", "== 3: already_skullstripped = 1 resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution,", "subject = subject_id, anat = session['brain_mask'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type", "out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file,", "3: already_skullstripped = 1 resampled_template = 
pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True),", "credentials or ' \\ 'accessing the S3 bucket. Check and try again.\\n' \\", "cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list')", "= 1 resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution", "in session.keys() and session['brain_mask'] and \\ session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s'", "= strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient skull-on anatomical from", "switch to using ' \\ 'ANTS for registration or provide input ' \\", "the FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig", "workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method':", "ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'),", 
"( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat,", "resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node to calculate", "space rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template'])", "'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have anatRegFSLinterpolation in their", "'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list =", "name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method,", "assign the FSL FNIRT config file specified in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config =", "'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected", "= strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return reg_strat_list # strat_nodes_list_list", "workflow) strat_list.append(new_strat) if not any(o in config.skullstrip_option for o in [\"AFNI\", \"BET\"]): err", "# [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS) new_strat_list = [] if", "every session of the subject strat_init = 
Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat,", "out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node,", "subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list] template_node.inputs.set(", "session_id_list = [] ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_' + str(subject_id) workflow =", "skullstripped is # reported to be better, but it requires very high #", "= [] for num_strat, strat in enumerate(strat_list): if 'ANTS' in c.regOption and \\", "list a list of strat_nodes_list workflow: Workflow main longitudinal workflow Returns ------- new_strat", "(config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc,", "'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for", "' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skull-stripped anatomical from resource", "workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({", "'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 
'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list", "import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc )", "(ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # [SYMMETRIC] T1 -> Symmetric Template, Non-linear", "in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key", "= None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config,", "strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list +=", "calculating the transform with the skullstripped is # reported to be better, but", "node to feed the anat_preproc outputs to the longitudinal template generation brain_merge_node =", "out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file,", "if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm':", "'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list = [strat_init_new]", "'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': 
(ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm':", "concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list')", "# it will default to LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc')", "'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node,", "'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list", "the pipeline config. (Same as for prep_workflow) Returns ------- strat_list_ses_list : list of", "bool if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: FNIRT (for anatomical", "\\ 'Error: %s' % e raise Exception(err_msg) if map_node_iterfield is not None: ds", "brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f)", "this is because # FNIRT requires an input with the skull still on", ") # brain input node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') #", "# get the reorient skull-on anatomical from resource pool node, out_file = strat['anatomical_skull_leaf']", "create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp", "node_suffix = '_'.join([strat_name, subject_id]) # Merge node to feed the anat_preproc outputs to", "'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # Inserting Segmentation Preprocessing Workflow workflow,", "out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file,", "SS methods if \"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s' %", "= config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass':", "+ node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node will", "reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file = 
reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, fsl_apply_warp,", "import DataSink from CPAC.utils.interfaces.function import Function import CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg,", "(flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only", "be added to calculate it, like the multivariate template from ANTS # It", "out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference file node,", "TODO check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative,", "\\ 'method options setting does not include either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n", "flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file,", "for sub_dict in sub_list: if 'func' in sub_dict or 'rest' in sub_dict: if", "sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list = [] ses_list_strat_list", "resource pool node, out_file = strat['anatomical_brain'] # pass the anatomical to the workflow", "(strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if", "\\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise", "'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL", "[1]): 
for num_strat, strat in enumerate(strat_list): if 'FSL' in c.regOption and \\ strat.get('registration_method')", "file_type): if isinstance(file_list, list): if len(file_list) == 1: file_list = file_list[0] for file_name", "strategy, second level session config : configuration a configuration object containing the information", "name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container =", "+ node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1 to longitudinal", "in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone", "to bool if already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says: FNIRT (for", "'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node,", "'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni,", ") # if someone doesn't have anatRegFSLinterpolation in their pipe config, # sinc", "for every strategy strat_nodes_list_list = {} # list of the data config dictionaries", "strat_name) # Register T1 to the standard template # TODO add session information", "create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1 to", "out_file, ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp =", "'outputspec.warp_field'), 
'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') })", "= reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm,", "'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat)", "'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node,", "workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list of lists for every strategy strat_nodes_list_list =", "skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node will generate the", "strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard':", "brain_rsc.inputs.inputnode.set( subject = subject_id, anat = session['brain_mask'], creds_path = input_creds_path, dl_dir = config.workingDirectory,", "c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 
'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni,", "each subject already_skullstripped = c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif", "'rest' in sub_dict: if 'func' in sub_dict: func_paths_dict = sub_dict['func'] else: func_paths_dict =", "config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func,", "create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template", "= sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if creds_path and", "in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni =", "from resource pool node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass", "for num_strat, strat in enumerate(strat_list): if 'ANTS' in c.regOption and \\ strat.get('registration_method') !=", "are # providing already-skullstripped inputs. 
this is because # FNIRT requires an input", "only need to run once for each subject already_skullstripped = c.already_skullstripped[0] if already_skullstripped", "if isinstance(getattr(config, key), str): node = create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory", "'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory", "out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None", "( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from", "the anat_preproc workflow node to be connected and added to the resource pool", "the information of the pipeline config. 
(Same as for prep_workflow) Returns ------- None", "(template_center_of_mass, 'cm') }) # Here we have the same strategies for the skull", "type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node,", "return new_strat, strat_nodes_list_list def pick_map(file_list, index, file_type): if isinstance(file_list, list): if len(file_list) ==", "individual minimal preprocessing items for i in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix,", "c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm':", "reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for seg in", "# 2 anat + func works, pass anat strategy list? 
def func_preproc_longitudinal_wf(subject_id, sub_list,", "node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node,", "filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c,", "}) # Here we have the same strategies for the skull stripping as", "nipype import config from nipype import logging import nipype.pipeline.engine as pe import nipype.interfaces.afni", "num_strat, strat in enumerate(strat_list): if 'FSL' in c.regOption and \\ strat.get('registration_method') != 'ANTS':", "out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file,", "'inputspec.input_brain') # pass the reference files node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni,", "session['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path)", "information of the pipeline config. 
Returns ------- None ''' workflow_name = 'func_longitudinal_template_' +", "[] for num_strat, strat in enumerate(strat_list): # or run ANTS anatomical-to-MNI registration instead", "each strategy list # TODO rename and reorganize dict # TODO update strat", "workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp", "template_name: (resampled_template, 'resampled_template') }) # loop over the different skull stripping strategies for", "up for every session of the subject # TODO create a list of", "unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id, anat = session['anat'],", "# FNIRT requires an input with the skull still on if already_skullstripped ==", "t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard registration items for num_strat, strat in enumerate(reg_strat_list):", "strat_list_ses_list : list of list a list of strategies; within each strategy, a", "each strategy, a list of sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory =", "= session['anat'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({", "raise Exception # get the skull-stripped anatomical from resource pool node, out_file =", "template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list')", "ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = 
os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id )", "num_strat) ) # brain input node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain')", "'use already-skullstripped images as ' \\ 'your inputs. This can be changed '", "session if the same dictionary as the one given to prep_workflow config :", "template (the functions are in longitudinal_preproc) # Later other algorithms could be added", "file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource pool # longitudinal template rsc_key", "longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix, config, subject_id,", "in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median'", "credentials path for output if it exists try: # Get path to creds", "= os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow,", "'inputspec.input_brain') # pass the reference files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni,", "= resolution resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name:", "pipe config, # it will default to LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c,", "dirnames, filenames in os.walk(working_directory): for f in filenames: if 'func_get_preprocessed_median' in dirpath and", "'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 
'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni,", "( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat,", "over the different skull stripping strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix =", "skull reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file =", "= strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['anatomical_skull_leaf']", "= strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list = [strat_init_new]", "wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method +", "== 1: err_msg = '\\n\\n[!] 
CPAC says: FNIRT (for anatomical ' \\ 'registration)", "0 elif already_skullstripped == 3: already_skullstripped = 1 resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name',", "c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation method", "config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled template to", "# brain input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain", "strat_list, c) ''' return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ----------", "of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp =", "other SS methods if \"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s'", "= 'resolution_for_anat' # Node to calculate the center of mass of the standard", "'-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf':", "strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list", "pass the reference file node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 
'inputspec.reference_skull') else:", "= subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list]", "'FSL' in c.regOption and \\ strat.get('registration_method') != 'ANTS': # this is to prevent", "None ''' workflow_name = 'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory", "in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' %", "if not any(o in config.skullstrip_option for o in [\"AFNI\", \"BET\"]): err = '\\n\\n[!]", "( connect_func_ingress ) from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from", "name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain':", "template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node, config, workflow,", "node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask =", "list of dict (each dict is a session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped", "= \\ check_config_resources(c) new_strat_list = [] # either run FSL anatomical-to-MNI registration, or...", "ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, 
ants_apply_warp, 'initial') node, out_file =", "new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if", "template to resource pool for resolution, template, template_name, tag in templates_for_resampling: resampled_template =", "'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, out_file = strat['template_symmetric_skull']", "pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource,", "\"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data = False # TODO Enforce value", "flag workflow, diff, blip, fmap_rp_list = connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix)", "TODO how to include linear xfm? # node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node,", "path and try again.' 
% ( creds_path, subject_id) raise Exception(err_msg) else: input_creds_path =", "fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here we have all the func_preproc set", "the reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para =", "workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True)", "the func_preproc set up for every session of the subject # TODO create", "skull-stripped anatomical from resource pool node, out_file = strat['functional_preprocessed_median'] # pass the anatomical", "'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set(", "list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp", "Update resource pool # longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key +", "workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method':", "flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference files node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file,", "longitudinal workflow Returns ------- new_strat : Strategy the fork of strat with the", "'out_list') }, override=True) for seg in ['anatomical_gm_mask', 
'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg)", "out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified in pipeline", "is imprecise # registration with skull is preferred if 1 in c.regWithSkull: #", "if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file')", "name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key, node) strat = Strategy()", "strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion Correction workflow, strat_list = connect_distortion_correction(workflow,", "'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'),", "= '\\n\\n[!] 
C-PAC says: Your skull-stripping ' \\ 'method options setting does not", "from resource pool node, out_file = strat['motion_correct_median'] # pass the anatomical to the", "else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path = creds_path", "to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical", "registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain')", "and added to the resource pool strat_name : str name of the strategy", ": str the id of the subject sub_list : list of dict this", "already been ' \\ 'skull-stripped.\\n\\nEither switch to using ' \\ 'ANTS for registration", "a list of sessions for one subject and each session if the same", "# It would just require to change it here. template_node = subject_specific_template( workflow_name='subject_specific_anat_template_'", "rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node,", "ANTS interpolation method may be in the list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"'", "write to bucket!') except Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was", "in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' +", "return reg_strat_list # strat_nodes_list_list # for func wf? 
# TODO check: # 1", "# config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat)", "= 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc(", "be changed ' \\ 'in your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise", "skull-stripping ' \\ 'method options setting does not include either' \\ ' \\'AFNI\\'", "in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat)", "loop over the different skull stripping strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix", "new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': (", "# Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the", "method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes,", "creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) skullstrip_method = 'mask'", 
"resource, type='str', file_type=None): if type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file'])", "reference file node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file", "( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc import (", "{} # list of the data config dictionaries to be updated during the", ") from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import ( connect_func_ingress )", "can be changed ' \\ 'in your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg)", "not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation method may", "= [] skull_list = [] for dirpath, dirnames, filenames in os.walk(working_directory): for f", "values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation", "'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node, config, workflow, strat_init,", "% node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config,", "[\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL interpolation method may be in", "Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) # Func -> T1 Registration", "== 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 
'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input", "rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in Outputs.any: node,", "for index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour'", "# Register T1 to the standard template # TODO add session information in", "CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import Function import CPAC", "anatomical-to-MNI registration instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni", "and reorganize dict # TODO update strat name strat_list_ses_list = {} strat_list_ses_list['func_default'] =", "from nipype import logging import nipype.pipeline.engine as pe import nipype.interfaces.afni as afni import", "strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) # Here we have the same strategies for", "prep_workflow if 'brain_mask' in session.keys() and session['brain_mask'] and \\ session['brain_mask'].lower() != 'none': brain_rsc", "datasink.inputs.base_directory = config.workingDirectory session_id_list = [] ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_' +", "if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) #", "pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node will generate the longitudinal template", "just require to change it here. 
template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix )", "skull_list.sort() return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores", "node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard", "check: # 1 func alone works # 2 anat + func works, pass", "skull-on anatomical from resource pool node, out_file = strat['motion_correct_median'] # pass the anatomical", "parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') #", "out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_anat'] workflow.connect(node,", "node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard':", "'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected", "Parameters ---------- datasink_name config subject_id session_id strat_name map_node_iterfield Returns ------- \"\"\" try: encrypt_data", "\"%s\" was not ' \\ 'found. Check this path and try again.' 
%", "rsc_key) # T1 to longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key", "config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template, template_name, tag", "ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image'])", "the reference files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS'", "if you ' \\ 'are providing inputs that have already been ' \\", "from nipype import config from nipype import logging import nipype.pipeline.engine as pe import", "session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id,", "import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc import", "workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference file node, out_file =", "'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp,", "out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, 
fnirt_reg_anat_mni,", "the list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters", "rsc_key in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config,", "preferred if 1 in c.regWithSkull: # get the skull-stripped anatomical from resource pool", "'anat' ) skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask':", "an input with the skull still on if already_skullstripped == 1: err_msg =", "config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register tissue segmentation from longitudinal template", "template to align the images with it. template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' )", "preprocessing # creds_list = [] session_id_list = [] # Loop over the sessions", "'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if", "out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS',", "pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') #", "file_list = file_list[0] for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None", "= strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference file node, 
out_file", "configuration object containing the information of the pipeline config. Returns ------- None '''", "'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni,", "workflow): \"\"\" Parameters ---------- strat : Strategy the strategy object you want to", "= pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix)", "file_name return None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : str", "'inputspec.reference_skull') else: # get the skullstripped anatomical from resource pool node, out_file =", "wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else:", "strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ---------- subject_id : string the id", "pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution = resolution", "import Strategy, find_files, function, Outputs from CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params, get_tr", "rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node,", "fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified in pipeline #", "out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO how to include linear", 
"'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template, template_name, tag in", "node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node,", "0 in fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni", "'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skullstripped anatomical from resource pool node,", "file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters", "new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list +=", "strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory", "the skullstripped is # reported to be better, but it requires very high", "= [new_strat] return new_strat, strat_nodes_list_list def pick_map(file_list, index, file_type): if isinstance(file_list, list): if", "t1_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template')", "fnirt_reg_anat_mni, 'inputspec.linear_aff') 
node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the", "strat_list.append(new_strat) if not any(o in config.skullstrip_option for o in [\"AFNI\", \"BET\"]): err =", "node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.linear_aff') node, out_file = strat['template_dilated_symmetric_brain_mask'] workflow.connect(node,", "images with it. template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template,", "fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain')", "nipype.interfaces.utility as util from indi_aws import aws_utils from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink", "TODO add other SS methods if \"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name", "1 in c.regWithSkull: # get the skull-stripped anatomical from resource pool node, out_file", "config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default']", "import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func )", "Strategy, find_files, function, Outputs from CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params, get_tr )", "strat = Strategy() strat_list = [] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s'", "'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled 
template to resource pool for resolution, template,", "ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation", "'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node,", "'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node,", "xfm? # node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file", "inputs that have already been ' \\ 'skull-stripped.\\n\\nEither switch to using ' \\", "= config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register tissue segmentation from longitudinal", "func_preproc set up for every session of the subject # TODO create a", "workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For", "prep_workflow) Returns ------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory", "a list of list ses_list_strat_list # a list of skullstripping strategies, # a", "new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0] if", "list of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp", "workflow.connect(node, 
rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name,", "os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in f: filepath =", "str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for s3 write access s3_write_access", "= create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) # individual", "\"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation #", "'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return", "rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in", "if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear', 'BSpline',", "# for func wf? # TODO check: # 1 func alone works #", "# brain input node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain", "= 'Credentials path: \"%s\" for subject \"%s\" was not ' \\ 'found. Check", "[\"AFNI\", \"BET\"]): err = '\\n\\n[!] 
C-PAC says: Your skull-stripping ' \\ 'method options", "(config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ]", "# only need to run once for each subject already_skullstripped = c.already_skullstripped[0] if", "configuration a configuration object containing the information of the pipeline config. (Same as", "'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init,", "standard registration items for num_strat, strat in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix", "pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skullstripped anatomical", "\\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores,", "ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i", "as pe import nipype.interfaces.afni as afni import nipype.interfaces.fsl as fsl import nipype.interfaces.io as", "template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) # loop over the", "the pipeline config. 
(Same as for prep_workflow) Returns ------- None \"\"\" workflow =", "logging import nipype.pipeline.engine as pe import nipype.interfaces.afni as afni import nipype.interfaces.fsl as fsl", "config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template, template_name, tag in templates_for_resampling: resampled_template =", "template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag", "os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for subject \"%s\"", "(brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask')", "-> T1 Registration (Initial Linear Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c)", "workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc", "# TODO how to include linear xfm? 
# node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] #", "preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor,", "= \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration", "workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified in", "out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file,", "resampled_template.inputs.resolution = resolution resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({", "create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal", "node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node,", "c, workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template',", "( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list =", "'registration_method': 'FSL', 
'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list", "'outputspec.normalized_output_brain') }) strat_list += new_strat_list ''' # Func -> T1 Registration (Initial Linear", "avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node,", "shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int,", "name='resampled_' + template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag", "templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func,", "+ \"_skullstrip\", 
strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name", "may be in the list of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) #", "str(config.skullstrip_option)) raise Exception(err) # Here we have all the anat_preproc set up for", "ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file,", "num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list = [] # either run FSL anatomical-to-MNI", "[] for num_strat, strat in enumerate(strat_list): if 'ANTS' in c.regOption and \\ strat.get('registration_method')", ") # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with", "in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni,", "within each strategy list # TODO rename and reorganize dict # TODO update", "node, rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name)", "'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list", "= [strat_init_new] # only need to run once for each subject already_skullstripped =", "skull input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference", "f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return 
brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name):", "num_strat) ) # brain input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain')", "space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file')", "already_skullstripped == 3: already_skullstripped = 1 resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'],", "\"\"\" brain_list = [] skull_list = [] for dirpath, dirnames, filenames in os.walk(working_directory):", "be better, but it requires very high # quality skullstripping. If skullstripping is", "reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the", "the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from", "creds_list = [] session_id_list = [] # Loop over the sessions to create", "= tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) # loop over the different skull", "ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if", "Exception # get the skull-stripped anatomical from resource pool node, out_file = strat['anatomical_brain']", "threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list,", "\\ 
session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject =", "\\ 'to run anatomical registration with ' \\ 'the skull, but you also", "pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'],", "session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped", "files node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption:", "with skull is preferred if 1 in c.regWithSkull: if already_skullstripped == 1: err_msg", "in c.regOption: # strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni,", "'inputspec.reference_brain') # get the reorient skull-on anatomical from resource pool node, out_file =", "vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow)", "the id of the subject strat_list : list of list first level strategy,", "c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm':", "for s3 write access s3_write_access = \\ 
aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise", "template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)),", "a list of func preprocessed brain skull_list : list a list of func", "num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone doesn't have anatRegANTSinterpolation in their pipe", "from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from CPAC.func_preproc.func_ingress import ( connect_func_ingress ) from", "workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm,", "# Here we have the same strategies for the skull stripping as in", "1 in getattr(c, 'runFunctional', [1]): for num_strat, strat in enumerate(strat_list): if 'FSL' in", "node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain']", "if they are # providing already-skullstripped inputs. this is because # FNIRT requires", "template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name,", "'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only except", "= strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni,", "workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO how to include linear xfm? 
# node,", "session_id strat_name map_node_iterfield Returns ------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data =", "(config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat,", "strat = Strategy() strat_list = [strat] node_suffix = '_'.join([subject_id, unique_id]) # Functional Ingress", "func preprocessed brain skull_list : list a list of func preprocessed skull \"\"\"", "= pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name,", "this is to prevent the user from running FNIRT if they are #", "and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat),", "the skull still on # TODO ASH normalize w schema validation to bool", "== 'list': for index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp", "function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template = template resampled_template.inputs.template_name =", "new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 
'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni,", "fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg(", "fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def", "template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list',", "# or run ANTS anatomical-to-MNI registration instead if 'ANTS' in c.regOption and \\", "# pass the reference file node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull')", "= template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) # loop over", "new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0]", "for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d'", "workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain')", "'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image') }) # Register tissue segmentation", 
"(node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node, rsc_name =", "Workflow # add optional flag workflow, diff, blip, fmap_rp_list = connect_func_ingress(workflow, strat_list, config,", "c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni,", "the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para =", "input_creds_path = None except KeyError: input_creds_path = None template_keys = [ (\"anat\", \"PRIORS_CSF\"),", "sub_dict['func'] else: func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path']", "'skull_list', template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node, config, workflow, strat_init, 'default' )", "pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file')", "import nipype.pipeline.engine as pe import nipype.interfaces.afni as afni import nipype.interfaces.fsl as fsl import", "def seg_apply_warp(strat_name, resource, type='str', file_type=None): if type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}',", "template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, 
cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node,", "DataSink from CPAC.utils.interfaces.function import Function import CPAC from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg,", "= reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp,", "LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear',", "\\ 'use already-skullstripped images as ' \\ 'your inputs. This can be changed", "# Inserting Segmentation Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name) return", "resampled_template.inputs.tag = 'resolution_for_anat' # Node to calculate the center of mass of the", "None template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\",", "out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node,", "get the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file,", "from running FNIRT if they are # providing already-skullstripped inputs. 
this is because", "= create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file =", "convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list", "strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if", "'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm,", "in c.regWithSkull: # get the skull-stripped anatomical from resource pool node, out_file =", "= config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp,", "'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index ==", "enumerate(strat_list): # or run ANTS anatomical-to-MNI registration instead if 'ANTS' in c.regOption and", "the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient", "fork of strat with the resource pool updated strat_nodes_list_list : list a list", "# TODO Enforce value with schema validation # Extract credentials path for output", 
"strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list = [strat_init_new] new_strat_list =", "to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node,", "preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc =", "template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory", "# pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain')", "the skull-stripped anatomical from resource pool node, out_file = strat['functional_preprocessed_median'] # pass the", "C-PAC says: Your skull-stripping ' \\ 'method options setting does not include either'", "# For each participant we have a list of dict (each dict is", "-*- import os import copy import time import shutil from nipype import config", "of sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list = []", "in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni,", "rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, 
brain_merge_node, 'in{}'.format(i +", "node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference file", "resource pool node, out_file = strat['anatomical_skull_leaf'] # pass the anatomical to the workflow", "+ 1)) workflow.run() return reg_strat_list # strat_nodes_list_list # for func wf? # TODO", "for f in filenames: if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in f: filepath", "algorithms could be added to calculate it, like the multivariate template from ANTS", "[SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS) new_strat_list = [] if 1", "node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference file", "strat_name, str(num_strat)]) if rsc_key in Outputs.any: node, rsc_name = strat[rsc_key] ds = create_datasink(rsc_key", "= strat_list['func_default'] strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc,", "out_file = strat['motion_correct_median'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni,", "workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard( template_node,", "node to be connected and added to the resource pool strat_name : str", "rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template',", "'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 
'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'),", "config.skullstrip_option for o in [\"AFNI\", \"BET\"]): err = '\\n\\n[!] C-PAC says: Your skull-stripping", "registration with ' \\ 'the skull, but you also selected to ' \\", "'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg =", "node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference file", "(FNIRT/ANTS) new_strat_list = [] if 1 in c.runVMHC and 1 in getattr(c, 'runFunctional',", "node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id, anat = session['anat'], creds_path = input_creds_path, dl_dir =", "strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat',", "= {} workflow_name = 'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory", "Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni,", "strat_nodes_list_list : list a list of strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node, out_key", "requires very high # quality skullstripping. 
If skullstripping is imprecise # registration with", "[] ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir", "longitudinal_preproc) # Later other algorithms could be added to calculate it, like the", "enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key in", "imprecise # registration with skull is preferred if 1 in c.regWithSkull: if already_skullstripped", "strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['motion_correct_median'] workflow.connect(node,", ") from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import", ") new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: #", "list of list ses_list_strat_list # a list of skullstripping strategies, # a list", "resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'],", "to the standard template # TODO add session information in node name for", "unique_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None template_keys", "already-skullstripped images as ' \\ 'your inputs. 
This can be changed ' \\", "% (node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys(): if rsc_key in Outputs.any: node, rsc_name", "strat_name) return strat_list def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ----------", "if 'brain_mask' in session.keys() and session['brain_mask'] and \\ session['brain_mask'].lower() != 'none': brain_rsc =", "= 'anat_preproc_afni_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac,", "a list of func preprocessed skull \"\"\" brain_list = [] skull_list = []", "workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource pool", "Register T1 to the standard template # TODO add session information in node", "+ node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to", "template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) #", "'resolution_for_func_preproc'), ] for resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template',", "(ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 
'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni,", "id of the subject sub_list : list of dict this is a list", "out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_symmetric_skull'] workflow.connect(node,", "# Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the transform with the", "'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list),", "'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm':", "strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat, strat_nodes_list_list def pick_map(file_list, index, file_type):", "# add optional flag workflow, diff, blip, fmap_rp_list = connect_func_ingress(workflow, strat_list, config, sub_dict,", "node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node, out_file", "reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration", "in sub_dict: func_paths_dict = sub_dict['func'] else: func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id)", "need to run once for each subject already_skullstripped = 
c.already_skullstripped[0] if already_skullstripped ==", "= strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name)", "strat_nodes_list_list, workflow) strat_list.append(new_strat) if not any(o in config.skullstrip_option for o in [\"AFNI\", \"BET\"]):", "strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def", "enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) )", "# Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the", "shutil from nipype import config from nipype import logging import nipype.pipeline.engine as pe", "file_type=None): if type == 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp =", "subject strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat,", "CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from CPAC.utils import Strategy, find_files, function, Outputs from", "flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 
'registration_method':", "Strategy the strategy object you want to fork anat_preproc : Workflow the anat_preproc", "strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain')", "func wf? # TODO check: # 1 func alone works # 2 anat", "workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ---------- subject_id : string the", ") from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node", "' \\ 'skull-stripped.\\n\\nEither switch to using ' \\ 'ANTS for registration or provide", "= None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm':", "= connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here", "of func preprocessed brain skull_list : list a list of func preprocessed skull", "creds file creds_path = '' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path)", "False # TODO Enforce value with schema validation # Extract credentials path for", "func preprocessed skull \"\"\" brain_list = [] skull_list = [] for dirpath, dirnames,", "for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d'", 
"workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default'] strat_init", "import nipype.interfaces.utility as util from indi_aws import aws_utils from CPAC.utils.utils import concat_list from", "thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list =", "else: err_msg = 'Credentials path: \"%s\" for subject \"%s\" session \"%s\" ' \\", "stripping strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) # Merge", "\"\"\" Parameters ---------- subject_id : str the id of the subject sub_list :", "for num_strat, strat in enumerate(strat_list): if 'FSL' in c.regOption and \\ strat.get('registration_method') !=", "CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import Function import CPAC from CPAC.registration import (", "padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list =", "+ node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof,", "-> Symmetric Template, Non-linear registration (FNIRT/ANTS) new_strat_list = [] if 1 in c.runVMHC", "strat_list_ses_list = {} strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) 
workflow.run()", "connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None):", "may be in the list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) #", "a list of skullstripping strategies, # a list of sessions within each strategy", "+ subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list',", "out_file = strat['functional_preprocessed_median'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni,", "input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node,", "= Strategy() strat_list = [strat] node_suffix = '_'.join([subject_id, unique_id]) # Functional Ingress Workflow", "input_creds_path = os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for subject \"%s\" was", "'func' in sub_dict or 'rest' in sub_dict: if 'func' in sub_dict: func_paths_dict =", "# pass the reference files node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain')", "(flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only", "reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial')", "session.keys() and 
session['brain_mask'] and \\ session['brain_mask'].lower() != 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' %", "dict # TODO update strat name strat_list_ses_list = {} strat_list_ses_list['func_default'] = [] for", "as nio from nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility as util from indi_aws", "anatomical registration with ' \\ 'the skull, but you also selected to '", "os import copy import time import shutil from nipype import config from nipype", "skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] # pass the anatomical", "strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s'", "3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list = []", "CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from CPAC.utils", "for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d'", "num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' %", "strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference files node, out_file =", "as afni import nipype.interfaces.fsl as fsl import nipype.interfaces.io as nio from nipype.interfaces.utility import", "workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( 
anat_preproc,", "resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource pool # longitudinal template rsc_key = 'anatomical_longitudinal_template_'", "ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file =", "datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list = [] ses_list_strat_list = {}", "to feed the anat_preproc outputs to the longitudinal template generation brain_merge_node = pe.Node(", ") setattr(config, key, node) strat = Strategy() strat_list = [] node_suffix = '_'.join([subject_id,", "ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'),", "fsl_apply_warp, 'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp =", "# Later other algorithms could be added to calculate it, like the multivariate", "to prevent the user from running FNIRT if they are # providing already-skullstripped", "not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']:", "skullstripping. If skullstripping is imprecise # registration with skull is preferred if 1", "this path and try again.' 
% ( creds_path, subject_id) raise Exception(err_msg) else: input_creds_path", "strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO add other SS methods if \"AFNI\" in", "the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass", "on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource", "'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource]", "= '\\n\\n[!] CPAC says: FNIRT (for anatomical ' \\ 'registration) will not work", "Symmetric Template, Non-linear registration (FNIRT/ANTS) new_strat_list = [] if 1 in c.runVMHC and", "T1 Registration (BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func ->", "strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list def create_datasink(datasink_name, config, subject_id, session_id='',", "align the images with it. 
template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file =", "to the working directory Returns ------- brain_list : list a list of func", "= reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file,", "connect_func_preproc, create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import", "# strat_nodes_list = strat_list['func_default'] strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc',", "diff_complete) # Func -> T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c) '''", "''' Parameters ---------- subject_id : string the id of the subject strat_list :", "= [] for num_strat, strat in enumerate(strat_list): # or run ANTS anatomical-to-MNI registration", "in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat =", "registration or provide input ' \\ 'images that have not been already '", "Func -> T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow,", "strat_list = [strat_init_new] # only need to run once for each subject already_skullstripped", "'inputspec.reference_brain') # if 'ANTS' in c.regOption: # strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name)", "(strat_name, num_strat) ) # brain input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni,", "fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 
'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file", "for o in [\"AFNI\", \"BET\"]): err = '\\n\\n[!] C-PAC says: Your skull-stripping '", "(config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative,", "out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node, out_file =", "mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, )", "out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the skullstripped anatomical from resource pool node,", "resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template')", "Returns ------- strat_list_ses_list : list of list a list of strategies; within each", "TODO check: # 1 func alone works # 2 anat + func works,", "def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new", "logger.info(err_msg) raise Exception # get the skullstripped anatomical from resource pool node, out_file", "num_strat, strat in 
enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' %", "unique_id = session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path'] if creds_path and 'none' not", "type == 'list': for index in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file'])", "out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference')", "list of sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list =", "ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the skullstripped anatomical from resource pool node, out_file", "# Func -> T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c) ''' return", "\"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file =", "# TODO ASH normalize w schema validation to bool if already_skullstripped == 1:", "# Here we have all the func_preproc set up for every session of", "override=True) strat_list += new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): #", "% (strat_name, num_strat) ) node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node,", "same dictionary as the one given to prep_workflow config : configuration a configuration", "strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient skull-on 
anatomical from resource", "new_strat_list = [] for num_strat, strat in enumerate(strat_list): # or run ANTS anatomical-to-MNI", "This can be changed ' \\ 'in your pipeline configuration ' \\ 'editor.\\n\\n'", "' \\ 'was not found. Check this path and try ' \\ 'again.'", "generate the longitudinal template (the functions are in longitudinal_preproc) # Later other algorithms", "for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp", "in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if rsc_key", "list of func preprocessed skull \"\"\" brain_list = [] skull_list = [] for", "workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp',", "Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node,", "input_creds_path, node_suffix) # Functional Initial Prep Workflow workflow, strat_list = connect_func_init(workflow, strat_list, config,", "out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in c.regOption: #", "= create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc,", "node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node, out_file", "reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, 
fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node,", "' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err)", "= reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp,", "= \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone", "resource pool strat_name : str name of the strategy strat_nodes_list_list : list a", "config, node_suffix) # Distortion Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list, config, diff, blip,", "TODO update strat name strat_list_ses_list = {} strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list", "enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't", "'skull_template') }) strat_list = [strat_init_new] new_strat_list = [] if 'FSL' in c.regOption: for", "'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative',", "\\ check_config_resources(c) new_strat_list = [] # either run FSL anatomical-to-MNI registration, or... 
if", "node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node,", "node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat", "if someone doesn't have anatRegFSLinterpolation in their pipe config, # sinc will be", "'out_file', concat_seg_map, 'in_list2') node, out_file = reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map,", "strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference file node, out_file =", "!= 'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id, anat", "\"BET\" in config.skullstrip_option: skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc =", "to create the input for the longitudinal algorithm for session in sub_list: unique_id", "access s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise Exception('Not able to", "raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone", "file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask", "node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = 
c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask =", "DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container", "(config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled template", "= strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') })", "strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) # Merge node", "# config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat)", "from CPAC.func_preproc.func_ingress import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc import ( connect_func_init, connect_func_preproc, create_func_preproc,", "fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True) elif type", "= \\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template')", "if not hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear', 'BSpline',", 
"(resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory =", "# a list of skullstripping strategies, # a list of sessions within each", "ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient skull-on anatomical from resource pool node, out_file", "the anat_preproc set up for every session of the subject strat_init = Strategy()", "input node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node,", "new_strat_list = [] for num_strat, strat in enumerate(strat_list): if 'ANTS' in c.regOption and", "= 'nearestneighbour' node, out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm,", "{ 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default'] strat_init = Strategy()", "config, node_suffix) # Functional Image Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list, config,", "'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file", "# if 'ANTS' in c.regOption: # strat = strat.fork() # new_strat_list.append(strat) strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({", "' \\ 'registration) will not work properly if you ' \\ 'are providing", "'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 
'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list", "strat.fork() tmp_node, out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key = new_strat['template_cmass']", "!= 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull )", "workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file,", "workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file,", "new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list +=", "anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if not any(o in config.skullstrip_option for", "in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' %", "input_creds_path = os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for subject \"%s\" session", "Initial Prep Workflow workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix) # Functional Image", "'in{}'.format(i + 1)) # the in{}.format take i+1 because the Merge nodes inputs", "schema validation # Extract credentials path for output if it exists try: #", "(\"anat\", 
\"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type,", "out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node, out_file =", "create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import (", "'ANTS': # this is to prevent the user from running FNIRT if they", "to the resource pool strat_name : str name of the strategy strat_nodes_list_list :", "node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node, out_file = strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node,", "brain reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input", "except KeyError: input_creds_path = None template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\",", "brain reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input", "'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') })", "if config.outputDirectory.lower().startswith('s3://'): # Test for s3 write access s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory)", "c, strat_name) return strat_list def 
create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters", "'field_file') reg_strat.update_resource_pool({ 'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image',", "Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped", ") workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config,", "}) strat_list = [strat_init_new] new_strat_list = [] if 'FSL' in c.regOption: for num_strat,", "out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node,", "logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp =", "func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if creds_path", "ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file =", "if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): # this is to", "import run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc", "out_file, fnirt_reg_anat_mni, 'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, 
fnirt_reg_anat_mni,", "Returns ------- None ''' workflow_name = 'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir", "create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) # individual minimal", "Check this path and try ' \\ 'again.' % (creds_path, subject_id, unique_id) raise", "skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method +", "[ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"),", "node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the skullstripped", "change it here. template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1]", "linear xfm? # node, out_file = reg_strat['anatomical_to_mni_linear_xfm'] # workflow.connect(node, out_file, fsl_apply_warp, 'premat') node,", "config. 
Returns ------- None ''' workflow_name = 'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name)", "}) strat_list += new_strat_list # [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS)", "strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name,", "workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm':", "= c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped == 3:", "'resolution_for_func_preproc') ] # update resampled template to resource pool for resolution, template, template_name,", "create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import", "again.' % ( creds_path, subject_id) raise Exception(err_msg) else: input_creds_path = None except KeyError:", "setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The", "# registration with skull is preferred if 1 in c.regWithSkull: # get the", "session \"%s\" ' \\ 'was not found. 
Check this path and try '", "ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir =", "Segmentation Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list def", "pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id)) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory)", "node_suffix) # Distortion Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list,", "out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient skull-on anatomical", "not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else: ds = pe.Node(", "input_creds_path = None except KeyError: input_creds_path = None strat = Strategy() strat_list =", "max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method", "starts at 1 rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node,", "of strategies; within each strategy, a list of sessions \"\"\" datasink = pe.Node(nio.DataSink(),", "= None strat = Strategy() strat_list = [strat] node_suffix = '_'.join([subject_id, unique_id]) #", "out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 
'inputspec.moving_brain') #", "'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni,", "'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have anatRegFSLinterpolation in their", "% node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id, anat = session['anat'], creds_path = input_creds_path, dl_dir", "strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds,", "'func' in sub_dict: func_paths_dict = sub_dict['func'] else: func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id']", "import shutil from nipype import config from nipype import logging import nipype.pipeline.engine as", "import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import ( subject_specific_template ) from CPAC.utils import", "reorient skull-on anatomical from resource pool node, out_file = strat['anatomical_skull_leaf'] # pass the", "# T1 in longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key +", "encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id ) return ds", "try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0] if 'FSL' in c.regOption", "works # 2 anat + func works, pass anat strategy list? def func_preproc_longitudinal_wf(subject_id,", "inputs. 
this is because # FNIRT requires an input with the skull still", "out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS',", "'none': brain_rsc = create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id, anat =", "anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow) strat_list.append(new_strat) else: # TODO add other SS methods if", "== 'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file", "= { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list: if 'func'", "pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para", "strat in enumerate(strat_list): # this is to prevent the user from running FNIRT", "the S3 bucket. 
Check and try again.\\n' \\ 'Error: %s' % e raise", "workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_symmetric_skull']", "workflow, strat_list = connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list", "'str': fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' node, out_file =", "create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import", "information of the pipeline config. (Same as for prep_workflow) Returns ------- None \"\"\"", "participant we have a list of dict (each dict is a session) already_skullstripped", "changed ' \\ 'in your pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception", "session_id ) return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ----------", "wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes, robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces,", "\\ strat.get('registration_method') != 'ANTS': # this is to prevent the user from running", "workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource pool", "config file specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 
1 in", "from CPAC.registration import ( create_fsl_flirt_linear_reg, create_fsl_fnirt_nonlinear_reg, create_register_func_to_anat, create_bbregister_func_to_anat, create_wf_calculate_ants_warp, connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard", "'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file =", "= \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c,", "strat_name), subject_id, session_id ) return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\"", "pipeline configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skull-stripped anatomical", "ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) #", "strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({", "Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow, strat_list def func_longitudinal_template_wf(subject_id,", "% node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, 
avoid_vent=config.skullstrip_avoid_vent,", "if \"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc", "configuration ' \\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skullstripped anatomical from", "concat_list from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import Function import CPAC from CPAC.registration", "template_keys: if isinstance(getattr(config, key), str): node = create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path,", "\"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard registration items for num_strat, strat in", "have the same strategies for the skull stripping as in prep_workflow if 'brain_mask'", "workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource", "\\ 'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skullstripped anatomical from resource pool", "exists try: # Get path to creds file creds_path = '' if config.awsOutputBucketCredentials:", "reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node,", "standard template to align the images with it. 
template_center_of_mass = pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass'", "= c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file =", "been ' \\ 'skull-stripped.\\n\\nEither switch to using ' \\ 'ANTS for registration or", "c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped", "config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an error processing credentials or ' \\ 'accessing", "the fork of strat with the resource pool updated strat_nodes_list_list : list a", "niter=config.skullstrip_n_iterations, pushout=config.skullstrip_pushout, touchup=config.skullstrip_touchup, fill_hole=config.skullstrip_fill_hole, avoid_eyes=config.skullstrip_avoid_eyes, use_edge=config.skullstrip_use_edge, exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm,", "T1 to longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix,", "except KeyError: input_creds_path = None strat = Strategy() strat_list = [strat] node_suffix =", "'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template,", "in Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id,", "list 
seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve') # Update resource pool", "brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new =", "ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass", "if 1 in c.regWithSkull: if already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says:", "their pipe config, # it will default to LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'):", "'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only =", "(anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) # Here we have the", "surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\",", "f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath,", "workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node,", "node = create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type, creds_path=input_creds_path, 
dl_dir=config.workingDirectory ) setattr(config, key, node)", "workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func -> T1/EPI Template workflow,", "= tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 'skull_list'], function=merge_func_preproc,", "of the data config dictionaries to be updated during the preprocessing # creds_list", "sinc will be default option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if", "'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi,", "if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) #", "the sessions to create the input for the longitudinal algorithm for session in", "exp_frac=config.skullstrip_exp_frac, smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list", "ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the transform with the skullstripped is # reported", "'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), })", "= pe.Node(Function(input_names=['working_directory'], output_names=['brain_list', 
'skull_list'], function=merge_func_preproc, as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template(", "== 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input", "processing credentials or ' \\ 'accessing the S3 bucket. Check and try again.\\n'", "'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each participant we have a list", "config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func,", "not hasattr(c, 'funcRegANTSinterpolation'): setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']:", "register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new =", "in longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list = create_datasink(rsc_key + node_suffix, config,", "= subject_id, anat = session['anat'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type =", "(ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni,", "= 'afni' 
preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name)", "'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): # this is to prevent", "sub_dict or 'rest' in sub_dict: if 'func' in sub_dict: func_paths_dict = sub_dict['func'] else:", "workflow, strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({", "not work properly if you ' \\ 'are providing inputs that have already", "resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat", "fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain')", "'editor.\\n\\n' logger.info(err_msg) raise Exception # get the skull-stripped anatomical from resource pool node,", "(config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat,", "'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list #", "node, 
out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL FNIRT", "workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list", "selected FSL interpolation method may be in the list of values: \"trilinear\", \"sinc\",", "creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for", ": configuration a configuration object containing the information of the pipeline config. Returns", "out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node,", "doesn't have anatRegANTSinterpolation in their pipe config, # it will default to LanczosWindowedSinc", "\"spline\"]: err_msg = 'The selected FSL interpolation method may be in the list", "= '\\n\\n[!] 
CPAC says: You selected ' \\ 'to run anatomical registration with", "strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name,", "output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map,", "type='list', file_type='pve') # Update resource pool # longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template", "= config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list =", "'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni,", "the same dictionary as the one given to prep_workflow config : configuration a", "the anat_preproc outputs to the longitudinal template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\"", "\"\"\" Parameters ---------- subject_id : string the id of the subject sub_list :", "1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard':", "(ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni,", "(ants_reg_anat_symm_mni, 
'outputspec.normalized_output_brain') }) strat_list += new_strat_list # Inserting Segmentation Preprocessing Workflow workflow, strat_list", "= strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference files node, out_file", "[ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'),", "resource:(concat_seg_map, 'out_list') }, override=True) for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name,", "'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution,", "Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node,", "'resampled_template', template_center_of_mass, 'in_file') # list of lists for every strategy strat_nodes_list_list = {}", "Test for s3 write access s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access:", "flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only = c.fsl_linear_reg_only", "normalize w schema validation to bool if already_skullstripped == 1: err_msg = '\\n\\n[!]", "= 'anatomical_skull_leaf' 
anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run()", "reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp, 'in_file') node,", "== 2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped = 1 sub_mem_gb,", "FNIRT config file specified in pipeline # config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1", "instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\", "(config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution", "out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, \"out_file\", fsl_apply_xfm, 'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm,", "= strat['template_skull_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_skull') node, out_file = strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni,", "strat_list, config, node_suffix) # Distortion Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list, config, diff,", "Strategy the fork of strat with the resource pool updated strat_nodes_list_list : list", "node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set( shrink_factor=config.skullstrip_shrink_factor, 
var_shrink_fac=config.skullstrip_var_shrink_fac, shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim, avoid_vent=config.skullstrip_avoid_vent, niter=config.skullstrip_n_iterations,", "anatRegANTSinterpolation in their pipe config, # it will default to LanczosWindowedSinc if not", "to native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\",", "(flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try:", "rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, skull_merge_node, 'in{}'.format(i + 1)) workflow.run() return reg_strat_list #", "FNIRT requires an input with the skull still on # TODO ASH normalize", "strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix", "will generate the longitudinal template (the functions are in longitudinal_preproc) # Later other", "'.nii.gz' in f: filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list, skull_list", "be default option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not", "'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) # Here we have all the anat_preproc set", "(fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = []", "name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat 
resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat'", "concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list2') node, out_file", "reorient skull-on anatomical from resource pool node, out_file = strat['motion_correct_median'] # pass the", "still on if already_skullstripped == 1: err_msg = '\\n\\n[!] CPAC says: FNIRT (for", "'\\n\\n[!] CPAC says: You selected ' \\ 'to run anatomical registration with '", "strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull')", "# if someone doesn't have anatRegFSLinterpolation in their pipe config, # sinc will", "pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork()", "(ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list ''' # Func -> T1 Registration (Initial", "'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution (config.resolution_for_func_preproc, config.template_epi,", "inputs starts at 1 rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name,", "strat in enumerate(strat_list): if 'FSL' in c.regOption and \\ strat.get('registration_method') != 'ANTS': #", "workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list] 
template_node.inputs.set( avg_method=config.longitudinal_template_average_method,", "file node, out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the", "'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id ) return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name,", "'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob')", "skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain')", "'anatomical_to_longitudinal_template_warp_' ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template_warp']) workflow.connect(template_node, \"warp_list\", ds_warp_list,", "anat strategy list? def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : string", "# TODO update strat name strat_list_ses_list = {} strat_list_ses_list['func_default'] = [] for sub_ses_id,", "'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': (", "anat_rsc.inputs.inputnode.set( subject = subject_id, anat = session['anat'], creds_path = input_creds_path, dl_dir = config.workingDirectory,", "as ' \\ 'your inputs. 
This can be changed ' \\ 'in your", "to using ' \\ 'ANTS for registration or provide input ' \\ 'images", "\\ 'images that have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception", "the list of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters", "aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise Exception('Not able to write to bucket!') except", "iterfield=map_node_iterfield ) else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path", "already_skullstripped = 1 resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat')", "out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({", "skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat')", "CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node,", "not ' \\ 'found. Check this path and try again.' 
% ( creds_path,", "% ( creds_path, subject_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path", "in f: filepath = os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and '.nii.gz'", "strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) # loop over the different skull stripping strategies", "connect_func_to_anat_init_reg, connect_func_to_anat_bbreg, connect_func_to_template_reg, output_func_to_standard ) from CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import (", "(Initial Linear Reg) workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) # Func ->", "require to change it here. template_node = subject_specific_template( workflow_name='subject_specific_anat_template_' + node_suffix ) unique_id_list", "c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni,", "'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list = [strat_init_new] # only need to run once", "strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm':", "their pipe config, # sinc will be default option if not hasattr(c, 'anatRegFSLinterpolation'):", "'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except", "will default to LanczosWindowedSinc if not 
hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation", "iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None):", "new_strat_list = [] if 1 in c.runVMHC and 1 in getattr(c, 'runFunctional', [1]):", "anatomical from resource pool node, out_file = strat['anatomical_brain'] # pass the anatomical to", "connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat : Strategy the strategy", "dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort() return", "out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_skull') else: # get the skullstripped anatomical", "the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni,", "anatomical from resource pool node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') #", "up for every session of the subject strat_init = Strategy() templates_for_resampling = [", "'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d'", "} # For each participant we have a list of dict (each dict", "\\ 'was not found. Check this path and try ' \\ 'again.' 
%", "anat_preproc outputs to the longitudinal template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" +", "of list ses_list_strat_list # a list of skullstripping strategies, # a list of", "= config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s' %", "(longitudinal_template_node, 'skull_template') }) strat_list = [strat_init_new] new_strat_list = [] if 'FSL' in c.regOption:", "in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat)", "Parameters ---------- subject_id : string the id of the subject strat_list : list", "output if it exists try: # Get path to creds file creds_path =", "node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node,", "anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : str the id of the", "within each strategy, a list of sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory", "methods if \"AFNI\" in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix", "strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list =", "\"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for", "------- brain_list : list a list of func preprocessed 
brain skull_list : list", "'the skull, but you also selected to ' \\ 'use already-skullstripped images as", "robust=config.bet_robust, skull=config.bet_skull, surfaces=config.bet_surfaces, threshold=config.bet_threshold, vertical_gradient=config.bet_vertical_gradient, ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method", "def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config subject_id", "in enumerate(strat_list): # or run ANTS anatomical-to-MNI registration instead if 'ANTS' in c.regOption", "'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain')", "feed the anat_preproc outputs to the longitudinal template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)),", "map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config subject_id session_id strat_name map_node_iterfield Returns ------- \"\"\"", "CPAC says: FNIRT (for anatomical ' \\ 'registration) will not work properly if", "workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_skull') else: node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain')", "Merge nodes inputs starts at 1 rsc_key = 'anatomical_skull_leaf' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key]", "# Loop over the sessions to create the input for the longitudinal algorithm", "# Func -> T1 Registration (BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete)", "''' return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ---------- subject_id :", "connect_anat_segmentation ) from CPAC.func_preproc.func_ingress 
import ( connect_func_ingress ) from CPAC.func_preproc.func_preproc import ( connect_func_init,", "tissue segmentation from longitudinal template space to native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}',", "with schema validation # Extract credentials path for output if it exists try:", "node, out_file = reg_strat[resource] workflow.connect(node, out_file, fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm,", "pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for", "(longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list = [strat_init_new] new_strat_list = [] if", "registration instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_func_mni =", "anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius,", "out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient skull-on anatomical from resource pool node,", "once for each subject already_skullstripped = c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped =", "' \\ 'are providing inputs that have already been ' \\ 'skull-stripped.\\n\\nEither switch", "workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified in", "out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference files node, out_file = 
strat['template_brain_for_anat'] workflow.connect(node,", "monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list,", "Outputs.any: node, rsc_name = strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, session_id_list[i],", "space to native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True workflow.connect(template_node,", "resampled template to resource pool for resolution, template, template_name, tag in templates_for_resampling: resampled_template", "run ANTS anatomical-to-MNI registration instead if 'ANTS' in c.regOption and \\ strat.get('registration_method') !=", "if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ----------", "interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list of", "= 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file =", "= creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id,", "resolution resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template,", ") if not hasattr(c, 'funcRegANTSinterpolation'): 
setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear',", "of the pipeline config. Returns ------- None ''' workflow_name = 'func_longitudinal_template_' + str(subject_id)", "(ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'),", "'inputspec.reference_skull') else: node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the", "(strat_name, num_strat) ) flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni,", "'funcRegANTSinterpolation', 'LanczosWindowedSinc') if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected", "of the standard template to align the images with it. 
template_center_of_mass = pe.Node(", "'anatomical_to_standard': (fsl_apply_warp, 'out_file') }) elif reg_strat.get('registration_method') == 'ANTS': ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial',", "template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node", "to calculate the center of mass of the standard template to align the", "strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\"", "num_strat, strat in enumerate(strat_list): # this is to prevent the user from running", "create_anat_datasource( 'brain_gather_%s' % unique_id) brain_rsc.inputs.inputnode.set( subject = subject_id, anat = session['brain_mask'], creds_path =", "'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps',", "of strat_nodes_list workflow: Workflow main longitudinal workflow Returns ------- new_strat : Strategy the", "as in prep_workflow if 'brain_mask' in session.keys() and session['brain_mask'] and \\ session['brain_mask'].lower() !=", "Outputs.any: node, rsc_name = strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name)", "strat['anatomical_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask')", "values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = 
c.funcRegFSLinterpolation", "node, out_file = strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node, out_file,", "\\'AFNI\\' or \\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option)) raise Exception(err) #", "' \\ 'method options setting does not include either' \\ ' \\'AFNI\\' or", "anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat,", "node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in c.regOption:", "num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone doesn't have anatRegANTSinterpolation in their pipe config,", "we have all the func_preproc set up for every session of the subject", "skull-stripped anatomical from resource pool node, out_file = strat['anatomical_brain'] # pass the anatomical", "= False # TODO Enforce value with schema validation # Extract credentials path", "anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow)", "the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient", "os.path.abspath(config.crashLogDirectory) } # For each participant we have a list of dict (each", "\\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c, 'funcRegANTSinterpolation'):", "strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 
'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') },", "'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard':", "sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path = sub_dict['creds_path'] if creds_path and 'none' not in creds_path.lower():", "strat_nodes_list = strat_list['func_default'] strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'),", ") workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list') workflow.connect(merge_func_preproc_node, 'skull_list', template_node, 'input_skull_list') workflow, strat_list = register_func_longitudinal_template_to_standard(", "'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node, out_file = strat['anatomical_brain']", "path and try ' \\ 'again.' 
% (creds_path, subject_id, unique_id) raise Exception(err_msg) else:", "longitudinal template rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name)", "ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name),", "strat_name='longitudinal_'+strat_name) workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1 to longitudinal template warp rsc_key =", "= strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds,", "'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method':", "get the skull-stripped anatomical from resource pool node, out_file = strat['functional_preprocessed_median'] # pass", "method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc,", "pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node, out_file, pick_seg_map,", "'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') })", "providing inputs that have already been ' \\ 'skull-stripped.\\n\\nEither switch to using '", "pass the reference file node, out_file = 
strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para", "\\ 'accessing the S3 bucket. Check and try again.\\n' \\ 'Error: %s' %", "pe.MapNode(util.Function(input_names=['moving_image', 'reference', 'initial', 'rigid', 'affine', 'nonlinear', 'interp'], output_names=['out_image'], function=run_ants_apply_warp), name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['moving_image']) workflow.connect(template_node, \"output_brain_list\",", "Functional Image Preprocessing Workflow workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix) # Distortion", "\"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating", "the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference file node, out_file", "as_module=True), name='merge_func_preproc') merge_func_preproc_node.inputs.working_directory = config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set(", "c, diff_complete) # Func -> T1/EPI Template workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c)", "strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file')", "FNIRT config file specified in pipeline # config.yml fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1", "= strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] 
workflow.connect(node, out_file, fnirt_reg_func_mni,", "num_strat, strat in enumerate(strat_list): # or run ANTS anatomical-to-MNI registration instead if 'ANTS'", "ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else: ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name)", "ANTS # It would just require to change it here. template_node = subject_specific_template(", "if map_node_iterfield is not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else:", "anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list =", "workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass the reference files node, out_file = strat['template_brain_for_anat']", "as fsl import nipype.interfaces.io as nio from nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility", "of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_anat_mni.inputs.inputspec.interp =", "method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list,", "i+1 because the Merge nodes inputs starts at 1 rsc_key = 'anatomical_skull_leaf' anat_preproc_node,", "filenames in os.walk(working_directory): for f in filenames: if 'func_get_preprocessed_median' in dirpath and '.nii.gz'", "'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL',", "if 
'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp(", "------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data = False # TODO Enforce", "strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') })", "( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only =", "strat_list, config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here we have", "= [] if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): flirt_reg_func_mni =", "\\ 'skull-stripped.\\n\\nEither switch to using ' \\ 'ANTS for registration or provide input", "func_paths_dict = sub_dict['func'] else: func_paths_dict = sub_dict['rest'] unique_id = sub_dict['unique_id'] session_id_list.append(unique_id) try: creds_path", "all the func_preproc set up for every session of the subject # TODO", "# if someone doesn't have anatRegANTSinterpolation in their pipe config, # it will", "= strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # pass the reference file node, out_file", "in sub_dict: if 'func' in sub_dict: func_paths_dict = sub_dict['func'] else: func_paths_dict = sub_dict['rest']", "= session['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path =", "flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.input_brain') # pass", "strat_list, c) # Func -> T1 Registration (BBREG) 
workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list,", "to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference file node,", "Strategy() strat_list = [strat] node_suffix = '_'.join([subject_id, unique_id]) # Functional Ingress Workflow #", "name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list of lists", "['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation method may be in", "blip, fmap_rp_list = connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix) # Functional Initial", "in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'),", "check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median': (longitudinal_template_node, 'skull_template') }) strat_list", "c.runVMHC and 1 in getattr(c, 'runFunctional', [1]): for num_strat, strat in enumerate(strat_list): if", "num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating", "1 in c.runVMHC and 1 in getattr(c, 'runFunctional', [1]): for num_strat, strat in", "option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\",", "strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 
'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull", "a list of sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list", "resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node to calculate the center of", "= strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork()", "config subject_id session_id strat_name map_node_iterfield Returns ------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except:", "CPAC.utils import Strategy, find_files, function, Outputs from CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params,", "workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource", "% (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation", "reference file node, out_file = strat['template_skull_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_skull') else: node, out_file", "already-skullstripped inputs. this is because # FNIRT requires an input with the skull", "to bucket!') except Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an", "you ' \\ 'are providing inputs that have already been ' \\ 'skull-stripped.\\n\\nEither", "\\ 'again.' 
% (creds_path, subject_id, unique_id) raise Exception(err_msg) else: input_creds_path = None except", "'in_matrix_file') reg_strat.update_resource_pool({ resource:(fsl_apply_xfm, 'out_file') }, override=True) elif type == 'list': for index in", "resource:(fsl_apply_xfm, 'out_file') }, override=True) elif type == 'list': for index in range(3): fsl_apply_xfm", "in range(3): fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map =", "ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped is # reported", "'ref_file') # TODO how to include linear xfm? # node, out_file = reg_strat['anatomical_to_mni_linear_xfm']", "( creds_path, subject_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path =", "because # FNIRT requires an input with the skull still on # TODO", "anatomical-to-MNI registration, or... 
if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): #", "except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat, strat_nodes_list_list def pick_map(file_list, index, file_type): if", "strat in enumerate(strat_list): # or run ANTS anatomical-to-MNI registration instead if 'ANTS' in", "default option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in", "'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni,", "strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain')", "'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) #", "information of the pipeline config. 
(Same as for prep_workflow) Returns ------- strat_list_ses_list :", "(\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"), (\"anat\", \"PRIORS_WHITE\"), (\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\",", "try: creds_path = session['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path):", "num_strat, strat in enumerate(strat_list): flirt_reg_func_mni = create_fsl_flirt_linear_reg( 'func_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) #", "list of sessions within each strategy list # TODO rename and reorganize dict", "interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list ) workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node,", "'rigid') node, out_file = reg_strat['ants_affine_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm']", "strat_list = connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list #", "config. 
(Same as for prep_workflow) Returns ------- strat_list_ses_list : list of list a", "already_skullstripped == 3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list", "we have a list of dict (each dict is a session) already_skullstripped =", "'inputspec.input_skull') # skull reference node, out_file = strat['template_skull_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_skull') node,", "function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file = reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map,", "'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc')", "for strat_name, strat_nodes_list in strat_nodes_list_list.items(): node_suffix = '_'.join([strat_name, subject_id]) # Merge node to", "= connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func -> T1/EPI Template workflow, strat_list =", "strategy strat_nodes_list_list : list a list of strat_nodes_list workflow: Workflow main longitudinal workflow", "of the subject strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'),", "strat_list = [strat_init_new] new_strat_list = [] if 'FSL' in c.regOption: for num_strat, strat", "resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template = 
config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' #", "config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name", "output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat'", "pass anat strategy list? def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id :", "( check_config_resources, check_system_deps, get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow') def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow,", "' \\ 'the skull, but you also selected to ' \\ 'use already-skullstripped", "pipe config, # sinc will be default option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c,", "'_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id, anat =", "if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation', 'sinc') if c.funcRegFSLinterpolation not in [\"trilinear\", \"sinc\",", "'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'),", "'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = [] try: fsl_linear_reg_only", "reference files node, 
out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in", "Extract credentials path for output if it exists try: # Get path to", "\"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation # calculating the", "skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None preproc_wf_name =", "workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list: if", "= config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node to calculate the", "the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference file node, out_file", ") return ds def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat", "'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat, strat_nodes_list_list def", "'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file',", "config=config, wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method", "= [strat_init_new] new_strat_list = [] if 'FSL' in c.regOption: for num_strat, strat in", "the workflow 
workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node, out_file", "'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_mni = create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) #", "try: creds_path = sub_dict['creds_path'] if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path):", "diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) # Func -> T1 Registration (BBREG) workflow, strat_list", "rsc_name, ds, rsc_key) rsc_key = 'anatomical_brain' anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key] workflow.connect(anat_preproc_node, rsc_name, brain_merge_node,", "'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected ANTS interpolation method may be in the", "\\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg( 'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat) )", "Merge node to feed the anat_preproc outputs to the longitudinal template generation brain_merge_node", "datasink_name config subject_id session_id strat_name map_node_iterfield Returns ------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0])", "(config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc,", "workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file,", "Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None strat = Strategy()", "'hash_method': 
'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default'] strat_init = Strategy() templates_for_resampling", "for dirpath, dirnames, filenames in os.walk(working_directory): for f in filenames: if 'func_get_preprocessed_median' in", "strat_init, strat_name) # Register T1 to the standard template # TODO add session", "For each participant we have a list of dict (each dict is a", "= 'The selected FSL interpolation method may be in the list of values:", "be in the list of values: \"trilinear\", \"sinc\", \"spline\"' raise Exception(err_msg) # Input", "(the functions are in longitudinal_preproc) # Later other algorithms could be added to", "strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list", "out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None", "workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\",", "import aws_utils from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import", "pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_brain') # get the", "Enforce value with schema validation # Extract credentials path for output if it", "the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the reference file node, 
out_file", "an error processing credentials or ' \\ 'accessing the S3 bucket. Check and", "calculate the center of mass of the standard template to align the images", "in template_keys: if isinstance(getattr(config, key), str): node = create_check_for_s3_node( name=key, file_path=getattr(config, key), img_type=key_type,", "override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype',", "to write to bucket!') except Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There", "'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = []", "file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name return None def anat_longitudinal_wf(subject_id, sub_list, config):", "the resource pool updated strat_nodes_list_list : list a list of strat_nodes_list \"\"\" new_strat", "strat_nodes_list_list = connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if not", "list of list first level strategy, second level session config : configuration a", "if index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }) else:", "c.anatRegANTSinterpolation # calculating the transform with the skullstripped is # reported to be", "strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 
'func_longitudinal_to_mni_nonlinear_xfm':", "'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat) except KeyError: strat_nodes_list_list[strat_name] = [new_strat]", "= [] # either run FSL anatomical-to-MNI registration, or... if 'FSL' in c.regOption:", "'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'), 'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list +=", "'afni' preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.AFNI_options.set(", "# TODO check: # 1 func alone works # 2 anat + func", "session of the subject strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat',", "# registration with skull is preferred if 1 in c.regWithSkull: if already_skullstripped ==", "= strat_nodes_list[i][rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name,", "skull_list = [] for dirpath, dirnames, filenames in os.walk(working_directory): for f in filenames:", "that have not been already ' \\ 'skull-stripped.\\n\\n' logger.info(err_msg) raise Exception flirt_reg_anat_symm_mni =", "= template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') }) merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'],", "workflow_name = 'func_preproc_longitudinal_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory 
workflow.config['execution'] =", "the longitudinal template generation brain_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_brain_merge_\" + node_suffix) skull_merge_node =", "# get the skull-stripped anatomical from resource pool node, out_file = strat['anatomical_brain'] #", "enumerate(strat_list): if 'ANTS' in c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\", "[] # either run FSL anatomical-to-MNI registration, or... if 'FSL' in c.regOption: for", "reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list') }, override=True) for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']:", "anat_preproc workflow node to be connected and added to the resource pool strat_name", "rsc_name = strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name,", "'to run anatomical registration with ' \\ 'the skull, but you also selected", "\\ 'the skull, but you also selected to ' \\ 'use already-skullstripped images", "f: filepath = os.path.join(dirpath, f) brain_list.append(filepath) if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in", "the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para =", "second level session config : configuration a configuration object containing the information of", "'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'),", "= create_anat_preproc( method=skullstrip_method, config=config, 
wf_name=preproc_wf_name) workflow.connect(brain_rsc, 'outputspec.brain_mask', anat_preproc, 'inputspec.brain_mask') new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(", "config, subject_id, strat_name='longitudinal_'+strat_name, map_node_iterfield=['anatomical_to_longitudinal_template']) workflow.connect(template_node, \"output_brain_list\", t1_list, 'anatomical_to_longitudinal_template') # longitudinal to standard registration", "\"\"\" Parameters ---------- working_directory : string a path to the working directory Returns", "dict (each dict is a session) already_skullstripped = config.already_skullstripped[0] if already_skullstripped == 2:", "skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config,", "== 1: file_list = file_list[0] for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"): return file_name", "the strategy strat_nodes_list_list : list a list of strat_nodes_list workflow: Workflow main longitudinal", "= pe.MapNode(interface=fsl.ApplyXFM(), name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'],", "(ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # [SYMMETRIC] T1 ->", "config): \"\"\" Parameters ---------- subject_id : str the id of the subject sub_list", "level strategy, second level session config : configuration a configuration object containing the", "'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 
'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc',", "workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the reorient skull-on anatomical from resource pool", "'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map, 'in_list1') reg_strat.update_resource_pool({", "= 'Credentials path: \"%s\" for subject \"%s\" session \"%s\" ' \\ 'was not", "strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return strat_list_ses_list def merge_func_preproc(working_directory): \"\"\" Parameters ---------- working_directory : string a", "'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) # Here we have the same", "workflow.connect(template_node, \"output_brain_list\", ants_apply_warp, 'moving_image') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_apply_warp, 'reference') node,", "'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'), 'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # [SYMMETRIC] T1", "setting does not include either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you '", "# FNIRT requires an input with the skull still on # TODO ASH", "'outputspec.composite_transform'), 'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list # Inserting Segmentation Preprocessing Workflow", "out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({", "in 
c.runVMHC and 1 in getattr(c, 'runFunctional', [1]): for num_strat, strat in enumerate(strat_list):", "= connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): '''", "in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat)", "try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data = False # TODO Enforce value with", "# the in{}.format take i+1 because the Merge nodes inputs starts at 1", "list of strat_nodes_list workflow: Workflow main longitudinal workflow Returns ------- new_strat : Strategy", "from ANTS # It would just require to change it here. template_node =", "= reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({ 'anatomical_to_standard': (ants_apply_warp, 'out_image')", "= c.ANTs_para_T1_registration ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'),", "workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.reference_brain') # pass the reference file node, out_file = strat['template_skull_for_func_preproc']", "input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat' ) skullstrip_method = 'mask' preproc_wf_name =", "}) # loop over the different skull stripping strategies for strat_name, strat_nodes_list in", "Workflow main longitudinal workflow Returns ------- new_strat : Strategy the fork of strat", "out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, 
flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat =", "new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'), 'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'), 'func_longitudinal_template_to_standard': (flirt_reg_func_mni,", "template rsc_key = 'anatomical_longitudinal_template_' ds_template = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(template_node,", "err_msg = '\\n\\n[!] CPAC says: FNIRT (for anatomical ' \\ 'registration) will not", "% (strat_name, num_strat) ) # brain input node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file,", "anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file", "reg_strat[f'temporary_{resource}_list'] workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ resource:(concat_seg_map, 'out_list')", "values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg) # Input registration parameters ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation", "Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an error processing credentials", "Check and try again.\\n' \\ 'Error: %s' % e raise Exception(err_msg) if map_node_iterfield", "update strat name strat_list_ses_list = {} strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list in", "= strat['anatomical_brain'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain')", "registration with skull 
is preferred if 1 in c.regWithSkull: # get the skull-stripped", "'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']: err_msg = 'The selected", "reference file node, out_file = strat['template_symmetric_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.reference_brain') # get the", "strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({ 'template_cmass': (template_center_of_mass, 'cm') }) # Here we", "could be added to calculate it, like the multivariate template from ANTS #", "= Strategy() strat_list = [] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' %", "[0] if 'FSL' in c.regOption and 0 in fsl_linear_reg_only: for num_strat, strat in", "out_file, fnirt_reg_func_mni, 'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni,", "Parameters ---------- working_directory : string a path to the working directory Returns -------", "map_node_iterfield is not None: ds = pe.MapNode( DataSink(infields=map_node_iterfield), name='sinker_{}'.format(datasink_name), iterfield=map_node_iterfield ) else: ds", "fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified in pipeline #", "(\"other\", \"configFileTwomm\"), (\"anat\", \"template_based_segmentation_CSF\"), (\"anat\", \"template_based_segmentation_GRAY\"), (\"anat\", \"template_based_segmentation_WHITE\"), ] for key_type, key in", "new_strat : Strategy the fork of strat with the resource pool updated strat_nodes_list_list", "fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map), name=f'pick_{file_type}_{index}_{strat_name}') node, out_file", "node, out_file = 
strat['functional_preprocessed_median'] # pass the anatomical to the workflow workflow.connect(node, out_file,", "= config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name = 'template_skull_for_anat' resampled_template.inputs.tag = 'resolution_for_anat' # Node", "works, pass anat strategy list? def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id", "if it exists try: # Get path to creds file creds_path = ''", "skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option: skullstrip_method = 'fsl'", "= pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) }", "skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True,", "c) ''' return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list, config): ''' Parameters ---------- subject_id", "ants_reg_func_mni, 'inputspec.reference_brain') ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm':", "was an error processing credentials or ' \\ 'accessing the S3 bucket. Check", "import config from nipype import logging import nipype.pipeline.engine as pe import nipype.interfaces.afni as", "with skull is preferred if 1 in c.regWithSkull: # get the skull-stripped anatomical", "object containing the information of the pipeline config. 
(Same as for prep_workflow) Returns", "out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'),", "(ants_apply_warp, 'out_image') }) # Register tissue segmentation from longitudinal template space to native", "fsl_linear_reg_only = [0] if 'FSL' in c.regOption and 0 in fsl_linear_reg_only: for num_strat,", "'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm':", "dictionary as the one given to prep_workflow config : configuration a configuration object", "flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_func_mni.name) strat.update_resource_pool({ 'registration_method':", "to calculate it, like the multivariate template from ANTS # It would just", "}, override=True) for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask', 'seg_mixeltype', 'seg_partial_volume_map']: seg_apply_warp(strat_name=strat_name, resource=seg) #", "each session if the same dictionary as the one given to prep_workflow config", "'func_get_motion_correct_median' in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort()", "= create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, 
reduce_bias=config.bet_reduce_bias,", "'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg = 'The selected FSL", "= subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool,", "raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None template_keys =", "selected ANTS interpolation method may be in the list of values: \"Linear\", \"BSpline\",", "fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({", "[] session_id_list = [] # Loop over the sessions to create the input", "workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node, out_file =", "= strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference files node, out_file", "a list of strat_nodes_list workflow: Workflow main longitudinal workflow Returns ------- new_strat :", "smooth_final=config.skullstrip_smooth_final, push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list =", "= os.path.join( 'pipeline_%s_%s' % (config.pipelineName, strat_name), subject_id, session_id ) return ds def 
connect_anat_preproc_inputs(strat,", "# brain reference node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull", "anat + func works, pass anat strategy list? def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\"", "strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key)", "i in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i) for rsc_key in strat_nodes_list[i].resource_pool.keys():", "except Exception as e: if config.outputDirectory.lower().startswith('s3://'): err_msg = 'There was an error processing", "create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding, radius=config.bet_radius, reduce_bias=config.bet_reduce_bias, remove_eyes=config.bet_remove_eyes,", "= strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file", "'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default'] strat_init = Strategy() templates_for_resampling =", "in config.skullstrip_option: skullstrip_method = 'afni' preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix anat_preproc = create_anat_preproc(", "[strat] node_suffix = '_'.join([subject_id, unique_id]) # Functional Ingress Workflow # add optional flag", "num_strat, strat in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)])", "or ' \\ 'accessing the S3 bucket. 
Check and try again.\\n' \\ 'Error:", "'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL',", "anat_preproc set up for every session of the subject strat_init = Strategy() templates_for_resampling", "\"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file", "'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni,", "(longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list = [strat_init_new] # only need to", "out_file, ants_apply_warp, 'reference') node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial') node, out_file", "workflow.connect(anat_preproc_node, rsc_name, brain_merge_node, 'in{}'.format(i + 1)) # the in{}.format take i+1 because the", "or 'rest' in sub_dict: if 'func' in sub_dict: func_paths_dict = sub_dict['func'] else: func_paths_dict", "= os.path.abspath(creds_path) else: err_msg = 'Credentials path: \"%s\" for subject \"%s\" was not", "\\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration parameters", "reference file node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration", "run FSL anatomical-to-MNI registration, or... if 'FSL' in c.regOption: for num_strat, strat in", "and try again.' 
% ( creds_path, subject_id) raise Exception(err_msg) else: input_creds_path = None", "blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here we have all the func_preproc", "subject_id session_id strat_name map_node_iterfield Returns ------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data", "= [] session_id_list = [] # Loop over the sessions to create the", "config. (Same as for prep_workflow) Returns ------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" +", "'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'), 'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'),", "reg_strat[resource] workflow.connect(node, out_file, pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out',", "workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node,", "alone works # 2 anat + func works, pass anat strategy list? def", "configuration a configuration object containing the information of the pipeline config. 
Returns -------", "[i.get_name()[0].split('_')[-1] for i in strat_nodes_list] template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, unique_id_list=unique_id_list", "strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat : Strategy the strategy object you", "fsl_linear_reg_only: for num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg(", "in their pipe config, # it will default to LanczosWindowedSinc if not hasattr(c,", "config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data ds.inputs.container = os.path.join( 'pipeline_%s_%s' % (config.pipelineName,", "multivariate template from ANTS # It would just require to change it here.", "strat in enumerate(reg_strat_list): for rsc_key in strat.resource_pool.keys(): rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)]) if", "strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file')", "'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution',", "'FSL': fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg( 'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node,", "(strat_name, num_strat) ) node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 
'inputspec.input_brain') node, out_file", "'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'),", "are in longitudinal_preproc) # Later other algorithms could be added to calculate it,", "create a list of list ses_list_strat_list # a list of skullstripping strategies, #", "also selected to ' \\ 'use already-skullstripped images as ' \\ 'your inputs.", "out_key = new_strat['anatomical'] workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat') tmp_node, out_key = new_strat['template_cmass'] workflow.connect(tmp_node, out_key,", "any(o in config.skullstrip_option for o in [\"AFNI\", \"BET\"]): err = '\\n\\n[!] C-PAC says:", "Check this path and try again.' % ( creds_path, subject_id) raise Exception(err_msg) else:", "(Same as for prep_workflow) Returns ------- None \"\"\" workflow = pe.Workflow(name=\"anat_longitudinal_template_\" + str(subject_id))", "\"%s\" session \"%s\" ' \\ 'was not found. Check this path and try", "not found. Check this path and try ' \\ 'again.' 
% (creds_path, subject_id,", "pipe config, # sinc will be default option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c,", "push_to_edge=config.skullstrip_push_to_edge, use_skull=config.skullstrip_use_skull, perc_int=config.skullstrip_perc_int, max_inter_iter=config.skullstrip_max_inter_iter, blur_fwhm=config.skullstrip_blur_fwhm, fac=config.skullstrip_fac, monkey=config.skullstrip_monkey, mask_vol=config.skullstrip_mask_vol ) new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(", "s3 write access s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise Exception('Not", "anat_preproc, strat_name, strat_nodes_list_list, workflow): \"\"\" Parameters ---------- strat : Strategy the strategy object", "logger.info(err_msg) raise Exception # get the skull-stripped anatomical from resource pool node, out_file", "already_skullstripped = c.already_skullstripped[0] if already_skullstripped == 2: already_skullstripped = 0 elif already_skullstripped ==", "workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node,", "anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'), 'anatomical_brain_mask': ( anat_preproc, 'outputspec.brain_mask'), }) try: strat_nodes_list_list[strat_name].append(new_strat)", "template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True),", "list # TODO rename and reorganize dict # TODO update strat name strat_list_ses_list", "[strat_init_new] new_strat_list = [] if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list):", "an input with the skull still on # TODO ASH normalize w schema", "+ 
template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template = template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag =", "strategy, a list of sessions \"\"\" datasink = pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory", "strat_name map_node_iterfield Returns ------- \"\"\" try: encrypt_data = bool(config.s3Encryption[0]) except: encrypt_data = False", "1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) new_strat_list = [] # either run", "preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name )", "list of the data config dictionaries to be updated during the preprocessing #", "Correction workflow, strat_list = connect_distortion_correction(workflow, strat_list, config, diff, blip, fmap_rp_list, node_suffix) ses_list_strat_list[node_suffix] =", "new_strat, strat_nodes_list_list def pick_map(file_list, index, file_type): if isinstance(file_list, list): if len(file_list) == 1:", "session['unique_id'] session_id_list.append(unique_id) try: creds_path = session['creds_path'] if creds_path and 'none' not in creds_path.lower():", "from CPAC.utils.utils import ( check_config_resources, check_system_deps, get_scan_params, get_tr ) logger = logging.getLogger('nipype.workflow') def", "0 elif already_skullstripped == 3: already_skullstripped = 1 sub_mem_gb, num_cores_per_sub, num_ants_cores = \\", "name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}', iterfield=['reference', 'in_matrix_file']) fsl_apply_xfm.inputs.interp = 'nearestneighbour' pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'], output_names=['file_name'], function=pick_map),", "= c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None 
strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm':", "strategy strat_nodes_list_list = {} # list of the data config dictionaries to be", "it will default to LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if", "create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, session_id_list[i], 'longitudinal_'+strat_name) workflow.connect(node, rsc_name, ds, rsc_key) rsc_key =", "pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys = encrypt_data", "'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'), 'symmetric_mni_to_anatomical_nonlinear_xfm': ( ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'), 'anat_to_symmetric_mni_ants_composite_xfm': ( ants_reg_anat_symm_mni, 'outputspec.composite_transform'), 'symmetric_anatomical_to_standard':", "subject_id) raise Exception(err_msg) else: input_creds_path = None except KeyError: input_creds_path = None strat", "strat['functional_preprocessed_median'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') #", "template space to native space fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(), name=f'fsl_xfm_longitudinal_to_native_{strat_name}', iterfield=['in_file']) fsl_convert_xfm.inputs.invert_xfm = True", "subject # TODO create a list of list ses_list_strat_list # a list of", "if len(file_list) == 1: file_list = file_list[0] for file_name in file_list: if file_name.endswith(f\"{file_type}_{index}.nii.gz\"):", "% node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, 
wf_name=preproc_wf_name ) new_strat, strat_nodes_list_list =", "= [] try: fsl_linear_reg_only = c.fsl_linear_reg_only except AttributeError: fsl_linear_reg_only = [0] if 'FSL'", ") # brain input node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.input_brain') #", "node, rsc_name = strat[rsc_key] ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name) workflow.connect(node,", "reference files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain') if 'ANTS' in", "skull is preferred if 1 in c.regWithSkull: if already_skullstripped == 1: err_msg =", "include either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format(", "be default option if not hasattr(c, 'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not", "= subject_id, anat = session['brain_mask'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type =", "\"output_brain_list\", fsl_apply_warp, 'in_file') node, out_file = reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO", "os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test for s3 write access s3_write_access = \\ aws_utils.test_bucket_access(creds_path,", "preprocessed skull \"\"\" brain_list = [] skull_list = [] for dirpath, dirnames, filenames", "# loop over the different skull stripping strategies for strat_name, strat_nodes_list in strat_nodes_list_list.items():", "flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass", "( subject_specific_template ) from CPAC.utils import Strategy, 
find_files, function, Outputs from CPAC.utils.utils import", "imprecise # registration with skull is preferred if 1 in c.regWithSkull: # get", "if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat) strat.append_name(flirt_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm':", "None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni,", "\"%s\" for subject \"%s\" was not ' \\ 'found. Check this path and", "+ 1)) # the in{}.format take i+1 because the Merge nodes inputs starts", "for one subject and each session if the same dictionary as the one", "for num_strat, strat in enumerate(strat_list): # or run ANTS anatomical-to-MNI registration instead if", "True workflow.connect(template_node, \"warp_list\", fsl_convert_xfm, 'in_file') def seg_apply_warp(strat_name, resource, type='str', file_type=None): if type ==", "workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c) ''' return workflow, strat_list def func_longitudinal_template_wf(subject_id, strat_list,", "on # TODO ASH normalize w schema validation to bool if already_skullstripped ==", "strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL FNIRT config file specified", "flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni, 'outputspec.output_brain') }) strat_list += new_strat_list new_strat_list = []", "prevent the user from running FNIRT if they are # providing already-skullstripped inputs.", "utf-8 -*- import os import copy import time import shutil from nipype import", "ants_reg_anat_symm_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = 
strat['template_symmetric_skull'] workflow.connect(node, out_file,", "in [\"AFNI\", \"BET\"]): err = '\\n\\n[!] C-PAC says: Your skull-stripping ' \\ 'method", "connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if \"BET\" in config.skullstrip_option:", "node_suffix) # Functional Initial Prep Workflow workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix)", "'template', 'template_name', 'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='resampled_' + template_name) resampled_template.inputs.resolution = resolution resampled_template.inputs.template", "c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name,", "strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) if not any(o in config.skullstrip_option", "Strategy() strat_list = [] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix)", "strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list =", "ds = pe.Node( DataSink(), name='sinker_{}'.format(datasink_name) ) ds.inputs.base_directory = config.outputDirectory ds.inputs.creds_path = creds_path ds.inputs.encrypt_bucket_keys", "# creds_list = [] session_id_list = [] # Loop over the sessions to", "flirt_reg_anat_symm_mni, 'inputspec.reference_brain') # if 'ANTS' in c.regOption: # strat = strat.fork() # new_strat_list.append(strat)", "connect_anat_preproc_inputs( strat, anat_preproc, skullstrip_method + \"_skullstrip\", strat_nodes_list_list, workflow) strat_list.append(new_strat) elif 
already_skullstripped: skullstrip_method =", "strat['template_dilated_symmetric_brain_mask'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': (", "create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None): \"\"\" Parameters ---------- datasink_name config subject_id session_id", "of func preprocessed skull \"\"\" brain_list = [] skull_list = [] for dirpath,", ") skullstrip_method = 'mask' preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc,", "workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list') workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list') reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow,", "create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not hasattr(c, 'funcRegANTSinterpolation'): setattr(c,", "pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({", "in c.regOption: for num_strat, strat in enumerate(strat_list): # this is to prevent the", "Strategy() templates_for_resampling = [ (config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat,", 
"Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc,", "return file_name return None def anat_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id :", "updated strat_nodes_list_list : list a list of strat_nodes_list \"\"\" new_strat = strat.fork() tmp_node,", "node, out_file = reg_strat['ants_initial_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node,", "'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.ref_mask') # assign the FSL", "'' if config.awsOutputBucketCredentials: creds_path = str(config.awsOutputBucketCredentials) creds_path = os.path.abspath(creds_path) if config.outputDirectory.lower().startswith('s3://'): # Test", "create_func_preproc, create_wf_edit_func ) from CPAC.distortion_correction.distortion_correction import ( connect_distortion_correction ) from CPAC.longitudinal_pipeline.longitudinal_preproc import (", "subject_id]) # Merge node to feed the anat_preproc outputs to the longitudinal template", "items for i in range(len(strat_nodes_list)): rsc_nodes_suffix = \"_%s_%d\" % (node_suffix, i) for rsc_key", "sub_list : list of dict this is a list of sessions for one", "'There was an error processing credentials or ' \\ 'accessing the S3 bucket.", "properly if you ' \\ 'are providing inputs that have already been '", "on if already_skullstripped == 1: err_msg = '\\n\\n[!] 
CPAC says: FNIRT (for anatomical", "for prep_workflow) Returns ------- strat_list_ses_list : list of list a list of strategies;", "skull, but you also selected to ' \\ 'use already-skullstripped images as '", "TODO create a list of list ses_list_strat_list # a list of skullstripping strategies,", "diff, blip, fmap_rp_list = connect_func_ingress(workflow, strat_list, config, sub_dict, subject_id, input_creds_path, node_suffix) # Functional", "ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file,", "'tag'], output_names=['resampled_template'], function=resolve_resolution, as_module=True), name='template_skull_for_anat') resampled_template.inputs.resolution = config.resolution_for_anat resampled_template.inputs.template = config.template_skull_for_anat resampled_template.inputs.template_name =", "'inputspec.moving_brain') # get the reorient skull-on anatomical from resource pool node, out_file =", "ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from resource pool node, out_file", "functions are in longitudinal_preproc) # Later other algorithms could be added to calculate", "out_key, anat_preproc, 'inputspec.template_cmass') new_strat.append_name(anat_preproc.name) new_strat.update_resource_pool({ 'anatomical_brain': ( anat_preproc, 'outputspec.brain'), 'anatomical_skull_leaf': ( anat_preproc, 'outputspec.reorient'),", "'resolution_for_anat'), (config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'), (config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'),", "pass the reference files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 
'inputspec.reference_brain') if", "os.path.abspath(config.crashLogDirectory) } for sub_dict in sub_list: if 'func' in sub_dict or 'rest' in", "concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm, 'out_file',", "os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default'] strat_init = Strategy() templates_for_resampling = [ (config.resolution_for_func_preproc,", "'registration_method': 'FSL', 'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'), 'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'), 'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain') }) strat_list", "node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.input_brain') # brain reference node, out_file", "= create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set( subject = subject_id, anat = session['anat'], creds_path =", "else: input_creds_path = None except KeyError: input_creds_path = None strat = Strategy() strat_list", "reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO how to include linear xfm? #", "the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # get the reorient skull-on anatomical from", "strategy list? 
def func_preproc_longitudinal_wf(subject_id, sub_list, config): \"\"\" Parameters ---------- subject_id : string the", "strat.get('registration_method') != 'FSL': ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull", "'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # if someone doesn't have anatRegANTSinterpolation", "strat_nodes_list_list = {} # list of the data config dictionaries to be updated", "'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else: err_msg = 'Credentials", "= pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'], function=concat_list), name=f'concat_{file_type}_{index}_{strat_name}') if index == 0: workflow.connect(fsl_apply_xfm, 'out_file', concat_seg_map,", "pick_map(file_list, index, file_type): if isinstance(file_list, list): if len(file_list) == 1: file_list = file_list[0]", "pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the", "center of mass of the standard template to align the images with it.", "'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni,", "'initial') node, out_file = reg_strat['ants_rigid_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'rigid') node, out_file = reg_strat['ants_affine_xfm']", "except AttributeError: fsl_linear_reg_only = [0] if 'FSL' in c.regOption and 0 in fsl_linear_reg_only:", "out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.reference_brain') node, 
out_file = strat['template_symmetric_skull'] workflow.connect(node, out_file,", ": list a list of func preprocessed skull \"\"\" brain_list = [] skull_list", "want to fork anat_preproc : Workflow the anat_preproc workflow node to be connected", "config.workingDirectory template_node = subject_specific_template( workflow_name='subject_specific_func_template_' + subject_id ) template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost,", "num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'), 'motion_correct_median':", "'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'), 'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'), 'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'), 'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') })", "}) strat_list += new_strat_list # Inserting Segmentation Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow,", "workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map", "of list a list of strategies; within each strategy, a list of sessions", "from CPAC.utils.utils import concat_list from CPAC.utils.interfaces.datasink import DataSink from CPAC.utils.interfaces.function import Function import", "someone doesn't have anatRegFSLinterpolation in their pipe config, # sinc will be default", "'motion_correct_median': (longitudinal_template_node, 'skull_template') }) 
strat_list = [strat_init_new] new_strat_list = [] if 'FSL' in", "= 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name)", "the skull stripping as in prep_workflow if 'brain_mask' in session.keys() and session['brain_mask'] and", "every strategy strat_nodes_list_list = {} # list of the data config dictionaries to", "= strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_anat_mni.name) strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True)", "# longitudinal to standard registration items for num_strat, strat in enumerate(reg_strat_list): for rsc_key", "set up for every session of the subject strat_init = Strategy() templates_for_resampling =", "= {} strat_list_ses_list['func_default'] = [] for sub_ses_id, strat_nodes_list in ses_list_strat_list.items(): strat_list_ses_list['func_default'].append(strat_nodes_list[0]) workflow.run() return", "workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.ref_mask') #", "= strat['motion_correct_median'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull')", "ds_warp_list, 'anatomical_to_longitudinal_template_warp') # T1 in longitudinal template space rsc_key = 'anatomical_to_longitudinal_template_' t1_list =", "'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list", "\\ 'registration) will not work properly if you ' \\ 'are providing inputs", "specified in pipeline # 
config.yml fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig if 1 in fsl_linear_reg_only: strat", "(config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), #", "method may be in the list of values: \"Linear\", \"BSpline\", \"LanczosWindowedSinc\"' raise Exception(err_msg)", "KeyError: strat_nodes_list_list[strat_name] = [new_strat] return new_strat, strat_nodes_list_list def pick_map(file_list, index, file_type): if isinstance(file_list,", "it, like the multivariate template from ANTS # It would just require to", ") template_node.inputs.set( avg_method=config.longitudinal_template_average_method, dof=config.longitudinal_template_dof, interp=config.longitudinal_template_interp, cost=config.longitudinal_template_cost, convergence_threshold=config.longitudinal_template_convergence_threshold, thread_pool=config.longitudinal_template_thread_pool, ) workflow.connect(merge_func_preproc_node, 'brain_list', template_node, 'input_brain_list')", "in config.skullstrip_option for o in [\"AFNI\", \"BET\"]): err = '\\n\\n[!] 
C-PAC says: Your", "s3_write_access: raise Exception('Not able to write to bucket!') except Exception as e: if", "subject = subject_id, anat = session['anat'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type", "# pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass", "nipype.interfaces.utility import Merge, IdentityInterface import nipype.interfaces.utility as util from indi_aws import aws_utils from", "again.\\n' \\ 'Error: %s' % e raise Exception(err_msg) if map_node_iterfield is not None:", "-> T1 Registration (BBREG) workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete) # Func", "None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, already_skullstripped=True, config=config, wf_name=preproc_wf_name", "write access s3_write_access = \\ aws_utils.test_bucket_access(creds_path, config.outputDirectory) if not s3_write_access: raise Exception('Not able", "= strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['motion_correct_median']", "' \\ 'accessing the S3 bucket. 
Check and try again.\\n' \\ 'Error: %s'", "'resolution_for_anat' # Node to calculate the center of mass of the standard template", "ants_reg_func_mni = \\ create_wf_calculate_ants_warp( 'func_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) if not", "strat.get('registration_method') != 'FSL': ants_reg_anat_mni = \\ create_wf_calculate_ants_warp( 'anat_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull", "interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node will generate the longitudinal template (the", "skullstrip_method = 'fsl' preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config,", "if isinstance(file_list, list): if len(file_list) == 1: file_list = file_list[0] for file_name in", "create_fsl_fnirt_nonlinear_reg( 'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) # brain input node, out_file = strat['anatomical_brain']", "c.regOption: for num_strat, strat in enumerate(strat_list): # this is to prevent the user", "c.funcRegFSLinterpolation node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.input_brain') # pass the reference", "'anatRegFSLinterpolation'): setattr(c, 'anatRegFSLinterpolation', 'sinc') if c.anatRegFSLinterpolation not in [\"trilinear\", \"sinc\", \"spline\"]: err_msg =", "'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template, template_name, tag in templates_for_resampling:", "subject_id, anat = session['anat'], creds_path = input_creds_path, dl_dir = config.workingDirectory, img_type = 'anat'", "strat_nodes_list_list[strat_name] = [new_strat] return new_strat, 
strat_nodes_list_list def pick_map(file_list, index, file_type): if isinstance(file_list, list):", "import copy import time import shutil from nipype import config from nipype import", "config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] #", "(config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'), ] for resolution, template, template_name,", "'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default'] strat_init = Strategy() templates_for_resampling = [", "(longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask': (brain_mask, 'out_file') }) strat_list = [strat_init_new] #", "= pe.Node(nio.DataSink(), name='sinker') datasink.inputs.base_directory = config.workingDirectory session_id_list = [] ses_list_strat_list = {} workflow_name", "'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'), 'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'),", "str the id of the subject sub_list : list of dict this is", "Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name) return strat_list def 
create_datasink(datasink_name,", "node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline, padding=config.bet_padding,", "Input registration parameters flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation node, out_file = strat['anatomical_brain'] workflow.connect(node, out_file, flirt_reg_anat_mni,", "'inputspec.input_brain') # brain reference node, out_file = strat['template_brain_for_anat'] workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') #", "num_ants_cores = \\ check_config_resources(c) new_strat_list = [] # either run FSL anatomical-to-MNI registration,", "num_strat, strat in enumerate(strat_list): if strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' %", "strat['template_brain_for_anat'] workflow.connect(node, out_file, flirt_reg_anat_mni, 'inputspec.reference_brain') if 'ANTS' in c.regOption: strat = strat.fork() new_strat_list.append(strat)", "# Extract credentials path for output if it exists try: # Get path", "in dirpath and '.nii.gz' in f: filepath = os.path.join(dirpath, f) skull_list.append(filepath) brain_list.sort() skull_list.sort()", "[] skull_list = [] for dirpath, dirnames, filenames in os.walk(working_directory): for f in", "strat.get('registration_method') == 'FSL': fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg( 'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat) ) node, out_file", "\"sinc\", \"spline\"' raise Exception(err_msg) # Input registration parameters flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation node, out_file", "# skull input node, out_file = strat['motion_correct_median'] workflow.connect(node, out_file, fnirt_reg_func_mni, 
'inputspec.input_skull') # skull", "'affine') node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm'] workflow.connect(node, out_file, ants_apply_warp, 'nonlinear') ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation reg_strat.update_resource_pool({", "'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain') }) strat_list += new_strat_list ''' # Func -> T1 Registration", "pool for resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name',", "list of dict this is a list of sessions for one subject and", "strat.append_name(flirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'), 'symmetric_mni_to_anatomical_linear_xfm': ( flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'), 'symmetric_anatomical_to_standard': ( flirt_reg_anat_symm_mni,", "= pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new = strat_init.fork()", "strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'), 'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'),", "= template resampled_template.inputs.template_name = template_name resampled_template.inputs.tag = tag strat_init.update_resource_pool({ template_name: (resampled_template, 'resampled_template') })", "from resource pool node, out_file = strat['anatomical_brain'] # pass the anatomical to the", "'inputspec.reference_skull') node, out_file = 
strat['func_longitudinal_to_mni_linear_xfm'] workflow.connect(node, out_file, fnirt_reg_func_mni, 'inputspec.linear_aff') node, out_file = strat['template_ref_mask']", "else: # get the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain']", "dl_dir = config.workingDirectory, img_type = 'anat' ) strat.update_resource_pool({ 'anatomical': (anat_rsc, 'outputspec.anat') }) strat.update_resource_pool({", "= { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # strat_nodes_list = strat_list['func_default'] strat_init =", "config, # sinc will be default option if not hasattr(c, 'funcRegFSLinterpolation'): setattr(c, 'funcRegFSLinterpolation',", "to LanczosWindowedSinc if not hasattr(c, 'anatRegANTSinterpolation'): setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc') if c.anatRegANTSinterpolation not in", "template # TODO add session information in node name for num_reg_strat, reg_strat in", "strat.update_resource_pool({ 'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'), 'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list", "workflow.connect(template_node, 'brain_template', ds_template, rsc_key) # T1 to longitudinal template warp rsc_key = 'anatomical_to_longitudinal_template_warp_'", "[] # Loop over the sessions to create the input for the longitudinal", "}) strat_list = [strat_init_new] # only need to run once for each subject", "% node_suffix anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name) anat_preproc.inputs.BET_options.set( frac=config.bet_frac, mask_boolean=config.bet_mask_boolean, mesh_boolean=config.bet_mesh_boolean, outline=config.bet_outline,", "strat_list += new_strat_list new_strat_list = [] for num_strat, strat in enumerate(strat_list): # or", "brain_mask, 'in_file') strat_init_new = 
strat_init.fork() strat_init_new.update_resource_pool({ 'anatomical_brain': (longitudinal_template_node, 'brain_template'), 'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'), 'anatomical_brain_mask':", "strat['anatomical_brain'] workflow.connect(node, out_file, ants_reg_anat_symm_mni, 'inputspec.moving_brain') # pass the reference file node, out_file =", "strat['motion_correct_median'] # pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_skull') #", "= config.workingDirectory session_id_list = [] ses_list_strat_list = {} workflow_name = 'func_preproc_longitudinal_' + str(subject_id)", "registration, or... if 'FSL' in c.regOption: for num_strat, strat in enumerate(strat_list): # this", "img_type=key_type, creds_path=input_creds_path, dl_dir=config.workingDirectory ) setattr(config, key, node) strat = Strategy() strat_list = []", "node, out_file = strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference file", "out_file, ants_reg_func_mni, 'inputspec.moving_skull') # pass the reference file node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node,", "node_suffix strat.append_name(brain_rsc.name) strat.update_resource_pool({ 'anatomical_brain_mask': (brain_rsc, 'outputspec.anat') }) anat_preproc = create_anat_preproc( method=skullstrip_method, config=config, wf_name=preproc_wf_name)", "strat_init, strat_name): sub_mem_gb, num_cores_per_sub, num_ants_cores = \\ check_config_resources(c) strat_init_new = strat_init.fork() strat_init_new.update_resource_pool({ 'functional_preprocessed_median':", "fsl_linear_reg_only: strat = strat.fork() new_strat_list.append(strat) strat.append_name(fnirt_reg_func_mni.name) strat.update_resource_pool({ 'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'), 'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 
'outputspec.output_brain')", "'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled template to resource", "either' \\ ' \\'AFNI\\' or \\'BET\\'.\\n\\n Options you ' \\ 'provided:\\nskullstrip_option: {0}\\n\\n'.format( str(config.skullstrip_option))", "'template_brain_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative',", "resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'),", "if creds_path and 'none' not in creds_path.lower(): if os.path.exists(creds_path): input_creds_path = os.path.abspath(creds_path) else:", "new_strat_list # Inserting Segmentation Preprocessing Workflow workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name)", "of mass of the standard template to align the images with it. 
template_center_of_mass", "parameters ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation # calculating the transform with the skullstripped is #", "pick_seg_map, 'file_list') pick_seg_map.inputs.index=index pick_seg_map.inputs.file_type=file_type workflow.connect(pick_seg_map, 'file_name', fsl_apply_xfm, 'in_file') workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm,", "ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'),", "config.workingDirectory workflow.config['execution'] = { 'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(config.crashLogDirectory) } # For each participant", "2: already_skullstripped = 0 elif already_skullstripped == 3: already_skullstripped = 1 resampled_template =", "= None strat.append_name(ants_reg_func_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'), 'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'), 'ants_affine_xfm':", "node_suffix) skull_merge_node = pe.Node( interface=Merge(len(strat_nodes_list)), name=\"anat_longitudinal_skull_merge_\" + node_suffix) # This node will generate", "'inputspec.reference_brain') ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None strat.append_name(ants_reg_anat_mni.name) strat.update_resource_pool({ 'registration_method': 'ANTS', 'ants_initial_xfm': (ants_reg_anat_mni,", "workflow.connect(node, out_file, concat_seg_map, 'in_list1') reg_strat.update_resource_pool({ f'temporary_{resource}_list':(concat_seg_map, 'out_list') }, override=True) reg_strat.update_resource_pool({ 
resource:(concat_seg_map, 'out_list') },", "'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc') ] # update resampled", ": Strategy the fork of strat with the resource pool updated strat_nodes_list_list :", "says: You selected ' \\ 'to run anatomical registration with ' \\ 'the", "'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat), num_threads=num_ants_cores, reg_ants_skull=c.regWithSkull ) # Input registration parameters ants_reg_anat_symm_mni.inputs.inputspec.interp =", "= c.anatRegANTSinterpolation # calculating the transform with the skullstripped is # reported to", "= pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') #", "work properly if you ' \\ 'are providing inputs that have already been", "strat_list = [] node_suffix = '_'.join([subject_id, unique_id]) anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix) anat_rsc.inputs.inputnode.set(", "'accessing the S3 bucket. 
Check and try again.\\n' \\ 'Error: %s' % e", "pass the anatomical to the workflow workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.moving_skull') # pass the", "strat['template_brain_for_anat'] workflow.connect(node, out_file, ants_reg_anat_mni, 'inputspec.reference_brain') # pass the reference file node, out_file =", "be updated during the preprocessing # creds_list = [] session_id_list = [] #", "'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file = strat['template_brain_for_anat']", "err_msg = 'There was an error processing credentials or ' \\ 'accessing the", "= create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have anatRegFSLinterpolation", "workflow.connect(node, out_file, fnirt_reg_anat_mni, 'inputspec.reference_brain') # skull input node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file,", "# Merge node to feed the anat_preproc outputs to the longitudinal template generation", "node_suffix) ses_list_strat_list[node_suffix] = strat_list # Here we have all the func_preproc set up", "'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain') }, override=True) strat_list += new_strat_list new_strat_list = [] for num_strat,", "CPAC.registration.utils import run_ants_apply_warp from CPAC.utils.datasource import ( resolve_resolution, create_anat_datasource, create_func_datasource, create_check_for_s3_node ) from", "in enumerate(strat_list): if 'FSL' in c.regOption and \\ strat.get('registration_method') != 'ANTS': # this", "pe.Node( interface=afni.CenterMass(), name='template_skull_for_anat_center_of_mass' ) template_center_of_mass.inputs.cm_file = \"template_center_of_mass.txt\" workflow.connect(resampled_template, 'resampled_template', template_center_of_mass, 'in_file') # list", "''' workflow_name = 
'func_longitudinal_template_' + str(subject_id) workflow = pe.Workflow(name=workflow_name) workflow.base_dir = config.workingDirectory workflow.config['execution']", "strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c) # Func -> T1 Registration (BBREG) workflow,", "for resolution, template, template_name, tag in templates_for_resampling: resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'],", "'resolution_for_func_preproc'), # TODO check float resolution (config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'), (config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative',", "'resolution_for_anat'), (config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'), (config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'), (config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'),", "'resampled_template') }) # loop over the different skull stripping strategies for strat_name, strat_nodes_list", "None except KeyError: input_creds_path = None template_keys = [ (\"anat\", \"PRIORS_CSF\"), (\"anat\", \"PRIORS_GRAY\"),", "# pass the reference files node, out_file = strat['template_brain_for_func_preproc'] workflow.connect(node, out_file, flirt_reg_func_mni, 'inputspec.reference_brain')", "TODO ASH normalize w schema validation to bool if already_skullstripped == 1: err_msg", "skull_list.append(filepath) brain_list.sort() skull_list.sort() return brain_list, skull_list def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name): sub_mem_gb,", "= [] # Loop over the sessions to create the input for the", "and \\ strat.get('registration_method') != 'ANTS': # this is to prevent the user from", 
"strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask, 'in_file') strat_init_new", "create_fsl_flirt_linear_reg( 'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat) ) # if someone doesn't have anatRegFSLinterpolation in", "from CPAC.anat_preproc.anat_preproc import ( create_anat_preproc ) from CPAC.seg_preproc.seg_preproc import ( connect_anat_segmentation ) from", "workflow, strat_init, strat_name): brain_mask = pe.Node(interface=fsl.maths.MathsCommand(), name=f'longitudinal_anatomical_brain_mask_{strat_name}') brain_mask.inputs.args = '-bin' workflow.connect(longitudinal_template_node, 'brain_template', brain_mask,", "'inputspec.ref_mask') strat.append_name(fnirt_reg_anat_symm_mni.name) strat.update_resource_pool({ 'anatomical_to_symmetric_mni_nonlinear_xfm': ( fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'), 'symmetric_anatomical_to_standard': ( fnirt_reg_anat_symm_mni, 'outputspec.output_brain') }, override=True)", "strat_list.append(new_strat) elif already_skullstripped: skullstrip_method = None preproc_wf_name = 'anat_preproc_already_%s' % node_suffix anat_preproc =", "workflow.connect(brain_merge_node, 'out', fsl_apply_xfm, 'reference') workflow.connect(fsl_convert_xfm, 'out_file', fsl_apply_xfm, 'in_matrix_file') concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'], output_names=['out_list'],", "out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_brain') node, out_file = strat['anatomical_skull_leaf'] workflow.connect(node, out_file, fnirt_reg_anat_symm_mni, 'inputspec.input_skull') node, out_file", "seg_apply_warp(strat_name=strat_name, resource=seg) # apply warp on list seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob') seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files',", 
"enumerate(reg_strat_list): if reg_strat.get('registration_method') == 'FSL': fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(), name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name), iterfield=['in_file']) workflow.connect(template_node, \"output_brain_list\", fsl_apply_warp,", "get the skullstripped anatomical from resource pool node, out_file = strat['anatomical_brain'] # pass", "= None strat.append_name(ants_reg_anat_symm_mni.name) strat.update_resource_pool({ 'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'), 'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'), 'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'),", "# TODO create a list of list ses_list_strat_list # a list of skullstripping", "= strat['functional_preprocessed_median'] workflow.connect(node, out_file, ants_reg_func_mni, 'inputspec.moving_brain') # pass the reference file node, out_file", "c.regOption and \\ strat.get('registration_method') != 'FSL': ants_reg_anat_symm_mni = \\ create_wf_calculate_ants_warp( 'anat_symmetric_mni_ants_register_%s_%d' % (strat_name,", "# get the reorient skull-on anatomical from resource pool node, out_file = strat['motion_correct_median']", "= reg_strat['template_brain_for_anat'] workflow.connect(node, out_file, fsl_apply_warp, 'ref_file') # TODO how to include linear xfm?" ]
[ "for accepted formats. Terminating...\\n') sys.exit() if '__main__' == __name__: \"\"\"Initialise root app when", "libraries. \"\"\" def __init__(self, expansion=None, filename=None): \"\"\" Initialise the command line session, using", "\"\"\" Initialise the command line session, using the correct expansion type to convert", "parse. Args: expansion (String): FamiTracker expansion chip to use as reference for parsing", "(String): Name of local file to be housed in same directory as script", "data files.\"\"\" def validateParameters(self): \"\"\" Ensure that the information passed to the parser", "if no filename is provided.\"\"\" sys.stdout.write('Please provide a valid .txt file for parsing.", "if no expansion chip is provided.\"\"\" sys.stdout.write('Please provide a valid expansion chip name", "and output readable data for our visualiser to parse. Args: expansion (String): FamiTracker", "to None. \"\"\" self.expansion = expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker", "same directory as script execution. Defaults to None. \"\"\" self.expansion = expansion self.filename", "packages and libraries. \"\"\" def __init__(self, expansion=None, filename=None): \"\"\" Initialise the command line", "their respective packages and libraries. \"\"\" def __init__(self, expansion=None, filename=None): \"\"\" Initialise the", "expansion chip to use as reference for parsing channel data. Defaults to None.", "to be housed in same directory as script execution. Defaults to None. \"\"\"", "< len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv): app = App(sys.argv[1])", "be housed in same directory as script execution. Defaults to None. \"\"\" self.expansion", "root app when file is executed via the command line.\"\"\" if 2 <", "get in the way of conversions.\"\"\" sys.stdout.write('Invalid filename provided. 
Please reference the README", "divert all export file conversions and error handling to their respective packages and", "execution if no filename is provided.\"\"\" sys.stdout.write('Please provide a valid .txt file for", "CSV data files.\"\"\" def validateParameters(self): \"\"\" Ensure that the information passed to the", "\"\"\" def __init__(self, expansion=None, filename=None): \"\"\" Initialise the command line session, using the", "created by time.time() floating point precision for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path,", "filename provided. Please reference the README for accepted formats. Terminating...\\n') sys.exit() if '__main__'", "parsing. Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in the way", "using the correct expansion type to convert and output readable data for our", "format. \"\"\" if self.expansion is None: \"\"\"Terminate execution if no expansion chip is", "= App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv): app = App(sys.argv[1]) else: app =", "filename is provided.\"\"\" sys.stdout.write('Please provide a valid .txt file for parsing. Terminating...\\n') sys.exit()", "local file to be housed in same directory as script execution. Defaults to", "all export file conversions and error handling to their respective packages and libraries.", "to divert all export file conversions and error handling to their respective packages", "int(time.time()) \"\"\"Remove decimal places created by time.time() floating point precision for clean filenames.\"\"\"", "2 < len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv): app =", "App: \"\"\" Base container class to divert all export file conversions and error", "line.\"\"\" if 2 < len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv):", "correct format. 
\"\"\" if self.expansion is None: \"\"\"Terminate execution if no expansion chip", "in same directory as script execution. Defaults to None. \"\"\" self.expansion = expansion", "__init__(self, expansion=None, filename=None): \"\"\" Initialise the command line session, using the correct expansion", "command line session, using the correct expansion type to convert and output readable", "README for accepted formats. Terminating...\\n') sys.exit() if self.filename is None: \"\"\"Terminate execution if", "None: \"\"\"Terminate execution if no expansion chip is provided.\"\"\" sys.stdout.write('Please provide a valid", "and libraries. \"\"\" def __init__(self, expansion=None, filename=None): \"\"\" Initialise the command line session,", "execution. Defaults to None. \"\"\" self.expansion = expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename)", "import parser.export as export import parser.read as read class App: \"\"\" Base container", "parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in", "time import constants import parser.correct as correct import parser.export as export import parser.read", "full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start writing to JSON config and CSV data", "line is in the correct format. \"\"\" if self.expansion is None: \"\"\"Terminate execution", "output readable data for our visualiser to parse. Args: expansion (String): FamiTracker expansion", "self.reader = read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to start reading the file if", "accepted formats. Terminating...\\n') sys.exit() if self.filename is None: \"\"\"Terminate execution if no filename", "channel data. Defaults to None. filename (String): Name of local file to be", "conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. Please reference the README for accepted formats. 
Terminating...\\n')", "Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in the way of", "as reference for parsing channel data. Defaults to None. filename (String): Name of", "expansion type to convert and output readable data for our visualiser to parse.", "data for our visualiser to parse. Args: expansion (String): FamiTracker expansion chip to", "of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. Please reference the README for accepted formats.", "export file as there are existing problems that mask required data.\"\"\" self.reader =", "= expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as there", "provide a valid .txt file for parsing. Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure", "None: \"\"\"Terminate execution if no filename is provided.\"\"\" sys.stdout.write('Please provide a valid .txt", "way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. Please reference the README for accepted", "respective packages and libraries. \"\"\" def __init__(self, expansion=None, filename=None): \"\"\" Initialise the command", "to convert and output readable data for our visualiser to parse. Args: expansion", "container class to divert all export file conversions and error handling to their", "conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please reference the README for accepted formats. Terminating...\\n') sys.exit()", "FamiTracker export file as there are existing problems that mask required data.\"\"\" self.reader", "housed in same directory as script execution. Defaults to None. 
\"\"\" self.expansion =", "correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as there are existing problems that mask required", "= self.reader.start() \"\"\"Attempt to start reading the file if validation passes.\"\"\" timestamp =", "self.reader.start() \"\"\"Attempt to start reading the file if validation passes.\"\"\" timestamp = int(time.time())", "as export import parser.read as read class App: \"\"\" Base container class to", "FamiTracker expansion chip to use as reference for parsing channel data. Defaults to", "data. Defaults to None. filename (String): Name of local file to be housed", "\"\"\" Ensure that the information passed to the parser by the user via", "reading the file if validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal places created", "len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv): app = App(sys.argv[1]) else:", "read class App: \"\"\" Base container class to divert all export file conversions", "name for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't", "sys.stdout.write('Please provide a valid .txt file for parsing. Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'):", "correct expansion type to convert and output readable data for our visualiser to", "= export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start writing to JSON config and", "doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. 
Please reference", "parser by the user via the command line is in the correct format.", "if 2 < len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv): app", "is None: \"\"\"Terminate execution if no expansion chip is provided.\"\"\" sys.stdout.write('Please provide a", "sys.exit() if self.filename is None: \"\"\"Terminate execution if no filename is provided.\"\"\" sys.stdout.write('Please", "the command line is in the correct format. \"\"\" if self.expansion is None:", "to JSON config and CSV data files.\"\"\" def validateParameters(self): \"\"\" Ensure that the", "information passed to the parser by the user via the command line is", "file if validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal places created by time.time()", "session, using the correct expansion type to convert and output readable data for", "and CSV data files.\"\"\" def validateParameters(self): \"\"\" Ensure that the information passed to", "to parse. Args: expansion (String): FamiTracker expansion chip to use as reference for", "\"\"\"Terminate execution if no filename is provided.\"\"\" sys.stdout.write('Please provide a valid .txt file", "Args: expansion (String): FamiTracker expansion chip to use as reference for parsing channel", "directory as script execution. Defaults to None. \"\"\" self.expansion = expansion self.filename =", "\"\"\" if self.expansion is None: \"\"\"Terminate execution if no expansion chip is provided.\"\"\"", "self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\"", "self.expansion) self.exporter.start() \"\"\"Attempt to start writing to JSON config and CSV data files.\"\"\"", "the README for accepted formats. Terminating...\\n') sys.exit() if self.filename is None: \"\"\"Terminate execution", "None. 
filename (String): Name of local file to be housed in same directory", "export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start writing to JSON config and CSV", "\"\"\"Initialise root app when file is executed via the command line.\"\"\" if 2", "the correct format. \"\"\" if self.expansion is None: \"\"\"Terminate execution if no expansion", "sys.stdout.write('Invalid filename provided. Please reference the README for accepted formats. Terminating...\\n') sys.exit() if", "valid .txt file for parsing. Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't", "correct import parser.export as export import parser.read as read class App: \"\"\" Base", "parser.export as export import parser.read as read class App: \"\"\" Base container class", "the file if validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal places created by", "import sys import time import constants import parser.correct as correct import parser.export as", "provided.\"\"\" sys.stdout.write('Please provide a valid .txt file for parsing. Terminating...\\n') sys.exit() elif not", "files.\"\"\" def validateParameters(self): \"\"\" Ensure that the information passed to the parser by", "for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start writing", "a valid .txt file for parsing. 
Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity", "Base container class to divert all export file conversions and error handling to", "Ensure that the information passed to the parser by the user via the", "start writing to JSON config and CSV data files.\"\"\" def validateParameters(self): \"\"\" Ensure", "filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start writing to JSON", "parser.read as read class App: \"\"\" Base container class to divert all export", "file for parsing. Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in", "in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid expansion", "sys.stdout.write('Please provide a valid expansion chip name for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower()", "error handling to their respective packages and libraries. \"\"\" def __init__(self, expansion=None, filename=None):", "doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please reference the", "read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to start reading the file if validation passes.\"\"\"", "for parsing. Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in the", "not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid", "Terminating...\\n') sys.exit() if '__main__' == __name__: \"\"\"Initialise root app when file is executed", "constants import parser.correct as correct import parser.export as export import parser.read as read", "use as reference for parsing channel data. Defaults to None. 
filename (String): Name", "get in the way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. Please reference the", "sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\"", "app when file is executed via the command line.\"\"\" if 2 < len(sys.argv):", "case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. Please", "expansion=None, filename=None): \"\"\" Initialise the command line session, using the correct expansion type", "\"\"\" self.expansion = expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file", "is in the correct format. \"\"\" if self.expansion is None: \"\"\"Terminate execution if", "provided. Please reference the README for accepted formats. Terminating...\\n') sys.exit() if '__main__' ==", "execution if no expansion chip is provided.\"\"\" sys.stdout.write('Please provide a valid expansion chip", "point precision for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to", "to start reading the file if validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal", "filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as there are existing problems that", "by time.time() floating point precision for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion)", "convert and output readable data for our visualiser to parse. 
Args: expansion (String):", "\"\"\" Base container class to divert all export file conversions and error handling", "existing problems that mask required data.\"\"\" self.reader = read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt", "full_path = self.reader.start() \"\"\"Attempt to start reading the file if validation passes.\"\"\" timestamp", "validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal places created by time.time() floating point", "accepted formats. Terminating...\\n') sys.exit() if '__main__' == __name__: \"\"\"Initialise root app when file", "visualiser to parse. Args: expansion (String): FamiTracker expansion chip to use as reference", "executed via the command line.\"\"\" if 2 < len(sys.argv): app = App(sys.argv[1], sys.argv[2])", "case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please reference", "elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid", "a valid expansion chip name for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not in", "def __init__(self, expansion=None, filename=None): \"\"\" Initialise the command line session, using the correct", "for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get", "provided. Please reference the README for accepted formats. Terminating...\\n') sys.exit() if self.filename is", "the command line.\"\"\" if 2 < len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif 1", "no filename is provided.\"\"\" sys.stdout.write('Please provide a valid .txt file for parsing. 
Terminating...\\n')", "import parser.correct as correct import parser.export as export import parser.read as read class", "== __name__: \"\"\"Initialise root app when file is executed via the command line.\"\"\"", "is provided.\"\"\" sys.stdout.write('Please provide a valid .txt file for parsing. Terminating...\\n') sys.exit() elif", "chip is provided.\"\"\" sys.stdout.write('Please provide a valid expansion chip name for parsing. Terminating...\\n')", "there are existing problems that mask required data.\"\"\" self.reader = read.FileReader(self.filename) full_path =", "Defaults to None. \"\"\" self.expansion = expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite", "expansion (String): FamiTracker expansion chip to use as reference for parsing channel data.", "'__main__' == __name__: \"\"\"Initialise root app when file is executed via the command", "the way of conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please reference the README for accepted", "file is executed via the command line.\"\"\" if 2 < len(sys.argv): app =", "to None. filename (String): Name of local file to be housed in same", "in the correct format. \"\"\" if self.expansion is None: \"\"\"Terminate execution if no", "self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start writing to JSON config", "user via the command line is in the correct format. \"\"\" if self.expansion", "Initialise the command line session, using the correct expansion type to convert and", "if '__main__' == __name__: \"\"\"Initialise root app when file is executed via the", "the parser by the user via the command line is in the correct", "to the parser by the user via the command line is in the", "valid expansion chip name for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not in constants.expansions():", "reference for parsing channel data. Defaults to None. 
filename (String): Name of local", "conversions and error handling to their respective packages and libraries. \"\"\" def __init__(self,", "places created by time.time() floating point precision for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp,", "as script execution. Defaults to None. \"\"\" self.expansion = expansion self.filename = filename", "self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as there are existing problems that mask", "timestamp = int(time.time()) \"\"\"Remove decimal places created by time.time() floating point precision for", "reference the README for accepted formats. Terminating...\\n') sys.exit() if '__main__' == __name__: \"\"\"Initialise", "as read class App: \"\"\" Base container class to divert all export file", "chip to use as reference for parsing channel data. Defaults to None. filename", "handling to their respective packages and libraries. \"\"\" def __init__(self, expansion=None, filename=None): \"\"\"", "expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as there are", "App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv): app = App(sys.argv[1]) else: app = App()", "\"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please", "no expansion chip is provided.\"\"\" sys.stdout.write('Please provide a valid expansion chip name for", "import constants import parser.correct as correct import parser.export as export import parser.read as", "self.expansion is None: \"\"\"Terminate execution if no expansion chip is provided.\"\"\" sys.stdout.write('Please provide", "to use as reference for parsing channel data. Defaults to None. filename (String):", "file to be housed in same directory as script execution. 
Defaults to None.", "\"\"\"Rewrite FamiTracker export file as there are existing problems that mask required data.\"\"\"", "expansion chip is provided.\"\"\" sys.stdout.write('Please provide a valid expansion chip name for parsing.", "if self.filename is None: \"\"\"Terminate execution if no filename is provided.\"\"\" sys.stdout.write('Please provide", "time.time() floating point precision for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start()", "via the command line.\"\"\" if 2 < len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif", "is provided.\"\"\" sys.stdout.write('Please provide a valid expansion chip name for parsing. Terminating...\\n') sys.exit()", "command line is in the correct format. \"\"\" if self.expansion is None: \"\"\"Terminate", "the command line session, using the correct expansion type to convert and output", "Name of local file to be housed in same directory as script execution.", "that mask required data.\"\"\" self.reader = read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to start", "expansion chip name for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure", "is None: \"\"\"Terminate execution if no filename is provided.\"\"\" sys.stdout.write('Please provide a valid", "passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal places created by time.time() floating point precision", "None. \"\"\" self.expansion = expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export", "of conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please reference the README for accepted formats. Terminating...\\n')", "chip provided. Please reference the README for accepted formats. 
Terminating...\\n') sys.exit() if self.filename", "\"\"\"Attempt to start writing to JSON config and CSV data files.\"\"\" def validateParameters(self):", "import time import constants import parser.correct as correct import parser.export as export import", "start reading the file if validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal places", "parsing channel data. Defaults to None. filename (String): Name of local file to", "provided.\"\"\" sys.stdout.write('Please provide a valid expansion chip name for parsing. Terminating...\\n') sys.exit() elif", "sys import time import constants import parser.correct as correct import parser.export as export", "of local file to be housed in same directory as script execution. Defaults", "class App: \"\"\" Base container class to divert all export file conversions and", "data.\"\"\" self.reader = read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to start reading the file", "floating point precision for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt", "app = App(sys.argv[1], sys.argv[2]) elif 1 < len(sys.argv): app = App(sys.argv[1]) else: app", "self.exporter.start() \"\"\"Attempt to start writing to JSON config and CSV data files.\"\"\" def", "sys.stdout.write('Invalid expansion chip provided. Please reference the README for accepted formats. Terminating...\\n') sys.exit()", "\"\"\"Remove decimal places created by time.time() floating point precision for clean filenames.\"\"\" self.exporter", "not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid filename", "readable data for our visualiser to parse. Args: expansion (String): FamiTracker expansion chip", "Please reference the README for accepted formats. 
Terminating...\\n') sys.exit() if '__main__' == __name__:", "command line.\"\"\" if 2 < len(sys.argv): app = App(sys.argv[1], sys.argv[2]) elif 1 <", "way of conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please reference the README for accepted formats.", "the README for accepted formats. Terminating...\\n') sys.exit() if '__main__' == __name__: \"\"\"Initialise root", "formats. Terminating...\\n') sys.exit() if self.filename is None: \"\"\"Terminate execution if no filename is", "required data.\"\"\" self.reader = read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to start reading the", "in the way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. Please reference the README", ".txt file for parsing. Terminating...\\n') sys.exit() elif not self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get", "passed to the parser by the user via the command line is in", "constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip", "config and CSV data files.\"\"\" def validateParameters(self): \"\"\" Ensure that the information passed", "via the command line is in the correct format. \"\"\" if self.expansion is", "the correct expansion type to convert and output readable data for our visualiser", "as correct import parser.export as export import parser.read as read class App: \"\"\"", "\"\"\"Attempt to start reading the file if validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove", "problems that mask required data.\"\"\" self.reader = read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to", "README for accepted formats. Terminating...\\n') sys.exit() if '__main__' == __name__: \"\"\"Initialise root app", "formats. 
Terminating...\\n') sys.exit() if '__main__' == __name__: \"\"\"Initialise root app when file is", "\"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided.", "as there are existing problems that mask required data.\"\"\" self.reader = read.FileReader(self.filename) full_path", "that the information passed to the parser by the user via the command", "def validateParameters(self): \"\"\" Ensure that the information passed to the parser by the", "clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start writing to", "writing to JSON config and CSV data files.\"\"\" def validateParameters(self): \"\"\" Ensure that", "type to convert and output readable data for our visualiser to parse. Args:", "self.filename is None: \"\"\"Terminate execution if no filename is provided.\"\"\" sys.stdout.write('Please provide a", "export import parser.read as read class App: \"\"\" Base container class to divert", "self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as there are existing", "the information passed to the parser by the user via the command line", "(String): FamiTracker expansion chip to use as reference for parsing channel data. Defaults", "Please reference the README for accepted formats. Terminating...\\n') sys.exit() if self.filename is None:", "and error handling to their respective packages and libraries. \"\"\" def __init__(self, expansion=None,", "for parsing channel data. Defaults to None. filename (String): Name of local file", "line session, using the correct expansion type to convert and output readable data", "our visualiser to parse. 
Args: expansion (String): FamiTracker expansion chip to use as", "filename=None): \"\"\" Initialise the command line session, using the correct expansion type to", "parser.correct as correct import parser.export as export import parser.read as read class App:", "script execution. Defaults to None. \"\"\" self.expansion = expansion self.filename = filename self.validateParameters()", "chip name for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity", "reference the README for accepted formats. Terminating...\\n') sys.exit() if self.filename is None: \"\"\"Terminate", "in the way of conversions.\"\"\" sys.stdout.write('Invalid filename provided. Please reference the README for", "JSON config and CSV data files.\"\"\" def validateParameters(self): \"\"\" Ensure that the information", "to start writing to JSON config and CSV data files.\"\"\" def validateParameters(self): \"\"\"", "if self.expansion is None: \"\"\"Terminate execution if no expansion chip is provided.\"\"\" sys.stdout.write('Please", "= read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to start reading the file if validation", "elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in the way of", "class to divert all export file conversions and error handling to their respective", "if validation passes.\"\"\" timestamp = int(time.time()) \"\"\"Remove decimal places created by time.time() floating", "__name__: \"\"\"Initialise root app when file is executed via the command line.\"\"\" if", "to their respective packages and libraries. \"\"\" def __init__(self, expansion=None, filename=None): \"\"\" Initialise", "the user via the command line is in the correct format. \"\"\" if", "for our visualiser to parse. Args: expansion (String): FamiTracker expansion chip to use", "file conversions and error handling to their respective packages and libraries. 
\"\"\" def", "mask required data.\"\"\" self.reader = read.FileReader(self.filename) full_path = self.reader.start() \"\"\"Attempt to start reading", "Terminating...\\n') sys.exit() elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in the", "provide a valid expansion chip name for parsing. Terminating...\\n') sys.exit() elif self.expansion.lower() not", "self.expansion = expansion self.filename = filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as", "Defaults to None. filename (String): Name of local file to be housed in", "decimal places created by time.time() floating point precision for clean filenames.\"\"\" self.exporter =", "\"\"\"Terminate execution if no expansion chip is provided.\"\"\" sys.stdout.write('Please provide a valid expansion", "for accepted formats. Terminating...\\n') sys.exit() if self.filename is None: \"\"\"Terminate execution if no", "export file conversions and error handling to their respective packages and libraries. \"\"\"", "filename (String): Name of local file to be housed in same directory as", "sys.exit() if '__main__' == __name__: \"\"\"Initialise root app when file is executed via", "file as there are existing problems that mask required data.\"\"\" self.reader = read.FileReader(self.filename)", "are existing problems that mask required data.\"\"\" self.reader = read.FileReader(self.filename) full_path = self.reader.start()", "= int(time.time()) \"\"\"Remove decimal places created by time.time() floating point precision for clean", "validateParameters(self): \"\"\" Ensure that the information passed to the parser by the user", "self.filename.lower().endswith('.txt'): \"\"\"Ensure case-sensitivity doesn't get in the way of conversions.\"\"\" sys.stdout.write('Invalid filename provided.", "expansion chip provided. Please reference the README for accepted formats. 
Terminating...\\n') sys.exit() if", "sys.exit() elif self.expansion.lower() not in constants.expansions(): \"\"\"Ensure case-sensitivity doesn't get in the way", "Terminating...\\n') sys.exit() if self.filename is None: \"\"\"Terminate execution if no filename is provided.\"\"\"", "by the user via the command line is in the correct format. \"\"\"", "is executed via the command line.\"\"\" if 2 < len(sys.argv): app = App(sys.argv[1],", "precision for clean filenames.\"\"\" self.exporter = export.DataExporter(timestamp, full_path, self.expansion) self.exporter.start() \"\"\"Attempt to start", "import parser.read as read class App: \"\"\" Base container class to divert all", "= filename self.validateParameters() correct.FixExport(self.filename) \"\"\"Rewrite FamiTracker export file as there are existing problems", "when file is executed via the command line.\"\"\" if 2 < len(sys.argv): app", "the way of conversions.\"\"\" sys.stdout.write('Invalid expansion chip provided. Please reference the README for" ]
[ ": PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering", "scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture", "- com_position[1], -0.5 * height ] faces = [ 0, 1, 2, 0,", "3.0 dtheta = 2.0 * np.pi / 12.0 ctheta = np.cos( dtheta *", ") elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] )", ") elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] )", "-0.5 * height ] faces = [ 0, 1, 2, 0, 2, 3,", "0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1, 3,", "loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart'", "simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] )", "* height ] faces = [ 0, 1, 2, 0, 2, 3, 0,", "), 0.5 * height ] vertices = [ inner_rad * ctheta - com_position[0],", ") ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0", ") ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) ) #### rotation = tm.rotation(", "1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0", "-1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0", "RENDERING_BACKEND ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi", "= 1.0 inner_rad = 2.0 outer_rad = 3.0 half_rad = 0.5* ( inner_rad", "0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0, 0.0", "\"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ], rotation ) ) for", "{}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) ) #### rotation", "7, 6, 4, 6, 5 ] def create_path_part( idx ) : 
height =", "0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0, 0.0", "as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 /", "1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0", "elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE", "* stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad", ") ) for i in range( 0, 12 ) : height = 1.0", "RAMP_FACES = [ 0, 1, 2, 0, 2, 3, 0, 4, 5, 0,", "5, 0, 5, 1, 0, 3, 7, 0, 7, 4, 2, 6, 7,", ") * dtheta ), 0.5 * height ] vertices = [ inner_rad *", "- com_position[1], -0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n", "[ 200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [", "runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\" )", "runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario ) visualizer =", "200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0,", "[ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0],", "( idx + 0.5 ) * dtheta ), 0.5 * height ] vertices", "visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) :", "0.5 * height, inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1],", "- com_position[0], outer_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta_n", "[ 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES =", "= np.cos( dtheta * idx ) stheta = np.sin( 
dtheta * idx )", "COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0 ]", "com_position[0], inner_rad * stheta_n - com_position[1], -0.5 * height ] faces = [", "com_position[1], -0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta -", "* stheta - com_position[1], 0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad", "tm.Vector3f( [ np.pi / 3, np.pi / 4, np.pi / 6 ] )", "choice_backend = sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend", "ctheta - com_position[0], inner_rad * stheta - com_position[1], 0.5 * height, outer_rad *", "inner_rad * stheta - com_position[1], -0.5 * height, outer_rad * ctheta - com_position[0],", "3, 0, 2, 1, 0, 3, 2, 1, 2, 3 ] COM_RAMP =", "idx + 1 ) ) stheta_n = np.sin( dtheta * ( idx +", "outer_rad ) com_position = [ half_rad * np.cos( ( idx + 0.5 )", "COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0],", "half_rad = 0.5* ( inner_rad + outer_rad ) dtheta = 2.0 * np.pi", "- com_position[1], -0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n", ") runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario ) visualizer", "6, 1, 6, 2, 4, 7, 6, 4, 6, 5 ] return vertices,", "create_path_part( idx ) : height = 1.0 inner_rad = 2.0 outer_rad = 3.0", "1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0,", "elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] ) elif", "height ] faces = [ 0, 1, 2, 0, 2, 3, 0, 4,", "= 3.0 dtheta = 2.0 * np.pi / 12.0 ctheta = np.cos( dtheta", ") : sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN )", "simulation = runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName(", ": 
sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) :", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress(", "* np.pi / 12.0 com_position = [ half_rad * np.cos( ( i +", "+ outer_rad ) com_position = [ half_rad * np.cos( ( idx + 0.5", "0.5 * height ] vertices, faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format(", "-1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3,", ": PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print(", ") * dtheta ), 0.5 * height ] vertices, faces = create_path_part( i", "inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], 0.5 * height,", "4, 7, 6, 4, 6, 5 ] return vertices, faces if __name__ ==", "outer_rad = 3.0 dtheta = 2.0 * np.pi / 12.0 ctheta = np.cos(", "12 ) : height = 1.0 inner_rad = 2.0 outer_rad = 3.0 half_rad", "0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0", "- COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 -", "height ] vertices, faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ),", "- com_position[0], inner_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta", "[ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0],", "stheta - com_position[1], 0.5 * height, outer_rad * ctheta - com_position[0], outer_rad *", "* dtheta ), half_rad * np.sin( ( idx + 0.5 ) * dtheta", "dtheta ), 0.5 * height ] vertices, faces = create_path_part( i ) scenario.AddSingleBody(", "0, 3, 7, 0, 7, 4, 2, 6, 7, 2, 7, 3, 1,", "= tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] ) ) scenario = 
loco.sim.Scenario()", "elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ] ) simulation.Step()", "loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\",", "= np.sin( dtheta * idx ) ctheta_n = np.cos( dtheta * ( idx", "* ctheta - com_position[0], inner_rad * stheta - com_position[1], 0.5 * height, outer_rad", "inner_rad + outer_rad ) dtheta = 2.0 * np.pi / 12.0 com_position =", "1.0 inner_rad = 2.0 outer_rad = 3.0 dtheta = 2.0 * np.pi /", "= 2.0 outer_rad = 3.0 dtheta = 2.0 * np.pi / 12.0 ctheta", "stheta = np.sin( dtheta * idx ) ctheta_n = np.cos( dtheta * (", "TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh(", "return vertices, faces if __name__ == '__main__' : if len( sys.argv ) >", "1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0,", "] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R", "visualizer = runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName(", "0.0, 0.0 ] ) ) rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0", "* ctheta - com_position[0], outer_rad * stheta - com_position[1], -0.5 * height, outer_rad", "loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND", "0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ]", "0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0, 0.0", ") ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = 
runtime.CreateSimulation( scenario )", "PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics", "6, 7, 2, 7, 3, 1, 5, 6, 1, 6, 2, 4, 7,", "com_position[1], -0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n -", "'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print(", "inner_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta - com_position[0],", "2.0 * np.pi / 12.0 com_position = [ half_rad * np.cos( ( i", "tm import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON =", "sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet'", "* ( idx + 1 ) ) stheta_n = np.sin( dtheta * (", "half_rad * np.cos( ( i + 0.5 ) * dtheta ), half_rad *", "rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0", "vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND", "+ 0.5 ) * dtheta ), 0.5 * height ] vertices, faces =", "0.7 ] floor.drawable.specular = [ 0.3, 0.5, 0.7 ] while visualizer.IsActive() : if", "com_position[0], inner_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta -", "def create_path_part( idx ) : height = 1.0 inner_rad = 2.0 outer_rad =", "COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2],", "range( 0, 12 ) : height = 1.0 inner_rad = 2.0 outer_rad =", "#!/usr/bin/env python import sys import loco import tinymath as tm import numpy as", ": PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif", "import sys import loco import tinymath as tm import 
numpy as np PHYSICS_BACKEND", "elif choice_backend == 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' :", "0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0,", "- COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1, 3, 0, 2, 1, 0,", "3.0 ] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 -", "COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES = [ 0, 1, 2, 0, 2,", "-1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [", ": break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) :", "outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], 0.5 * height,", "- COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 -", "\"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse", ") elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ] )", "2, 0.0, 0.0 ] ) ) rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0,", "0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0", "0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0", "#### rotation = tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ] )", "* dtheta ), half_rad * np.sin( ( i + 0.5 ) * dtheta", "outer_rad * stheta - com_position[1], -0.5 * height, outer_rad * ctheta_n - com_position[0],", "import tinymath as tm import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND =", "half_rad * np.cos( ( idx + 0.5 ) * dtheta ), half_rad *", "dtheta ), half_rad * np.sin( ( idx + 0.5 ) * dtheta ),", "print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) ) #### rotation = tm.rotation( tm.Vector3f( [", "tm.rotation( tm.Vector3f( [ 
np.pi / 3, np.pi / 4, np.pi / 6 ]", "height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], -0.5 *", "] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ]", "com_position[1], 0.5 * height, inner_rad * ctheta - com_position[0], inner_rad * stheta -", "stheta_n - com_position[1], -0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad *", "\"path_part_{}\".format( i ), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime =", "tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0, 2.0 ],", "* height, inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], -0.5", "[ inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], 0.5 *", "COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0],", "0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1],", "com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation =", "ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], -0.5 * height ] faces", "1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ]", "= loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend:", "= scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient", "np.pi / 12.0 com_position = [ half_rad * np.cos( ( i + 0.5", "0, 1, 2, 0, 2, 3, 0, 4, 5, 0, 5, 1, 0,", "np.sin( dtheta * ( idx + 1 ) ) half_rad = 0.5* (", ") : sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ] ) simulation.Step() visualizer.Update() runtime.DestroySimulation() runtime.DestroyVisualizer()", "height, outer_rad * ctheta - 
com_position[0], outer_rad * stheta - com_position[1], 0.5 *", "while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R )", "__name__ == '__main__' : if len( sys.argv ) > 1 : choice_backend =", "faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces, 1.0,", "[ 0.0, 0.0, 0.0 ] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\",", "/ 3.0, 1.0 / 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES = [ 0.0", "/ 4, np.pi / 6 ] ) ) #### rotation = tm.rotation( tm.Vector3f(", "scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) )", "sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM(", "PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0", "stheta - com_position[1], 0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad *", "] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ]", "'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM", "0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0", "2, 6, 7, 2, 7, 3, 1, 5, 6, 1, 6, 2, 4,", ") elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] )", "5, 6, 1, 6, 2, 4, 7, 6, 4, 6, 5 ] def", "-1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0", "loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ] ) simulation.Step() visualizer.Update() runtime.DestroySimulation()", "- com_position[1], 0.5 * height, outer_rad * ctheta_n - 
com_position[0], outer_rad * stheta_n", "RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0", "= loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 / 3.0, 1.0 /", "ctheta = np.cos( dtheta * idx ) stheta = np.sin( dtheta * idx", "3, 2, 1, 2, 3 ] COM_RAMP = [ 0.0, 7.0 / 9.0,", "ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], 0.5 * height, inner_rad *", "- COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 -", ") ctheta_n = np.cos( dtheta * ( idx + 1 ) ) stheta_n", "vertices = [ inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1],", "com_position[1], -0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n -", "= runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\"", "np.cos( dtheta * idx ) stheta = np.sin( dtheta * idx ) ctheta_n", "0.5 ) * dtheta ), 0.5 * height ] vertices, faces = create_path_part(", "1 ) ) stheta_n = np.sin( dtheta * ( idx + 1 )", "= np.sin( dtheta * ( idx + 1 ) ) half_rad = 0.5*", "COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2],", "com_position[0], outer_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta_n -", "/ 12.0 com_position = [ half_rad * np.cos( ( i + 0.5 )", "dtheta * ( idx + 1 ) ) stheta_n = np.sin( dtheta *", "0.5, 0.7 ] floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ] floor.drawable.specular = [", "np.sin( dtheta * idx ) ctheta_n = np.cos( dtheta * ( idx +", "1.0 ], rotation ) ) for i in range( 0, 12 ) :", "[ 0, 1, 2, 0, 2, 3, 0, 4, 5, 0, 5, 1,", "python import sys import loco import tinymath as tm import numpy as np", ") scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ], rotation", "PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) ) #### rotation =", "* height 
] vertices, faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i", ") print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) ) #### rotation = tm.rotation( tm.Vector3f(", "1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation", "rotation = tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ] ) )", "0, 1, 3, 0, 2, 1, 0, 3, 2, 1, 2, 3 ]", ") sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture =", "1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0", "loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else", ") floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse =", ": simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [", "scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0, 2.0 ], rotation ) ) scenario.AddSingleBody(", "com_position[1], 0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta -", "6, 5 ] return vertices, faces if __name__ == '__main__' : if len(", "1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1, 3, 0, 2, 1,", "= sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend ==", "for i in range( 0, 12 ) : height = 1.0 inner_rad =", ": sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) :", "3.0 half_rad = 0.5* ( inner_rad + outer_rad ) dtheta = 2.0 *", "scenario ) visualizer = runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor", "6, 2, 4, 7, 6, 4, 6, 5 ] return vertices, faces if", "vertices, 
faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces,", "= [ 0, 1, 2, 0, 2, 3, 0, 4, 5, 0, 5,", "[ half_rad * np.cos( ( idx + 0.5 ) * dtheta ), half_rad", "com_position[1], -0.5 * height ] faces = [ 0, 1, 2, 0, 2,", "[ 0.0, 0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [", "6, 4, 6, 5 ] def create_path_part( idx ) : height = 1.0", "elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if", "7, 6, 4, 6, 5 ] return vertices, faces if __name__ == '__main__'", "+ outer_rad ) dtheta = 2.0 * np.pi / 12.0 com_position = [", "rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0", "= [ 0.3, 0.5, 0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE )", ": if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset()", "1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0", "loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario", ") : sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT )", "ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], 0.5 * height, inner_rad *", "[ 1.0, -1.0, 1.0 ], rotation ) ) for i in range( 0,", "- com_position[0], inner_rad * stheta - com_position[1], -0.5 * height, outer_rad * ctheta", "outer_rad = 3.0 half_rad = 0.5* ( inner_rad + outer_rad ) dtheta =", "dtheta * idx ) stheta = np.sin( dtheta * idx ) ctheta_n =", "] TETRAHEDRON_FACES = [ 0, 1, 3, 0, 2, 1, 0, 3, 2,", "COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1],", "- 
COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 -", "( i + 0.5 ) * dtheta ), 0.5 * height ] vertices,", "] floor.drawable.specular = [ 0.3, 0.5, 0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress(", "1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0, 0.0", "runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\" )", "[ half_rad * np.cos( ( i + 0.5 ) * dtheta ), half_rad", ") ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0, 2.0 ], rotation )", "[ 0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [", "faces if __name__ == '__main__' : if len( sys.argv ) > 1 :", "- com_position[0], inner_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta", ") #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ]", "= [ 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES", "4, 2, 6, 7, 2, 7, 3, 1, 5, 6, 1, 6, 2,", "0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [", "i ), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime(", "tinymath as tm import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW", "height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], 0.5 *", "create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces, 1.0, com_position, tm.Matrix3f(),", ") ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ],", "inner_rad + outer_rad ) com_position = [ half_rad * np.cos( ( idx +", "= loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND = 
loco.sim.PHYSICS_BULLET elif choice_backend ==", "- COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 -", "12.0 ctheta = np.cos( dtheta * idx ) stheta = np.sin( dtheta *", "0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0", "* ctheta - com_position[0], inner_rad * stheta - com_position[1], -0.5 * height, outer_rad", "0.3, 0.5, 0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break", "loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP", "+ 1 ) ) stheta_n = np.sin( dtheta * ( idx + 1", "= loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer(", "1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0", ") scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) )", "dtheta ), half_rad * np.sin( ( i + 0.5 ) * dtheta ),", "), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND,", "\"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0,", "COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1],", "1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0", "loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime", "- COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 -", "-1.0, 1.0 ], rotation ) ) for i in range( 0, 12 )", "elif choice_backend == 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim' :", "2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0", 
"PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend", "4, 7, 6, 4, 6, 5 ] def create_path_part( idx ) : height", "10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0, 2.0", "[ 0.3, 0.5, 0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) :", "RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh(", ") rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] ) ) scenario", "7, 3, 1, 5, 6, 1, 6, 2, 4, 7, 6, 4, 6,", "1.0, -1.0, 1.0 ], rotation ) ) for i in range( 0, 12", "simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress(", "5, 1, 0, 3, 7, 0, 7, 4, 2, 6, 7, 2, 7,", "= [ inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], 0.5", ": sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) :", "0.3, 0.5, 0.7 ] floor.drawable.specular = [ 0.3, 0.5, 0.7 ] while visualizer.IsActive()", "height, inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], -0.5 *", "ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], -0.5 * height, inner_rad *", "loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) :", "np.pi / 6 ] ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi", "0.0, 0.0, 0.0 ] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0,", "scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1,", "numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = 
loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0", "0, 12 ) : height = 1.0 inner_rad = 2.0 outer_rad = 3.0", "1 ) ) half_rad = 0.5* ( inner_rad + outer_rad ) com_position =", "stheta - com_position[1], -0.5 * height, outer_rad * ctheta - com_position[0], outer_rad *", "1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0", ") scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ], rotation", "- com_position[1], 0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n", "inner_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta - com_position[0],", "loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario", "* dtheta ), 0.5 * height ] vertices, faces = create_path_part( i )", "= 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse = [ 0.3,", "- COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 -", "rotation = tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi / 4, np.pi /", "TETRAHEDRON_FACES = [ 0, 1, 3, 0, 2, 1, 0, 3, 2, 1,", ") : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else simulation.Resume()", "* height ] vertices = [ inner_rad * ctheta - com_position[0], inner_rad *", "outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], 0.5 * height,", "[ 0.3, 0.5, 0.7 ] floor.drawable.specular = [ 0.3, 0.5, 0.7 ] while", "loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN", "- COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES", "* height, outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], -0.5", "scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, 
TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ], rotation )", "half_rad * np.sin( ( idx + 0.5 ) * dtheta ), 0.5 *", "* np.pi / 12.0 ctheta = np.cos( dtheta * idx ) stheta =", ": sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) :", "/ 3.0 ] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0", "( inner_rad + outer_rad ) dtheta = 2.0 * np.pi / 12.0 com_position", "TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0", "break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause()", "\"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [", ": height = 1.0 inner_rad = 2.0 outer_rad = 3.0 dtheta = 2.0", "COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES = [ 0, 1,", "loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [", "0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0,", "if __name__ == '__main__' : if len( sys.argv ) > 1 : choice_backend", "elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] ) elif", "1, 0, 3, 2, 1, 2, 3 ] COM_RAMP = [ 0.0, 7.0", "1.0, -1.0, 2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0,", "#### rotation = tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi / 4, np.pi", "ctheta - com_position[0], inner_rad * stheta - com_position[1], -0.5 * height, outer_rad *", ") > 1 : choice_backend = sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND", "1.0 inner_rad = 2.0 outer_rad = 3.0 half_rad = 0.5* ( inner_rad +", "0.0, 0.0, 1000.0 ] ) elif 
visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0,", "* height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], -0.5", "* ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], 0.5 * height, inner_rad", "3, 7, 0, 7, 4, 2, 6, 7, 2, 7, 3, 1, 5,", "if choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' :", "0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0", "[ -1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES,", "= loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format(", "- COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 -", "com_position[1], 0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n -", "stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta - com_position[0], inner_rad *", "1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5,", "1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES = [", "== 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) )", "] def create_path_part( idx ) : height = 1.0 inner_rad = 2.0 outer_rad", "rotation ) ) for i in range( 0, 12 ) : height =", "height = 1.0 inner_rad = 2.0 outer_rad = 3.0 half_rad = 0.5* (", "COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES =", "0.0 ] ) ) rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ]", "0.5 ) * dtheta ), 0.5 * height ] vertices = [ inner_rad", "= 0.5* ( inner_rad + outer_rad ) dtheta = 2.0 * np.pi /", ") ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ],", "3, 0, 4, 5, 0, 5, 1, 0, 3, 7, 0, 7, 4,", "* np.sin( ( i + 0.5 ) * dtheta 
), 0.5 * height", "[ np.pi / 3, np.pi / 4, np.pi / 6 ] ) )", "if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0,", ") * dtheta ), half_rad * np.sin( ( idx + 0.5 ) *", ") for i in range( 0, 12 ) : height = 1.0 inner_rad", "- COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 -", "= [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 -", "as tm import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON", "= 3.0 half_rad = 0.5* ( inner_rad + outer_rad ) dtheta = 2.0", "- COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 -", "[ 1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES,", "0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0", "0.0, 7.0 / 9.0, 4.0 / 9.0 ] RAMP_VERTICES = [ 1.0 -", "0.5* ( inner_rad + outer_rad ) dtheta = 2.0 * np.pi / 12.0", "), half_rad * np.sin( ( i + 0.5 ) * dtheta ), 0.5", ") half_rad = 0.5* ( inner_rad + outer_rad ) com_position = [ half_rad", "half_rad * np.sin( ( i + 0.5 ) * dtheta ), 0.5 *", "= [ 0.0, 7.0 / 9.0, 4.0 / 9.0 ] RAMP_VERTICES = [", ") : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P )", "outer_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta_n - com_position[0],", "- COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES", "], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0,", "), 0.5 * height ] vertices, faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh(", "200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( 
loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0,", "loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0, 2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh(", "* stheta_n - com_position[1], -0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad", "0, 7, 4, 2, 6, 7, 2, 7, 3, 1, 5, 6, 1,", "RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ], rotation ) ) for i", "- COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 -", "1.0 / 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0],", "] ) ) rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] )", "scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7 ]", "- COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 -", "= runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\"", "np.cos( dtheta * ( idx + 1 ) ) stheta_n = np.sin( dtheta", "0.5, [ 1.0, -1.0, 1.0 ], rotation ) ) for i in range(", "COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1],", "- COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 -", "7, 4, 2, 6, 7, 2, 7, 3, 1, 5, 6, 1, 6,", "i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC", "COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0],", "\"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody(", "i + 0.5 ) * dtheta ), half_rad * np.sin( ( i +", "3, np.pi / 4, np.pi / 6 ] ) ) #### rotation =", "* idx ) ctheta_n = np.cos( dtheta * ( idx + 1 )", "com_position[0], inner_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta -", "0.0 - COM_TETRAHEDRON[0], 1.0 - 
COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0", "np.pi / 4, np.pi / 6 ] ) ) #### rotation = tm.rotation(", "* height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], -0.5", "6, 5 ] def create_path_part( idx ) : height = 1.0 inner_rad =", "faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND )", "0.3, 0.5, 0.7 ] floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ] floor.drawable.specular =", "== 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : PHYSICS_BACKEND =", "/ 12.0 ctheta = np.cos( dtheta * idx ) stheta = np.sin( dtheta", "9.0, 4.0 / 9.0 ] RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 -", "-0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1],", ") ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ],", ": choice_backend = sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif", "+ 1 ) ) half_rad = 0.5* ( inner_rad + outer_rad ) com_position", "np.sin( ( idx + 0.5 ) * dtheta ), 0.5 * height ]", "( i + 0.5 ) * dtheta ), half_rad * np.sin( ( i", ") scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC )", "1.0 / 3.0 ] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1],", "2, 1, 2, 3 ] COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0", ") scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ], rotation", "scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody(", "COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],", "* stheta - com_position[1], -0.5 * 
height, outer_rad * ctheta - com_position[0], outer_rad", "rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] ) ) scenario =", "loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ], rotation ) )", "= 2.0 outer_rad = 3.0 half_rad = 0.5* ( inner_rad + outer_rad )", "scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient =", "COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0],", "] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ]", "dtheta * ( idx + 1 ) ) half_rad = 0.5* ( inner_rad", "'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse = [ 0.3, 0.5,", "sys.argv ) > 1 : choice_backend = sys.argv[1] if choice_backend == 'mujoco' :", "np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0,", ") dtheta = 2.0 * np.pi / 12.0 com_position = [ half_rad *", "COM_RAMP[2] ] RAMP_FACES = [ 0, 1, 2, 0, 2, 3, 0, 4,", "5, 6, 1, 6, 2, 4, 7, 6, 4, 6, 5 ] return", "= tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ] ) ) rotation", "] return vertices, faces if __name__ == '__main__' : if len( sys.argv )", "np.pi / 3, np.pi / 4, np.pi / 6 ] ) ) ####", "/ 9.0 ] RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0", "-1.0, -1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5,", "/ 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0", "COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1],", "'Rendering backend: {}'.format( RENDERING_BACKEND ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi", "height = 1.0 inner_rad = 2.0 outer_rad = 3.0 dtheta = 2.0 *", "outer_rad * stheta_n - 
com_position[1], -0.5 * height, inner_rad * ctheta_n - com_position[0],", "COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1],", "0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0", "= loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere(", "-200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0,", "tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0, 2.0 ], rotation", ") : height = 1.0 inner_rad = 2.0 outer_rad = 3.0 half_rad =", "1, 2, 0, 2, 3, 0, 4, 5, 0, 5, 1, 0, 3,", "0.5 ) * dtheta ), half_rad * np.sin( ( i + 0.5 )", "sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM(", "np.cos( ( idx + 0.5 ) * dtheta ), half_rad * np.sin( (", "0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0,", ") : height = 1.0 inner_rad = 2.0 outer_rad = 3.0 dtheta =", "ctheta - com_position[0], outer_rad * stheta - com_position[1], -0.5 * height, outer_rad *", "2, 1, 0, 3, 2, 1, 2, 3 ] COM_RAMP = [ 0.0,", "scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ], rotation )", "/ 9.0, 4.0 / 9.0 ] RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0", "inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], 0.5 * height,", "com_position = [ half_rad * np.cos( ( idx + 0.5 ) * dtheta", "stheta_n - com_position[1], -0.5 * height ] faces = [ 0, 1, 2,", "COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0],", ") ) rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] ) )", "- COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0,", 
"0.5, 0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif", "1, 6, 2, 4, 7, 6, 4, 6, 5 ] def create_path_part( idx", "( idx + 1 ) ) stheta_n = np.sin( dtheta * ( idx", "sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard'", "0.0 ] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(),", "outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], -0.5 * height,", "dtheta = 2.0 * np.pi / 12.0 com_position = [ half_rad * np.cos(", "-0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1],", "1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [", "1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0", "floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5,", "* idx ) stheta = np.sin( dtheta * idx ) ctheta_n = np.cos(", "choice_backend == 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ] ) simulation.Step() visualizer.Update()", "PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario )", "outer_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta_n - com_position[0],", "com_position[0], inner_rad * stheta - com_position[1], -0.5 * height, outer_rad * ctheta -", "COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2]", "1 : choice_backend = sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO", "tm.Vector3f( [ 0.0, 
0.0, 0.0 ] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane(", "com_position[0], outer_rad * stheta_n - com_position[1], -0.5 * height, inner_rad * ctheta_n -", "-1.0, 2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [", "RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ], rotation ) ) for i in", "2.0 outer_rad = 3.0 half_rad = 0.5* ( inner_rad + outer_rad ) dtheta", "RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario ) sphere", ") scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0, 2.0 ], rotation ) )", "height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], 0.5 *", "6 ] ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 2,", "1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0,", "PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend:", "* ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], 0.5 * height, inner_rad", "- COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 -", "( idx + 0.5 ) * dtheta ), half_rad * np.sin( ( idx", "\"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ], rotation ) ) scenario.AddSingleBody(", "0, 4, 5, 0, 5, 1, 0, 3, 7, 0, 7, 4, 2,", "half_rad = 0.5* ( inner_rad + outer_rad ) com_position = [ half_rad *", "5 ] def create_path_part( idx ) : height = 1.0 inner_rad = 2.0", "- com_position[0], outer_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta_n", "RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 / 3.0, 1.0", "[ 0.3, 0.5, 0.7 ] floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ] floor.drawable.specular", "i in range( 0, 12 ) : height = 1.0 inner_rad = 2.0", 
"COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES =", "= [ half_rad * np.cos( ( i + 0.5 ) * dtheta ),", "inner_rad = 2.0 outer_rad = 3.0 half_rad = 0.5* ( inner_rad + outer_rad", "loco import tinymath as tm import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND", "COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1, 3, 0, 2,", "dtheta ), 0.5 * height ] vertices = [ inner_rad * ctheta -", "COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1,", "1.0, [ -1.0, -1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES,", "= 2.0 * np.pi / 12.0 com_position = [ half_rad * np.cos( (", "dtheta * idx ) ctheta_n = np.cos( dtheta * ( idx + 1", "( inner_rad + outer_rad ) com_position = [ half_rad * np.cos( ( idx", "idx + 0.5 ) * dtheta ), half_rad * np.sin( ( idx +", "0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1],", "* np.cos( ( i + 0.5 ) * dtheta ), half_rad * np.sin(", "loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT", "* stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta - com_position[0], inner_rad", "floor.drawable.ambient = [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse = [ 0.3, 0.5, 0.7", "- com_position[0], inner_rad * stheta_n - com_position[1], -0.5 * height ] faces =", ": height = 1.0 inner_rad = 2.0 outer_rad = 3.0 half_rad = 0.5*", "tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ] ) ) rotation =", "idx ) : height = 1.0 inner_rad = 2.0 outer_rad = 3.0 dtheta", "floor.drawable.specular = [ 0.3, 0.5, 0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE", "* height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], 0.5", "- com_position[0], outer_rad * stheta - 
com_position[1], -0.5 * height, outer_rad * ctheta_n", "- com_position[1], 0.5 * height, inner_rad * ctheta - com_position[0], inner_rad * stheta", "idx ) ctheta_n = np.cos( dtheta * ( idx + 1 ) )", "] RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],", "scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ], rotation )", "], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0,", "0.5 * height ] vertices = [ inner_rad * ctheta - com_position[0], inner_rad", "scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ], rotation )", "1.0 - COM_RAMP[2] ] RAMP_FACES = [ 0, 1, 2, 0, 2, 3,", "height, outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], -0.5 *", "len( sys.argv ) > 1 : choice_backend = sys.argv[1] if choice_backend == 'mujoco'", "COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2]", "2, 0, 2, 3, 0, 4, 5, 0, 5, 1, 0, 3, 7,", "2, 4, 7, 6, 4, 6, 5 ] return vertices, faces if __name__", ") : simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM(", ") ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi /", "+ 0.5 ) * dtheta ), half_rad * np.sin( ( i + 0.5", "* height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], 0.5", "vertices, faces if __name__ == '__main__' : if len( sys.argv ) > 1", "faces = [ 0, 1, 2, 0, 2, 3, 0, 4, 5, 0,", "== 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND =", "- COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 -", "] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), 
tm.Matrix3f()", "== 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND =", "[ 1.0, -1.0, 2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES,", "loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ], rotation ) )", "in range( 0, 12 ) : height = 1.0 inner_rad = 2.0 outer_rad", "com_position = [ half_rad * np.cos( ( i + 0.5 ) * dtheta", "COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1],", "3, 1, 5, 6, 1, 6, 2, 4, 7, 6, 4, 6, 5", "* height, outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], 0.5", "loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P", "TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh(", "0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0", "COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1],", "- COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 -", "2.0 outer_rad = 3.0 dtheta = 2.0 * np.pi / 12.0 ctheta =", "inner_rad = 2.0 outer_rad = 3.0 dtheta = 2.0 * np.pi / 12.0", "12.0 com_position = [ half_rad * np.cos( ( i + 0.5 ) *", "stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad *", "ctheta_n = np.cos( dtheta * ( idx + 1 ) ) stheta_n =", "0.1, [ 1.0, -1.0, 2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES,", "3.0, 1.0 / 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES = [ 0.0 -", ") stheta_n = np.sin( dtheta * ( idx + 1 ) ) half_rad", "com_position[0], outer_rad * stheta - com_position[1], -0.5 * height, outer_rad * 
ctheta_n -", "-1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES = [", "- COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES = [ 0, 1, 2, 0,", "2, 4, 7, 6, 4, 6, 5 ] def create_path_part( idx ) :", "COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1],", "- COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 -", "sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM(", "COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0],", "{}'.format( RENDERING_BACKEND ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 3,", "3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 -", "0, 5, 1, 0, 3, 7, 0, 7, 4, 2, 6, 7, 2,", "COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2],", "0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES = [ 0, 1, 2,", "2, 3, 0, 4, 5, 0, 5, 1, 0, 3, 7, 0, 7,", ": if len( sys.argv ) > 1 : choice_backend = sys.argv[1] if choice_backend", "tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation(", "> 1 : choice_backend = sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND =", ") simulation = runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario ) sphere =", ") floor = scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3,", "= [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ]", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress(", ") visualizer = runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( \"sphere\" ) floor =", "0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - 
COM_TETRAHEDRON[0], 0.0", "/ 2, 0.0, 0.0 ] ) ) rotation = tm.rotation( tm.Vector3f( [ 0.0,", ") ) stheta_n = np.sin( dtheta * ( idx + 1 ) )", ") com_position = [ half_rad * np.cos( ( idx + 0.5 ) *", "= [ half_rad * np.cos( ( idx + 0.5 ) * dtheta ),", "com_position[0], outer_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta_n -", "ctheta - com_position[0], outer_rad * stheta - com_position[1], 0.5 * height, outer_rad *", "'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET", "- COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 -", "2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0,", "import loco import tinymath as tm import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE", "com_position[1], 0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n -", "loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim'", "= [ 0, 1, 3, 0, 2, 1, 0, 3, 2, 1, 2,", "7, 0, 7, 4, 2, 6, 7, 2, 7, 3, 1, 5, 6,", "= np.cos( dtheta * ( idx + 1 ) ) stheta_n = np.sin(", "1, 5, 6, 1, 6, 2, 4, 7, 6, 4, 6, 5 ]", "= loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend ==", "np.pi / 2, 0.0, 0.0 ] ) ) rotation = tm.rotation( tm.Vector3f( [", "= scenario.GetSingleBodyByName( \"floor\" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7", ") ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ],", "0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress(", "choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 
'Physics backend: {}'.format( PHYSICS_BACKEND )", "* ctheta - com_position[0], outer_rad * stheta - com_position[1], 0.5 * height, outer_rad", "floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ] floor.drawable.specular = [ 0.3, 0.5, 0.7", "[ 0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [", "[ 0.0, 7.0 / 9.0, 4.0 / 9.0 ] RAMP_VERTICES = [ 1.0", "2, 7, 3, 1, 5, 6, 1, 6, 2, 4, 7, 6, 4,", "elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] ) elif", "choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND", "elif choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND", "RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\",", "] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2],", "stheta - com_position[1], -0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad *", "simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0,", "[ 0, 1, 3, 0, 2, 1, 0, 3, 2, 1, 2, 3", "2.0 * np.pi / 12.0 ctheta = np.cos( dtheta * idx ) stheta", "== '__main__' : if len( sys.argv ) > 1 : choice_backend = sys.argv[1]", "6, 4, 6, 5 ] return vertices, faces if __name__ == '__main__' :", "= create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices, faces, 1.0, com_position,", "loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0", "1, 3, 0, 2, 1, 0, 3, 2, 1, 2, 3 ] COM_RAMP", "1, 0, 3, 7, 0, 7, 4, 2, 6, 7, 2, 7, 3,", "9.0 ] RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 -", "], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, 
TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0,", "choice_backend == 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : PHYSICS_BACKEND", "[ -1.0, -1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES,", "* dtheta ), 0.5 * height ] vertices = [ inner_rad * ctheta", "loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ], rotation ) )", "TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\",", "0.5, [ -1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES,", "6, 1, 6, 2, 4, 7, 6, 4, 6, 5 ] def create_path_part(", "= 2.0 * np.pi / 12.0 ctheta = np.cos( dtheta * idx )", "1, 2, 3 ] COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0 /", "0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1],", "tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody(", ": PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif", "COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1],", "COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2],", "if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif", "= loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 /", "), half_rad * np.sin( ( idx + 0.5 ) * dtheta ), 0.5", "* ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], -0.5 * height ]", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress(", "6, 2, 4, 7, 6, 4, 6, 5 ] def create_path_part( idx )", "4, 6, 5 ] def 
create_path_part( idx ) : height = 1.0 inner_rad", "0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0", "np.cos( ( i + 0.5 ) * dtheta ), half_rad * np.sin( (", "inner_rad * stheta_n - com_position[1], -0.5 * height ] faces = [ 0,", "floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse = [", "-0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1],", "] ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0,", "7.0 / 9.0, 4.0 / 9.0 ] RAMP_VERTICES = [ 1.0 - COM_RAMP[0],", "10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( \"sphere\", 0.1, [ 1.0, -1.0,", "- COM_RAMP[2] ] RAMP_FACES = [ 0, 1, 2, 0, 2, 3, 0,", "print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND )", "sys import loco import tinymath as tm import numpy as np PHYSICS_BACKEND =", "= 1.0 inner_rad = 2.0 outer_rad = 3.0 dtheta = 2.0 * np.pi", "\"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody(", "COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2],", "outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], -0.5 * height,", "COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1, 3, 0, 2, 1, 0, 3,", "2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0", "- com_position[0], outer_rad * stheta_n - com_position[1], -0.5 * height, inner_rad * ctheta_n", "-1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0", "import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [", "] RAMP_FACES = [ 0, 1, 2, 0, 2, 3, 0, 4, 5,", "COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],", "/ 6 ] ) ) #### rotation = tm.rotation( tm.Vector3f( 
[ np.pi /", "- COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1, 3, 0,", "], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0,", "backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) ) ####", "- COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 -", "0, 2, 1, 0, 3, 2, 1, 2, 3 ] COM_RAMP = [", "inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], -0.5 * height", "* stheta - com_position[1], 0.5 * height, outer_rad * ctheta - com_position[0], outer_rad", "1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [", "rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_0\", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0", "idx ) stheta = np.sin( dtheta * idx ) ctheta_n = np.cos( dtheta", "COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1],", "0, 3, 2, 1, 2, 3 ] COM_RAMP = [ 0.0, 7.0 /", "( idx + 1 ) ) half_rad = 0.5* ( inner_rad + outer_rad", "'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) )", "- COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES = [ 0,", "- COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 -", "else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ]", "stheta_n = np.sin( dtheta * ( idx + 1 ) ) half_rad =", "5 ] return vertices, faces if __name__ == '__main__' : if len( sys.argv", "4, 5, 0, 5, 1, 0, 3, 7, 0, 7, 4, 2, 6,", "- COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 -", "= tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi / 4, np.pi / 6", "'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : 
PHYSICS_BACKEND = loco.sim.PHYSICS_DART", "\"sphere\", 0.1, [ 1.0, -1.0, 2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\",", ") ) half_rad = 0.5* ( inner_rad + outer_rad ) com_position = [", "] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ]", "COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0],", "COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0 / 9.0 ] RAMP_VERTICES =", "PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend", "backend: {}'.format( RENDERING_BACKEND ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi /", "3 ] COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0 / 9.0 ]", ") : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP )", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress(", "4.0 / 9.0 ] RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1],", "- COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 -", "0.0, 0.0 ] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0,", "loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ], rotation ) )", "i + 0.5 ) * dtheta ), 0.5 * height ] vertices, faces", "* np.cos( ( idx + 0.5 ) * dtheta ), half_rad * np.sin(", "0.5 ) * dtheta ), half_rad * np.sin( ( idx + 0.5 )", "4, np.pi / 6 ] ) ) #### rotation = tm.rotation( tm.Vector3f( [", "- com_position[1], 0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta", "[ np.pi / 2, 0.0, 0.0 ] ) ) rotation = tm.rotation( tm.Vector3f(", "loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format(", "] COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0 / 9.0 ] 
RAMP_VERTICES", "outer_rad ) dtheta = 2.0 * np.pi / 12.0 com_position = [ half_rad", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress(", "'__main__' : if len( sys.argv ) > 1 : choice_backend = sys.argv[1] if", "* stheta - com_position[1], -0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad", "] vertices = [ inner_rad * ctheta - com_position[0], inner_rad * stheta -", "np.pi / 12.0 ctheta = np.cos( dtheta * idx ) stheta = np.sin(", "np.sin( ( i + 0.5 ) * dtheta ), 0.5 * height ]", "7, 2, 7, 3, 1, 5, 6, 1, 6, 2, 4, 7, 6,", "- com_position[1], -0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta", "* ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], -0.5 * height, inner_rad", "] vertices, faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( \"path_part_{}\".format( i ), vertices,", "- COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 -", "tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ] ) ) rotation = tm.rotation(", ") * dtheta ), half_rad * np.sin( ( i + 0.5 ) *", "= [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 -", "* stheta_n - com_position[1], -0.5 * height ] faces = [ 0, 1,", "* np.sin( ( idx + 0.5 ) * dtheta ), 0.5 * height", "0.5, 0.7 ] floor.drawable.specular = [ 0.3, 0.5, 0.7 ] while visualizer.IsActive() :", "= [ 0.3, 0.5, 0.7 ] floor.drawable.specular = [ 0.3, 0.5, 0.7 ]", "sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM(", ") stheta = np.sin( dtheta * idx ) ctheta_n = np.cos( dtheta *", "height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], -0.5 *", "loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT", "0, 2, 3, 0, 4, 5, 
0, 5, 1, 0, 3, 7, 0,", "height ] vertices = [ inner_rad * ctheta - com_position[0], inner_rad * stheta", "if len( sys.argv ) > 1 : choice_backend = sys.argv[1] if choice_backend ==", "] floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ] floor.drawable.specular = [ 0.3, 0.5,", "+ 0.5 ) * dtheta ), half_rad * np.sin( ( idx + 0.5", ") : sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT )", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running", "visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE )", "* ( idx + 1 ) ) half_rad = 0.5* ( inner_rad +", "/ 3, np.pi / 4, np.pi / 6 ] ) ) #### rotation", "0.7 ] floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ] floor.drawable.specular = [ 0.3,", "COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],", "0.3, [ 1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"ramp_1\", RAMP_VERTICES,", "inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], -0.5 * height,", "] faces = [ 0, 1, 2, 0, 2, 3, 0, 4, 5,", "0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0", "loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 / 3.0,", "dtheta = 2.0 * np.pi / 12.0 ctheta = np.cos( dtheta * idx", "- COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 -", "0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0,", "- COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 -", "= 0.5* ( inner_rad + outer_rad ) com_position = [ half_rad * np.cos(", "1.0 - COM_RAMP[1], 1.0 - 
COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0", "4, 6, 5 ] return vertices, faces if __name__ == '__main__' : if", ") ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( \"floor\", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() )", "elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] ) elif", "- COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 -", "0.5* ( inner_rad + outer_rad ) com_position = [ half_rad * np.cos( (", ") scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ], rotation", "1, 6, 2, 4, 7, 6, 4, 6, 5 ] return vertices, faces", ": simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else simulation.Resume() elif", "2, 3 ] COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0 / 9.0", "+ 0.5 ) * dtheta ), 0.5 * height ] vertices = [", ") #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi / 4,", "idx + 0.5 ) * dtheta ), 0.5 * height ] vertices =", "TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_1\",", "idx + 1 ) ) half_rad = 0.5* ( inner_rad + outer_rad )", "- COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 -", "rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( \"tetrahedron_0\", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0", "], rotation ) ) for i in range( 0, 12 ) : height", "1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0", "COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0],", "simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0", "- COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 -" ]
[ "values, and return copies without Nones. Returns: tuple[Iterable]: New arrays with only non-None", "from typing import Iterable, Tuple def remove_nones(*arrays: Iterable) -> Tuple[Iterable]: \"\"\" Take inputted", "only non-None values \"\"\" return tuple([[i for i in array if i is", "Tuple[Iterable]: \"\"\" Take inputted arrays that may contain None values, and return copies", "with only non-None values \"\"\" return tuple([[i for i in array if i", "def remove_nones(*arrays: Iterable) -> Tuple[Iterable]: \"\"\" Take inputted arrays that may contain None", "that may contain None values, and return copies without Nones. Returns: tuple[Iterable]: New", "Iterable) -> Tuple[Iterable]: \"\"\" Take inputted arrays that may contain None values, and", "Nones. Returns: tuple[Iterable]: New arrays with only non-None values \"\"\" return tuple([[i for", "arrays with only non-None values \"\"\" return tuple([[i for i in array if", "may contain None values, and return copies without Nones. Returns: tuple[Iterable]: New arrays", "-> Tuple[Iterable]: \"\"\" Take inputted arrays that may contain None values, and return", "and return copies without Nones. Returns: tuple[Iterable]: New arrays with only non-None values", "tuple([[i for i in array if i is not None] for array in", "contain None values, and return copies without Nones. Returns: tuple[Iterable]: New arrays with", "tuple[Iterable]: New arrays with only non-None values \"\"\" return tuple([[i for i in", "typing import Iterable, Tuple def remove_nones(*arrays: Iterable) -> Tuple[Iterable]: \"\"\" Take inputted arrays", "None values, and return copies without Nones. 
Returns: tuple[Iterable]: New arrays with only", "Returns: tuple[Iterable]: New arrays with only non-None values \"\"\" return tuple([[i for i", "New arrays with only non-None values \"\"\" return tuple([[i for i in array", "non-None values \"\"\" return tuple([[i for i in array if i is not", "Iterable, Tuple def remove_nones(*arrays: Iterable) -> Tuple[Iterable]: \"\"\" Take inputted arrays that may", "for i in array if i is not None] for array in arrays])", "arrays that may contain None values, and return copies without Nones. Returns: tuple[Iterable]:", "Take inputted arrays that may contain None values, and return copies without Nones.", "inputted arrays that may contain None values, and return copies without Nones. Returns:", "copies without Nones. Returns: tuple[Iterable]: New arrays with only non-None values \"\"\" return", "without Nones. Returns: tuple[Iterable]: New arrays with only non-None values \"\"\" return tuple([[i", "\"\"\" return tuple([[i for i in array if i is not None] for", "return copies without Nones. Returns: tuple[Iterable]: New arrays with only non-None values \"\"\"", "return tuple([[i for i in array if i is not None] for array", "import Iterable, Tuple def remove_nones(*arrays: Iterable) -> Tuple[Iterable]: \"\"\" Take inputted arrays that", "\"\"\" Take inputted arrays that may contain None values, and return copies without", "Tuple def remove_nones(*arrays: Iterable) -> Tuple[Iterable]: \"\"\" Take inputted arrays that may contain", "values \"\"\" return tuple([[i for i in array if i is not None]", "remove_nones(*arrays: Iterable) -> Tuple[Iterable]: \"\"\" Take inputted arrays that may contain None values," ]
[ "= [x.strip('\\n') for x in infd.readlines()] # code taken from above def readme():", "readme(): with open('README.md') as f: return f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing events.',", "MIT License', 'Programming Language :: Python :: 2.7', 'Topic :: Text Processing ::", ":: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>', author_email='<EMAIL>', license='MIT', packages=find_packages(), install_requires=INSTALL_REQUIRES, include_package_data=True, zip_safe=False, python_requires='>=3.6')", "Alpha', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python ::", "Approved :: MIT License', 'Programming Language :: Python :: 2.7', 'Topic :: Text", "def readme(): with open('README.md') as f: return f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing", "f: return f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing events.', long_description=readme(), classifiers=[ 'Development Status", ":: OSI Approved :: MIT License', 'Programming Language :: Python :: 2.7', 'Topic", "microlensing events.', long_description=readme(), classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI", "'Topic :: Text Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>', author_email='<EMAIL>', license='MIT', packages=find_packages(),", "from above def readme(): with open('README.md') as f: return f.read() setup(name='mlfinder', version=__version__, description='Find", "classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: MIT", "x in infd.readlines()] # code taken from above def readme(): with open('README.md') as", "infd.readlines()] # code taken from above def readme(): with open('README.md') as f: return", "__version__ = '0.1.0' # this part taken from https://github.com/dr-guangtou/riker 
with open('requirements.txt') as infd:", "from https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()]", "Text Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>', author_email='<EMAIL>', license='MIT', packages=find_packages(), install_requires=INSTALL_REQUIRES, include_package_data=True,", "return f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing events.', long_description=readme(), classifiers=[ 'Development Status ::", "this part taken from https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n') for", "'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2.7',", "setuptools import setup, find_packages # random values __version__ = '0.1.0' # this part", "f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing events.', long_description=readme(), classifiers=[ 'Development Status :: 3", "open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()] # code taken", "part taken from https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n') for x", "values __version__ = '0.1.0' # this part taken from https://github.com/dr-guangtou/riker with open('requirements.txt') as", "in infd.readlines()] # code taken from above def readme(): with open('README.md') as f:", "as infd: INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()] # code taken from", "for x in infd.readlines()] # code taken from above def readme(): with open('README.md')", "possible microlensing events.', long_description=readme(), classifiers=[ 'Development Status :: 3 - Alpha', 'License ::", "Language :: Python :: 2.7', 'Topic :: Text Processing :: Linguistic', ], keywords='astronomy',", "events.', 
long_description=readme(), classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved", ":: MIT License', 'Programming Language :: Python :: 2.7', 'Topic :: Text Processing", "a little from setuptools import setup, find_packages # random values __version__ = '0.1.0'", "'Programming Language :: Python :: 2.7', 'Topic :: Text Processing :: Linguistic', ],", "version=__version__, description='Find possible microlensing events.', long_description=readme(), classifiers=[ 'Development Status :: 3 - Alpha',", "# random values __version__ = '0.1.0' # this part taken from https://github.com/dr-guangtou/riker with", "from setuptools import setup, find_packages # random values __version__ = '0.1.0' # this", "[x.strip('\\n') for x in infd.readlines()] # code taken from above def readme(): with", ":: Python :: 2.7', 'Topic :: Text Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder',", "modified a little from setuptools import setup, find_packages # random values __version__ =", "little from setuptools import setup, find_packages # random values __version__ = '0.1.0' #", "2.7', 'Topic :: Text Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>', author_email='<EMAIL>', license='MIT',", "long_description=readme(), classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved ::", "taken from https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n') for x in", "setup(name='mlfinder', version=__version__, description='Find possible microlensing events.', long_description=readme(), classifiers=[ 'Development Status :: 3 -", "= '0.1.0' # this part taken from https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES", "# code taken from above def readme(): with open('README.md') as f: return f.read()", "taken from 
http://python-packaging.readthedocs.io/en/latest/everything.html and modified a little from setuptools import setup, find_packages #", "# taken from http://python-packaging.readthedocs.io/en/latest/everything.html and modified a little from setuptools import setup, find_packages", "description='Find possible microlensing events.', long_description=readme(), classifiers=[ 'Development Status :: 3 - Alpha', 'License", "open('README.md') as f: return f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing events.', long_description=readme(), classifiers=[", ":: 2.7', 'Topic :: Text Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>', author_email='<EMAIL>',", "from http://python-packaging.readthedocs.io/en/latest/everything.html and modified a little from setuptools import setup, find_packages # random", "as f: return f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing events.', long_description=readme(), classifiers=[ 'Development", "'Development Status :: 3 - Alpha', 'License :: OSI Approved :: MIT License',", "above def readme(): with open('README.md') as f: return f.read() setup(name='mlfinder', version=__version__, description='Find possible", "with open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()] # code", "INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()] # code taken from above def", "Status :: 3 - Alpha', 'License :: OSI Approved :: MIT License', 'Programming", "with open('README.md') as f: return f.read() setup(name='mlfinder', version=__version__, description='Find possible microlensing events.', long_description=readme(),", "License', 'Programming Language :: Python :: 2.7', 'Topic :: Text Processing :: Linguistic',", "taken from above def readme(): with open('README.md') as f: return f.read() setup(name='mlfinder', version=__version__,", "# this part 
taken from https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n')", "- Alpha', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python", "Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>', author_email='<EMAIL>', license='MIT', packages=find_packages(), install_requires=INSTALL_REQUIRES, include_package_data=True, zip_safe=False,", ":: Text Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>', author_email='<EMAIL>', license='MIT', packages=find_packages(), install_requires=INSTALL_REQUIRES,", "random values __version__ = '0.1.0' # this part taken from https://github.com/dr-guangtou/riker with open('requirements.txt')", "find_packages # random values __version__ = '0.1.0' # this part taken from https://github.com/dr-guangtou/riker", "setup, find_packages # random values __version__ = '0.1.0' # this part taken from", "http://python-packaging.readthedocs.io/en/latest/everything.html and modified a little from setuptools import setup, find_packages # random values", "and modified a little from setuptools import setup, find_packages # random values __version__", "OSI Approved :: MIT License', 'Programming Language :: Python :: 2.7', 'Topic ::", "infd: INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()] # code taken from above", "code taken from above def readme(): with open('README.md') as f: return f.read() setup(name='mlfinder',", ":: 3 - Alpha', 'License :: OSI Approved :: MIT License', 'Programming Language", "Python :: 2.7', 'Topic :: Text Processing :: Linguistic', ], keywords='astronomy', url='https://github.com/JudahRockLuberto/mlfinder', author='<NAME>',", "3 - Alpha', 'License :: OSI Approved :: MIT License', 'Programming Language ::", "import setup, find_packages # random values __version__ = '0.1.0' # this part taken", 
"https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES = [x.strip('\\n') for x in infd.readlines()] #", "'0.1.0' # this part taken from https://github.com/dr-guangtou/riker with open('requirements.txt') as infd: INSTALL_REQUIRES =" ]
[ "from enum import Enum, auto class Line(Enum): RIGHT = auto() NEXT = auto()", "enum import Enum, auto class Line(Enum): RIGHT = auto() NEXT = auto() BELOW", "<filename>enums.py from enum import Enum, auto class Line(Enum): RIGHT = auto() NEXT =", "import Enum, auto class Line(Enum): RIGHT = auto() NEXT = auto() BELOW =", "Enum, auto class Line(Enum): RIGHT = auto() NEXT = auto() BELOW = auto()" ]
[ "in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank)", "not is_racing_flag: lap = lap_history[-1] coin = coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s", "ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = lp.replace('.png', '') coin", "] rank_history = [12, ] im_before_coin = 0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\"", "90:2180] frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME,", "import subprocess from subprocess import PIPE formatter = '%(levelname)s : %(asctime)s : %(message)s'", "and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap = 1 lap_history", "not res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:, 90:2180] frame_gray =", "> 3: curent_lap = 3 lap_index = lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:])", "300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray)", "lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now", "coin:%s rank:%s is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag)) if lap in LAP_LIST: lap_number", "is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now,", "rank:%s is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag)) 
if lap in LAP_LIST: lap_number =", "elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap =", "subprocess import PIPE formatter = '%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter)", "im_before_coin = 0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1) &", "= 0 is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now =", "= \"screenshot OBS -t OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" #", "coin_history = [0, ] rank_history = [12, ] im_before_coin = 0 # 直前の時刻のコイン", "1, 1, 1, 1, 1] lap_index = [] coin_history = [0, ] +", "'%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND", "True curent_lap = 1 lap_history = [1, 1, 1, 1, 1, 1, 1,", "-f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2 #", "lap_stat_mode = 2 elif curent_lap == 2 and lap_3_count > 4 and len(lap_history)", "0) ret1, lp = get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray, im_before_coin) ret3, rk =", "[0, ] rank_history = [12, ] im_before_coin = 0 # 直前の時刻のコイン while(True): logging.info(\"[log]", "1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count = lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s", "frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray =", "temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res: continue frame_gray = cv2.imread(\"temp_raw.png\",", 
"ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW", "formatter = '%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ", "rank_history = [12, ] im_before_coin = 0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" %", "lap = lp.replace('.png', '') coin = cn.replace('.png', '') rank = rk.replace('.png', '') #", "= [] coin_history = [0, ] rank_history = [12, ] im_before_coin = 0", "WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4", "現在の状態を更新 lap = lp.replace('.png', '') coin = cn.replace('.png', '') rank = rk.replace('.png', '')", "== 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap = 1 lap_history = [1,", "rank_history = [12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)", "= 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE = False #", "cn.replace('.png', '') rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap =", "2 elif curent_lap == 2 and lap_3_count > 4 and len(lap_history) > 40:", "import os import time import warnings import subprocess from subprocess import PIPE formatter", "shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0)", "- 2.0) else: curent_lap = 1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now:", "1 lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]", "4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる", "= cv2.resize(frame_gray, (400, 300)) 
cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1,", "= lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0) else: curent_lap = 1", "+ coin_history[-2:] rank_history = [12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now,", "rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag)) if", "if lap_stat_mode > curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW -", "rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap = lap_history[-1] coin", "1] lap_index = [] coin_history = [0, ] + coin_history[-2:] rank_history = [12,", "lap_2_count > 3 and len(lap_history) > 20: lap_stat_mode = 2 elif curent_lap ==", "# 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray) ret2, cn =", "all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap = 1 lap_history =", "for _ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False curent_lap = 1 lap_history", "coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) ==", "= lp.replace('.png', '') coin = cn.replace('.png', '') rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する", "\"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる", "[] coin_history = [0, ] + coin_history[-2:] rank_history = [12, ] + rank_history[-2:]", "is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap = 1", "RANK_LIST: 
rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) # 3週以上は無視", "40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap = lap_stat_mode", "1, 1, 1, 1, 1, 1] lap_index = [] coin_history = [0, ]", "elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history,", "[] coin_history = [0, ] rank_history = [12, ] im_before_coin = 0 #", "curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0) else: curent_lap =", "/ PLOT_WINDOW - 2.0) else: curent_lap = 1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin))", "パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS -t OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME", "0 is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False", "3 # レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる def run_server(): frame_num = 0", "coin = cn.replace('.png', '') rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag:", "logging.info(\"レースを終了\") continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap,", "画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray,", "lp.replace('.png', '') coin = cn.replace('.png', '') rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if", "elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"Finish!!!!\") if", "[0, ] + coin_history[-2:] rank_history = [12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history,", "cv2.imread(\"temp_raw.png\", 0) frame_gray = 
frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray)", "+= 1 res = subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE,", "is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する", "# 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap = 1 lap_history = [1, 1, 1,", "0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする", "# レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap = 1 lap_history = [1, 1, 1,", "rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) # 3週以上は無視 if", "get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag = ret1 and ret2 and ret3", "lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0) else: curent_lap = 1 if", "レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる def run_server(): frame_num = 0 is_racing_flag_list =", "curent_lap = 1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2)", "continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap", "draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: #", "frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0)", "- 10) / PLOT_WINDOW - 2.0) 
else: curent_lap = 1 if coin in", "rk = get_rank(frame_gray) is_racing_flag = ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新", "1, 1, 1, 1, 1, 1, 1] lap_index = [] coin_history = [0]", "False # ラップの区切りを見せる def run_server(): frame_num = 0 is_racing_flag_list = [False for _", "1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank", "+ 1) elif is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) # 3週以上は無視 if len(lap_index)", "# 現在の状態を更新 lap = lp.replace('.png', '') coin = cn.replace('.png', '') rank = rk.replace('.png',", "\" % lap_history[-6:]) if curent_lap == 1 and lap_2_count > 3 and len(lap_history)", "output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"Finish!!!!\") if __name__ == '__main__': delete_temp_file()", "cv2.waitKey(1) & 0xFF == ord('q'): break # OBSのSCを取得 im_before_coin = coin_history[-1] frame_num +=", "if cv2.waitKey(1) & 0xFF == ord('q'): break # OBSのSCを取得 im_before_coin = coin_history[-1] frame_num", "= False # ラップの区切りを見せる def run_server(): frame_num = 0 is_racing_flag_list = [False for", "0) frame_gray = frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) #", "not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag,", "1, 1, 1, 1, 1, 1, 1, 1, 1] lap_index = [] coin_history", "> 20: lap_stat_mode = 2 elif curent_lap == 2 and lap_3_count > 4", "lap = lap_history[-1] coin = coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\"", "ret3, rk = get_rank(frame_gray) is_racing_flag = ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) #", "coin_history.append(-2) 
if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1] + 1)", "% lap_history[-6:]) if curent_lap == 1 and lap_2_count > 3 and len(lap_history) >", "# レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index,", "len(lap_history) > 40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap", "[1, 1, 1, 1, 1, 1, 1, 1, 1, 1] lap_index = []", "elif curent_lap == 2 and lap_3_count > 4 and len(lap_history) > 40: lap_stat_mode", "delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す", "if lap in LAP_LIST: lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode = 1", "lap_2_count = lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:]) if curent_lap", "= get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag = ret1 and ret2 and", ": %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS", "] + coin_history[-2:] rank_history = [12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag,", "TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6", "= [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] lap_index =", "# xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる def", "ret1, lp = get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray)", "# 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 
# 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND", "> 4 and len(lap_history) > 40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode", "import * import logging import os import time import warnings import subprocess from", "# 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1) & 0xFF == ord('q'):", "= 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 #", "1, 1, 1, 1, 1] lap_index = [] coin_history = [0] rank_history =", "-t OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND =", "# 3週以上は無視 if len(lap_index) > 3: curent_lap = 3 lap_index = lap_index[:3] if", "output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag:", "= 1 lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1,", "frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray) ret2, cn", "ord('q'): break # OBSのSCを取得 im_before_coin = coin_history[-1] frame_num += 1 res = subprocess.run(\"screenshot", "len(lap_history) > 20: lap_stat_mode = 2 elif curent_lap == 2 and lap_3_count >", "im_before_coin = coin_history[-1] frame_num += 1 res = subprocess.run(\"screenshot OBS -t OBS -f", "= \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 #", "coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in RANK_LIST:", "1)] is_racing_now = False curent_lap = 1 lap_history = [1, 1, 1, 1,", "continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"Finish!!!!\")", "# レース中はグラフを出す 
output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"Finish!!!!\") if __name__ ==", "coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"Finish!!!!\") if __name__ == '__main__': delete_temp_file() run_server()", "レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap = lap_history[-1] coin = coin_history[-1] rank = rank_history[-1]", "IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE = False", "1, 1, 1, 1] lap_index = [] coin_history = [0] rank_history = [12]", "10) / PLOT_WINDOW - 2.0) else: curent_lap = 1 if coin in COIN_LIST:", "PLOT_WINDOW - 2.0) else: curent_lap = 1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif", "len(lap_index) > 3: curent_lap = 3 lap_index = lap_index[:3] if not is_racing_now and", "im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag = ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag)", "[] coin_history = [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history,", "= cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME,", "time import warnings import subprocess from subprocess import PIPE formatter = '%(levelname)s :", "coin_history = [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag,", "from utils import * import logging import os import time import warnings import", "rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)", "処理間の待機時間(秒) 
WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND =", "スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE =", "= ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = lp.replace('.png', '')", "= [0, ] + coin_history[-2:] rank_history = [12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history,", "1) elif is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) # 3週以上は無視 if len(lap_index) >", "is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag)) if lap in LAP_LIST: lap_number = LAP_LIST.index(lap)", "coin_history = [0, ] + coin_history[-2:] rank_history = [12, ] + rank_history[-2:] output_race_status(curent_lap,", "3 and len(lap_history) > 20: lap_stat_mode = 2 elif curent_lap == 2 and", "%(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot", "OBS -t OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND", "cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray, im_before_coin) ret3, rk", "= rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag)) if lap", "from subprocess import PIPE formatter = '%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO,", "logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag)) if lap in LAP_LIST:", "= cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray, im_before_coin) ret3,", "and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = 
lp.replace('.png', '') coin = cn.replace('.png', '')", "\" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒)", "レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap = 1 lap_history = [1, 1, 1, 1,", "= get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag =", "3週以上は無視 if len(lap_index) > 3: curent_lap = 3 lap_index = lap_index[:3] if not", "res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray,", "import PIPE formatter = '%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore')", "1, 1, 1, 1, 1, 1, 1] lap_index = [] coin_history = [0,", "logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS -t OBS", "+ 1)] is_racing_now = False curent_lap = 1 lap_history = [1, 1, 1,", "coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag))", "[0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index,", "1, 1, 1, 1, 1, 1] lap_index = [] coin_history = [0] rank_history", "if not res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:, 90:2180] frame_gray", "is_racing_now = False curent_lap = 1 lap_history = [1, 1, 1, 1, 1,", "lap in LAP_LIST: lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count", "PIPE formatter = '%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, 
format=formatter) warnings.simplefilter('ignore') \"\"\"###################################", "text=True) time.sleep(WAIT_SECOND) if not res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:,", "rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2)", "4 and len(lap_history) > 40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode >", "coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1)", "1, 1, 1] lap_index = [] coin_history = [0, ] + coin_history[-2:] rank_history", "= 1 lap_2_count = lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:])", "# OBSのSCを取得 im_before_coin = coin_history[-1] frame_num += 1 res = subprocess.run(\"screenshot OBS -t", "LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count = lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3)", "= [12, ] im_before_coin = 0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now)", "lap_index = lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now", "1] lap_index = [] coin_history = [0, ] rank_history = [12, ] im_before_coin", "is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False curent_lap", "SC_COMMAND = \"screenshot OBS -t OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\"", "and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap = 1 lap_history", "= False curent_lap = 1 lap_history = [1, 1, 1, 1, 1, 1,", 
"lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count = lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s \"", "utils import * import logging import os import time import warnings import subprocess", "in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) #", "# 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif", "cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp", "= lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:]) if curent_lap ==", "== 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap = 1 lap_history = [1,", "is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1) & 0xFF == ord('q'): break # OBSのSCを取得 im_before_coin", "False curent_lap = 1 lap_history = [1, 1, 1, 1, 1, 1, 1,", "and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = lp.replace('.png', '') coin =", "lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:]) if curent_lap == 1 and lap_2_count >", "レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"Finish!!!!\") if __name__ == '__main__':", "lap_index = [] coin_history = [0, ] + coin_history[-2:] rank_history = [12, ]", "画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE =", "import time import warnings import subprocess from subprocess import PIPE formatter = '%(levelname)s", "lap_stat_mode = 3 # 
ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history)", "1, 1] lap_index = [] coin_history = [0, ] + coin_history[-2:] rank_history =", "指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap = 1 lap_history = [1, 1, 1, 1,", "break # OBSのSCを取得 im_before_coin = coin_history[-1] frame_num += 1 res = subprocess.run(\"screenshot OBS", "curent_lap == 2 and lap_3_count > 4 and len(lap_history) > 40: lap_stat_mode =", "is_racing_now = True curent_lap = 1 lap_history = [1, 1, 1, 1, 1,", "1, 1] lap_index = [] coin_history = [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) #", "キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH =", "subprocess from subprocess import PIPE formatter = '%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log',", "LAP_LIST: lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count = lap_history[-6:].count(2)", "import logging import os import time import warnings import subprocess from subprocess import", "rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0):", "lap_index = [] coin_history = [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap,", "2 and lap_3_count > 4 and len(lap_history) > 40: lap_stat_mode = 3 #", "1) else: rank_history.append(-2) # 3週以上は無視 if len(lap_index) > 3: curent_lap = 3 lap_index", "DRAW_LAPLINE = False # ラップの区切りを見せる def run_server(): frame_num = 0 is_racing_flag_list = [False", "-f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res: continue frame_gray =", "lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] lap_index", "if len(lap_index) > 3: 
curent_lap = 3 lap_index = lap_index[:3] if not is_racing_now", "cn = get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag = ret1 and ret2", "= 3 lap_index = lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): #", "1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap = 1 lap_history = [1, 1,", "is_racing_now) if cv2.waitKey(1) & 0xFF == ord('q'): break # OBSのSCを取得 im_before_coin = coin_history[-1]", "is_racing_flag: lap = lap_history[-1] coin = coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s", "OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res: continue frame_gray", "-t OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res: continue", "and len(lap_history) > 20: lap_stat_mode = 2 elif curent_lap == 2 and lap_3_count", "get_rank(frame_gray) is_racing_flag = ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap =", "not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True curent_lap =", "rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and", "= '%(levelname)s : %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\"", "= 3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) -", "if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する 
is_racing_now = True curent_lap", "ret2, cn = get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag = ret1 and", "lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now:", "レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)", "draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now =", "== 2 and lap_3_count > 4 and len(lap_history) > 40: lap_stat_mode = 3", "lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count = lap_history[-6:].count(2) lap_3_count", "(400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp =", "logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False", "ラップの区切りを見せる def run_server(): frame_num = 0 is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH", "stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray", "frame_num = 0 is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now", ": %(asctime)s : %(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND =", "coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) 
delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag: #", "] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif", "stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray =", "1, 1] lap_index = [] coin_history = [0, ] rank_history = [12, ]", "= 3 # レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる def run_server(): frame_num =", "] im_before_coin = 0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1)", "rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap, coin, rank, is_racing_flag)) if lap in", "is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif", "20: lap_stat_mode = 2 elif curent_lap == 2 and lap_3_count > 4 and", "rank, is_racing_flag)) if lap in LAP_LIST: lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode", "= [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now,", "all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap = 1 lap_history =", "0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap = 1 lap_history = [1, 1,", "xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる def run_server():", "6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認", "def run_server(): frame_num = 0 is_racing_flag_list = [False for _ in 
range(IS_RACING_CHECK_LENGTH +", "# ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10) /", "# キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH", "time.sleep(WAIT_SECOND) if not res: continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:, 90:2180]", "\"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS -t OBS -f \" # スクリーンショット用コマンド", "= coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap, coin, rank,", "is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"Finish!!!!\") if __name__", "logging import os import time import warnings import subprocess from subprocess import PIPE", "lap_history[-6:]) if curent_lap == 1 and lap_2_count > 3 and len(lap_history) > 20:", "lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now = True", "lp = get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag", "res = subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND)", "3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10)", "in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False curent_lap = 1 lap_history = [1,", "if not is_racing_flag: lap = lap_history[-1] coin = coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s", "'') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap = lap_history[-1] coin = coin_history[-1] rank", "get_lap(frame_gray) ret2, 
cn = get_coinnum(frame_gray, im_before_coin) ret3, rk = get_rank(frame_gray) is_racing_flag = ret1", "画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif not", "WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3", "OBSのSCを取得 im_before_coin = coin_history[-1] frame_num += 1 res = subprocess.run(\"screenshot OBS -t OBS", "COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) +", "is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) # 3週以上は無視 if len(lap_index) > 3: curent_lap", "= LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count = lap_history[-6:].count(2) lap_3_count =", "lap_3_count > 4 and len(lap_history) > 40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する if", "os import time import warnings import subprocess from subprocess import PIPE formatter =", "1, 1, 1, 1, 1] lap_index = [] coin_history = [0, ] rank_history", "curent_lap == 1 and lap_2_count > 3 and len(lap_history) > 20: lap_stat_mode =", "coin_history[-1] frame_num += 1 res = subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\", shell=True,", "= frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray", "1, 1, 1] lap_index = [] coin_history = [0, ] rank_history = [12,", "= coin_history[-1] frame_num += 1 res = subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\",", "is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない 
continue", "run_server(): frame_num = 0 is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH + 1)]", "rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap = lap_history[-1] coin = coin_history[-1]", "time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue", "> curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0) else:", "coin, rank, is_racing_flag)) if lap in LAP_LIST: lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number)", "= [False for _ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False curent_lap =", "and lap_2_count > 3 and len(lap_history) > 20: lap_stat_mode = 2 elif curent_lap", "in LAP_LIST: lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count =", "1 lap_2_count = lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:]) if", "> 3 and len(lap_history) > 20: lap_stat_mode = 2 elif curent_lap == 2", "else: rank_history.append(-2) # 3週以上は無視 if len(lap_index) > 3: curent_lap = 3 lap_index =", "% is_racing_now) if cv2.waitKey(1) & 0xFF == ord('q'): break # OBSのSCを取得 im_before_coin =", "= 0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1) & 0xFF", "is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = lp.replace('.png', '') coin = cn.replace('.png', '') rank =", "# レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる def run_server(): frame_num = 0 is_racing_flag_list", "= 2 elif curent_lap == 2 and lap_3_count > 4 and len(lap_history) >", "subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not", "lap_stat_mode = 1 lap_2_count = 
lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" %", "== 1 and lap_2_count > 3 and len(lap_history) > 20: lap_stat_mode = 2", "rank_history.append(-2) # 3週以上は無視 if len(lap_index) > 3: curent_lap = 3 lap_index = lap_index[:3]", "ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = lp.replace('.png', '') coin = cn.replace('.png', '') rank", "rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\") continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない", "\"screenshot OBS -t OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先", "coin = coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap, coin,", "+ 1 lap_history.append(lap_number) lap_stat_mode = 1 lap_2_count = lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history]", "else: coin_history.append(-2) if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1] +", "= [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file()", "# スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2 # 処理間の待機時間(秒) WAITTIME_BEFORE_DELETE", "cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray) ret2,", "1, 1, 1] lap_index = [] coin_history = [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE)", "lap_stat_mode > curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0)", "lap_index = [] coin_history = [0, ] rank_history = [12, ] im_before_coin =", "0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % 
is_racing_now) if cv2.waitKey(1) & 0xFF ==", "[12, ] im_before_coin = 0 # 直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if", "frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400, 300))", "= [] coin_history = [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history,", "[12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue", "'') rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap = lap_history[-1]", "coin_history[-2:] rank_history = [12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index,", "= [] coin_history = [0, ] + coin_history[-2:] rank_history = [12, ] +", "= lap_history[-1] coin = coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" %", "+ 1) else: rank_history.append(-2) # 3週以上は無視 if len(lap_index) > 3: curent_lap = 3", "= get_rank(frame_gray) is_racing_flag = ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap", "warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS -t OBS -f \" #", "warnings import subprocess from subprocess import PIPE formatter = '%(levelname)s : %(asctime)s :", "%s \" % lap_history[-6:]) if curent_lap == 1 and lap_2_count > 3 and", "1 and lap_2_count > 3 and len(lap_history) > 20: lap_stat_mode = 2 elif", "elif is_racing_now: rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) # 3週以上は無視 if len(lap_index) > 3:", "logging.info(\"[lap_history] %s \" % lap_history[-6:]) if curent_lap == 1 and lap_2_count > 3", "lap_3_count = 
lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:]) if curent_lap == 1 and", "curent_lap = 1 lap_history = [1, 1, 1, 1, 1, 1, 1, 1,", "== ord('q'): break # OBSのSCを取得 im_before_coin = coin_history[-1] frame_num += 1 res =", "is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now:", "% (lap, coin, rank, is_racing_flag)) if lap in LAP_LIST: lap_number = LAP_LIST.index(lap) +", "elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif", "if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1] + 1) else:", "curent_lap = 3 lap_index = lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1):", "3 lap_index = lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する", "continue frame_gray = cv2.imread(\"temp_raw.png\", 0) frame_gray = frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400,", "frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0) ret1, lp = get_lap(frame_gray) ret2, cn = get_coinnum(frame_gray, im_before_coin)", "= subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if", "logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1) & 0xFF == ord('q'): break # OBSのSCを取得", "1, 1, 1, 1] lap_index = [] coin_history = [0, ] + coin_history[-2:]", "continue elif not is_racing_flag: # レースフラグ判定が降りない場合は一旦プロットしない continue elif is_racing_now: # レース中はグラフを出す output_race_status(curent_lap, coin_history,", "if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: 
coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in", "_ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False curent_lap = 1 lap_history =", "is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): #", "ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = lp.replace('.png', '') coin = cn.replace('.png',", "# レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap = lap_history[-1] coin = coin_history[-1] rank =", "= rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap = lap_history[-1] coin =", "= lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:]) if curent_lap == 1 and lap_2_count", "直前の時刻のコイン while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1) & 0xFF == ord('q'): break", "[12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) delete_temp_file() logging.info(\"レースを終了\")", "= [12, ] + rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\")", "[False for _ in range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False curent_lap = 1", "= 1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else: coin_history.append(-2) if", "IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE = False # ラップの区切りを見せる def run_server(): frame_num", "lap_history[-6:].count(2) lap_3_count = lap_history[-6:].count(3) logging.info(\"[lap_history] %s \" % lap_history[-6:]) if curent_lap == 1", "rank_history.append(rank_history[-1] + 1) else: rank_history.append(-2) # 3週以上は無視 if 
len(lap_index) > 3: curent_lap =", "3: curent_lap = 3 lap_index = lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) ==", "while(True): logging.info(\"[log] is_racing_now==%s\" % is_racing_now) if cv2.waitKey(1) & 0xFF == ord('q'): break #", "%(message)s' logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS -t", "# 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認 DRAW_LAPLINE", "= 6 # 画像を消すまでに猶予を持たせる IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする IS_RACING_CHECK_LENGTH_RACEEND = 3 #", "1, 1, 1, 1] lap_index = [] coin_history = [0, ] rank_history =", "+ rank_history[-2:] output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now", "is_racing_flag = ret1 and ret2 and ret3 is_racing_flag_list.append(is_racing_flag) # 現在の状態を更新 lap = lp.replace('.png',", "and lap_3_count > 4 and len(lap_history) > 40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する", "import warnings import subprocess from subprocess import PIPE formatter = '%(levelname)s : %(asctime)s", "lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0) else: curent_lap = 1 if coin", "if curent_lap == 1 and lap_2_count > 3 and len(lap_history) > 20: lap_stat_mode", "range(IS_RACING_CHECK_LENGTH + 1)] is_racing_now = False curent_lap = 1 lap_history = [1, 1,", "(lap, coin, rank, is_racing_flag)) if lap in LAP_LIST: lap_number = LAP_LIST.index(lap) + 1", "curent_lap: curent_lap = lap_stat_mode lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0) else: curent_lap", "= True curent_lap = 1 lap_history = [1, 1, 1, 1, 1, 1,", "1, 1, 1, 1, 1, 1, 1, 1] lap_index = [] 
coin_history =", "OBS -t OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True) time.sleep(WAIT_SECOND) if not res:", "frame_gray = frame_gray[70:1230:, 90:2180] frame_gray = cv2.resize(frame_gray, (400, 300)) cv2.imwrite(TEMP_IMG_FILENAME, frame_gray) # 画像の認識結果をcsvに書き出し", "= lap_index[:3] if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1): # レースしていない状態から続けてレース判定が降りた場合はレース処理に移行する is_racing_now =", "and len(lap_history) > 40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap:", "else: curent_lap = 1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1]) else:", "###################################\"\"\" SC_COMMAND = \"screenshot OBS -t OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME =", "output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE) logging.info(\"レースを開始\") continue elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:])", "= [0, ] rank_history = [12, ] im_before_coin = 0 # 直前の時刻のコイン while(True):", "* import logging import os import time import warnings import subprocess from subprocess", "& 0xFF == ord('q'): break # OBSのSCを取得 im_before_coin = coin_history[-1] frame_num += 1", "2.0) else: curent_lap = 1 if coin in COIN_LIST: coin_history.append(COIN_LIST.index(coin)) elif is_racing_now: coin_history.append(coin_history[-1])", "frame_num += 1 res = subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\", shell=True, stdout=PIPE,", "> 40: lap_stat_mode = 3 # ラップが更新された場合はそのインデックスを記録する if lap_stat_mode > curent_lap: curent_lap =", "1] lap_index = [] coin_history = [0] rank_history = [12] time.sleep(WAITTIME_BEFORE_DELETE) # 画像を消すまでに猶予を持たせる", "format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS 
-t OBS -f \"", "# ラップの区切りを見せる def run_server(): frame_num = 0 is_racing_flag_list = [False for _ in", "= cn.replace('.png', '') rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not is_racing_flag: lap", "0xFF == ord('q'): break # OBSのSCを取得 im_before_coin = coin_history[-1] frame_num += 1 res", "OBS -f \" # スクリーンショット用コマンド TEMP_IMG_FILENAME = \"temp.png\" # キャプチャ結果の保存先 WAIT_SECOND = 0.2", "1 res = subprocess.run(\"screenshot OBS -t OBS -f temp_raw.png\", shell=True, stdout=PIPE, stderr=PIPE, text=True)", "lap_history[-1] coin = coin_history[-1] rank = rank_history[-1] logging.info(\"lap:%s coin:%s rank:%s is_racing_flag==%s\" % (lap,", "'') coin = cn.replace('.png', '') rank = rk.replace('.png', '') # レース判定が降りない場合は手前の時刻の結果を再利用する if not", "is_racing_flag)) if lap in LAP_LIST: lap_number = LAP_LIST.index(lap) + 1 lap_history.append(lap_number) lap_stat_mode =", "coin_history.append(coin_history[-1]) else: coin_history.append(-2) if rank in RANK_LIST: rank_history.append(RANK_LIST.index(rank) + 1) elif is_racing_now: rank_history.append(rank_history[-1]", "is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0): # 指定時間レース判定が起きない場合は一時ファイルをリセットし、レース終了処理に移行する is_racing_now = False curent_lap = 1", "level=logging.INFO, format=formatter) warnings.simplefilter('ignore') \"\"\"################################### パラメータ ###################################\"\"\" SC_COMMAND = \"screenshot OBS -t OBS -f" ]
[ "10, 11, 20, 30] # i, g, N, i, g, N, N @fixture", "coding: utf-8 -*- \"\"\"Test fixtures.\"\"\" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest import fixture", "@fixture def cluster_groups(): return {0: 'noise', 1: 'good', 10: 'mua', 11: 'good'} @fixture", "quality @fixture def similarity(cluster_ids): sim = lambda c, d: (c * 1.01 +", "sim = lambda c, d: (c * 1.01 + d) def similarity(c): return", "d: (c * 1.01 + d) def similarity(c): return get_closest_clusters(c, cluster_ids, sim) return", "N, i, g, N, N @fixture def cluster_groups(): return {0: 'noise', 1: 'good',", "-*- \"\"\"Test fixtures.\"\"\" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest import fixture from phy.io.array", "return c return quality @fixture def similarity(cluster_ids): sim = lambda c, d: (c", "import fixture from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def", "cluster_ids(): return [0, 1, 2, 10, 11, 20, 30] # i, g, N,", "c return quality @fixture def similarity(cluster_ids): sim = lambda c, d: (c *", "fixture from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids():", "N, N @fixture def cluster_groups(): return {0: 'noise', 1: 'good', 10: 'mua', 11:", "\"\"\"Test fixtures.\"\"\" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest 
import fixture from phy.io.array import", "fixtures.\"\"\" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest import fixture from phy.io.array import (get_closest_clusters,", "30] # i, g, N, i, g, N, N @fixture def cluster_groups(): return", "return quality @fixture def similarity(cluster_ids): sim = lambda c, d: (c * 1.01", "11: 'good'} @fixture def quality(): def quality(c): return c return quality @fixture def", "[0, 1, 2, 10, 11, 20, 30] # i, g, N, i, g,", "20, 30] # i, g, N, i, g, N, N @fixture def cluster_groups():", "i, g, N, i, g, N, N @fixture def cluster_groups(): return {0: 'noise',", "<gh_stars>0 # -*- coding: utf-8 -*- \"\"\"Test fixtures.\"\"\" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from", "-*- coding: utf-8 -*- \"\"\"Test fixtures.\"\"\" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest import", "# Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0, 1, 2, 10, 11, 20,", "2, 10, 11, 20, 30] # i, g, N, i, g, N, N", "'good', 10: 'mua', 11: 'good'} @fixture def quality(): def quality(c): return c return", "similarity(cluster_ids): sim = lambda c, d: (c * 1.01 + d) def similarity(c):", "#------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0, 1, 2, 10, 11,", "lambda c, d: (c * 1.01 + d) def similarity(c): return get_closest_clusters(c, cluster_ids,", "# -*- coding: utf-8 -*- \"\"\"Test fixtures.\"\"\" 
#------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest", "return [0, 1, 2, 10, 11, 20, 30] # i, g, N, i,", "# i, g, N, i, g, N, N @fixture def cluster_groups(): return {0:", "def quality(): def quality(c): return c return quality @fixture def similarity(cluster_ids): sim =", "pytest import fixture from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture", "@fixture def similarity(cluster_ids): sim = lambda c, d: (c * 1.01 + d)", "'good'} @fixture def quality(): def quality(c): return c return quality @fixture def similarity(cluster_ids):", "c, d: (c * 1.01 + d) def similarity(c): return get_closest_clusters(c, cluster_ids, sim)", "#------------------------------------------------------------------------------ from pytest import fixture from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures", "quality(c): return c return quality @fixture def similarity(cluster_ids): sim = lambda c, d:", "(c * 1.01 + d) def similarity(c): return get_closest_clusters(c, cluster_ids, sim) return similarity", "Imports #------------------------------------------------------------------------------ from pytest import fixture from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ #", "10: 'mua', 11: 'good'} @fixture def quality(): def quality(c): return c return quality", "def quality(c): return c return quality @fixture def similarity(cluster_ids): sim = lambda c,", "#------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0, 1, 2, 10, 11, 20, 30] #", "= lambda c, d: (c * 1.01 + 
d) def similarity(c): return get_closest_clusters(c,", "phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0,", "# Imports #------------------------------------------------------------------------------ from pytest import fixture from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------", "#------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest import fixture from phy.io.array import (get_closest_clusters, )", "from pytest import fixture from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------", "utf-8 -*- \"\"\"Test fixtures.\"\"\" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ from pytest import fixture from", "1, 2, 10, 11, 20, 30] # i, g, N, i, g, N,", "quality(): def quality(c): return c return quality @fixture def similarity(cluster_ids): sim = lambda", ") #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0, 1, 2, 10,", "@fixture def cluster_ids(): return [0, 1, 2, 10, 11, 20, 30] # i,", "i, g, N, N @fixture def cluster_groups(): return {0: 'noise', 1: 'good', 10:", "Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0, 1, 2, 10, 11, 20, 30]", 
"(get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0, 1, 2,", "def cluster_ids(): return [0, 1, 2, 10, 11, 20, 30] # i, g,", "from phy.io.array import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return", "1: 'good', 10: 'mua', 11: 'good'} @fixture def quality(): def quality(c): return c", "{0: 'noise', 1: 'good', 10: 'mua', 11: 'good'} @fixture def quality(): def quality(c):", "'mua', 11: 'good'} @fixture def quality(): def quality(c): return c return quality @fixture", "def cluster_groups(): return {0: 'noise', 1: 'good', 10: 'mua', 11: 'good'} @fixture def", "'noise', 1: 'good', 10: 'mua', 11: 'good'} @fixture def quality(): def quality(c): return", "import (get_closest_clusters, ) #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ @fixture def cluster_ids(): return [0, 1,", "@fixture def quality(): def quality(c): return c return quality @fixture def similarity(cluster_ids): sim", "def similarity(cluster_ids): sim = lambda c, d: (c * 1.01 + d) def", "N @fixture def cluster_groups(): return {0: 'noise', 1: 'good', 10: 'mua', 11: 'good'}", "return {0: 'noise', 1: 'good', 10: 'mua', 11: 'good'} @fixture def quality(): def", "11, 20, 30] # i, g, N, i, g, N, N @fixture def", "cluster_groups(): return {0: 'noise', 1: 'good', 10: 'mua', 11: 'good'} @fixture def quality():", "g, N, N @fixture def cluster_groups(): return {0: 'noise', 1: 'good', 10: 'mua',", "g, N, i, g, N, N @fixture def cluster_groups(): return {0: 'noise', 1:" ]
[ "countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName =", "ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100)", "models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject", "= models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100)", "models # Create your models here. class Contact(models.Model): firstName = models.CharField(max_length=100) lastName =", "firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100) class", "= models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100)", "models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName", "here. 
class Contact(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject", "class Contact(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject =", "= models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model):", "subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName =", "class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject =", "models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName", "Create your models here. class Contact(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName", "django.db import models # Create your models here. class Contact(models.Model): firstName = models.CharField(max_length=100)", "import models # Create your models here. class Contact(models.Model): firstName = models.CharField(max_length=100) lastName", "<filename>accounts/models.py from django.db import models # Create your models here. class Contact(models.Model): firstName", "= models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100)", "# Create your models here. 
class Contact(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100)", "lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName =", "Contact(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100)", "models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100) subject = models.CharField(max_length=100) class ContactDetails(models.Model): firstName", "your models here. class Contact(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName =", "from django.db import models # Create your models here. class Contact(models.Model): firstName =", "models here. class Contact(models.Model): firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) countryName = models.CharField(max_length=100)" ]
[ "you want to jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is: \",getcwd()) print(\"Listing", "print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is: \",getcwd()) print(\"Listing )the directories in current directory....\\n\",listdir(k))", "Directory is: \",getcwd()) k=input(\"Enter Directory Where you want to jump: \") print(\"Changing Directory.......\")", "import * print(getcwd()) print(listdir('D:\\\\Users\\\\Mohit\\\\PycharmProjects\\\\PythonLab\\\\venv')) print(\"Current Directory is: \",getcwd()) k=input(\"Enter Directory Where you want", "Directory Where you want to jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is:", "\",getcwd()) k=input(\"Enter Directory Where you want to jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current", "is: \",getcwd()) k=input(\"Enter Directory Where you want to jump: \") print(\"Changing Directory.......\") chdir(k)", "os import * print(getcwd()) print(listdir('D:\\\\Users\\\\Mohit\\\\PycharmProjects\\\\PythonLab\\\\venv')) print(\"Current Directory is: \",getcwd()) k=input(\"Enter Directory Where you", "* print(getcwd()) print(listdir('D:\\\\Users\\\\Mohit\\\\PycharmProjects\\\\PythonLab\\\\venv')) print(\"Current Directory is: \",getcwd()) k=input(\"Enter Directory Where you want to", "want to jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is: \",getcwd()) print(\"Listing )the", "k=input(\"Enter Directory Where you want to jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory", "print(\"Current Directory is: \",getcwd()) k=input(\"Enter Directory Where you want to jump: \") print(\"Changing", "from os import * print(getcwd()) print(listdir('D:\\\\Users\\\\Mohit\\\\PycharmProjects\\\\PythonLab\\\\venv')) print(\"Current Directory is: \",getcwd()) k=input(\"Enter Directory Where", 
"print(listdir('D:\\\\Users\\\\Mohit\\\\PycharmProjects\\\\PythonLab\\\\venv')) print(\"Current Directory is: \",getcwd()) k=input(\"Enter Directory Where you want to jump: \")", "print(getcwd()) print(listdir('D:\\\\Users\\\\Mohit\\\\PycharmProjects\\\\PythonLab\\\\venv')) print(\"Current Directory is: \",getcwd()) k=input(\"Enter Directory Where you want to jump:", "jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is: \",getcwd()) print(\"Listing )the directories in", "Where you want to jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is: \",getcwd())", "\") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is: \",getcwd()) print(\"Listing )the directories in current", "to jump: \") print(\"Changing Directory.......\") chdir(k) print(\"Current Directory is: \",getcwd()) print(\"Listing )the directories" ]
[]
[ "https://github.com/globien/easy-python # https://gitee.com/globien/easy-python # 验证三门问题(Monty Hall problem) import random 获奖次数_不换 = 0 #", "random 获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器 试验次数 =", "if car in host_list: host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门", "== car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换 + 1 print(\"不换的获奖概率:\", 获奖次数_不换/试验次数) print(\"换的获奖概率: \",", "== car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 + 1 for i in range(试验次数):", "bet == car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换 + 1 print(\"不换的获奖概率:\", 获奖次数_不换/试验次数) print(\"换的获奖概率:", "for i in range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car =", "door_list[0] # 只剩下一扇门,换成它! if bet == car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换 +", "不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验 for i", "获奖次数_换 = 0 # 换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验 for i in", "= random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0]", "只剩下一扇门,换成它! if bet == car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换 + 1 print(\"不换的获奖概率:\",", "# 不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验 for", "bet = random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet)", "挑战者随机选择一扇门 if bet == car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 + 1 for", "door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它! 
if bet == car: #", "换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet =", "# 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它!", "Hall problem) import random 获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换 = 0 #", "# 换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet", "= 100000 # 换和不换各做这么多次实验 for i in range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"]", "bet = door_list[0] # 只剩下一扇门,换成它! if bet == car: # 换!揭晓答案 获奖次数_换 =", "三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除", "= 0 # 换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验 for i in range(试验次数):", "car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 + 1 for i in range(试验次数): #", "挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list:", "host_list.remove(bet) if car in host_list: host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) #", "换和不换各做这么多次实验 for i in range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car", "random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list", "random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 if bet == car: #", "换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验 for i in range(试验次数): # 不换的实验 door_list", "host_list: host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门", "bet == car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 + 1 for i in", "random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car", "import random 获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器 试验次数", "# 去掉自己已经选过的门 
door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它! if bet ==", "获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器 试验次数 = 100000", "# 只剩下一扇门,换成它! if bet == car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换 + 1", "# 挑战者随机选择一扇门 if bet == car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 + 1", "= 0 # 不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器 试验次数 = 100000 #", "汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 if bet == car: # 不换!直接揭晓答案 获奖次数_不换", "[\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list: host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet)", "if bet == car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 + 1 for i", "# 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 if", "# 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 if bet == car: # 不换!直接揭晓答案", "door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list)", "# 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in", "1 for i in range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car", "100000 # 换和不换各做这么多次实验 for i in range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"] #", "= random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 if bet == car:", "door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它! 
if bet", "for i in range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car =", "# https://gitee.com/globien/easy-python # 验证三门问题(Monty Hall problem) import random 获奖次数_不换 = 0 # 不换而获奖的计数器", "https://gitee.com/globien/easy-python # 验证三门问题(Monty Hall problem) import random 获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换", "host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list: host_list.remove(car) discard = random.choice(host_list) #", "i in range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list)", "# 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list =", "# 换和不换各做这么多次实验 for i in range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号", "in host_list: host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) #", "car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 if bet ==", "car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换 + 1 print(\"不换的获奖概率:\", 获奖次数_不换/试验次数) print(\"换的获奖概率: \", 获奖次数_换/试验次数)", "验证三门问题(Monty Hall problem) import random 获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换 = 0", "range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后", "# 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 #", "# 换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验 for i in range(试验次数): # 不换的实验", "= random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if", "= [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) #", "i in range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list)", "汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"]", "# 不换!直接揭晓答案 获奖次数_不换 = 
获奖次数_不换 + 1 for i in range(试验次数): # 换的实验", "# 不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet", "试验次数 = 100000 # 换和不换各做这么多次实验 for i in range(试验次数): # 不换的实验 door_list =", "problem) import random 获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器", "去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它! if bet == car:", "三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 if bet", "获奖次数_不换 = 获奖次数_不换 + 1 for i in range(试验次数): # 换的实验 door_list =", "0 # 换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验 for i in range(试验次数): #", "host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet", "car in host_list: host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard)", "[\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门", "= random.choice(door_list) # 挑战者随机选择一扇门 if bet == car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换", "# 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list: host_list.remove(car) discard =", "不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 + 1 for i in range(试验次数): # 换的实验 door_list", "= [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list: host_list.remove(car) discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖", "bet = random.choice(door_list) # 挑战者随机选择一扇门 if bet == car: # 不换!直接揭晓答案 获奖次数_不换 =", "# https://github.com/globien/easy-python # https://gitee.com/globien/easy-python # 验证三门问题(Monty Hall problem) import random 获奖次数_不换 = 0", "0 # 不换而获奖的计数器 获奖次数_换 = 0 # 换而获奖的计数器 试验次数 = 100000 # 换和不换各做这么多次实验", "= 获奖次数_不换 + 1 for i in range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"]", "= door_list[0] # 只剩下一扇门,换成它! 
if bet == car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换", "# 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list: host_list.remove(car)", "range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后", "作者:西岛闲鱼 # https://github.com/globien/easy-python # https://gitee.com/globien/easy-python # 验证三门问题(Monty Hall problem) import random 获奖次数_不换 =", "= random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门", "现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它! if", "if bet == car: # 换!揭晓答案 获奖次数_换 = 获奖次数_换 + 1 print(\"不换的获奖概率:\", 获奖次数_不换/试验次数)", "+ 1 for i in range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号", "car = random.choice(door_list) # 汽车随机放在某扇门后 bet = random.choice(door_list) # 挑战者随机选择一扇门 # 现在主持人随机选择一扇门予以排除 #", "# 验证三门问题(Monty Hall problem) import random 获奖次数_不换 = 0 # 不换而获奖的计数器 获奖次数_换 =", "去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它! if bet == car: # 换!揭晓答案 获奖次数_换", "# 作者:西岛闲鱼 # https://github.com/globien/easy-python # https://gitee.com/globien/easy-python # 验证三门问题(Monty Hall problem) import random 获奖次数_不换", "discard = random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet =", "# 去掉主持人排除的门 bet = door_list[0] # 只剩下一扇门,换成它! 
if bet == car: # 换!揭晓答案", "不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) # 汽车随机放在某扇门后 bet =", "现在主持人随机选择一扇门予以排除 # 这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list: host_list.remove(car) discard", "in range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) #", "random.choice(host_list) # 现在挑战者决定换,即换成剩下的一扇门,看看是否获奖 door_list.remove(bet) # 去掉自己已经选过的门 door_list.remove(discard) # 去掉主持人排除的门 bet = door_list[0] #", "这扇门不是挑战者选择的门,也不是汽车所在的门 host_list = [\"A\",\"B\",\"C\"] host_list.remove(bet) if car in host_list: host_list.remove(car) discard = random.choice(host_list)", "获奖次数_不换 + 1 for i in range(试验次数): # 换的实验 door_list = [\"A\",\"B\",\"C\"] #", "random.choice(door_list) # 挑战者随机选择一扇门 if bet == car: # 不换!直接揭晓答案 获奖次数_不换 = 获奖次数_不换 +", "in range(试验次数): # 不换的实验 door_list = [\"A\",\"B\",\"C\"] # 三扇门的编号 car = random.choice(door_list) #" ]
[ "name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your", "null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis',", "-*- # Generated by Django 1.11 on 2018-06-03 06:54 from __future__ import unicode_literals", "preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date',", "migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ] operations", "name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to", "migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction", "2018-06-03 06:54 from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone", "on 2018-06-03 06:54 from __future__ import unicode_literals from django.db import migrations, models import", "django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ] operations = [ migrations.AddField(", "migrations.AddField( model_name='account', name='modified_date', 
field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField(", "name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ),", "Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2,", "your bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField(", "-*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-06-03 06:54 from", "06:54 from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class", "model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField(", "name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time',", "unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [", "amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', 
field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')],", "__future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies", "'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'), ),", "), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type',", "Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ] operations = [ migrations.AddField( model_name='account', name='created_date',", "by Django 1.11 on 2018-06-03 06:54 from __future__ import unicode_literals from django.db import", "bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction',", "), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')],", "class Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ] operations = [ migrations.AddField( model_name='account',", "max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited", "Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField(", "migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC 
Bank')], max_length=50,", "'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction', name='amount',", "= [ ('freefolks', '0005_auto_20180602_2334'), ] operations = [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),", "field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name',", "field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'),", "default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'), ('debit',", "model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction',", "), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True,", "Generated by Django 1.11 on 2018-06-03 06:54 from __future__ import unicode_literals from django.db", "[ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account', name='modified_date', 
field=models.DateTimeField(auto_now=True), ),", "model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited", "name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')], max_length=50, verbose_name=b'Transaction type'), ),", "# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-06-03 06:54", "date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'),", "'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12,", "migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit',", "field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True,", "model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')], max_length=50, verbose_name=b'Transaction type'),", "field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), 
migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField(", "migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')], max_length=50, verbose_name=b'Transaction", "max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'),", "model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction',", "verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis", "Bank')], max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction", "django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'),", "default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),", "preserve_default=False, ), migrations.AddField( 
model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False,", "import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies =", "default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction',", "dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ] operations = [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True,", "'0005_auto_20180602_2334'), ] operations = [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField(", "import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ] operations = [", "model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'),", "models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ] operations =", "migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi", "1.11 on 2018-06-03 06:54 from __future__ import unicode_literals from django.db import 
migrations, models", "# Generated by Django 1.11 on 2018-06-03 06:54 from __future__ import unicode_literals from", "Django 1.11 on 2018-06-03 06:54 from __future__ import unicode_literals from django.db import migrations,", "] operations = [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account',", "operations = [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account', name='modified_date',", "from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('freefolks',", "verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ),", "('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction', name='amount', field=models.DecimalField(decimal_places=2, default=0.0,", "verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from", "name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc',", "from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration):", "field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')], max_length=50, 
verbose_name=b'Transaction type'), ), ]", "import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('freefolks', '0005_auto_20180602_2334'), ]", "migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ),", "field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'), ), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'),", "), migrations.AlterField( model_name='transaction', name='transaction_type', field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')], max_length=50,", "name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date',", "field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC", "= [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True),", "), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi',", 
"model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'),", "), migrations.AddField( model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ),", "coding: utf-8 -*- # Generated by Django 1.11 on 2018-06-03 06:54 from __future__", "utf-8 -*- # Generated by Django 1.11 on 2018-06-03 06:54 from __future__ import", "model_name='account', name='bank_name', field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select", "[ ('freefolks', '0005_auto_20180602_2334'), ] operations = [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False,", "field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='transaction', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True,", "name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='account',", "migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( 
model_name='account', name='modified_date', field=models.DateTimeField(auto_now=True), ), migrations.AddField(", "('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'), ), migrations.AlterField( model_name='transaction',", "('freefolks', '0005_auto_20180602_2334'), ] operations = [ migrations.AddField( model_name='account', name='created_date', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ),", "), migrations.AddField( model_name='transaction', name='date_time', field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'), ), migrations.AddField( model_name='transaction', name='modified_date', field=models.DateTimeField(auto_now=True)," ]
[ ". import generation from . import schemas from . import constants from .", "generation from . import schemas from . import constants from . import utils", "from . import generation from . import schemas from . import constants from", "from . import analysis from . import generation from . import schemas from", ". import analysis from . import generation from . import schemas from .", "analysis from . import generation from . import schemas from . import constants", "import generation from . import schemas from . import constants from . import", "import analysis from . import generation from . import schemas from . import", "<reponame>code-lab-org/tatc from . import analysis from . import generation from . import schemas" ]
[ "sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element", "models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day,", "def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return", "Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50, null=True, unique=False) description = models.CharField(max_length=240,", "+ self.datetime.isoformat() class Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50, null=True, unique=False)", "\"@\" + self.datetime.isoformat() class Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50, null=True,", "= models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50, null=True, unique=False) description = models.CharField(max_length=240, unique=False, null=True)", "@staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors):", "setting = models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50, null=True, unique=False) description = models.CharField(max_length=240, 
unique=False,", "self.datetime.isoformat() class Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50, null=True, unique=False) description", "on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime')", "models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return", "SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def", "= models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def", "+ \"@\" + self.datetime.isoformat() class Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50,", "value = models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day,", "class SensorUsage(models.Model): value = models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod", "__str__(self): return self.sensor.element + ' ' + str(self.value) + \"@\" + self.datetime.isoformat() class", "adapter_models class SensorUsage(models.Model): value = models.FloatField() sensor = 
models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField()", "as adapter_models class SensorUsage(models.Model): value = models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime =", "return self.sensor.element + ' ' + str(self.value) + \"@\" + self.datetime.isoformat() class Settings(models.Model):", "SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element + ' ' + str(self.value) +", "sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day,", "import models from adapter import models as adapter_models class SensorUsage(models.Model): value = models.FloatField()", "return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime')", "sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element + ' ' + str(self.value) + \"@\" +", "models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year,", "monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year,", 
"self.sensor.element + ' ' + str(self.value) + \"@\" + self.datetime.isoformat() class Settings(models.Model): setting", "from adapter import models as adapter_models class SensorUsage(models.Model): value = models.FloatField() sensor =", "def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element + '", "return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element + ' ' + str(self.value)", "= models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors):", "datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod", "datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return", "get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element + ' '", "get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month,", "str(self.value) + \"@\" + self.datetime.isoformat() class 
Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value =", "models as adapter_models class SensorUsage(models.Model): value = models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime", "' + str(self.value) + \"@\" + self.datetime.isoformat() class Settings(models.Model): setting = models.CharField(max_length=20, unique=True)", "+ str(self.value) + \"@\" + self.datetime.isoformat() class Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value", "@staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element +", "datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element + ' ' + str(self.value) + \"@\"", "models from adapter import models as adapter_models class SensorUsage(models.Model): value = models.FloatField() sensor", "django.db import models from adapter import models as adapter_models class SensorUsage(models.Model): value =", "= models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def get_usage_by_day(day, monitors): return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month,", "class Settings(models.Model): setting = models.CharField(max_length=20, unique=True) value = models.CharField(max_length=50, null=True, unique=False) description =", "' ' + str(self.value) + \"@\" + self.datetime.isoformat() class Settings(models.Model): setting = models.CharField(max_length=20,", "def __str__(self): return self.sensor.element + ' ' + str(self.value) + \"@\" + self.datetime.isoformat()", "+ ' ' + str(self.value) + \"@\" + self.datetime.isoformat() class Settings(models.Model): setting =", "adapter import models as adapter_models class 
SensorUsage(models.Model): value = models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor,", "import models as adapter_models class SensorUsage(models.Model): value = models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE)", "datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') @staticmethod def get_usage_by_month(day, monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self):", "monitors): return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime') def __str__(self): return self.sensor.element + ' ' +", "SensorUsage(models.Model): value = models.FloatField() sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE) datetime = models.DateTimeField() @staticmethod def", "from django.db import models from adapter import models as adapter_models class SensorUsage(models.Model): value" ]
[ "<filename>pySimpleGUI/cookbook/1a_one_shot_win.py import PySimpleGUI as simpleGUI ''' https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern Recipe - Pattern 1A - \"One-shot", "simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title - key', layout) event, values =", "Recipe - Pattern 1A - \"One-shot Window\" - (The Simplest Pattern) The One-shot", "text clicked, a list item chosen, etc, or 'WIN_CLOSED' if the user closes", "def one_shot_key(): layout = [ [simpleGUI.Text('My one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(),", "= window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You entered', text_input) if __name__ == '__main__':", "Thus the design pattern can get the value of whatever was input by", "to quickly grab some information and then be closed. The window is read", "and a dictionary of 'values'. The 'event' is what caused the read to", "using the X. The 'values' is a dictionary of values of all the", "layout = [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One", "be closed. The window is read and then closed. When you \"read\" a", "its key will be auto-numbered and is zero in this case. Thus the", "event, values = window.read() window.close() text_input = values[0] simpleGUI.popup('You entered', text_input) ''' If", "'form' meant to quickly grab some information and then be closed. The window", "you are returned a tuple consisting of an 'event' and a dictionary of", "key will be auto-numbered and is zero in this case. Thus the design", "window.read() window.close() text_input = values[0] simpleGUI.popup('You entered', text_input) ''' If you want to", "values of all the input-style elements. Dictionaries use keys to define entries. 
If", "''' def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ]", "''' https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern Recipe - Pattern 1A - \"One-shot Window\" - (The Simplest Pattern)", "closed. When you \"read\" a window, you are returned a tuple consisting of", "(The Simplest Pattern) The One-shot window is one that pops up, collects some", "X. The 'values' is a dictionary of values of all the input-style elements.", "your elements do not specificy a key, one is provided for you. These", "is what caused the read to return. It could be a button press,", "does not specify a key for the 'InputText' element, so its key will", "entered', text_input) ''' If you want to use your own key instead of", "simpleGUI.Window('One Shot Title - key', layout) event, values = window.read() window.close() text_input =", "layout = [ [simpleGUI.Text('My one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ]", "then closed. When you \"read\" a window, you are returned a tuple consisting", "once. ''' def one_shot_key(): layout = [ [simpleGUI.Text('My one-shot windows whit own key')],", "read and then closed. When you \"read\" a window, you are returned a", "what caused the read to return. It could be a button press, some", "is zero in this case. Thus the design pattern can get the value", "value of whatever was input by referencing 'values[0]'. ''' def one_shot(my_theme=''): layout =", "window using the X. The 'values' is a dictionary of values of all", "or 'WIN_CLOSED' if the user closes the window using the X. 
The 'values'", "window.close() text_input = values['-IN-'] simpleGUI.popup('You entered', text_input) if __name__ == '__main__': one_shot() one_shot_key()", "one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window =", "you \"read\" a window, you are returned a tuple consisting of an 'event'", "starting at zero. This design pattern does not specify a key for the", "= [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot", "data, and then disappears. It is more or less a 'form' meant to", "'event' is what caused the read to return. It could be a button", "caused the read to return. It could be a button press, some text", "[simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title', layout) event, values =", "windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title", "window, you are returned a tuple consisting of an 'event' and a dictionary", "The window is read and then closed. When you \"read\" a window, you", "a key, one is provided for you. These auto-numbered keys are ints starting", "1A - \"One-shot Window\" - (The Simplest Pattern) The One-shot window is one", "meant to quickly grab some information and then be closed. The window is", "a button press, some text clicked, a list item chosen, etc, or 'WIN_CLOSED'", "elements. Dictionaries use keys to define entries. If your elements do not specificy", "then be closed. The window is read and then closed. When you \"read\"", "do not specificy a key, one is provided for you. These auto-numbered keys", "provided for you. These auto-numbered keys are ints starting at zero. 
This design", "event, values = window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You entered', text_input) if __name__", "and then closed. When you \"read\" a window, you are returned a tuple", "input-style elements. Dictionaries use keys to define entries. If your elements do not", "text_input = values[0] simpleGUI.popup('You entered', text_input) ''' If you want to use your", "of all the input-style elements. Dictionaries use keys to define entries. If your", "are ints starting at zero. This design pattern does not specify a key", "] window = simpleGUI.Window('One Shot Title - key', layout) event, values = window.read()", "one_shot_key(): layout = [ [simpleGUI.Text('My one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()]", "= window.read() window.close() text_input = values[0] simpleGUI.popup('You entered', text_input) ''' If you want", "key, one is provided for you. These auto-numbered keys are ints starting at", "auto-numbered keys are ints starting at zero. This design pattern does not specify", "in this case. Thus the design pattern can get the value of whatever", "text_input) ''' If you want to use your own key instead of an", "instead of an auto-generated once. ''' def one_shot_key(): layout = [ [simpleGUI.Text('My one-shot", "[simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title', layout)", "are returned a tuple consisting of an 'event' and a dictionary of 'values'.", "a list item chosen, etc, or 'WIN_CLOSED' if the user closes the window", "one is provided for you. These auto-numbered keys are ints starting at zero.", "zero. This design pattern does not specify a key for the 'InputText' element,", "want to use your own key instead of an auto-generated once. ''' def", "is provided for you. These auto-numbered keys are ints starting at zero. 
This", "layout) event, values = window.read() window.close() text_input = values[0] simpleGUI.popup('You entered', text_input) '''", "closes the window using the X. The 'values' is a dictionary of values", "window = simpleGUI.Window('One Shot Title', layout) event, values = window.read() window.close() text_input =", "whatever was input by referencing 'values[0]'. ''' def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My", "dictionary of values of all the input-style elements. Dictionaries use keys to define", "this case. Thus the design pattern can get the value of whatever was", "a dictionary of values of all the input-style elements. Dictionaries use keys to", "elements do not specificy a key, one is provided for you. These auto-numbered", "so its key will be auto-numbered and is zero in this case. Thus", "key for the 'InputText' element, so its key will be auto-numbered and is", "def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window", "- (The Simplest Pattern) The One-shot window is one that pops up, collects", "an 'event' and a dictionary of 'values'. The 'event' is what caused the", "be auto-numbered and is zero in this case. Thus the design pattern can", "of whatever was input by referencing 'values[0]'. ''' def one_shot(my_theme=''): layout = [", "''' def one_shot_key(): layout = [ [simpleGUI.Text('My one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')],", "quickly grab some information and then be closed. The window is read and", "Pattern 1A - \"One-shot Window\" - (The Simplest Pattern) The One-shot window is", "[simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title', layout) event, values = window.read()", "to return. It could be a button press, some text clicked, a list", "to define entries. 
If your elements do not specificy a key, one is", "button press, some text clicked, a list item chosen, etc, or 'WIN_CLOSED' if", "a window, you are returned a tuple consisting of an 'event' and a", "consisting of an 'event' and a dictionary of 'values'. The 'event' is what", "simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title', layout) event, values = window.read() window.close()", "the value of whatever was input by referencing 'values[0]'. ''' def one_shot(my_theme=''): layout", "[ [simpleGUI.Text('My one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window =", "keys are ints starting at zero. This design pattern does not specify a", "not specify a key for the 'InputText' element, so its key will be", "tuple consisting of an 'event' and a dictionary of 'values'. The 'event' is", "if the user closes the window using the X. The 'values' is a", "import PySimpleGUI as simpleGUI ''' https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern Recipe - Pattern 1A - \"One-shot Window\"", "be a button press, some text clicked, a list item chosen, etc, or", "information and then be closed. The window is read and then closed. When", "a tuple consisting of an 'event' and a dictionary of 'values'. The 'event'", "is read and then closed. When you \"read\" a window, you are returned", "Pattern) The One-shot window is one that pops up, collects some data, and", "keys to define entries. If your elements do not specificy a key, one", "window.close() text_input = values[0] simpleGUI.popup('You entered', text_input) ''' If you want to use", "specificy a key, one is provided for you. These auto-numbered keys are ints", "a dictionary of 'values'. The 'event' is what caused the read to return.", "\"One-shot Window\" - (The Simplest Pattern) The One-shot window is one that pops", "user closes the window using the X. 
The 'values' is a dictionary of", "some text clicked, a list item chosen, etc, or 'WIN_CLOSED' if the user", "entries. If your elements do not specificy a key, one is provided for", "for you. These auto-numbered keys are ints starting at zero. This design pattern", "''' If you want to use your own key instead of an auto-generated", "PySimpleGUI as simpleGUI ''' https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern Recipe - Pattern 1A - \"One-shot Window\" -", "design pattern does not specify a key for the 'InputText' element, so its", "Title', layout) event, values = window.read() window.close() text_input = values[0] simpleGUI.popup('You entered', text_input)", "you. These auto-numbered keys are ints starting at zero. This design pattern does", "[simpleGUI.Text('My one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One", "'values' is a dictionary of values of all the input-style elements. Dictionaries use", "some information and then be closed. The window is read and then closed.", "simpleGUI.popup('You entered', text_input) ''' If you want to use your own key instead", "'values'. The 'event' is what caused the read to return. It could be", "then disappears. It is more or less a 'form' meant to quickly grab", "ints starting at zero. This design pattern does not specify a key for", "referencing 'values[0]'. ''' def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(),", "values = window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You entered', text_input) if __name__ ==", "'InputText' element, so its key will be auto-numbered and is zero in this", "less a 'form' meant to quickly grab some information and then be closed.", "get the value of whatever was input by referencing 'values[0]'. 
''' def one_shot(my_theme=''):", "etc, or 'WIN_CLOSED' if the user closes the window using the X. The", "It is more or less a 'form' meant to quickly grab some information", "define entries. If your elements do not specificy a key, one is provided", "a key for the 'InputText' element, so its key will be auto-numbered and", "window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title', layout) event, values", "values = window.read() window.close() text_input = values[0] simpleGUI.popup('You entered', text_input) ''' If you", "to use your own key instead of an auto-generated once. ''' def one_shot_key():", "window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You entered', text_input) if __name__ == '__main__': one_shot()", "up, collects some data, and then disappears. It is more or less a", "you want to use your own key instead of an auto-generated once. '''", "own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title - key',", "or less a 'form' meant to quickly grab some information and then be", "key instead of an auto-generated once. ''' def one_shot_key(): layout = [ [simpleGUI.Text('My", "one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot", "One-shot window is one that pops up, collects some data, and then disappears.", "pops up, collects some data, and then disappears. It is more or less", "could be a button press, some text clicked, a list item chosen, etc,", "Dictionaries use keys to define entries. If your elements do not specificy a", "key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title - key', layout)", "was input by referencing 'values[0]'. 
''' def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot", "zero in this case. Thus the design pattern can get the value of", "values[0] simpleGUI.popup('You entered', text_input) ''' If you want to use your own key", "window = simpleGUI.Window('One Shot Title - key', layout) event, values = window.read() window.close()", "[ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title',", "item chosen, etc, or 'WIN_CLOSED' if the user closes the window using the", "- \"One-shot Window\" - (The Simplest Pattern) The One-shot window is one that", "window is one that pops up, collects some data, and then disappears. It", "The 'values' is a dictionary of values of all the input-style elements. Dictionaries", "all the input-style elements. Dictionaries use keys to define entries. If your elements", "specify a key for the 'InputText' element, so its key will be auto-numbered", "is more or less a 'form' meant to quickly grab some information and", "disappears. It is more or less a 'form' meant to quickly grab some", "Window\" - (The Simplest Pattern) The One-shot window is one that pops up,", "the read to return. It could be a button press, some text clicked,", "\"read\" a window, you are returned a tuple consisting of an 'event' and", "Shot Title', layout) event, values = window.read() window.close() text_input = values[0] simpleGUI.popup('You entered',", "input by referencing 'values[0]'. ''' def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot window.')],", "The One-shot window is one that pops up, collects some data, and then", "one that pops up, collects some data, and then disappears. It is more", "more or less a 'form' meant to quickly grab some information and then", "window is read and then closed. When you \"read\" a window, you are", "use your own key instead of an auto-generated once. 
''' def one_shot_key(): layout", "https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern Recipe - Pattern 1A - \"One-shot Window\" - (The Simplest Pattern) The", "If your elements do not specificy a key, one is provided for you.", "return. It could be a button press, some text clicked, a list item", "will be auto-numbered and is zero in this case. Thus the design pattern", "the design pattern can get the value of whatever was input by referencing", "simpleGUI.Window('One Shot Title', layout) event, values = window.read() window.close() text_input = values[0] simpleGUI.popup('You", "Shot Title - key', layout) event, values = window.read() window.close() text_input = values['-IN-']", "the X. The 'values' is a dictionary of values of all the input-style", "] window = simpleGUI.Window('One Shot Title', layout) event, values = window.read() window.close() text_input", "collects some data, and then disappears. It is more or less a 'form'", "for the 'InputText' element, so its key will be auto-numbered and is zero", "case. Thus the design pattern can get the value of whatever was input", "If you want to use your own key instead of an auto-generated once.", "Simplest Pattern) The One-shot window is one that pops up, collects some data,", "the user closes the window using the X. The 'values' is a dictionary", "- Pattern 1A - \"One-shot Window\" - (The Simplest Pattern) The One-shot window", "is one that pops up, collects some data, and then disappears. It is", "and then be closed. The window is read and then closed. When you", "list item chosen, etc, or 'WIN_CLOSED' if the user closes the window using", "of an auto-generated once. 
''' def one_shot_key(): layout = [ [simpleGUI.Text('My one-shot windows", "= simpleGUI.Window('One Shot Title', layout) event, values = window.read() window.close() text_input = values[0]", "whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title -", "This design pattern does not specify a key for the 'InputText' element, so", "[simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title - key', layout) event,", "layout) event, values = window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You entered', text_input) if", "'values[0]'. ''' def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()]", "= [ [simpleGUI.Text('My one-shot windows whit own key')], [simpleGUI.InputText(key='-IN-')], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window", "element, so its key will be auto-numbered and is zero in this case.", "can get the value of whatever was input by referencing 'values[0]'. ''' def", "returned a tuple consisting of an 'event' and a dictionary of 'values'. The", "'WIN_CLOSED' if the user closes the window using the X. The 'values' is", "= simpleGUI.Window('One Shot Title - key', layout) event, values = window.read() window.close() text_input", "pattern can get the value of whatever was input by referencing 'values[0]'. '''", "of 'values'. The 'event' is what caused the read to return. It could", "chosen, etc, or 'WIN_CLOSED' if the user closes the window using the X.", "the input-style elements. Dictionaries use keys to define entries. If your elements do", "closed. The window is read and then closed. When you \"read\" a window,", "that pops up, collects some data, and then disappears. It is more or", "some data, and then disappears. It is more or less a 'form' meant", "an auto-generated once. 
''' def one_shot_key(): layout = [ [simpleGUI.Text('My one-shot windows whit", "The 'event' is what caused the read to return. It could be a", "pattern does not specify a key for the 'InputText' element, so its key", "design pattern can get the value of whatever was input by referencing 'values[0]'.", "of an 'event' and a dictionary of 'values'. The 'event' is what caused", "and then disappears. It is more or less a 'form' meant to quickly", "auto-generated once. ''' def one_shot_key(): layout = [ [simpleGUI.Text('My one-shot windows whit own", "the 'InputText' element, so its key will be auto-numbered and is zero in", "simpleGUI ''' https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern Recipe - Pattern 1A - \"One-shot Window\" - (The Simplest", "auto-numbered and is zero in this case. Thus the design pattern can get", "[simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title - key', layout) event, values", "- key', layout) event, values = window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You entered',", "read to return. It could be a button press, some text clicked, a", "a 'form' meant to quickly grab some information and then be closed. The", "at zero. This design pattern does not specify a key for the 'InputText'", "These auto-numbered keys are ints starting at zero. This design pattern does not", "one-shot window.')], [simpleGUI.InputText()], [simpleGUI.Submit(), simpleGUI.Cancel()] ] window = simpleGUI.Window('One Shot Title', layout) event,", "your own key instead of an auto-generated once. ''' def one_shot_key(): layout =", "grab some information and then be closed. The window is read and then", "Title - key', layout) event, values = window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You", "by referencing 'values[0]'. 
''' def one_shot(my_theme=''): layout = [ [simpleGUI.Text('My one-shot window.')], [simpleGUI.InputText()],", "the window using the X. The 'values' is a dictionary of values of", "not specificy a key, one is provided for you. These auto-numbered keys are", "as simpleGUI ''' https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern Recipe - Pattern 1A - \"One-shot Window\" - (The", "is a dictionary of values of all the input-style elements. Dictionaries use keys", "press, some text clicked, a list item chosen, etc, or 'WIN_CLOSED' if the", "use keys to define entries. If your elements do not specificy a key,", "When you \"read\" a window, you are returned a tuple consisting of an", "'event' and a dictionary of 'values'. The 'event' is what caused the read", "and is zero in this case. Thus the design pattern can get the", "= values[0] simpleGUI.popup('You entered', text_input) ''' If you want to use your own", "key', layout) event, values = window.read() window.close() text_input = values['-IN-'] simpleGUI.popup('You entered', text_input)", "dictionary of 'values'. The 'event' is what caused the read to return. It", "clicked, a list item chosen, etc, or 'WIN_CLOSED' if the user closes the", "of values of all the input-style elements. Dictionaries use keys to define entries.", "own key instead of an auto-generated once. ''' def one_shot_key(): layout = [", "It could be a button press, some text clicked, a list item chosen," ]
[ "the state of the entities \"\"\" if self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents,", "agent: Agent, decision: Action): \"\"\" Enact the decisions made by the agent in", "k > 0: length = sqrt(e1x * e1x + e1y * e1y) k", "of the environment. \"\"\" def __init__(self, window, agents: List[Agent]): \"\"\" Initialise the agents,", "k = k / length if k < length: if sqrt(c1x * c1x", "v2y e2x = v3x - v2x e2y = v3y - v2y k =", "k * k) <= asteroid.radius: return True # Third edge c3x = asteroid.centre_x", "decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire", "the particles, asteroids and the agents ships. \"\"\" destroyed_particles = [] preserved_particles =", "+ 140))) v2y = int(ship.centre_y + (ship.height * sin(ship.facing + 140))) v3x =", "for particle in particles: if particle not in destroyed_particles and\\ 0 < particle.centre_x", "the collision detection between the ship and asteroids. \"\"\" # Detection adapted from", "Particle from game.agent import Agent, Action key = pyglet.window.key class GameState(Enum): \"\"\" Is", "# Check if the vertices of the ship are intersecting the asteroid if", "Calculates if an asteroid is visible. 
\"\"\" return (window_height + asteroid.radius < asteroid.centre_y", "- v1x) - (v2x - v1x)*(asteroid.centre_y - v1y)) >= 0 and \\ ((v3y", "self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers): \"\"\" On key release update the", "0: length = sqrt(e2x * e2x + e2y * e2y) k = k", "if start_x == 0: velocity_x = random.randint(1, 3) else: velocity_x = random.randint(-3, -1)", "[])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in asteroids: for agent in agents: if", "Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost() elif", "/ 5 > self.level and self.seconds_between_asteroid_generation > 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation", "asteroids and the agents ships. \"\"\" destroyed_particles = [] preserved_particles = [] preserved_asteroids", "particle in particles: if particle not in destroyed_particles and\\ 0 < particle.centre_x <", "not in destroyed_particles and\\ 0 < particle.centre_x < window_width and 0 < particle.centre_y", "or asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid], agents:", "None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\" Calculates the collision detection between the", "in particles: if particle not in destroyed_particles and\\ 0 < particle.centre_x < window_width", "v3x)) >= 0: return True # Check if edges intersect circle # First", "preserved_agents = agents reward = 0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids,", "decision: Action): \"\"\" Enact the decisions made by the agent in the order", "? 
\"\"\" for agent in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers): \"\"\"", ":param symbol: The key release. :param modifiers: ? \"\"\" for agent in self.agents:", "- (v3x - v2x)*(asteroid.centre_y - v2y)) >= 0 and \\ ((v1y - v3y)*(asteroid.centre_x", "by the agent in the order they are given. :param agent: The agent", "in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1 destroyed_asteroid = True destroyed_particles.append(particle)", "game.agent import Agent, Action key = pyglet.window.key class GameState(Enum): \"\"\" Is the game", "= int(ship.centre_x + (2 * ship.height * cos(ship.facing))) v1y = int(ship.centre_y + (2", "length = sqrt(e2x * e2x + e2y * e2y) k = k /", "generator') def add_particle(self, particle): \"\"\" Adds a particle to the list of current", "particle not in destroyed_particles and\\ 0 < particle.centre_x < window_width and 0 <", "random.randint(0, window.height) if start_x == 0: velocity_x = random.randint(1, 3) else: velocity_x =", "window_height, particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\"", "= 2 OVER = 3 class Game: \"\"\" Handles the interaction between the", "circle.radius * circle.radius): return True else: return False def start(self): \"\"\" Run the", "Third edge c3x = asteroid.centre_x - v3x c3y = asteroid.centre_y - v3y e3x", "seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\" Sets the game state from INPLAY to", "- v2y)) >= 0 and \\ ((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x", "asteroid.centre_y < -asteroid.radius) or\\ (window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius)", "apscheduler.schedulers.background import BackgroundScheduler from game.entities import Asteroid, Particle from game.agent import Agent, Action", "GameState.PAUSED 
self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\"", "from game.agent import Agent, Action key = pyglet.window.key class GameState(Enum): \"\"\" Is the", "window: The window to create the entities on. \"\"\" self.window = window self.agents:", "elif decision is Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision is", "Enum from math import cos, sin, sqrt from typing import List, Tuple from", "in the order they are given. :param agent: The agent that is carrying", "3) else: velocity_x = random.randint(-3, -1) velocity_y = random.randint(-3, 3) else: start_x =", "0 and \\ ((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y -", "Check if the vertices of the ship are intersecting the asteroid if self.is_inside(v1x,", "k > 0: length = sqrt(e2x * e2x + e2y * e2y) k", "int = window.width self.window_height: int = window.height self.points: int = 0 def draw(self):", "v3x)*(asteroid.centre_y - v3x)) >= 0: return True # Check if edges intersect circle", "key presses update the actions of the user agents. :param symbol: The key", "GameState(Enum): \"\"\" Is the game currently running, paused or is it game over.", "1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\" Sets the game", "from typing import List, Tuple from time import time from apscheduler.schedulers.background import BackgroundScheduler", "entities. \"\"\" for agent in self.agents: agent.draw() for asteroid in self.asteroids: asteroid.draw() for", "c1x + c1y * c1y - k * k) <= asteroid.radius: return True", "decisions made by the agent in the order they are given. 
:param agent:", "agent: The agent that is carrying out the action :param decision: The action", "- v3x)) >= 0: return True # Check if edges intersect circle #", "0 < particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward def", "an asteroid is visible. \"\"\" return (window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y", "k * k) <= asteroid.radius: return True return False def is_inside(self, x, y,", "k < length: if sqrt(c2x * c2x + c2y * c2y - k", "random from enum import Enum from math import cos, sin, sqrt from typing", "Initialise the agents, particles, asteroids (and asteroid creator), state of the game, points", "generator') def pause_toggle(self): \"\"\" Sets the game state from INPLAY to PAUSED and", "int]: \"\"\" Updates the game entity objects. This includes the particles, asteroids and", "((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y - v3x)) >= 0:", "# Check if edges intersect circle # First edge c1x = asteroid.centre_x -", "__init__(self, window, agents: List[Agent]): \"\"\" Initialise the agents, particles, asteroids (and asteroid creator),", "int = window.height self.points: int = 0 def draw(self): \"\"\" Draws the entities.", "enact_decision(self, agent: Agent, decision: Action): \"\"\" Enact the decisions made by the agent", "agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire = agent_ship.fire()", "if not self.agents: self.game_over() if self.points / 5 > self.level and self.seconds_between_asteroid_generation >", "agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left() elif decision", "generator') self.level = 1 self.state: GameState = GameState.INPLAY self.window_width: int = window.width self.window_height:", "e1x + e1y * e1y) k = k / length if k <", "k 
* k) <= asteroid.radius: return True # Second edge c2x = asteroid.centre_x", "# Second edge c2x = asteroid.centre_x - v2x c2y = asteroid.centre_y - v2y", "= v3y - v2y k = c2x * e2x + c2y * e2y", "- k * k) <= asteroid.radius: return True # Third edge c3x =", "= asteroid.centre_x - v1x c1y = asteroid.centre_y - v1y e1x = v2x -", "(ship.height * cos(ship.facing - 140))) v3y = int(ship.centre_y + (ship.height * sin(ship.facing -", ":param window: The window to create the entities on. \"\"\" self.window = window", "= agents reward = 0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, []))", "start(self): \"\"\" Run the game. \"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The end of", "symbol: The key release. :param modifiers: ? \"\"\" for agent in self.agents: agent.on_key_release(symbol,", "preserved_asteroids, preserved_agents, reward def enact_decision(self, agent: Agent, decision: Action): \"\"\" Enact the decisions", "agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in asteroids: for agent", "140))) v3y = int(ship.centre_y + (ship.height * sin(ship.facing - 140))) # Check if", "int(ship.centre_x + (ship.height * cos(ship.facing + 140))) v2y = int(ship.centre_y + (ship.height *", "import Agent, Action key = pyglet.window.key class GameState(Enum): \"\"\" Is the game currently", "in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers): \"\"\" On key release update", "+ asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width, window_height, particles:", "- v1y e1x = v2x - v1x e1y = v2y - v1y k", "agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost() elif decision", "out the action :param decision: The 
action to enact. \"\"\" agent_ship = agent.get_ship()", "3 class Game: \"\"\" Handles the interaction between the agents and the environment.", "= sqrt(e1x * e1x + e1y * e1y) k = k / length", "agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid", "* e1x + e1y * e1y) k = k / length if k", "e2y if k > 0: length = sqrt(e2x * e2x + e2y *", "key = pyglet.window.key class GameState(Enum): \"\"\" Is the game currently running, paused or", "= random.randint(1, 3) else: velocity_x = random.randint(-3, -1) velocity_y = random.randint(-3, 3) else:", "asteroids (and asteroid creator), state of the game, points and agents. :param window:", "if ((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y - v1y)) >=", "asteroid creator), state of the game, points and agents. :param window: The window", "* c2y - k * k) <= asteroid.radius: return True # Third edge", "def on_key_press(self, symbol, modifiers): \"\"\" On key presses update the actions of the", "= sqrt(e3x * e3x + e3y * e3y) k = k / length", "or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return True # Check if", "action to enact. \"\"\" agent_ship = agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right() elif", "the user agents. :param symbol: The key pressed. :param modifiers: ? \"\"\" for", "* c3y - k * k) <= asteroid.radius: return True return False def", "= GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\" Adds a particle to the", "(2 * ship.height * cos(ship.facing))) v1y = int(ship.centre_y + (2 * ship.height *", "the decisions made by the agent in the order they are given. 
:param", "= GameState.INPLAY self.window_width: int = window.width self.window_height: int = window.height self.points: int =", "-> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates the game entity objects. This includes", "- v1x c1y = asteroid.centre_y - v1y e1x = v2x - v1x e1y", "self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator')", "= random.randint(-3, 3) else: start_x = random.randint(0, window.width) start_y = random.choice([0, window.height]) if", "if the vertices of the ship are intersecting the asteroid if self.is_inside(v1x, v1y,", "length if k < length: if sqrt(c2x * c2x + c2y * c2y", "agents and the environment. Handles the updating of the environment. \"\"\" def __init__(self,", "Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn() elif", "pyglet.window.key class GameState(Enum): \"\"\" Is the game currently running, paused or is it", "\"\"\" if self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width, self.window_height,", "sin(ship.facing))) v2x = int(ship.centre_x + (ship.height * cos(ship.facing + 140))) v2y = int(ship.centre_y", "'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1 self.state: GameState = GameState.INPLAY self.window_width: int", "running, paused or is it game over. 
\"\"\" INPLAY = 1 PAUSED =", "velocity_x = random.randint(-3, -1) velocity_y = random.randint(-3, 3) else: start_x = random.randint(0, window.width)", "y, circle): if ((x - circle.centre_x) * (x - circle.centre_x) + (y -", "sqrt(c3x * c3x + c3y * c3y - k * k) <= asteroid.radius:", "from enum import Enum from math import cos, sin, sqrt from typing import", "if self.state is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY", ":param modifiers: ? \"\"\" for agent in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol,", "* e3y if k > 0: length = sqrt(e3x * e3x + e3y", "circle): if ((x - circle.centre_x) * (x - circle.centre_x) + (y - circle.centre_y)", "the player dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\"", "particle to the list of current particles. \"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\"", "asteroids. \"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2 *", "= sqrt(e2x * e2x + e2y * e2y) k = k / length", "window, agents: List[Agent]): \"\"\" Initialise the agents, particles, asteroids (and asteroid creator), state", "- (v1x - v3x)*(asteroid.centre_y - v3x)) >= 0: return True # Check if", "of the game when the player dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def", "- circle.centre_y) * (y - circle.centre_y) <= circle.radius * circle.radius): return True else:", "the entities on. \"\"\" self.window = window self.agents: List[Agent] = agents self.particles: List[Particle]", "of current particles. \"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates an asteroid. This", "the environment. Handles the updating of the environment. 
\"\"\" def __init__(self, window, agents:", "Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire =", "the user agents. :param symbol: The key release. :param modifiers: ? \"\"\" for", "random.randint(1, 3) else: velocity_x = random.randint(-3, -1) velocity_y = random.randint(-3, 3) else: start_x", "random.randint(1, 3) else: velocity_y = random.randint(-3, -1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y,", "True for particle in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1 destroyed_asteroid", "<= asteroid.radius: return True # Second edge c2x = asteroid.centre_x - v2x c2y", "3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates", "# Check if circle center inside the ship if ((v2y - v1y)*(asteroid.centre_x -", "decision is Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST:", "= int(ship.centre_y + (2 * ship.height * sin(ship.facing))) v2x = int(ship.centre_x + (ship.height", "> 0: length = sqrt(e1x * e1x + e1y * e1y) k =", "then we just call here asteroid.generate(). 
\"\"\" if random.randint(0, 1) == 0: start_x", "particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates", "Action): \"\"\" Enact the decisions made by the agent in the order they", "if circle center inside the ship if ((v2y - v1y)*(asteroid.centre_x - v1x) -", "self.state = GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\" On key presses update the", "<= asteroid.radius: return True return False def is_inside(self, x, y, circle): if ((x", "ship.height * sin(ship.facing))) v2x = int(ship.centre_x + (ship.height * cos(ship.facing + 140))) v2y", "could be in the Asteroid class and then we just call here asteroid.generate().", "asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return True # Check", "- v2y k = c2x * e2x + c2y * e2y if k", "the agents, particles, asteroids (and asteroid creator), state of the game, points and", "= int(ship.centre_y + (ship.height * sin(ship.facing + 140))) v3x = int(ship.centre_x + (ship.height", "cos(ship.facing - 140))) v3y = int(ship.centre_y + (ship.height * sin(ship.facing - 140))) #", "self.points += reward if not self.agents: self.game_over() if self.points / 5 > self.level", "Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2 * ship.height * cos(ship.facing)))", "- v1y k = c1x * e1x + c1y * e1y if k", "True return False def is_inside(self, x, y, circle): if ((x - circle.centre_x) *", "else: velocity_x = random.randint(-3, -1) velocity_y = random.randint(-3, 3) else: start_x = random.randint(0,", "decision: The action to enact. 
\"\"\" agent_ship = agent.get_ship() if decision is Action.TURNRIGHT:", "self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\" Adds", "intersecting the asteroid if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x,", "agent that is carrying out the action :param decision: The action to enact.", "The action to enact. \"\"\" agent_ship = agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right()", "k / length if k < length: if sqrt(c3x * c3x + c3y", "circle.centre_y) * (y - circle.centre_y) <= circle.radius * circle.radius): return True else: return", "\"\"\" The end of the game when the player dies. \"\"\" self.asteroid_creator.pause() self.state", "enum import Enum from math import cos, sin, sqrt from typing import List,", "self.agents) self.points += reward if not self.agents: self.game_over() if self.points / 5 >", "Sets the game state from INPLAY to PAUSED and vice versa. \"\"\" if", "agent.draw() for asteroid in self.asteroids: asteroid.draw() for particle in self.particles: particle.draw() def update(self):", "The key release. :param modifiers: ? \"\"\" for agent in self.agents: agent.on_key_release(symbol, modifiers)", "class. 
As in the calculations could be in the Asteroid class and then", "* e2x + e2y * e2y) k = k / length if k", "else: velocity_y = random.randint(-3, -1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y,", "= True for particle in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1", "adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2 * ship.height * cos(ship.facing))) v1y", "self.window = window self.agents: List[Agent] = agents self.particles: List[Particle] = [] self.asteroids: List[Asteroid]", "\"\"\" Updates the game entity objects. This includes the particles, asteroids and the", "paused or is it game over. \"\"\" INPLAY = 1 PAUSED = 2", "preserved_asteroids = [] preserved_agents = agents reward = 0 for agent in agents:", "(window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\ (window_width + asteroid.radius", "(ship.height * sin(ship.facing - 140))) # Check if the vertices of the ship", "= asteroid.centre_x - v3x c3y = asteroid.centre_y - v3y e3x = v1x -", "v1y e1x = v2x - v1x e1y = v2y - v1y k =", "As in the calculations could be in the Asteroid class and then we", "modifiers) def on_key_release(self, symbol, modifiers): \"\"\" On key release update the actions of", "random.choice([0, window.width]) start_y = random.randint(0, window.height) if start_x == 0: velocity_x = random.randint(1,", "c3y * c3y - k * k) <= asteroid.radius: return True return False", "the entities \"\"\" if self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents, reward = \\", "/ length if k < length: if sqrt(c1x * c1x + c1y *", "= c2x * e2x + c2y * e2y if k > 0: length", "Updates the game entity objects. 
This includes the particles, asteroids and the agents", "3) else: start_x = random.randint(0, window.width) start_y = random.choice([0, window.height]) if start_y ==", "Handles the updating of the environment. \"\"\" def __init__(self, window, agents: List[Agent]): \"\"\"", "> self.level and self.seconds_between_asteroid_generation > 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25", "* c3x + c3y * c3y - k * k) <= asteroid.radius: return", "in asteroids: for agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False", "1 self.state: GameState = GameState.INPLAY self.window_width: int = window.width self.window_height: int = window.height", "= v2y - v1y k = c1x * e1x + c1y * e1y", "game over. \"\"\" INPLAY = 1 PAUSED = 2 OVER = 3 class", "= k / length if k < length: if sqrt(c2x * c2x +", "entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent],", "self.particles, self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points +=", "the list of current particles. \"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates an", "Creates an asteroid. This also seems like it should be in the entity", "< window_width and 0 < particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids,", "e2x + e2y * e2y) k = k / length if k <", "e3y) k = k / length if k < length: if sqrt(c3x *", "\"\"\" Is the game currently running, paused or is it game over. \"\"\"", "in the entity class. 
As in the calculations could be in the Asteroid", "True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in particles: if particle", "- v3y e3x = v1x - v3x e3y = v1y - v3y k", "int(ship.centre_y + (ship.height * sin(ship.facing - 140))) # Check if the vertices of", "False def is_inside(self, x, y, circle): if ((x - circle.centre_x) * (x -", "List, Tuple from time import time from apscheduler.schedulers.background import BackgroundScheduler from game.entities import", "Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire is not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid,", "\"\"\" Update the state of the entities \"\"\" if self.state == GameState.INPLAY: self.particles,", "is Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn()", "False def start(self): \"\"\" Run the game. \"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The", "generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\" Adds a", "2 OVER = 3 class Game: \"\"\" Handles the interaction between the agents", "* e1x + c1y * e1y if k > 0: length = sqrt(e1x", "particles, asteroids (and asteroid creator), state of the game, points and agents. :param", "asteroids: for agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if", "< length: if sqrt(c2x * c2x + c2y * c2y - k *", "release update the actions of the user agents. 
:param symbol: The key release.", "OVER = 3 class Game: \"\"\" Handles the interaction between the agents and", "destroyed_particles = [] preserved_particles = [] preserved_asteroids = [] preserved_agents = agents reward", "[] self.asteroids: List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window),", "asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\ (window_width + asteroid.radius < asteroid.centre_x", "asteroid.centre_x - v3x c3y = asteroid.centre_y - v3y e3x = v1x - v3x", "Calculates the collision detection between the ship and asteroids. \"\"\" # Detection adapted", "c2x + c2y * c2y - k * k) <= asteroid.radius: return True", "v3x c3y = asteroid.centre_y - v3y e3x = v1x - v3x e3y =", "for agent in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers): \"\"\" On key", "Is the game currently running, paused or is it game over. \"\"\" INPLAY", "k) <= asteroid.radius: return True return False def is_inside(self, x, y, circle): if", "< -asteroid.radius) def entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent]) ->", "pyglet import random from enum import Enum from math import cos, sin, sqrt", "self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\" Adds a particle to the list of", "agents reward = 0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent,", "asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\ (window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x", "v3y - v2y k = c2x * e2x + c2y * e2y if", "an asteroid. 
This also seems like it should be in the entity class.", "False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True for particle in particles: if", "\"\"\" On key release update the actions of the user agents. :param symbol:", "Agent, Action key = pyglet.window.key class GameState(Enum): \"\"\" Is the game currently running,", "for agent in self.agents: agent.draw() for asteroid in self.asteroids: asteroid.draw() for particle in", "Tuple from time import time from apscheduler.schedulers.background import BackgroundScheduler from game.entities import Asteroid,", "agents, particles, asteroids (and asteroid creator), state of the game, points and agents.", "window.height]) if start_y == 0: velocity_y = random.randint(1, 3) else: velocity_y = random.randint(-3,", "asteroid.centre_y - v2y e2x = v3x - v2x e2y = v3y - v2y", "(ship.height * sin(ship.facing + 140))) v3x = int(ship.centre_x + (ship.height * cos(ship.facing -", "self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self,", "c1y = asteroid.centre_y - v1y e1x = v2x - v1x e1y = v2y", "particle in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1 destroyed_asteroid = True", "the game when the player dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self,", "sqrt from typing import List, Tuple from time import time from apscheduler.schedulers.background import", "ship.height * cos(ship.facing))) v1y = int(ship.centre_y + (2 * ship.height * sin(ship.facing))) v2x", "window_width, window_height): \"\"\" Calculates if an asteroid is visible. \"\"\" return (window_height +", "(y - circle.centre_y) <= circle.radius * circle.radius): return True else: return False def", "and vice versa. 
\"\"\" if self.state is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator')", "= v3x - v2x e2y = v3y - v2y k = c2x *", "random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def out_of_window(self, asteroid, window_width, window_height): \"\"\"", "asteroid.draw() for particle in self.particles: particle.draw() def update(self): \"\"\" Update the state of", "agent in self.agents: agent.draw() for asteroid in self.asteroids: asteroid.draw() for particle in self.particles:", "- circle.centre_x) * (x - circle.centre_x) + (y - circle.centre_y) * (y -", "agent in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers): \"\"\" On key release", "elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire = agent_ship.fire() if", "end of the game when the player dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER", "and agents. :param window: The window to create the entities on. \"\"\" self.window", "k > 0: length = sqrt(e3x * e3x + e3y * e3y) k", "not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in particles: if particle not in destroyed_particles", "def intersecting_ship(self, asteroid, ship): \"\"\" Calculates the collision detection between the ship and", "= pyglet.window.key class GameState(Enum): \"\"\" Is the game currently running, paused or is", "/= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\" Sets the", "= random.choice([0, window.height]) if start_y == 0: velocity_y = random.randint(1, 3) else: velocity_y", "the agents and the environment. Handles the updating of the environment. \"\"\" def", "\"\"\" On key presses update the actions of the user agents. 
:param symbol:", "particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self, agent: Agent, decision: Action):", "self.particles, self.asteroids, self.agents) self.points += reward if not self.agents: self.game_over() if self.points /", "asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return True # Check if circle center inside", "asteroid): return True # Check if circle center inside the ship if ((v2y", "- v3x) - (v1x - v3x)*(asteroid.centre_y - v3x)) >= 0: return True #", "v2x e2y = v3y - v2y k = c2x * e2x + c2y", "if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1 destroyed_asteroid = True destroyed_particles.append(particle) if not", "self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\" Sets the game state", "asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid],", "order they are given. :param agent: The agent that is carrying out the", "- v3x)*(asteroid.centre_y - v3x)) >= 0: return True # Check if edges intersect", "presses update the actions of the user agents. :param symbol: The key pressed.", "return True else: return False def start(self): \"\"\" Run the game. \"\"\" self.asteroid_creator.start()", "the game, points and agents. :param window: The window to create the entities", "v2x c2y = asteroid.centre_y - v2y e2x = v3x - v2x e2y =", "int = 0 def draw(self): \"\"\" Draws the entities. \"\"\" for agent in", "game currently running, paused or is it game over. \"\"\" INPLAY = 1", "agent.decide()) agent.get_ship().update() for asteroid in asteroids: for agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()):", "are given. 
:param agent: The agent that is carrying out the action :param", "def is_inside(self, x, y, circle): if ((x - circle.centre_x) * (x - circle.centre_x)", "in self.agents: agent.draw() for asteroid in self.asteroids: asteroid.draw() for particle in self.particles: particle.draw()", "self.state: GameState = GameState.INPLAY self.window_width: int = window.width self.window_height: int = window.height self.points:", "the entities. \"\"\" for agent in self.agents: agent.draw() for asteroid in self.asteroids: asteroid.draw()", "add_particle(self, particle): \"\"\" Adds a particle to the list of current particles. \"\"\"", "particles: if particle not in destroyed_particles and\\ 0 < particle.centre_x < window_width and", "the agent in the order they are given. :param agent: The agent that", "5 > self.level and self.seconds_between_asteroid_generation > 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /=", "k = c3x * e3x + c3y * e3y if k > 0:", "e3x + c3y * e3y if k > 0: length = sqrt(e3x *", "* (x - circle.centre_x) + (y - circle.centre_y) * (y - circle.centre_y) <=", "in self.particles: particle.draw() def update(self): \"\"\" Update the state of the entities \"\"\"", "the actions of the user agents. :param symbol: The key release. 
:param modifiers:", "= window.width self.window_height: int = window.height self.points: int = 0 def draw(self): \"\"\"", "window.width self.window_height: int = window.height self.points: int = 0 def draw(self): \"\"\" Draws", "self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in asteroids: for agent in agents: if self.intersecting_ship(asteroid,", "from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2 * ship.height * cos(ship.facing))) v1y =", "[] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator')", "entities \"\"\" if self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width,", "- v3x e3y = v1y - v3y k = c3x * e3x +", "self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\" On key presses update", "== GameState.INPLAY: self.particles, self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents)", "Check if edges intersect circle # First edge c1x = asteroid.centre_x - v1x", "agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire is not None:", "v2x)*(asteroid.centre_y - v2y)) >= 0 and \\ ((v1y - v3y)*(asteroid.centre_x - v3x) -", "0: length = sqrt(e3x * e3x + e3y * e3y) k = k", "INPLAY to PAUSED and vice versa. \"\"\" if self.state is GameState.INPLAY: self.state =", "window): \"\"\" Creates an asteroid. 
This also seems like it should be in", "+ c2y * c2y - k * k) <= asteroid.radius: return True #", "edge c3x = asteroid.centre_x - v3x c3y = asteroid.centre_y - v3y e3x =", "self.points / 5 > self.level and self.seconds_between_asteroid_generation > 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs()", "asteroid is visible. \"\"\" return (window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y <", "= False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True for particle in particles:", "= k / length if k < length: if sqrt(c3x * c3x +", "0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for", "v2y k = c2x * e2x + c2y * e2y if k >", "self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True for particle in particles: if self.is_inside(particle.centre_x, particle.centre_y,", "self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\" Calculates the collision detection between the ship", "ship and asteroids. \"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x +", "[] preserved_particles = [] preserved_asteroids = [] preserved_agents = agents reward = 0", "particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1 destroyed_asteroid = True destroyed_particles.append(particle) if", "> 0: length = sqrt(e2x * e2x + e2y * e2y) k =", "sqrt(e2x * e2x + e2y * e2y) k = k / length if", "e2x + c2y * e2y if k > 0: length = sqrt(e2x *", "v1y - v3y k = c3x * e3x + c3y * e3y if", "for asteroid in asteroids: for agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid", "asteroid.centre_x - v2x c2y = asteroid.centre_y - v2y e2x = v3x - v2x", "creator), state of the game, points and agents. 
:param window: The window to", "velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def out_of_window(self, asteroid, window_width,", "intersect circle # First edge c1x = asteroid.centre_x - v1x c1y = asteroid.centre_y", "also seems like it should be in the entity class. As in the", "- v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y - v3x)) >= 0: return", "asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width, window_height, particles: List[Particle],", "calculations could be in the Asteroid class and then we just call here", "the ship are intersecting the asteroid if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y,", "int(ship.centre_y + (2 * ship.height * sin(ship.facing))) v2x = int(ship.centre_x + (ship.height *", "v2x = int(ship.centre_x + (ship.height * cos(ship.facing + 140))) v2y = int(ship.centre_y +", "-1) velocity_y = random.randint(-3, 3) else: start_x = random.randint(0, window.width) start_y = random.choice([0,", "(v3x - v2x)*(asteroid.centre_y - v2y)) >= 0 and \\ ((v1y - v3y)*(asteroid.centre_x -", "self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\"", "agent_ship.fire() if cannon_fire is not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\" Calculates", "cos(ship.facing))) v1y = int(ship.centre_y + (2 * ship.height * sin(ship.facing))) v2x = int(ship.centre_x", "to PAUSED and vice versa. \"\"\" if self.state is GameState.INPLAY: self.state = GameState.PAUSED", "v3y e3x = v1x - v3x e3y = v1y - v3y k =", "\"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates an asteroid. 
This also seems like", "self.asteroids, self.agents) self.points += reward if not self.agents: self.game_over() if self.points / 5", "seems like it should be in the entity class. As in the calculations", "if k < length: if sqrt(c1x * c1x + c1y * c1y -", "e2y = v3y - v2y k = c2x * e2x + c2y *", "id='asteroid generator') self.level = 1 self.state: GameState = GameState.INPLAY self.window_width: int = window.width", "# First edge c1x = asteroid.centre_x - v1x c1y = asteroid.centre_y - v1y", "'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\" Sets the game state from INPLAY", "class GameState(Enum): \"\"\" Is the game currently running, paused or is it game", "Run the game. \"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The end of the game", "- v1x)*(asteroid.centre_y - v1y)) >= 0 and \\ ((v3y - v2y)*(asteroid.centre_x - v2x)", "* e1y if k > 0: length = sqrt(e1x * e1x + e1y", "game when the player dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self, symbol,", "from apscheduler.schedulers.background import BackgroundScheduler from game.entities import Asteroid, Particle from game.agent import Agent,", "for agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid,", "+ e1y * e1y) k = k / length if k < length:", "- circle.centre_y) <= circle.radius * circle.radius): return True else: return False def start(self):", "start_x = random.choice([0, window.width]) start_y = random.randint(0, window.height) if start_x == 0: velocity_x", "import time from apscheduler.schedulers.background import BackgroundScheduler from game.entities import Asteroid, Particle from game.agent", "v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return True #", "if particle not in destroyed_particles and\\ 0 < particle.centre_x 
< window_width and 0", "k = c2x * e2x + c2y * e2y if k > 0:", "agent_ship = agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left()", "reward if not self.agents: self.game_over() if self.points / 5 > self.level and self.seconds_between_asteroid_generation", "ship): \"\"\" Calculates the collision detection between the ship and asteroids. \"\"\" #", "asteroid.radius: return True return False def is_inside(self, x, y, circle): if ((x -", "v1x e1y = v2y - v1y k = c1x * e1x + c1y", "symbol, modifiers): \"\"\" On key release update the actions of the user agents.", "((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y - v2y)) >= 0", "ship are intersecting the asteroid if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid)", "key release update the actions of the user agents. :param symbol: The key", "the game entity objects. This includes the particles, asteroids and the agents ships.", "id='asteroid generator') def pause_toggle(self): \"\"\" Sets the game state from INPLAY to PAUSED", "window_width and 0 < particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents,", "if decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left() elif decision is", "0: length = sqrt(e1x * e1x + e1y * e1y) k = k", "state from INPLAY to PAUSED and vice versa. 
\"\"\" if self.state is GameState.INPLAY:", "= random.randint(-3, -1) velocity_y = random.randint(-3, 3) else: start_x = random.randint(0, window.width) start_y", "destroyed_asteroid = True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in particles:", "\"\"\" Enact the decisions made by the agent in the order they are", "< length: if sqrt(c1x * c1x + c1y * c1y - k *", "List[Agent], int]: \"\"\" Updates the game entity objects. This includes the particles, asteroids", "to enact. \"\"\" agent_ship = agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision", "- (v2x - v1x)*(asteroid.centre_y - v1y)) >= 0 and \\ ((v3y - v2y)*(asteroid.centre_x", "\"\"\" return (window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\ (window_width", "c3x = asteroid.centre_x - v3x c3y = asteroid.centre_y - v3y e3x = v1x", "-asteroid.radius) def entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle],", "This includes the particles, asteroids and the agents ships. \"\"\" destroyed_particles = []", "+ c3y * e3y if k > 0: length = sqrt(e3x * e3x", "* e3x + e3y * e3y) k = k / length if k", "e1y if k > 0: length = sqrt(e1x * e1x + e1y *", "c2y - k * k) <= asteroid.radius: return True # Third edge c3x", "not self.agents: self.game_over() if self.points / 5 > self.level and self.seconds_between_asteroid_generation > 0.01:", "return True # Check if circle center inside the ship if ((v2y -", "it game over. 
\"\"\" INPLAY = 1 PAUSED = 2 OVER = 3", "1) == 0: start_x = random.choice([0, window.width]) start_y = random.randint(0, window.height) if start_x", "self.asteroids: List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval',", "or\\ (window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width,", "for particle in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1 destroyed_asteroid =", "enact. \"\"\" agent_ship = agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision is", "decision is Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST:", "detection between the ship and asteroids. \"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x", "v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y - v2y)) >= 0 and \\", "= 0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update()", "< length: if sqrt(c3x * c3x + c3y * c3y - k *", "class Game: \"\"\" Handles the interaction between the agents and the environment. Handles", "((x - circle.centre_x) * (x - circle.centre_x) + (y - circle.centre_y) * (y", "0: start_x = random.choice([0, window.width]) start_y = random.randint(0, window.height) if start_x == 0:", "asteroid.centre_y - v3y e3x = v1x - v3x e3y = v1y - v3y", "math import cos, sin, sqrt from typing import List, Tuple from time import", "for particle in self.particles: particle.draw() def update(self): \"\"\" Update the state of the", "the Asteroid class and then we just call here asteroid.generate(). 
\"\"\" if random.randint(0,", "self.asteroids: asteroid.draw() for particle in self.particles: particle.draw() def update(self): \"\"\" Update the state", "+ (ship.height * cos(ship.facing + 140))) v2y = int(ship.centre_y + (ship.height * sin(ship.facing", "140))) v2y = int(ship.centre_y + (ship.height * sin(ship.facing + 140))) v3x = int(ship.centre_x", "= random.choice([0, window.width]) start_y = random.randint(0, window.height) if start_x == 0: velocity_x =", "that is carrying out the action :param decision: The action to enact. \"\"\"", "\"\"\" Draws the entities. \"\"\" for agent in self.agents: agent.draw() for asteroid in", "0: velocity_y = random.randint(1, 3) else: velocity_y = random.randint(-3, -1) velocity_x = random.randint(-3,", "self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward += 1 destroyed_asteroid = True destroyed_particles.append(particle) if not destroyed_asteroid:", "edge c1x = asteroid.centre_x - v1x c1y = asteroid.centre_y - v1y e1x =", "\"\"\" def __init__(self, window, agents: List[Agent]): \"\"\" Initialise the agents, particles, asteroids (and", "* ship.height * sin(ship.facing))) v2x = int(ship.centre_x + (ship.height * cos(ship.facing + 140)))", "destroyed_asteroid = False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True for particle in", "particle): \"\"\" Adds a particle to the list of current particles. 
\"\"\" self.particles.append(particle)", "< asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width, window_height, particles: List[Particle], asteroids:", "v2y = int(ship.centre_y + (ship.height * sin(ship.facing + 140))) v3x = int(ship.centre_x +", "- v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y - v1y)) >= 0 and", "c1y - k * k) <= asteroid.radius: return True # Second edge c2x", "c3x * e3x + c3y * e3y if k > 0: length =", "in the calculations could be in the Asteroid class and then we just", "Second edge c2x = asteroid.centre_x - v2x c2y = asteroid.centre_y - v2y e2x", "(x - circle.centre_x) + (y - circle.centre_y) * (y - circle.centre_y) <= circle.radius", "import BackgroundScheduler from game.entities import Asteroid, Particle from game.agent import Agent, Action key", "random.randint(-3, 3) else: start_x = random.randint(0, window.width) start_y = random.choice([0, window.height]) if start_y", "asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates the game", "+= 1 destroyed_asteroid = True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle", "length = sqrt(e1x * e1x + e1y * e1y) k = k /", "list of current particles. 
\"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates an asteroid.", "+= 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def", "v1y)) >= 0 and \\ ((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x -", "self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1 self.state: GameState = GameState.INPLAY self.window_width:", "velocity_y = random.randint(-3, -1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15))", "is it game over. \"\"\" INPLAY = 1 PAUSED = 2 OVER =", "window_height): destroyed_asteroid = True for particle in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward", "if edges intersect circle # First edge c1x = asteroid.centre_x - v1x c1y", "* k) <= asteroid.radius: return True return False def is_inside(self, x, y, circle):", "True # Second edge c2x = asteroid.centre_x - v2x c2y = asteroid.centre_y -", "sqrt(c2x * c2x + c2y * c2y - k * k) <= asteroid.radius:", "True else: return False def start(self): \"\"\" Run the game. \"\"\" self.asteroid_creator.start() def", "- k * k) <= asteroid.radius: return True # Second edge c2x =", "\"\"\" Creates an asteroid. This also seems like it should be in the", "if self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width, self.window_height, self.particles,", "actions of the user agents. :param symbol: The key release. 
:param modifiers: ?", "* e2x + c2y * e2y if k > 0: length = sqrt(e2x", "GameState.INPLAY: self.particles, self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points", "k = k / length if k < length: if sqrt(c3x * c3x", "random.randint(-3, -1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def out_of_window(self,", "window_width, window_height, particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]:", "class and then we just call here asteroid.generate(). \"\"\" if random.randint(0, 1) ==", "particles, asteroids and the agents ships. \"\"\" destroyed_particles = [] preserved_particles = []", "destroyed_particles and\\ 0 < particle.centre_x < window_width and 0 < particle.centre_y < window_height:", "First edge c1x = asteroid.centre_x - v1x c1y = asteroid.centre_y - v1y e1x", "* e2y) k = k / length if k < length: if sqrt(c2x", "= v1x - v3x e3y = v1y - v3y k = c3x *", "= asteroid.centre_x - v2x c2y = asteroid.centre_y - v2y e2x = v3x -", "= v1y - v3y k = c3x * e3x + c3y * e3y", "else: return False def start(self): \"\"\" Run the game. \"\"\" self.asteroid_creator.start() def game_over(self):", "\"\"\" INPLAY = 1 PAUSED = 2 OVER = 3 class Game: \"\"\"", "start_x = random.randint(0, window.width) start_y = random.choice([0, window.height]) if start_y == 0: velocity_y", "start_y = random.choice([0, window.height]) if start_y == 0: velocity_y = random.randint(1, 3) else:", "\"\"\" Adds a particle to the list of current particles. 
\"\"\" self.particles.append(particle) def", "-1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def out_of_window(self, asteroid,", "= asteroid.centre_y - v3y e3x = v1x - v3x e3y = v1y -", "reward def enact_decision(self, agent: Agent, decision: Action): \"\"\" Enact the decisions made by", "edge c2x = asteroid.centre_x - v2x c2y = asteroid.centre_y - v2y e2x =", "sin(ship.facing + 140))) v3x = int(ship.centre_x + (ship.height * cos(ship.facing - 140))) v3y", "= random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def out_of_window(self, asteroid, window_width, window_height):", "game. \"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The end of the game when the", "< asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\ (window_width + asteroid.radius < asteroid.centre_x or", "v3y k = c3x * e3x + c3y * e3y if k >", "self.agents: agent.draw() for asteroid in self.asteroids: asteroid.draw() for particle in self.particles: particle.draw() def", ":param agent: The agent that is carrying out the action :param decision: The", "update(self): \"\"\" Update the state of the entities \"\"\" if self.state == GameState.INPLAY:", "v1x)*(asteroid.centre_y - v1y)) >= 0 and \\ ((v3y - v2y)*(asteroid.centre_x - v2x) -", "entity class. As in the calculations could be in the Asteroid class and", "currently running, paused or is it game over. \"\"\" INPLAY = 1 PAUSED", "v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return True # Check if circle center", "the game currently running, paused or is it game over. 
\"\"\" INPLAY =", "(v2x - v1x)*(asteroid.centre_y - v1y)) >= 0 and \\ ((v3y - v2y)*(asteroid.centre_x -", "self.particles: particle.draw() def update(self): \"\"\" Update the state of the entities \"\"\" if", "+ (2 * ship.height * cos(ship.facing))) v1y = int(ship.centre_y + (2 * ship.height", "* sin(ship.facing))) v2x = int(ship.centre_x + (ship.height * cos(ship.facing + 140))) v2y =", "window_width, window_height): destroyed_asteroid = True for particle in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid):", "import cos, sin, sqrt from typing import List, Tuple from time import time", "entity objects. This includes the particles, asteroids and the agents ships. \"\"\" destroyed_particles", "c3y - k * k) <= asteroid.radius: return True return False def is_inside(self,", "Agent, decision: Action): \"\"\" Enact the decisions made by the agent in the", "c2y = asteroid.centre_y - v2y e2x = v3x - v2x e2y = v3y", "state of the game, points and agents. :param window: The window to create", "k = c1x * e1x + c1y * e1y if k > 0:", "* c1y - k * k) <= asteroid.radius: return True # Second edge", "e2y * e2y) k = k / length if k < length: if", "(window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width, window_height,", "- v3x c3y = asteroid.centre_y - v3y e3x = v1x - v3x e3y", "if sqrt(c2x * c2x + c2y * c2y - k * k) <=", "import random from enum import Enum from math import cos, sin, sqrt from", ":param decision: The action to enact. \"\"\" agent_ship = agent.get_ship() if decision is", "agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers): \"\"\" On key release update the actions", "call here asteroid.generate(). 
\"\"\" if random.randint(0, 1) == 0: start_x = random.choice([0, window.width])", "particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self, agent:", "+ asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\ (window_width + asteroid.radius <", "def __init__(self, window, agents: List[Agent]): \"\"\" Initialise the agents, particles, asteroids (and asteroid", "+ c1y * e1y if k > 0: length = sqrt(e1x * e1x", "def out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates if an asteroid is visible. \"\"\"", "= random.randint(0, window.height) if start_x == 0: velocity_x = random.randint(1, 3) else: velocity_x", "modifiers): \"\"\" On key release update the actions of the user agents. :param", "= True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in particles: if", "the agents ships. \"\"\" destroyed_particles = [] preserved_particles = [] preserved_asteroids = []", "The end of the game when the player dies. 
\"\"\" self.asteroid_creator.pause() self.state =", "= int(ship.centre_x + (ship.height * cos(ship.facing + 140))) v2y = int(ship.centre_y + (ship.height", "\\ ((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y - v3x)) >=", "self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level", "1 destroyed_asteroid = True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in", "import pyglet import random from enum import Enum from math import cos, sin,", "if cannon_fire is not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\" Calculates the", "k) <= asteroid.radius: return True # Third edge c3x = asteroid.centre_x - v3x", "in the Asteroid class and then we just call here asteroid.generate(). \"\"\" if", "self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1 self.state: GameState = GameState.INPLAY", "seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1 self.state: GameState = GameState.INPLAY self.window_width: int =", "e3x + e3y * e3y) k = k / length if k <", "<= circle.radius * circle.radius): return True else: return False def start(self): \"\"\" Run", "asteroid in self.asteroids: asteroid.draw() for particle in self.particles: particle.draw() def update(self): \"\"\" Update", "particle.draw() def update(self): \"\"\" Update the state of the entities \"\"\" if self.state", "pressed. :param modifiers: ? \"\"\" for agent in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self,", "sqrt(e1x * e1x + e1y * e1y) k = k / length if", "\"\"\" Run the game. 
\"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The end of the", "k < length: if sqrt(c3x * c3x + c3y * c3y - k", "given. :param agent: The agent that is carrying out the action :param decision:", "e3y = v1y - v3y k = c3x * e3x + c3y *", "15)) def out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates if an asteroid is visible.", "k / length if k < length: if sqrt(c1x * c1x + c1y", "= int(ship.centre_x + (ship.height * cos(ship.facing - 140))) v3y = int(ship.centre_y + (ship.height", "should be in the entity class. As in the calculations could be in", "asteroid in asteroids: for agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid =", "c2y * c2y - k * k) <= asteroid.radius: return True # Third", "0 def draw(self): \"\"\" Draws the entities. \"\"\" for agent in self.agents: agent.draw()", "= random.randint(1, 3) else: velocity_y = random.randint(-3, -1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x,", "start_y == 0: velocity_y = random.randint(1, 3) else: velocity_y = random.randint(-3, -1) velocity_x", "< -asteroid.radius) or\\ (window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def", "particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in asteroids: for agent in", "- v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y - v2y)) >= 0 and", "velocity_x = random.randint(1, 3) else: velocity_x = random.randint(-3, -1) velocity_y = random.randint(-3, 3)", "return False def is_inside(self, x, y, circle): if ((x - circle.centre_x) * (x", "agents. :param window: The window to create the entities on. 
\"\"\" self.window =", "state of the entities \"\"\" if self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents, reward", "on_key_press(self, symbol, modifiers): \"\"\" On key presses update the actions of the user", "elif decision is Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire is not None: self.particles.append(cannon_fire)", "length = sqrt(e3x * e3x + e3y * e3y) k = k /", "= [] preserved_particles = [] preserved_asteroids = [] preserved_agents = agents reward =", "return preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self, agent: Agent, decision: Action): \"\"\" Enact", "between the ship and asteroids. \"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x =", "\"\"\" if random.randint(0, 1) == 0: start_x = random.choice([0, window.width]) start_y = random.randint(0,", "- v1y)) >= 0 and \\ ((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x", "agents: List[Agent]): \"\"\" Initialise the agents, particles, asteroids (and asteroid creator), state of", "0 < particle.centre_x < window_width and 0 < particle.centre_y < window_height: particle.update() preserved_particles.append(particle)", "= GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle):", "0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid", "like it should be in the entity class. 
As in the calculations could", "k / length if k < length: if sqrt(c2x * c2x + c2y", "= [] preserved_agents = agents reward = 0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(),", "k < length: if sqrt(c1x * c1x + c1y * c1y - k", "True # Third edge c3x = asteroid.centre_x - v3x c3y = asteroid.centre_y -", "= v2x - v1x e1y = v2y - v1y k = c1x *", "* e3x + c3y * e3y if k > 0: length = sqrt(e3x", "modifiers): \"\"\" On key presses update the actions of the user agents. :param", "e1x + c1y * e1y if k > 0: length = sqrt(e1x *", "v3x - v2x e2y = v3y - v2y k = c2x * e2x", "def draw(self): \"\"\" Draws the entities. \"\"\" for agent in self.agents: agent.draw() for", "(2 * ship.height * sin(ship.facing))) v2x = int(ship.centre_x + (ship.height * cos(ship.facing +", "* cos(ship.facing + 140))) v2y = int(ship.centre_y + (ship.height * sin(ship.facing + 140)))", "v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y - v3x)) >= 0: return True", "= GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\" On key presses update the actions", "is Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost()", "length: if sqrt(c2x * c2x + c2y * c2y - k * k)", "particle.centre_y, asteroid): reward += 1 destroyed_asteroid = True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid)", "cos, sin, sqrt from typing import List, Tuple from time import time from", "agents self.particles: List[Particle] = [] self.asteroids: List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation", "if k > 0: length = sqrt(e1x * e1x + e1y * e1y)", "circle.centre_x) + (y - circle.centre_y) * (y - circle.centre_y) <= circle.radius * circle.radius):", "GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\" Adds a particle to 
the list", "modifiers: ? \"\"\" for agent in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers):", "are intersecting the asteroid if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\", "self.state is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid", "self.window_height: int = window.height self.points: int = 0 def draw(self): \"\"\" Draws the", "# Third edge c3x = asteroid.centre_x - v3x c3y = asteroid.centre_y - v3y", "for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid", "the order they are given. :param agent: The agent that is carrying out", "from math import cos, sin, sqrt from typing import List, Tuple from time", "agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates the game entity objects.", "+ c1y * c1y - k * k) <= asteroid.radius: return True #", "circle.centre_y) <= circle.radius * circle.radius): return True else: return False def start(self): \"\"\"", "agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision", "\"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\" On key presses", "the calculations could be in the Asteroid class and then we just call", "List[Particle], asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates the", "- circle.centre_x) + (y - circle.centre_y) * (y - circle.centre_y) <= circle.radius *", "((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y - v1y)) >= 0", "window.height) if start_x == 0: velocity_x = 
random.randint(1, 3) else: velocity_x = random.randint(-3,", "of the game, points and agents. :param window: The window to create the", "* c1x + c1y * c1y - k * k) <= asteroid.radius: return", "if ((x - circle.centre_x) * (x - circle.centre_x) + (y - circle.centre_y) *", "actions of the user agents. :param symbol: The key pressed. :param modifiers: ?", "typing import List, Tuple from time import time from apscheduler.schedulers.background import BackgroundScheduler from", "Draws the entities. \"\"\" for agent in self.agents: agent.draw() for asteroid in self.asteroids:", "self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return True # Check if circle", "\"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The end of the game when the player", "= [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid", "sqrt(e3x * e3x + e3y * e3y) k = k / length if", "= [] self.asteroids: List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda:", "start_x == 0: velocity_x = random.randint(1, 3) else: velocity_x = random.randint(-3, -1) velocity_y", "e1y * e1y) k = k / length if k < length: if", "agents. :param symbol: The key release. :param modifiers: ? \"\"\" for agent in", "== 0: velocity_x = random.randint(1, 3) else: velocity_x = random.randint(-3, -1) velocity_y =", "GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def", "particles. \"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates an asteroid. 
This also seems", "time import time from apscheduler.schedulers.background import BackgroundScheduler from game.entities import Asteroid, Particle from", "includes the particles, asteroids and the agents ships. \"\"\" destroyed_particles = [] preserved_particles", "cannon_fire is not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\" Calculates the collision", "out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates if an asteroid is visible. \"\"\" return", "\"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2 * ship.height", "here asteroid.generate(). \"\"\" if random.randint(0, 1) == 0: start_x = random.choice([0, window.width]) start_y", "sqrt(c1x * c1x + c1y * c1y - k * k) <= asteroid.radius:", "if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid =", "points and agents. :param window: The window to create the entities on. \"\"\"", "e2y) k = k / length if k < length: if sqrt(c2x *", "* sin(ship.facing - 140))) # Check if the vertices of the ship are", "when the player dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self, symbol, modifiers):", "\"\"\" Calculates if an asteroid is visible. \"\"\" return (window_height + asteroid.radius <", ":param symbol: The key pressed. :param modifiers: ? \"\"\" for agent in self.agents:", "List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates the game entity objects. This", "of the entities \"\"\" if self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents, reward =", "+ (ship.height * sin(ship.facing + 140))) v3x = int(ship.centre_x + (ship.height * cos(ship.facing", "it should be in the entity class. 
As in the calculations could be", "vertices of the ship are intersecting the asteroid if self.is_inside(v1x, v1y, asteroid) or\\", "= \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points += reward if not self.agents:", "BackgroundScheduler from game.entities import Asteroid, Particle from game.agent import Agent, Action key =", "preserved_particles = [] preserved_asteroids = [] preserved_agents = agents reward = 0 for", "visible. \"\"\" return (window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\", "agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in", "edges intersect circle # First edge c1x = asteroid.centre_x - v1x c1y =", "- v3y k = c3x * e3x + c3y * e3y if k", "if sqrt(c3x * c3x + c3y * c3y - k * k) <=", "circle.centre_x) * (x - circle.centre_x) + (y - circle.centre_y) * (y - circle.centre_y)", "= agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left() elif", "and then we just call here asteroid.generate(). \"\"\" if random.randint(0, 1) == 0:", "window.width]) start_y = random.randint(0, window.height) if start_x == 0: velocity_x = random.randint(1, 3)", "[] preserved_asteroids = [] preserved_agents = agents reward = 0 for agent in", "agents ships. 
\"\"\" destroyed_particles = [] preserved_particles = [] preserved_asteroids = [] preserved_agents", "GameState = GameState.INPLAY self.window_width: int = window.width self.window_height: int = window.height self.points: int", "is Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire is not None: self.particles.append(cannon_fire) def intersecting_ship(self,", "INPLAY = 1 PAUSED = 2 OVER = 3 class Game: \"\"\" Handles", "Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates the game entity objects. This includes the", "int(ship.centre_x + (ship.height * cos(ship.facing - 140))) v3y = int(ship.centre_y + (ship.height *", "* e2y if k > 0: length = sqrt(e2x * e2x + e2y", "self.seconds_between_asteroid_generation > 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval',", "* cos(ship.facing))) v1y = int(ship.centre_y + (2 * ship.height * sin(ship.facing))) v2x =", "- k * k) <= asteroid.radius: return True return False def is_inside(self, x,", "= agents self.particles: List[Particle] = [] self.asteroids: List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler()", "between the agents and the environment. Handles the updating of the environment. \"\"\"", "def update(self): \"\"\" Update the state of the entities \"\"\" if self.state ==", "elif decision is Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn() elif decision is", "* e3y) k = k / length if k < length: if sqrt(c3x", "self.agents: List[Agent] = agents self.particles: List[Particle] = [] self.asteroids: List[Asteroid] = [] self.asteroid_creator", "if an asteroid is visible. 
\"\"\" return (window_height + asteroid.radius < asteroid.centre_y or", "agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in asteroids: for", "velocity_y = random.randint(-3, 3) else: start_x = random.randint(0, window.width) start_y = random.choice([0, window.height])", "and \\ ((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y - v2y))", "Update the state of the entities \"\"\" if self.state == GameState.INPLAY: self.particles, self.asteroids,", "Asteroid, Particle from game.agent import Agent, Action key = pyglet.window.key class GameState(Enum): \"\"\"", "0 and \\ ((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y -", "pause_toggle(self): \"\"\" Sets the game state from INPLAY to PAUSED and vice versa.", "elif decision is Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost() elif decision is", "e3y if k > 0: length = sqrt(e3x * e3x + e3y *", "k) <= asteroid.radius: return True # Second edge c2x = asteroid.centre_x - v2x", "from INPLAY to PAUSED and vice versa. \"\"\" if self.state is GameState.INPLAY: self.state", ">= 0 and \\ ((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y", "the entity class. 
As in the calculations could be in the Asteroid class", ">= 0: return True # Check if edges intersect circle # First edge", "= c3x * e3x + c3y * e3y if k > 0: length", "def on_key_release(self, symbol, modifiers): \"\"\" On key release update the actions of the", "BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1", "* circle.radius): return True else: return False def start(self): \"\"\" Run the game.", "http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2 * ship.height * cos(ship.facing))) v1y = int(ship.centre_y", "the asteroid if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y,", "v2x - v1x e1y = v2y - v1y k = c1x * e1x", "window.height self.points: int = 0 def draw(self): \"\"\" Draws the entities. \"\"\" for", "self.game_over() if self.points / 5 > self.level and self.seconds_between_asteroid_generation > 0.01: self.level +=", "game state from INPLAY to PAUSED and vice versa. \"\"\" if self.state is", "List[Asteroid], List[Agent], int]: \"\"\" Updates the game entity objects. 
This includes the particles,", "particle.centre_x < window_width and 0 < particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return preserved_particles,", "+ (ship.height * sin(ship.facing - 140))) # Check if the vertices of the", "sin(ship.facing - 140))) # Check if the vertices of the ship are intersecting", "reward += 1 destroyed_asteroid = True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for", "e2x = v3x - v2x e2y = v3y - v2y k = c2x", "v1x c1y = asteroid.centre_y - v1y e1x = v2x - v1x e1y =", "start_y = random.randint(0, window.height) if start_x == 0: velocity_x = random.randint(1, 3) else:", "asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in asteroids: for agent in agents:", "self.is_inside(v3x, v3y, asteroid): return True # Check if circle center inside the ship", "cannon_fire = agent_ship.fire() if cannon_fire is not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship):", "objects. This includes the particles, asteroids and the agents ships. \"\"\" destroyed_particles =", "- v2y e2x = v3x - v2x e2y = v3y - v2y k", "(and asteroid creator), state of the game, points and agents. 
:param window: The", "= 3 class Game: \"\"\" Handles the interaction between the agents and the", "= 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1 self.state: GameState", "List[Agent] = agents self.particles: List[Particle] = [] self.asteroids: List[Asteroid] = [] self.asteroid_creator =", "0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1 self.state: GameState =", "= 1 self.state: GameState = GameState.INPLAY self.window_width: int = window.width self.window_height: int =", "if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in particles: if particle not in", "on_key_release(self, symbol, modifiers): \"\"\" On key release update the actions of the user", "def enact_decision(self, agent: Agent, decision: Action): \"\"\" Enact the decisions made by the", "game entity objects. This includes the particles, asteroids and the agents ships. \"\"\"", "The window to create the entities on. \"\"\" self.window = window self.agents: List[Agent]", "the updating of the environment. \"\"\" def __init__(self, window, agents: List[Agent]): \"\"\" Initialise", "made by the agent in the order they are given. :param agent: The", "/ length if k < length: if sqrt(c3x * c3x + c3y *", "intersecting_ship(self, asteroid, ship): \"\"\" Calculates the collision detection between the ship and asteroids.", "asteroid if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid):", "agents. :param symbol: The key pressed. :param modifiers: ? 
\"\"\" for agent in", "def entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid],", "self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True", "\\ ((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y - v2y)) >=", "be in the Asteroid class and then we just call here asteroid.generate(). \"\"\"", "c1y * e1y if k > 0: length = sqrt(e1x * e1x +", "if k > 0: length = sqrt(e3x * e3x + e3y * e3y)", "3) else: velocity_y = random.randint(-3, -1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x,", "self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates if", "of the ship are intersecting the asteroid if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x,", "def asteroid_generate(self, window): \"\"\" Creates an asteroid. This also seems like it should", "ship if ((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y - v1y))", "x, y, circle): if ((x - circle.centre_x) * (x - circle.centre_x) + (y", "the game. \"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The end of the game when", "user agents. :param symbol: The key release. :param modifiers: ? \"\"\" for agent", "GameState.INPLAY self.window_width: int = window.width self.window_height: int = window.height self.points: int = 0", "On key presses update the actions of the user agents. :param symbol: The", "self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\" Sets the game state from", "self.points: int = 0 def draw(self): \"\"\" Draws the entities. 
\"\"\" for agent", "if self.points / 5 > self.level and self.seconds_between_asteroid_generation > 0.01: self.level += 1", "def add_particle(self, particle): \"\"\" Adds a particle to the list of current particles.", "self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates an asteroid. This also seems like it", "asteroid.update() for particle in particles: if particle not in destroyed_particles and\\ 0 <", "v1x - v3x e3y = v1y - v3y k = c3x * e3x", "\"\"\" for agent in self.agents: agent.draw() for asteroid in self.asteroids: asteroid.draw() for particle", "sin, sqrt from typing import List, Tuple from time import time from apscheduler.schedulers.background", "<reponame>JCKing97/Agents4Asteroids<gh_stars>1-10 import pyglet import random from enum import Enum from math import cos,", "game, points and agents. :param window: The window to create the entities on.", "velocity_y = random.randint(1, 3) else: velocity_y = random.randint(-3, -1) velocity_x = random.randint(-3, 3)", "# Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2 * ship.height *", "the actions of the user agents. :param symbol: The key pressed. :param modifiers:", "on. \"\"\" self.window = window self.agents: List[Agent] = agents self.particles: List[Particle] = []", "= 0 def draw(self): \"\"\" Draws the entities. \"\"\" for agent in self.agents:", "return False def start(self): \"\"\" Run the game. \"\"\" self.asteroid_creator.start() def game_over(self): \"\"\"", "True # Check if circle center inside the ship if ((v2y - v1y)*(asteroid.centre_x", "the ship if ((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y -", "return True # Second edge c2x = asteroid.centre_x - v2x c2y = asteroid.centre_y", "+ e3y * e3y) k = k / length if k < length:", "asteroid.radius: return True # Third edge c3x = asteroid.centre_x - v3x c3y =", "the environment. 
\"\"\" def __init__(self, window, agents: List[Agent]): \"\"\" Initialise the agents, particles,", "environment. \"\"\" def __init__(self, window, agents: List[Agent]): \"\"\" Initialise the agents, particles, asteroids", "140))) # Check if the vertices of the ship are intersecting the asteroid", "Check if circle center inside the ship if ((v2y - v1y)*(asteroid.centre_x - v1x)", "if k > 0: length = sqrt(e2x * e2x + e2y * e2y)", "asteroid.generate(). \"\"\" if random.randint(0, 1) == 0: start_x = random.choice([0, window.width]) start_y =", "a particle to the list of current particles. \"\"\" self.particles.append(particle) def asteroid_generate(self, window):", "dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\" On key", "v3x = int(ship.centre_x + (ship.height * cos(ship.facing - 140))) v3y = int(ship.centre_y +", "= int(ship.centre_y + (ship.height * sin(ship.facing - 140))) # Check if the vertices", "in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid, window_width, window_height):", "asteroid.radius: return True # Second edge c2x = asteroid.centre_x - v2x c2y =", "e3x = v1x - v3x e3y = v1y - v3y k = c3x", "Action key = pyglet.window.key class GameState(Enum): \"\"\" Is the game currently running, paused", "v3y = int(ship.centre_y + (ship.height * sin(ship.facing - 140))) # Check if the", "destroyed_asteroid = True for particle in particles: if self.is_inside(particle.centre_x, particle.centre_y, asteroid): reward +=", "/ length if k < length: if sqrt(c2x * c2x + c2y *", "asteroid, window_width, window_height): \"\"\" Calculates if an asteroid is visible. 
\"\"\" return (window_height", "+ 140))) v3x = int(ship.centre_x + (ship.height * cos(ship.facing - 140))) v3y =", "+ e2y * e2y) k = k / length if k < length:", "* k) <= asteroid.radius: return True # Third edge c3x = asteroid.centre_x -", "* k) <= asteroid.radius: return True # Second edge c2x = asteroid.centre_x -", "== 0: velocity_y = random.randint(1, 3) else: velocity_y = random.randint(-3, -1) velocity_x =", "v3y, asteroid): return True # Check if circle center inside the ship if", "- v1x e1y = v2y - v1y k = c1x * e1x +", "user agents. :param symbol: The key pressed. :param modifiers: ? \"\"\" for agent", "(y - circle.centre_y) * (y - circle.centre_y) <= circle.radius * circle.radius): return True", "if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True for particle in particles: if self.is_inside(particle.centre_x,", "and 0 < particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward", "window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self, agent: Agent, decision:", "1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self):", "agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True for", "symbol, modifiers): \"\"\" On key presses update the actions of the user agents.", "self.window_height, self.particles, self.asteroids, self.agents) self.points += reward if not self.agents: self.game_over() if self.points", "if start_y == 0: velocity_y = random.randint(1, 3) else: velocity_y = 
random.randint(-3, -1)", "agent in the order they are given. :param agent: The agent that is", "c3y * e3y if k > 0: length = sqrt(e3x * e3x +", "Asteroid class and then we just call here asteroid.generate(). \"\"\" if random.randint(0, 1)", "\"\"\" for agent in self.agents: agent.on_key_press(symbol, modifiers) def on_key_release(self, symbol, modifiers): \"\"\" On", "+ c3y * c3y - k * k) <= asteroid.radius: return True return", "reward = 0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide())", "* e1y) k = k / length if k < length: if sqrt(c1x", "preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self, agent: Agent, decision: Action): \"\"\"", "== 0: start_x = random.choice([0, window.width]) start_y = random.randint(0, window.height) if start_x ==", "circle # First edge c1x = asteroid.centre_x - v1x c1y = asteroid.centre_y -", "self.state == GameState.INPLAY: self.particles, self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids,", "cos(ship.facing + 140))) v2y = int(ship.centre_y + (ship.height * sin(ship.facing + 140))) v3x", "self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level = 1 self.state:", "game_over(self): \"\"\" The end of the game when the player dies. \"\"\" self.asteroid_creator.pause()", "\"\"\" agent_ship = agent.get_ship() if decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT:", "vice versa. 
\"\"\" if self.state is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else:", "return True # Check if edges intersect circle # First edge c1x =", "Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif", "just call here asteroid.generate(). \"\"\" if random.randint(0, 1) == 0: start_x = random.choice([0,", "is_inside(self, x, y, circle): if ((x - circle.centre_x) * (x - circle.centre_x) +", "from game.entities import Asteroid, Particle from game.agent import Agent, Action key = pyglet.window.key", "k = k / length if k < length: if sqrt(c2x * c2x", "self.particles: List[Particle] = [] self.asteroids: List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation =", "return True return False def is_inside(self, x, y, circle): if ((x - circle.centre_x)", "of the user agents. :param symbol: The key pressed. :param modifiers: ? \"\"\"", "window to create the entities on. \"\"\" self.window = window self.agents: List[Agent] =", "self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\" Adds a particle to", "is Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn() elif decision is Action.BOOST: agent_ship.boost()", "updating of the environment. \"\"\" def __init__(self, window, agents: List[Agent]): \"\"\" Initialise the", "- 140))) # Check if the vertices of the ship are intersecting the", "asteroid, ship): \"\"\" Calculates the collision detection between the ship and asteroids. 
\"\"\"", "preserved_agents, reward def enact_decision(self, agent: Agent, decision: Action): \"\"\" Enact the decisions made", "if k < length: if sqrt(c2x * c2x + c2y * c2y -", "random.choice([0, window.height]) if start_y == 0: velocity_y = random.randint(1, 3) else: velocity_y =", "symbol: The key pressed. :param modifiers: ? \"\"\" for agent in self.agents: agent.on_key_press(symbol,", "On key release update the actions of the user agents. :param symbol: The", "0: return True # Check if edges intersect circle # First edge c1x", "= asteroid.centre_y - v1y e1x = v2x - v1x e1y = v2y -", "entities on. \"\"\" self.window = window self.agents: List[Agent] = agents self.particles: List[Particle] =", "e1y = v2y - v1y k = c1x * e1x + c1y *", "start_y, velocity_x, velocity_y, 15)) def out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates if an", "The agent that is carrying out the action :param decision: The action to", "player dies. \"\"\" self.asteroid_creator.pause() self.state = GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\" On", "to the list of current particles. 
\"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates", "+ c2y * e2y if k > 0: length = sqrt(e2x * e2x", "decision is Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire is not None: self.particles.append(cannon_fire) def", "v2y)) >= 0 and \\ ((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x -", "import List, Tuple from time import time from apscheduler.schedulers.background import BackgroundScheduler from game.entities", "* (y - circle.centre_y) <= circle.radius * circle.radius): return True else: return False", "import Enum from math import cos, sin, sqrt from typing import List, Tuple", "is Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire", "-asteroid.radius) or\\ (window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius) def entity_update(self,", "decision is Action.TURNRIGHT: agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN:", "in destroyed_particles and\\ 0 < particle.centre_x < window_width and 0 < particle.centre_y <", "if sqrt(c1x * c1x + c1y * c1y - k * k) <=", "length: if sqrt(c3x * c3x + c3y * c3y - k * k)", "or\\ self.is_inside(v3x, v3y, asteroid): return True # Check if circle center inside the", "+ (y - circle.centre_y) * (y - circle.centre_y) <= circle.radius * circle.radius): return", "This also seems like it should be in the entity class. As in", "interaction between the agents and the environment. 
Handles the updating of the environment.", "import Asteroid, Particle from game.agent import Agent, Action key = pyglet.window.key class GameState(Enum):", "v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y - v1y)) >= 0 and \\", "asteroid.centre_y - v1y e1x = v2x - v1x e1y = v2y - v1y", "v3x e3y = v1y - v3y k = c3x * e3x + c3y", "= random.randint(0, window.width) start_y = random.choice([0, window.height]) if start_y == 0: velocity_y =", "\"\"\" Handles the interaction between the agents and the environment. Handles the updating", "v2y - v1y k = c1x * e1x + c1y * e1y if", "= k / length if k < length: if sqrt(c1x * c1x +", "self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points += reward if not self.agents: self.game_over() if", "c1x = asteroid.centre_x - v1x c1y = asteroid.centre_y - v1y e1x = v2x", "< window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self, agent: Agent,", "c2x * e2x + c2y * e2y if k > 0: length =", "destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in particles: if particle not", "collision detection between the ship and asteroids. \"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459", "update the actions of the user agents. :param symbol: The key release. 
:param", "GameState.OVER def on_key_press(self, symbol, modifiers): \"\"\" On key presses update the actions of", "particle in self.particles: particle.draw() def update(self): \"\"\" Update the state of the entities", "List[Agent]): \"\"\" Initialise the agents, particles, asteroids (and asteroid creator), state of the", "is not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\" Calculates the collision detection", "not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\" Calculates the collision detection between", "the ship and asteroids. \"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x", "PAUSED and vice versa. \"\"\" if self.state is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid", "ships. \"\"\" destroyed_particles = [] preserved_particles = [] preserved_asteroids = [] preserved_agents =", "create the entities on. \"\"\" self.window = window self.agents: List[Agent] = agents self.particles:", "is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator')", "< particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self,", "inside the ship if ((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y", "List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation,", "the interaction between the agents and the environment. 
Handles the updating of the", "* c2x + c2y * c2y - k * k) <= asteroid.radius: return", "\"\"\" if self.state is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state =", "asteroid_generate(self, window): \"\"\" Creates an asteroid. This also seems like it should be", "True # Check if edges intersect circle # First edge c1x = asteroid.centre_x", "c1y * c1y - k * k) <= asteroid.radius: return True # Second", "- 140))) v3y = int(ship.centre_y + (ship.height * sin(ship.facing - 140))) # Check", "length if k < length: if sqrt(c1x * c1x + c1y * c1y", "the game state from INPLAY to PAUSED and vice versa. \"\"\" if self.state", "v3x) - (v1x - v3x)*(asteroid.centre_y - v3x)) >= 0: return True # Check", "self.level and self.seconds_between_asteroid_generation > 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda:", "= window.height self.points: int = 0 def draw(self): \"\"\" Draws the entities. \"\"\"", "return (window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\\ (window_width +", "they are given. :param agent: The agent that is carrying out the action", "= 1 PAUSED = 2 OVER = 3 class Game: \"\"\" Handles the", "def pause_toggle(self): \"\"\" Sets the game state from INPLAY to PAUSED and vice", "circle center inside the ship if ((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x", "def start(self): \"\"\" Run the game. \"\"\" self.asteroid_creator.start() def game_over(self): \"\"\" The end", "+ (2 * ship.height * sin(ship.facing))) v2x = int(ship.centre_x + (ship.height * cos(ship.facing", "draw(self): \"\"\" Draws the entities. \"\"\" for agent in self.agents: agent.draw() for asteroid", "140))) v3x = int(ship.centre_x + (ship.height * cos(ship.facing - 140))) v3y = int(ship.centre_y", "The key pressed. :param modifiers: ? 
\"\"\" for agent in self.agents: agent.on_key_press(symbol, modifiers)", "= BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5 self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') self.level =", "\"\"\" Sets the game state from INPLAY to PAUSED and vice versa. \"\"\"", "v1y k = c1x * e1x + c1y * e1y if k >", "+= reward if not self.agents: self.game_over() if self.points / 5 > self.level and", "> 0: length = sqrt(e3x * e3x + e3y * e3y) k =", "= [] preserved_asteroids = [] preserved_agents = agents reward = 0 for agent", "in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, [])) self.enact_decision(agent, agent.decide()) agent.get_ship().update() for asteroid in asteroids:", "to create the entities on. \"\"\" self.window = window self.agents: List[Agent] = agents", "e1y) k = k / length if k < length: if sqrt(c1x *", "v1x) - (v2x - v1x)*(asteroid.centre_y - v1y)) >= 0 and \\ ((v3y -", "environment. Handles the updating of the environment. \"\"\" def __init__(self, window, agents: List[Agent]):", "preserved_asteroids.append(asteroid) asteroid.update() for particle in particles: if particle not in destroyed_particles and\\ 0", "* ship.height * cos(ship.facing))) v1y = int(ship.centre_y + (2 * ship.height * sin(ship.facing)))", "length if k < length: if sqrt(c3x * c3x + c3y * c3y", "and\\ 0 < particle.centre_x < window_width and 0 < particle.centre_y < window_height: particle.update()", "window.width) start_y = random.choice([0, window.height]) if start_y == 0: velocity_y = random.randint(1, 3)", "window_height): \"\"\" Calculates if an asteroid is visible. \"\"\" return (window_height + asteroid.radius", "e1x = v2x - v1x e1y = v2y - v1y k = c1x", "def game_over(self): \"\"\" The end of the game when the player dies. 
\"\"\"", "reward = \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points += reward if not", "and \\ ((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y - v3x))", "key pressed. :param modifiers: ? \"\"\" for agent in self.agents: agent.on_key_press(symbol, modifiers) def", "else: start_x = random.randint(0, window.width) start_y = random.choice([0, window.height]) if start_y == 0:", "< particle.centre_x < window_width and 0 < particle.centre_y < window_height: particle.update() preserved_particles.append(particle) return", "= c1x * e1x + c1y * e1y if k > 0: length", "from time import time from apscheduler.schedulers.background import BackgroundScheduler from game.entities import Asteroid, Particle", "decision is Action.BOOST: agent_ship.boost() elif decision is Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE:", "length: if sqrt(c1x * c1x + c1y * c1y - k * k)", "time from apscheduler.schedulers.background import BackgroundScheduler from game.entities import Asteroid, Particle from game.agent import", "Game: \"\"\" Handles the interaction between the agents and the environment. Handles the", "in self.asteroids: asteroid.draw() for particle in self.particles: particle.draw() def update(self): \"\"\" Update the", "random.randint(0, window.width) start_y = random.choice([0, window.height]) if start_y == 0: velocity_y = random.randint(1,", "action :param decision: The action to enact. \"\"\" agent_ship = agent.get_ship() if decision", "v1x = int(ship.centre_x + (2 * ship.height * cos(ship.facing))) v1y = int(ship.centre_y +", "(v1x - v3x)*(asteroid.centre_y - v3x)) >= 0: return True # Check if edges", "asteroid. This also seems like it should be in the entity class. 
As", "velocity_y, 15)) def out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates if an asteroid is", "window self.agents: List[Agent] = agents self.particles: List[Particle] = [] self.asteroids: List[Asteroid] = []", "else: self.state = GameState.INPLAY self.asteroid_creator.resume_job('asteroid generator') def add_particle(self, particle): \"\"\" Adds a particle", "asteroid.centre_x < -asteroid.radius) def entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid], agents: List[Agent])", "is Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire is", "v1y = int(ship.centre_y + (2 * ship.height * sin(ship.facing))) v2x = int(ship.centre_x +", "versa. \"\"\" if self.state is GameState.INPLAY: self.state = GameState.PAUSED self.asteroid_creator.pause_job('asteroid generator') else: self.state", "self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return True", "= random.randint(-3, -1) velocity_x = random.randint(-3, 3) self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15)) def", "if self.is_inside(v1x, v1y, asteroid) or\\ self.is_inside(v2x, v2y, asteroid) or\\ self.is_inside(v3x, v3y, asteroid): return", "c2x = asteroid.centre_x - v2x c2y = asteroid.centre_y - v2y e2x = v3x", "if random.randint(0, 1) == 0: start_x = random.choice([0, window.width]) start_y = random.randint(0, window.height)", "for asteroid in self.asteroids: asteroid.draw() for particle in self.particles: particle.draw() def update(self): \"\"\"", "and asteroids. 
\"\"\" # Detection adapted from http://www.phatcode.net/articles.php?id=459 v1x = int(ship.centre_x + (2", "self.asteroid_creator.start() def game_over(self): \"\"\" The end of the game when the player dies.", "agent_ship.turn_right() elif decision is Action.TURNLEFT: agent_ship.turn_left() elif decision is Action.STOPTURN: agent_ship.stop_turn() elif decision", "- v2x) - (v3x - v2x)*(asteroid.centre_y - v2y)) >= 0 and \\ ((v1y", "<= asteroid.radius: return True # Third edge c3x = asteroid.centre_x - v3x c3y", "game.entities import Asteroid, Particle from game.agent import Agent, Action key = pyglet.window.key class", "1 PAUSED = 2 OVER = 3 class Game: \"\"\" Handles the interaction", "return True # Third edge c3x = asteroid.centre_x - v3x c3y = asteroid.centre_y", "\"\"\" Initialise the agents, particles, asteroids (and asteroid creator), state of the game,", "c2y * e2y if k > 0: length = sqrt(e2x * e2x +", "c3y = asteroid.centre_y - v3y e3x = v1x - v3x e3y = v1y", "or is it game over. \"\"\" INPLAY = 1 PAUSED = 2 OVER", "is visible. \"\"\" return (window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius)", "self.level = 1 self.state: GameState = GameState.INPLAY self.window_width: int = window.width self.window_height: int", "asteroid.centre_x - v1x c1y = asteroid.centre_y - v1y e1x = v2x - v1x", "\"\"\" self.window = window self.agents: List[Agent] = agents self.particles: List[Particle] = [] self.asteroids:", "v2x) - (v3x - v2x)*(asteroid.centre_y - v2y)) >= 0 and \\ ((v1y -", "update the actions of the user agents. :param symbol: The key pressed. :param", "be in the entity class. As in the calculations could be in the", "List[Asteroid], agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]: \"\"\" Updates the game entity", "of the user agents. :param symbol: The key release. :param modifiers: ? 
\"\"\"", "self.agents, reward = \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points += reward if", "the action :param decision: The action to enact. \"\"\" agent_ship = agent.get_ship() if", "destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update() for particle in particles: if particle not in destroyed_particles and\\", "the vertices of the ship are intersecting the asteroid if self.is_inside(v1x, v1y, asteroid)", "* sin(ship.facing + 140))) v3x = int(ship.centre_x + (ship.height * cos(ship.facing - 140)))", "PAUSED = 2 OVER = 3 class Game: \"\"\" Handles the interaction between", "self.agents: self.game_over() if self.points / 5 > self.level and self.seconds_between_asteroid_generation > 0.01: self.level", "we just call here asteroid.generate(). \"\"\" if random.randint(0, 1) == 0: start_x =", "random.randint(-3, -1) velocity_y = random.randint(-3, 3) else: start_x = random.randint(0, window.width) start_y =", "over. \"\"\" INPLAY = 1 PAUSED = 2 OVER = 3 class Game:", "List[Particle] = [] self.asteroids: List[Asteroid] = [] self.asteroid_creator = BackgroundScheduler() self.seconds_between_asteroid_generation = 0.5", "velocity_x, velocity_y, 15)) def out_of_window(self, asteroid, window_width, window_height): \"\"\" Calculates if an asteroid", "and the agents ships. \"\"\" destroyed_particles = [] preserved_particles = [] preserved_asteroids =", "preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid, window_width, window_height): destroyed_asteroid = True for particle", "= window self.agents: List[Agent] = agents self.particles: List[Particle] = [] self.asteroids: List[Asteroid] =", "is carrying out the action :param decision: The action to enact. 
\"\"\" agent_ship", "if k < length: if sqrt(c3x * c3x + c3y * c3y -", "self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation, id='asteroid generator') def pause_toggle(self): \"\"\" Sets", "Action.STOPBOOST: agent_ship.stop_boost() elif decision is Action.FIRE: cannon_fire = agent_ship.fire() if cannon_fire is not", "c1x * e1x + c1y * e1y if k > 0: length =", "- v2x)*(asteroid.centre_y - v2y)) >= 0 and \\ ((v1y - v3y)*(asteroid.centre_x - v3x)", "or asteroid.centre_y < -asteroid.radius) or\\ (window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x <", "\"\"\" Calculates the collision detection between the ship and asteroids. \"\"\" # Detection", "random.randint(0, 1) == 0: start_x = random.choice([0, window.width]) start_y = random.randint(0, window.height) if", "circle.radius): return True else: return False def start(self): \"\"\" Run the game. \"\"\"", "preserved_particles, preserved_asteroids, preserved_agents, reward def enact_decision(self, agent: Agent, decision: Action): \"\"\" Enact the", "current particles. \"\"\" self.particles.append(particle) def asteroid_generate(self, window): \"\"\" Creates an asteroid. This also", "Adds a particle to the list of current particles. \"\"\" self.particles.append(particle) def asteroid_generate(self,", "\"\"\" destroyed_particles = [] preserved_particles = [] preserved_asteroids = [] preserved_agents = agents", "center inside the ship if ((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x -", "Enact the decisions made by the agent in the order they are given.", "self.window_width: int = window.width self.window_height: int = window.height self.points: int = 0 def", "and the environment. Handles the updating of the environment. 
\"\"\" def __init__(self, window,", "int(ship.centre_x + (2 * ship.height * cos(ship.facing))) v1y = int(ship.centre_y + (2 *", ">= 0 and \\ ((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y", "- v2x c2y = asteroid.centre_y - v2y e2x = v3x - v2x e2y", "= asteroid.centre_y - v2y e2x = v3x - v2x e2y = v3y -", "- v2x e2y = v3y - v2y k = c2x * e2x +", "c3x + c3y * c3y - k * k) <= asteroid.radius: return True", "(ship.height * cos(ship.facing + 140))) v2y = int(ship.centre_y + (ship.height * sin(ship.facing +", "* cos(ship.facing - 140))) v3y = int(ship.centre_y + (ship.height * sin(ship.facing - 140)))", "+ (ship.height * cos(ship.facing - 140))) v3y = int(ship.centre_y + (ship.height * sin(ship.facing", "carrying out the action :param decision: The action to enact. \"\"\" agent_ship =", "asteroid): reward += 1 destroyed_asteroid = True destroyed_particles.append(particle) if not destroyed_asteroid: preserved_asteroids.append(asteroid) asteroid.update()", "\\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points += reward if not self.agents: self.game_over()", "[] preserved_agents = agents reward = 0 for agent in agents: agent.perceive(agent.get_perception_type()(agent.get_ship(), particles,", "agent.get_ship().update() for asteroid in asteroids: for agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent)", "int(ship.centre_y + (ship.height * sin(ship.facing + 140))) v3x = int(ship.centre_x + (ship.height *", "agent in agents: if self.intersecting_ship(asteroid, agent.get_ship()): preserved_agents.remove(agent) destroyed_asteroid = False if self.out_of_window(asteroid, window_width,", "Handles the interaction between the agents and the environment. 
Handles the updating of", "= agent_ship.fire() if cannon_fire is not None: self.particles.append(cannon_fire) def intersecting_ship(self, asteroid, ship): \"\"\"", "> 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval', seconds=self.seconds_between_asteroid_generation,", "and self.seconds_between_asteroid_generation > 0.01: self.level += 1 self.asteroid_creator.remove_all_jobs() self.seconds_between_asteroid_generation /= 1.25 self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window),", "0: velocity_x = random.randint(1, 3) else: velocity_x = random.randint(-3, -1) velocity_y = random.randint(-3,", "e3y * e3y) k = k / length if k < length: if", "self.asteroids, self.agents, reward = \\ self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents) self.points += reward" ]
[ "kwargs: parameters for the endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\" headers = {'accept':", ":param kwargs: :return: ((parameter, value), (parameter, value), ...) :rtype: tuple \"\"\" params =", "**kwargs): \"\"\" Requests and processes ESI json file :param data_source: ['tranquility', 'singularity'] :param", "for ESI :param kwargs: :return: ((parameter, value), (parameter, value), ...) :rtype: tuple \"\"\"", ":return: ((parameter, value), (parameter, value), ...) :rtype: tuple \"\"\" params = () for", "tuples and changes parameter names for ESI :param kwargs: :return: ((parameter, value), (parameter,", "['dev', 'latest', 'legacy', 'v1', 'v2', ...] :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...]", "json file :param data_source: ['tranquility', 'singularity'] :param version: ESI version ['dev', 'latest', 'legacy',", "version ['dev', 'latest', 'legacy', 'v1', 'v2', ...] :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE',", "parameter == 'if_none_match': parameter = 'If-None-Match' if parameter == 'accept_language': parameter = 'Accept-Language'", "'If-None-Match' if parameter == 'accept_language': parameter = 'Accept-Language' params = (*params, (parameter, value))", "parameter == 'accept_language': parameter = 'Accept-Language' params = (*params, (parameter, value)) return params", "file :param data_source: ['tranquility', 'singularity'] :param version: ESI version ['dev', 'latest', 'legacy', 'v1',", "= 'If-None-Match' if parameter == 'accept_language': parameter = 'Accept-Language' params = (*params, (parameter,", "json.load(requests.get().text) :rtype: dict \"\"\" headers = {'accept': 'application/json'} params = _args_to_params(kwargs) response =", "value is None: continue if parameter == 'if_none_match': parameter = 'If-None-Match' if parameter", "Requests and processes ESI json file :param data_source: ['tranquility', 'singularity'] :param version: ESI", "= {'accept': 'application/json'} params = _args_to_params(kwargs) 
response = requests.request(HTTP_method, f'https://esi.evetech.net/{version}{path}', headers=headers, params=params, proxies=proxies)", "params = () for parameter, value in kwargs.items(): if value is None: continue", "path, proxies=None, **kwargs): \"\"\" Requests and processes ESI json file :param data_source: ['tranquility',", "and processes ESI json file :param data_source: ['tranquility', 'singularity'] :param version: ESI version", "value), ...) :rtype: tuple \"\"\" params = () for parameter, value in kwargs.items():", "def _args_to_params(kwargs): \"\"\" Creates a tuple of keyword, value tuples and changes parameter", "ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...] :param HTTP_method: ['GET', 'POST', 'PUT',", "'POST', 'PUT', 'DELETE', ...] :param path: endpoint :param proxies: Dictionary mapping protocol to", "(parameter, value), ...) :rtype: tuple \"\"\" params = () for parameter, value in", "['GET', 'POST', 'PUT', 'DELETE', ...] :param path: endpoint :param proxies: Dictionary mapping protocol", "'PUT', 'DELETE', ...] 
:param path: endpoint :param proxies: Dictionary mapping protocol to the", ":param proxies: Dictionary mapping protocol to the URL of the proxy :param kwargs:", "URL of the proxy :param kwargs: parameters for the endpoint :return: json.load(requests.get().text) :rtype:", "the proxy :param kwargs: parameters for the endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\"", "of keyword, value tuples and changes parameter names for ESI :param kwargs: :return:", "HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests and processes ESI json file :param data_source:", "request(data_source, version, HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests and processes ESI json file", "Creates a tuple of keyword, value tuples and changes parameter names for ESI", "import loads def _args_to_params(kwargs): \"\"\" Creates a tuple of keyword, value tuples and", "'if_none_match': parameter = 'If-None-Match' if parameter == 'accept_language': parameter = 'Accept-Language' params =", "if parameter == 'if_none_match': parameter = 'If-None-Match' if parameter == 'accept_language': parameter =", "version, HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests and processes ESI json file :param", "names for ESI :param kwargs: :return: ((parameter, value), (parameter, value), ...) :rtype: tuple", "'v2', ...] :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...] :param path: endpoint :param", "parameter names for ESI :param kwargs: :return: ((parameter, value), (parameter, value), ...) :rtype:", ":return: json.load(requests.get().text) :rtype: dict \"\"\" headers = {'accept': 'application/json'} params = _args_to_params(kwargs) response", "changes parameter names for ESI :param kwargs: :return: ((parameter, value), (parameter, value), ...)", "['tranquility', 'singularity'] :param version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...] 
:param", "() for parameter, value in kwargs.items(): if value is None: continue if parameter", "the URL of the proxy :param kwargs: parameters for the endpoint :return: json.load(requests.get().text)", "...] :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...] :param path: endpoint :param proxies:", ":param data_source: ['tranquility', 'singularity'] :param version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2',", "import requests from json import loads def _args_to_params(kwargs): \"\"\" Creates a tuple of", "= () for parameter, value in kwargs.items(): if value is None: continue if", "json import loads def _args_to_params(kwargs): \"\"\" Creates a tuple of keyword, value tuples", "def request(data_source, version, HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests and processes ESI json", "params def request(data_source, version, HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests and processes ESI", "((parameter, value), (parameter, value), ...) :rtype: tuple \"\"\" params = () for parameter,", "kwargs: :return: ((parameter, value), (parameter, value), ...) 
:rtype: tuple \"\"\" params = ()", "Dictionary mapping protocol to the URL of the proxy :param kwargs: parameters for", "= 'Accept-Language' params = (*params, (parameter, value)) return params def request(data_source, version, HTTP_method,", "value in kwargs.items(): if value is None: continue if parameter == 'if_none_match': parameter", "if value is None: continue if parameter == 'if_none_match': parameter = 'If-None-Match' if", "in kwargs.items(): if value is None: continue if parameter == 'if_none_match': parameter =", "protocol to the URL of the proxy :param kwargs: parameters for the endpoint", "a tuple of keyword, value tuples and changes parameter names for ESI :param", "tuple \"\"\" params = () for parameter, value in kwargs.items(): if value is", "proxies=None, **kwargs): \"\"\" Requests and processes ESI json file :param data_source: ['tranquility', 'singularity']", "value)) return params def request(data_source, version, HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests and", ":rtype: dict \"\"\" headers = {'accept': 'application/json'} params = _args_to_params(kwargs) response = requests.request(HTTP_method,", "tuple of keyword, value tuples and changes parameter names for ESI :param kwargs:", "parameter, value in kwargs.items(): if value is None: continue if parameter == 'if_none_match':", "for the endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\" headers = {'accept': 'application/json'} params", "path: endpoint :param proxies: Dictionary mapping protocol to the URL of the proxy", "'latest', 'legacy', 'v1', 'v2', ...] :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...] 
:param", "continue if parameter == 'if_none_match': parameter = 'If-None-Match' if parameter == 'accept_language': parameter", "== 'if_none_match': parameter = 'If-None-Match' if parameter == 'accept_language': parameter = 'Accept-Language' params", "return params def request(data_source, version, HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests and processes", "to the URL of the proxy :param kwargs: parameters for the endpoint :return:", "...] :param path: endpoint :param proxies: Dictionary mapping protocol to the URL of", "proxies: Dictionary mapping protocol to the URL of the proxy :param kwargs: parameters", "processes ESI json file :param data_source: ['tranquility', 'singularity'] :param version: ESI version ['dev',", "parameter = 'If-None-Match' if parameter == 'accept_language': parameter = 'Accept-Language' params = (*params,", "ESI json file :param data_source: ['tranquility', 'singularity'] :param version: ESI version ['dev', 'latest',", ":param version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...] :param HTTP_method: ['GET',", "mapping protocol to the URL of the proxy :param kwargs: parameters for the", "\"\"\" params = () for parameter, value in kwargs.items(): if value is None:", "of the proxy :param kwargs: parameters for the endpoint :return: json.load(requests.get().text) :rtype: dict", "for parameter, value in kwargs.items(): if value is None: continue if parameter ==", "(*params, (parameter, value)) return params def request(data_source, version, HTTP_method, path, proxies=None, **kwargs): \"\"\"", "'singularity'] :param version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...] :param HTTP_method:", "\"\"\" Requests and processes ESI json file :param data_source: ['tranquility', 'singularity'] :param version:", "ESI :param kwargs: :return: ((parameter, value), (parameter, value), ...) :rtype: tuple \"\"\" params", "'legacy', 'v1', 'v2', ...] :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...] 
:param path:", "...) :rtype: tuple \"\"\" params = () for parameter, value in kwargs.items(): if", "endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\" headers = {'accept': 'application/json'} params = _args_to_params(kwargs)", "{'accept': 'application/json'} params = _args_to_params(kwargs) response = requests.request(HTTP_method, f'https://esi.evetech.net/{version}{path}', headers=headers, params=params, proxies=proxies) return", "'accept_language': parameter = 'Accept-Language' params = (*params, (parameter, value)) return params def request(data_source,", "= (*params, (parameter, value)) return params def request(data_source, version, HTTP_method, path, proxies=None, **kwargs):", "and changes parameter names for ESI :param kwargs: :return: ((parameter, value), (parameter, value),", "== 'accept_language': parameter = 'Accept-Language' params = (*params, (parameter, value)) return params def", "params = (*params, (parameter, value)) return params def request(data_source, version, HTTP_method, path, proxies=None,", "HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...] 
:param path: endpoint :param proxies: Dictionary mapping", "loads def _args_to_params(kwargs): \"\"\" Creates a tuple of keyword, value tuples and changes", "\"\"\" Creates a tuple of keyword, value tuples and changes parameter names for", "keyword, value tuples and changes parameter names for ESI :param kwargs: :return: ((parameter,", "parameters for the endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\" headers = {'accept': 'application/json'}", "kwargs.items(): if value is None: continue if parameter == 'if_none_match': parameter = 'If-None-Match'", "\"\"\" headers = {'accept': 'application/json'} params = _args_to_params(kwargs) response = requests.request(HTTP_method, f'https://esi.evetech.net/{version}{path}', headers=headers,", "is None: continue if parameter == 'if_none_match': parameter = 'If-None-Match' if parameter ==", "_args_to_params(kwargs): \"\"\" Creates a tuple of keyword, value tuples and changes parameter names", ":param path: endpoint :param proxies: Dictionary mapping protocol to the URL of the", "from json import loads def _args_to_params(kwargs): \"\"\" Creates a tuple of keyword, value", "headers = {'accept': 'application/json'} params = _args_to_params(kwargs) response = requests.request(HTTP_method, f'https://esi.evetech.net/{version}{path}', headers=headers, params=params,", "data_source: ['tranquility', 'singularity'] :param version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...]", "proxy :param kwargs: parameters for the endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\" headers", "endpoint :param proxies: Dictionary mapping protocol to the URL of the proxy :param", ":rtype: tuple \"\"\" params = () for parameter, value in kwargs.items(): if value", "value tuples and changes parameter names for ESI :param kwargs: :return: ((parameter, value),", ":param kwargs: parameters for the endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\" headers =", "dict \"\"\" headers = 
{'accept': 'application/json'} params = _args_to_params(kwargs) response = requests.request(HTTP_method, f'https://esi.evetech.net/{version}{path}',", "'Accept-Language' params = (*params, (parameter, value)) return params def request(data_source, version, HTTP_method, path,", "'v1', 'v2', ...] :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...] :param path: endpoint", "the endpoint :return: json.load(requests.get().text) :rtype: dict \"\"\" headers = {'accept': 'application/json'} params =", "value), (parameter, value), ...) :rtype: tuple \"\"\" params = () for parameter, value", "version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...] :param HTTP_method: ['GET', 'POST',", "'DELETE', ...] :param path: endpoint :param proxies: Dictionary mapping protocol to the URL", "requests from json import loads def _args_to_params(kwargs): \"\"\" Creates a tuple of keyword,", "if parameter == 'accept_language': parameter = 'Accept-Language' params = (*params, (parameter, value)) return", "None: continue if parameter == 'if_none_match': parameter = 'If-None-Match' if parameter == 'accept_language':", ":param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...] :param path: endpoint :param proxies: Dictionary", "parameter = 'Accept-Language' params = (*params, (parameter, value)) return params def request(data_source, version,", "'application/json'} params = _args_to_params(kwargs) response = requests.request(HTTP_method, f'https://esi.evetech.net/{version}{path}', headers=headers, params=params, proxies=proxies) return loads(response.text)", "(parameter, value)) return params def request(data_source, version, HTTP_method, path, proxies=None, **kwargs): \"\"\" Requests" ]
[ "= \"\", treasures: [int] = [], conclusion: str = \"\", goal: str =", "self.alt_requirements, \"Complete\") return requirements + anti_requirements + ([P(\"Or\")] + alt_requirements if len(alt_requirements) >", "<reponame>Softyy/gloomhaven-campaign-manager<filename>gloomhaven/models/scenario.py from dash_html_components import P from .scenario_event import ScenarioEvent class Scenario(): def __init__(self,", "requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements =", "return requirements + anti_requirements + ([P(\"Or\")] + alt_requirements if len(alt_requirements) > 0 else", "def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements", "[\"A1a\", \"A2b\"], event_1: ScenarioEvent = {}, rewards: [str] = [], special_rules: str =", "str): return [P(f'{requirement} ({cond})') for requirement in requirements] def __repr__(self): return f'{self.id}-{self.title}' def", "= rewards self.special_rules = special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self):", "def __init__(self, id, title, requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[],", "ScenarioEvent = {}, rewards: [str] = [], special_rules: str = \"\", event_2: ScenarioEvent", "= introduction self.treasures = treasures self.conclusion = conclusion self.goal = goal self.tiles =", "anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements +", "subset_of_locations 
self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements", "[str] = [], special_rules: str = \"\", event_2: ScenarioEvent = {}, event_3: ScenarioEvent", "\"\", treasures: [int] = [], conclusion: str = \"\", goal: str = \"Kill", "= \"A-1\", tiles: [str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent = {}, rewards: [str]", "{}, boss_special_1: str = \"\", boss_special_2: str = \"\"): self.id = id self.title", "text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement} ({cond})') for requirement in requirements] def __repr__(self):", "ScenarioEvent = {}, boss_special_1: str = \"\", boss_special_2: str = \"\"): self.id =", "class Scenario(): def __init__(self, id, title, requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None,", "rewards self.special_rules = special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements", "__repr__(self): return f'{self.id}-{self.title}' def get_event(self, id: int): if (id == 1): return self.event_1", "alt_requirements self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements self.scenario_type = scenario_type self.introduction = introduction", "ScenarioEvent class Scenario(): def __init__(self, id, title, requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False,", "\"A-1\", tiles: [str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent = {}, rewards: [str] =", "if (id == 1): return self.event_1 elif (id == 2): return self.event_2 elif", "scenario_type self.introduction = introduction self.treasures = treasures self.conclusion = conclusion self.goal = goal", "event_1: ScenarioEvent = {}, rewards: [str] = [], 
special_rules: str = \"\", event_2:", "self.alt_requirements = alt_requirements self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements self.scenario_type = scenario_type self.introduction", "self.rewards = rewards self.special_rules = special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2 def", "{}, event_3: ScenarioEvent = {}, boss_special_1: str = \"\", boss_special_2: str = \"\"):", "ScenarioEvent(**event_3) self.rewards = rewards self.special_rules = special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2", "2): return self.event_2 elif (id == 3): return self.event_3 else: return ScenarioEvent() def", "tiles self.board_square = board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3)", "self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements = lost_achievements self.personal_requirements", "self.board_square = board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards", "self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements self.scenario_type = scenario_type self.introduction = introduction self.treasures", "= \"\", boss_special_2: str = \"\"): self.id = id self.title = title self.requirements", "0 else []) @staticmethod def text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement} ({cond})') for", "@staticmethod def text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement} ({cond})') for requirement in requirements]", "def text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement} ({cond})') for requirement in requirements] def", "title, requirements=[], anti_requirements=[], 
party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str", "= title self.requirements = requirements self.anti_requirements = anti_requirements self.party_achievements = party_achievements self.global_achievements =", "self.goal = goal self.tiles = tiles self.board_square = board_square self.event_1 = ScenarioEvent(**event_1) self.event_2", "== 1): return self.event_1 elif (id == 2): return self.event_2 elif (id ==", "conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements self.scenario_type = scenario_type", "= {}, boss_special_1: str = \"\", boss_special_2: str = \"\"): self.id = id", "lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction: str = \"\", treasures: [int] =", "id self.title = title self.requirements = requirements self.anti_requirements = anti_requirements self.party_achievements = party_achievements", "= board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards =", "self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards self.special_rules", "title self.requirements = requirements self.anti_requirements = anti_requirements self.party_achievements = party_achievements self.global_achievements = global_achievements", "= self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements,", "1): return self.event_1 elif (id == 2): return self.event_2 elif (id == 3):", "str = \"Kill all enemies\", board_square: str = \"A-1\", tiles: [str] = 
[\"A1a\",", "(id == 3): return self.event_3 else: return ScenarioEvent() def get_next_event(self, id: int): return", "from dash_html_components import P from .scenario_event import ScenarioEvent class Scenario(): def __init__(self, id,", "self.treasures = treasures self.conclusion = conclusion self.goal = goal self.tiles = tiles self.board_square", "from .scenario_event import ScenarioEvent class Scenario(): def __init__(self, id, title, requirements=[], anti_requirements=[], party_achievements=[],", "special_rules: str = \"\", event_2: ScenarioEvent = {}, event_3: ScenarioEvent = {}, boss_special_1:", "str = \"A-1\", tiles: [str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent = {}, rewards:", "[str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent = {}, rewards: [str] = [], special_rules:", "\"\"): self.id = id self.title = title self.requirements = requirements self.anti_requirements = anti_requirements", "self.id = id self.title = title self.requirements = requirements self.anti_requirements = anti_requirements self.party_achievements", "= new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements =", "def get_event(self, id: int): if (id == 1): return self.event_1 elif (id ==", "= [\"A1a\", \"A2b\"], event_1: ScenarioEvent = {}, rewards: [str] = [], special_rules: str", "(id == 1): return self.event_1 elif (id == 2): return self.event_2 elif (id", "requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str =", "= 'main', introduction: str = \"\", treasures: [int] = [], conclusion: str =", "\"\", event_2: ScenarioEvent = {}, event_3: ScenarioEvent = {}, boss_special_1: str = \"\",", "= \"\"): self.id = id self.title = title self.requirements = requirements 
self.anti_requirements =", "event_2: ScenarioEvent = {}, event_3: ScenarioEvent = {}, boss_special_1: str = \"\", boss_special_2:", "= id self.title = title self.requirements = requirements self.anti_requirements = anti_requirements self.party_achievements =", "= requirements self.anti_requirements = anti_requirements self.party_achievements = party_achievements self.global_achievements = global_achievements self.new_locations =", "= goal self.tiles = tiles self.board_square = board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 =", "str = 'main', introduction: str = \"\", treasures: [int] = [], conclusion: str", "ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards self.special_rules = special_rules", "return f'{self.id}-{self.title}' def get_event(self, id: int): if (id == 1): return self.event_1 elif", "ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards self.special_rules = special_rules self.boss_special_1 = boss_special_1", "self.personal_requirements = personal_requirements self.scenario_type = scenario_type self.introduction = introduction self.treasures = treasures self.conclusion", "= scenario_type self.introduction = introduction self.treasures = treasures self.conclusion = conclusion self.goal =", "self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards self.special_rules = special_rules self.boss_special_1 = boss_special_1 self.boss_special_2", "== 2): return self.event_2 elif (id == 3): return self.event_3 else: return ScenarioEvent()", "anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main',", "ScenarioEvent = {}, event_3: ScenarioEvent = {}, boss_special_1: str = \"\", boss_special_2: str", "\"Complete\") return requirements + 
anti_requirements + ([P(\"Or\")] + alt_requirements if len(alt_requirements) > 0", "= self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements + anti_requirements", "elif (id == 2): return self.event_2 elif (id == 3): return self.event_3 else:", "self.requirements = requirements self.anti_requirements = anti_requirements self.party_achievements = party_achievements self.global_achievements = global_achievements self.new_locations", "\"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements + anti_requirements + ([P(\"Or\")] +", "self.title = title self.requirements = requirements self.anti_requirements = anti_requirements self.party_achievements = party_achievements self.global_achievements", "board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards", "str = \"\"): self.id = id self.title = title self.requirements = requirements self.anti_requirements", "goal self.tiles = tiles self.board_square = board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2)", "boss_special_2: str = \"\"): self.id = id self.title = title self.requirements = requirements", "= {}, event_3: ScenarioEvent = {}, boss_special_1: str = \"\", boss_special_2: str =", "in requirements] def __repr__(self): return f'{self.id}-{self.title}' def get_event(self, id: int): if (id ==", "= ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards self.special_rules =", "new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements = lost_achievements", "Scenario(): def __init__(self, id, title, requirements=[], 
anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[],", "global_achievements self.new_locations = new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements", "if len(alt_requirements) > 0 else []) @staticmethod def text_and_cond_to_html(requirements: str, cond: str): return", "self.event_2 elif (id == 3): return self.event_3 else: return ScenarioEvent() def get_next_event(self, id:", "[], special_rules: str = \"\", event_2: ScenarioEvent = {}, event_3: ScenarioEvent = {},", "= \"\", event_2: ScenarioEvent = {}, event_3: ScenarioEvent = {}, boss_special_1: str =", "self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements + anti_requirements + ([P(\"Or\")] + alt_requirements if len(alt_requirements)", "= self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements + anti_requirements + ([P(\"Or\")] + alt_requirements if", "len(alt_requirements) > 0 else []) @staticmethod def text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement}", "self.introduction = introduction self.treasures = treasures self.conclusion = conclusion self.goal = goal self.tiles", "\"Kill all enemies\", board_square: str = \"A-1\", tiles: [str] = [\"A1a\", \"A2b\"], event_1:", "id: int): if (id == 1): return self.event_1 elif (id == 2): return", "new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction: str =", "{}, rewards: [str] = [], special_rules: str = \"\", event_2: ScenarioEvent = {},", "lost_achievements self.personal_requirements = personal_requirements self.scenario_type = scenario_type self.introduction = introduction self.treasures = treasures", "self.tiles = tiles 
self.board_square = board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3", "global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction: str", "all enemies\", board_square: str = \"A-1\", tiles: [str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent", "= boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements,", "P from .scenario_event import ScenarioEvent class Scenario(): def __init__(self, id, title, requirements=[], anti_requirements=[],", "'main', introduction: str = \"\", treasures: [int] = [], conclusion: str = \"\",", "get_event(self, id: int): if (id == 1): return self.event_1 elif (id == 2):", "\"\", boss_special_2: str = \"\"): self.id = id self.title = title self.requirements =", "self.event_1 elif (id == 2): return self.event_2 elif (id == 3): return self.event_3", "[int] = [], conclusion: str = \"\", goal: str = \"Kill all enemies\",", "= boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements", "__init__(self, id, title, requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None,", "= {}, rewards: [str] = [], special_rules: str = \"\", event_2: ScenarioEvent =", "self.scenario_type = scenario_type self.introduction = introduction self.treasures = treasures self.conclusion = conclusion self.goal", "= subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements 
self.lost_achievements = lost_achievements self.personal_requirements =", "[P(f'{requirement} ({cond})') for requirement in requirements] def __repr__(self): return f'{self.id}-{self.title}' def get_event(self, id:", "conclusion: str = \"\", goal: str = \"Kill all enemies\", board_square: str =", "anti_requirements self.party_achievements = party_achievements self.global_achievements = global_achievements self.new_locations = new_locations self.subset_of_locations = subset_of_locations", "= tiles self.board_square = board_square self.event_1 = ScenarioEvent(**event_1) self.event_2 = ScenarioEvent(**event_2) self.event_3 =", "event_3: ScenarioEvent = {}, boss_special_1: str = \"\", boss_special_2: str = \"\"): self.id", "boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\")", "= ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards self.special_rules = special_rules self.boss_special_1 =", "== 3): return self.event_3 else: return ScenarioEvent() def get_next_event(self, id: int): return self.get_event(id+1)", "str, cond: str): return [P(f'{requirement} ({cond})') for requirement in requirements] def __repr__(self): return", ".scenario_event import ScenarioEvent class Scenario(): def __init__(self, id, title, requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[],", "= party_achievements self.global_achievements = global_achievements self.new_locations = new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements =", "self.conclusion = conclusion self.goal = goal self.tiles = tiles self.board_square = board_square self.event_1", "+ alt_requirements if len(alt_requirements) > 0 else []) @staticmethod def text_and_cond_to_html(requirements: str, cond:", "import ScenarioEvent class Scenario(): def __init__(self, id, title, 
requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[],", "self.event_2 = ScenarioEvent(**event_2) self.event_3 = ScenarioEvent(**event_3) self.rewards = rewards self.special_rules = special_rules self.boss_special_1", "id, title, requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type:", "> 0 else []) @staticmethod def text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement} ({cond})')", "boss_special_1: str = \"\", boss_special_2: str = \"\"): self.id = id self.title =", "enemies\", board_square: str = \"A-1\", tiles: [str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent =", "for requirement in requirements] def __repr__(self): return f'{self.id}-{self.title}' def get_event(self, id: int): if", "elif (id == 3): return self.event_3 else: return ScenarioEvent() def get_next_event(self, id: int):", "cond: str): return [P(f'{requirement} ({cond})') for requirement in requirements] def __repr__(self): return f'{self.id}-{self.title}'", "rewards: [str] = [], special_rules: str = \"\", event_2: ScenarioEvent = {}, event_3:", "= [], conclusion: str = \"\", goal: str = \"Kill all enemies\", board_square:", "personal_requirements self.scenario_type = scenario_type self.introduction = introduction self.treasures = treasures self.conclusion = conclusion", "requirements] def __repr__(self): return f'{self.id}-{self.title}' def get_event(self, id: int): if (id == 1):", "introduction: str = \"\", treasures: [int] = [], conclusion: str = \"\", goal:", "tiles: [str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent = {}, rewards: [str] = [],", "self.party_achievements = party_achievements self.global_achievements = global_achievements self.new_locations = new_locations self.subset_of_locations = subset_of_locations 
self.conditional_achievements", "= ScenarioEvent(**event_3) self.rewards = rewards self.special_rules = special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 =", "\"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements", "(id == 2): return self.event_2 elif (id == 3): return self.event_3 else: return", "[]) @staticmethod def text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement} ({cond})') for requirement in", "requirement in requirements] def __repr__(self): return f'{self.id}-{self.title}' def get_event(self, id: int): if (id", "str = \"\", event_2: ScenarioEvent = {}, event_3: ScenarioEvent = {}, boss_special_1: str", "return [P(f'{requirement} ({cond})') for requirement in requirements] def __repr__(self): return f'{self.id}-{self.title}' def get_event(self,", "treasures: [int] = [], conclusion: str = \"\", goal: str = \"Kill all", "conclusion self.goal = goal self.tiles = tiles self.board_square = board_square self.event_1 = ScenarioEvent(**event_1)", "requirements + anti_requirements + ([P(\"Or\")] + alt_requirements if len(alt_requirements) > 0 else [])", "return self.event_1 elif (id == 2): return self.event_2 elif (id == 3): return", "[], conclusion: str = \"\", goal: str = \"Kill all enemies\", board_square: str", "conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction: str = \"\", treasures:", "= alt_requirements self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements self.scenario_type = scenario_type self.introduction =", "alt_requirements if len(alt_requirements) > 0 else []) @staticmethod def text_and_cond_to_html(requirements: str, cond: str):", "str = \"\", treasures: [int] = [], conclusion: str = \"\", goal: str", 
"f'{self.id}-{self.title}' def get_event(self, id: int): if (id == 1): return self.event_1 elif (id", "self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements self.scenario_type", "self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return", "party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction:", "board_square: str = \"A-1\", tiles: [str] = [\"A1a\", \"A2b\"], event_1: ScenarioEvent = {},", "self.special_rules = special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements =", "boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements =", "+ anti_requirements + ([P(\"Or\")] + alt_requirements if len(alt_requirements) > 0 else []) @staticmethod", "import P from .scenario_event import ScenarioEvent class Scenario(): def __init__(self, id, title, requirements=[],", "str = \"\", boss_special_2: str = \"\"): self.id = id self.title = title", "= lost_achievements self.personal_requirements = personal_requirements self.scenario_type = scenario_type self.introduction = introduction self.treasures =", "self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\")", "({cond})') for requirement in requirements] def __repr__(self): return 
f'{self.id}-{self.title}' def get_event(self, id: int):", "self.new_locations = new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements", "introduction self.treasures = treasures self.conclusion = conclusion self.goal = goal self.tiles = tiles", "([P(\"Or\")] + alt_requirements if len(alt_requirements) > 0 else []) @staticmethod def text_and_cond_to_html(requirements: str,", "self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements + anti_requirements +", "self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\")", "dash_html_components import P from .scenario_event import ScenarioEvent class Scenario(): def __init__(self, id, title,", "self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements + anti_requirements + ([P(\"Or\")]", "\"A2b\"], event_1: ScenarioEvent = {}, rewards: [str] = [], special_rules: str = \"\",", "= \"\", goal: str = \"Kill all enemies\", board_square: str = \"A-1\", tiles:", "= conditional_achievements self.alt_requirements = alt_requirements self.lost_achievements = lost_achievements self.personal_requirements = personal_requirements self.scenario_type =", "= global_achievements self.new_locations = new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements =", "= treasures self.conclusion = conclusion self.goal = goal self.tiles = tiles self.board_square =", "goal: str = \"Kill all enemies\", board_square: str = \"A-1\", tiles: [str] =", "requirements self.anti_requirements = anti_requirements self.party_achievements = 
party_achievements self.global_achievements = global_achievements self.new_locations = new_locations", "+ ([P(\"Or\")] + alt_requirements if len(alt_requirements) > 0 else []) @staticmethod def text_and_cond_to_html(requirements:", "def __repr__(self): return f'{self.id}-{self.title}' def get_event(self, id: int): if (id == 1): return", "= conclusion self.goal = goal self.tiles = tiles self.board_square = board_square self.event_1 =", "alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction: str = \"\", treasures: [int]", "self.anti_requirements = anti_requirements self.party_achievements = party_achievements self.global_achievements = global_achievements self.new_locations = new_locations self.subset_of_locations", "= special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html(", "= personal_requirements self.scenario_type = scenario_type self.introduction = introduction self.treasures = treasures self.conclusion =", "subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction: str = \"\",", "= \"Kill all enemies\", board_square: str = \"A-1\", tiles: [str] = [\"A1a\", \"A2b\"],", "party_achievements self.global_achievements = global_achievements self.new_locations = new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements", "scenario_type: str = 'main', introduction: str = \"\", treasures: [int] = [], conclusion:", "str = \"\", goal: str = \"Kill all enemies\", board_square: str = \"A-1\",", "= anti_requirements self.party_achievements = party_achievements self.global_achievements = global_achievements self.new_locations = new_locations self.subset_of_locations =", "treasures self.conclusion = conclusion self.goal = goal self.tiles = 
tiles self.board_square = board_square", "alt_requirements = self.text_and_cond_to_html( self.alt_requirements, \"Complete\") return requirements + anti_requirements + ([P(\"Or\")] + alt_requirements", "personal_requirements=None, scenario_type: str = 'main', introduction: str = \"\", treasures: [int] = [],", "\"\", goal: str = \"Kill all enemies\", board_square: str = \"A-1\", tiles: [str]", "anti_requirements + ([P(\"Or\")] + alt_requirements if len(alt_requirements) > 0 else []) @staticmethod def", "special_rules self.boss_special_1 = boss_special_1 self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements,", "= [], special_rules: str = \"\", event_2: ScenarioEvent = {}, event_3: ScenarioEvent =", "else []) @staticmethod def text_and_cond_to_html(requirements: str, cond: str): return [P(f'{requirement} ({cond})') for requirement", "requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html( self.anti_requirements, \"Incomplete\") alt_requirements = self.text_and_cond_to_html(", "return self.event_2 elif (id == 3): return self.event_3 else: return ScenarioEvent() def get_next_event(self,", "self.global_achievements = global_achievements self.new_locations = new_locations self.subset_of_locations = subset_of_locations self.conditional_achievements = conditional_achievements self.alt_requirements", "self.boss_special_2 = boss_special_2 def requirements_to_html(self): requirements = self.text_and_cond_to_html( self.requirements, \"Complete\") anti_requirements = self.text_and_cond_to_html(", "int): if (id == 1): return self.event_1 elif (id == 2): return self.event_2" ]
[ "# Input: dividend = 7, divisor = -3 # Output: -2 # Note:", "1 while divisor <= dividend: current >>= 1 currentResult >>= 1 if current", "Given two integers dividend and divisor, divide two integers # without using multiplication,", "your function returns 231 − 1 when the division result overflows. class Solution:", "3 # Example 2: # Input: dividend = 7, divisor = -3 #", "range: # [−231, 231 − 1]. For the purpose of this problem, assume", "= 0 current = divisor currentResult = 1 while current <= dividend: current", "For the purpose of this problem, assume that # your function returns 231", "divisor = -3 # Output: -2 # Note: # Both dividend and divisor", "# 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign = 1 if dividend >=", "dividend >= 0 and divisor < 0 or dividend <= 0 and divisor", "0. # Assume we are dealing with an environment # which could only", ">= 0 and divisor < 0 or dividend <= 0 and divisor >", "within the 32-bit signed integer range: # [−231, 231 − 1]. For the", "The integer division should truncate toward zero. # Example 1: # Input: dividend", "-3 # Output: -2 # Note: # Both dividend and divisor will be", "two integers dividend and divisor, divide two integers # without using multiplication, division", "after dividing dividend by divisor. # The integer division should truncate toward zero.", "if current <= dividend: dividend -= current result += currentResult return min(sign *", "two integers # without using multiplication, division and mod operator. # Return the", "and divisor > 0: sign = -1 dividend = abs(dividend) divisor = abs(divisor)", "overflows. class Solution: def divide(self, dividend: int, divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作,", "integers within the 32-bit signed integer range: # [−231, 231 − 1]. 
For", "-> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647", "zero. # Example 1: # Input: dividend = 10, divisor = 3 #", "or dividend <= 0 and divisor > 0: sign = -1 dividend =", "0 current = divisor currentResult = 1 while current <= dividend: current <<=", "int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT =", "Integers.py # Given two integers dividend and divisor, divide two integers # without", "<= dividend: current >>= 1 currentResult >>= 1 if current <= dividend: dividend", "# Note: # Both dividend and divisor will be 32-bit signed integers. #", "int, divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。", "< 0 or dividend <= 0 and divisor > 0: sign = -1", "the quotient after dividing dividend by divisor. # The integer division should truncate", "MAX_INT = 2147483647 sign = 1 if dividend >= 0 and divisor <", "# without using multiplication, division and mod operator. # Return the quotient after", ">>= 1 currentResult >>= 1 if current <= dividend: dividend -= current result", "Input: dividend = 10, divisor = 3 # Output: 3 # Example 2:", "function returns 231 − 1 when the division result overflows. class Solution: def", "# 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign = 1 if", "be 0. # Assume we are dealing with an environment # which could", "> 0: sign = -1 dividend = abs(dividend) divisor = abs(divisor) result =", "= divisor currentResult = 1 while current <= dividend: current <<= 1 currentResult", "Assume we are dealing with an environment # which could only store integers", "integer division should truncate toward zero. 
# Example 1: # Input: dividend =", "先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign = 1", "dividend = abs(dividend) divisor = abs(divisor) result = 0 current = divisor currentResult", "# Assume we are dealing with an environment # which could only store", "problem, assume that # your function returns 231 − 1 when the division", "currentResult = 1 while current <= dividend: current <<= 1 currentResult <<= 1", "are dealing with an environment # which could only store integers within the", "Both dividend and divisor will be 32-bit signed integers. # The divisor will", "the purpose of this problem, assume that # your function returns 231 −", "the division result overflows. class Solution: def divide(self, dividend: int, divisor: int) ->", "0 and divisor < 0 or dividend <= 0 and divisor > 0:", "int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign", "# Return the quotient after dividing dividend by divisor. # The integer division", "<= dividend: dividend -= current result += currentResult return min(sign * result, MAX_INT)", "1]. For the purpose of this problem, assume that # your function returns", "which could only store integers within the 32-bit signed integer range: # [−231,", "while current <= dividend: current <<= 1 currentResult <<= 1 while divisor <=", "Return the quotient after dividing dividend by divisor. # The integer division should", "− 1]. 
For the purpose of this problem, assume that # your function", "with an environment # which could only store integers within the 32-bit signed", "result = 0 current = divisor currentResult = 1 while current <= dividend:", "abs(dividend) divisor = abs(divisor) result = 0 current = divisor currentResult = 1", "abs(divisor) result = 0 current = divisor currentResult = 1 while current <=", "currentResult >>= 1 if current <= dividend: dividend -= current result += currentResult", "current <= dividend: current <<= 1 currentResult <<= 1 while divisor <= dividend:", "integers. # The divisor will never be 0. # Assume we are dealing", "sign = -1 dividend = abs(dividend) divisor = abs(divisor) result = 0 current", "1: # Input: dividend = 10, divisor = 3 # Output: 3 #", "= 3 # Output: 3 # Example 2: # Input: dividend = 7,", "# Both dividend and divisor will be 32-bit signed integers. # The divisor", "while divisor <= dividend: current >>= 1 currentResult >>= 1 if current <=", "divisor = abs(divisor) result = 0 current = divisor currentResult = 1 while", "will never be 0. # Assume we are dealing with an environment #", "32-bit signed integer range: # [−231, 231 − 1]. For the purpose of", "current <= dividend: dividend -= current result += currentResult return min(sign * result,", "# Given two integers dividend and divisor, divide two integers # without using", "divisor will be 32-bit signed integers. # The divisor will never be 0.", "7, divisor = -3 # Output: -2 # Note: # Both dividend and", "<<= 1 while divisor <= dividend: current >>= 1 currentResult >>= 1 if", "divisor <= dividend: current >>= 1 currentResult >>= 1 if current <= dividend:", "divisor = 3 # Output: 3 # Example 2: # Input: dividend =", "purpose of this problem, assume that # your function returns 231 − 1", "Solution: def divide(self, dividend: int, divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到", "truncate toward zero. 
# Example 1: # Input: dividend = 10, divisor =", "environment # which could only store integers within the 32-bit signed integer range:", "dividend: int, divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 #", "1 currentResult <<= 1 while divisor <= dividend: current >>= 1 currentResult >>=", "但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign = 1 if dividend", "divisor currentResult = 1 while current <= dividend: current <<= 1 currentResult <<=", "and divisor < 0 or dividend <= 0 and divisor > 0: sign", "division should truncate toward zero. # Example 1: # Input: dividend = 10,", "divisor. # The integer division should truncate toward zero. # Example 1: #", "# Example 1: # Input: dividend = 10, divisor = 3 # Output:", "by divisor. # The integer division should truncate toward zero. # Example 1:", "1 if dividend >= 0 and divisor < 0 or dividend <= 0", "1 while current <= dividend: current <<= 1 currentResult <<= 1 while divisor", "could only store integers within the 32-bit signed integer range: # [−231, 231", "# [−231, 231 − 1]. For the purpose of this problem, assume that", "divisor < 0 or dividend <= 0 and divisor > 0: sign =", "<= 0 and divisor > 0: sign = -1 dividend = abs(dividend) divisor", "# Input: dividend = 10, divisor = 3 # Output: 3 # Example", "= -1 dividend = abs(dividend) divisor = abs(divisor) result = 0 current =", "= 1 while current <= dividend: current <<= 1 currentResult <<= 1 while", "10, divisor = 3 # Output: 3 # Example 2: # Input: dividend", "signed integers. # The divisor will never be 0. # Assume we are", "of this problem, assume that # your function returns 231 − 1 when", "quotient after dividing dividend by divisor. 
# The integer division should truncate toward", "0 and divisor > 0: sign = -1 dividend = abs(dividend) divisor =", "current >>= 1 currentResult >>= 1 if current <= dividend: dividend -= current", "= -3 # Output: -2 # Note: # Both dividend and divisor will", "toward zero. # Example 1: # Input: dividend = 10, divisor = 3", "dividing dividend by divisor. # The integer division should truncate toward zero. #", "we are dealing with an environment # which could only store integers within", "integers # without using multiplication, division and mod operator. # Return the quotient", "divisor > 0: sign = -1 dividend = abs(dividend) divisor = abs(divisor) result", "dividend and divisor will be 32-bit signed integers. # The divisor will never", "dividend by divisor. # The integer division should truncate toward zero. # Example", "be 32-bit signed integers. # The divisor will never be 0. # Assume", "current = divisor currentResult = 1 while current <= dividend: current <<= 1", "比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign = 1 if dividend >= 0", "# Output: 3 # Example 2: # Input: dividend = 7, divisor =", "def divide(self, dividend: int, divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 #", "= 2147483647 sign = 1 if dividend >= 0 and divisor < 0", "<reponame>WatsonWangZh/CodingPractice<filename>LeetCode/Python3/Math/29. Divide Two Integers.py # Given two integers dividend and divisor, divide two", "1 currentResult >>= 1 if current <= dividend: dividend -= current result +=", "2147483647 sign = 1 if dividend >= 0 and divisor < 0 or", "only store integers within the 32-bit signed integer range: # [−231, 231 −", "sign = 1 if dividend >= 0 and divisor < 0 or dividend", "[−231, 231 − 1]. 
For the purpose of this problem, assume that #", "Output: 3 # Example 2: # Input: dividend = 7, divisor = -3", "= abs(divisor) result = 0 current = divisor currentResult = 1 while current", "# Example 2: # Input: dividend = 7, divisor = -3 # Output:", "integers dividend and divisor, divide two integers # without using multiplication, division and", "= 7, divisor = -3 # Output: -2 # Note: # Both dividend", "assume that # your function returns 231 − 1 when the division result", "<<= 1 currentResult <<= 1 while divisor <= dividend: current >>= 1 currentResult", "Input: dividend = 7, divisor = -3 # Output: -2 # Note: #", "class Solution: def divide(self, dividend: int, divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, #", "# The integer division should truncate toward zero. # Example 1: # Input:", "division and mod operator. # Return the quotient after dividing dividend by divisor.", "current <<= 1 currentResult <<= 1 while divisor <= dividend: current >>= 1", "0: sign = -1 dividend = abs(dividend) divisor = abs(divisor) result = 0", "that # your function returns 231 − 1 when the division result overflows.", "without using multiplication, division and mod operator. # Return the quotient after dividing", "Example 1: # Input: dividend = 10, divisor = 3 # Output: 3", "dividend <= 0 and divisor > 0: sign = -1 dividend = abs(dividend)", "dividend = 7, divisor = -3 # Output: -2 # Note: # Both", "divisor will never be 0. # Assume we are dealing with an environment", "dividend and divisor, divide two integers # without using multiplication, division and mod", "Divide Two Integers.py # Given two integers dividend and divisor, divide two integers", "231 − 1]. For the purpose of this problem, assume that # your", "will be 32-bit signed integers. # The divisor will never be 0. #", "1 when the division result overflows. 
class Solution: def divide(self, dividend: int, divisor:", "1 if current <= dividend: dividend -= current result += currentResult return min(sign", "0 or dividend <= 0 and divisor > 0: sign = -1 dividend", "-1 dividend = abs(dividend) divisor = abs(divisor) result = 0 current = divisor", "溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign = 1 if dividend >= 0 and divisor", "# Output: -2 # Note: # Both dividend and divisor will be 32-bit", "<= dividend: current <<= 1 currentResult <<= 1 while divisor <= dividend: current", "# 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign =", ">>= 1 if current <= dividend: dividend -= current result += currentResult return", "dividend = 10, divisor = 3 # Output: 3 # Example 2: #", "divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。 # 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT", "and mod operator. # Return the quotient after dividing dividend by divisor. #", "and divisor, divide two integers # without using multiplication, division and mod operator.", "= 1 if dividend >= 0 and divisor < 0 or dividend <=", "divide two integers # without using multiplication, division and mod operator. # Return", "multiplication, division and mod operator. # Return the quotient after dividing dividend by", "an environment # which could only store integers within the 32-bit signed integer", "if dividend >= 0 and divisor < 0 or dividend <= 0 and", "this problem, assume that # your function returns 231 − 1 when the", "currentResult <<= 1 while divisor <= dividend: current >>= 1 currentResult >>= 1", "store integers within the 32-bit signed integer range: # [−231, 231 − 1].", "division result overflows. class Solution: def divide(self, dividend: int, divisor: int) -> int:", "should truncate toward zero. 
# Example 1: # Input: dividend = 10, divisor", "the 32-bit signed integer range: # [−231, 231 − 1]. For the purpose", "Example 2: # Input: dividend = 7, divisor = -3 # Output: -2", "dividend: current <<= 1 currentResult <<= 1 while divisor <= dividend: current >>=", "= abs(dividend) divisor = abs(divisor) result = 0 current = divisor currentResult =", "returns 231 − 1 when the division result overflows. class Solution: def divide(self,", "231 − 1 when the division result overflows. class Solution: def divide(self, dividend:", "using multiplication, division and mod operator. # Return the quotient after dividing dividend", "3 # Output: 3 # Example 2: # Input: dividend = 7, divisor", "divisor, divide two integers # without using multiplication, division and mod operator. #", "2: # Input: dividend = 7, divisor = -3 # Output: -2 #", "The divisor will never be 0. # Assume we are dealing with an", "when the division result overflows. class Solution: def divide(self, dividend: int, divisor: int)", "operator. # Return the quotient after dividing dividend by divisor. # The integer", "integer range: # [−231, 231 − 1]. For the purpose of this problem,", "and divisor will be 32-bit signed integers. # The divisor will never be", "-2 # Note: # Both dividend and divisor will be 32-bit signed integers.", "# 溢出只可能是向上溢出,通过min操作进行过滤。 MAX_INT = 2147483647 sign = 1 if dividend >= 0 and", "mod operator. # Return the quotient after dividing dividend by divisor. # The", "# The divisor will never be 0. # Assume we are dealing with", "result overflows. class Solution: def divide(self, dividend: int, divisor: int) -> int: #", "Note: # Both dividend and divisor will be 32-bit signed integers. # The", "Output: -2 # Note: # Both dividend and divisor will be 32-bit signed", "# your function returns 231 − 1 when the division result overflows. class", "− 1 when the division result overflows. 
class Solution: def divide(self, dividend: int,", "dividend: current >>= 1 currentResult >>= 1 if current <= dividend: dividend -=", "divide(self, dividend: int, divisor: int) -> int: # 先把符号抽取出来,只考虑两个正数相除的情况。除法其实就是被减数不断减去减数的操作, # 但直接不断进行减法会超时,应尽量减去大的数字,通过位移操作来快速找到 # 比被减数小一些的减数的倍数current。不断减去且缩小current。", "32-bit signed integers. # The divisor will never be 0. # Assume we", "# which could only store integers within the 32-bit signed integer range: #", "= 10, divisor = 3 # Output: 3 # Example 2: # Input:", "Two Integers.py # Given two integers dividend and divisor, divide two integers #", "signed integer range: # [−231, 231 − 1]. For the purpose of this", "never be 0. # Assume we are dealing with an environment # which", "dealing with an environment # which could only store integers within the 32-bit" ]
[ "new): # Get the current slider values a = amplitude.value b = offset.value", "n_samples = 1500 random_state = 170 # Dataset #1 X, y = make_blobs(n_samples=n_samples,", "serve sliders.py at your command prompt. Then navigate to the URL http://localhost:5006/sliders in", "to change the properties of the ``sin`` curve, or type into the title", "text.value text.on_change('value', update_title) def update_data(attrname, old, new): # Get the current slider values", "curve, or type into the title text box to update the title of", "into the title text box to update the title of the plot. Use", "sklearn.cluster import KMeans from sklearn.datasets import make_blobs ### SET UP THE DATA ###", "np.dot(X, transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied, y_varied =", "2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' # Set up data", "= a*np.sin(k*x + w) + b source.data = dict(x=x, y=y) for w in", "as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs ### SET UP", "import column, row from bokeh.models import ColumnDataSource, Slider, TextInput from bokeh.plotting import figure,", "matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs ### SET", "y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0], X[:,1] # Dataset #2 transformation", "X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) X4, Y4", "freq.value # Generate the new curve x = np.linspace(0, 4*np.pi, N) y =", "# Generate the new curve x = np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x", "make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1] # Dataset #4", "200 x = np.linspace(0, 4*np.pi, N) y = np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y))", "x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', 
source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' #", "'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' # Set up data N =", "= Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) #", "= Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1)", "def update_title(attrname, old, new): plot.title.text = text.value text.on_change('value', update_title) def update_data(attrname, old, new):", "= figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y',", "Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y',", "from sklearn import datasets import hdbscan import matplotlib.pyplot as plt from sklearn.cluster import", "= X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 =", "http://localhost:5006/sliders in your browser. 
''' import numpy as np from bokeh.io import curdoc", "up plot plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5,", "UP THE DATA ### n_samples = 1500 random_state = 170 # Dataset #1", "Y=Y4)) print(source1, source2, source3, source4) ### Set up Plot plot = figure(plot_height=400, plot_width=400,", "from bokeh.io import curdoc from bokeh.layouts import column, row from bokeh.models import ColumnDataSource,", "= text.value text.on_change('value', update_title) def update_data(attrname, old, new): # Get the current slider", "Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq =", "start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0,", "4*np.pi, N) y = np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) # Set up plot", "= np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) # Set up plot plot = figure(plot_height=400,", "plot plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])", "from bokeh.models import ColumnDataSource, Slider, TextInput from bokeh.plotting import figure, output_file,show from sklearn", "source4) ### Set up Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi],", "Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) # Set", "in your browser. 
''' import numpy as np from bokeh.io import curdoc from", "slider values a = amplitude.value b = offset.value w = phase.value k =", "end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) # Set up callbacks def", "= 170 # Dataset #1 X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 =", "Dataset #2 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) X2,", "start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) # Set up callbacks", "X1, Y1 = X[:,0], X[:,1] # Dataset #2 transformation = [[0.60834549, -0.63667341], [-0.40887718,", "up callbacks def update_title(attrname, old, new): plot.title.text = text.value text.on_change('value', update_title) def update_data(attrname,", "= freq.value # Generate the new curve x = np.linspace(0, 4*np.pi, N) y", "Scrub the sliders to change the properties of the ``sin`` curve, or type", "of the plot. Use the ``bokeh serve`` command to run the example by", "w = phase.value k = freq.value # Generate the new curve x =", "document inputs = column(text, offset, amplitude, phase, freq) curdoc().add_root(row(inputs, plot, width=800)) curdoc().title =", "'y', source=source, line_width=3, line_alpha=0.6) # Set up widgets text = TextInput(title=\"title\", value='my sine", "in [offset, amplitude, phase, freq]: w.on_change('value', update_data) # Set up layouts and add", "import curdoc from bokeh.layouts import column, row from bokeh.models import ColumnDataSource, Slider, TextInput", "function explorer with slider widgets. Scrub the sliders to change the properties of", "Set up Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])", "title of the plot. 
Use the ``bokeh serve`` command to run the example", "new curve x = np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x + w) +", "import figure, output_file,show from sklearn import datasets import hdbscan import matplotlib.pyplot as plt", "Set up layouts and add to document inputs = column(text, offset, amplitude, phase,", "X_aniso = np.dot(X, transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied,", "= phase.value k = freq.value # Generate the new curve x = np.linspace(0,", "by executing: bokeh serve sliders.py at your command prompt. Then navigate to the", "= 1500 random_state = 170 # Dataset #1 X, y = make_blobs(n_samples=n_samples, random_state=random_state)", "X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100],", "with slider widgets. Scrub the sliders to change the properties of the ``sin``", "Dataset #4 X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))", "sliders to change the properties of the ``sin`` curve, or type into the", "column, row from bokeh.models import ColumnDataSource, Slider, TextInput from bokeh.plotting import figure, output_file,show", "executing: bokeh serve sliders.py at your command prompt. Then navigate to the URL", "4*np.pi, N) y = a*np.sin(k*x + w) + b source.data = dict(x=x, y=y)", "w in [offset, amplitude, phase, freq]: w.on_change('value', update_data) # Set up layouts and", "def update_data(attrname, old, new): # Get the current slider values a = amplitude.value", "to update the title of the plot. 
Use the ``bokeh serve`` command to", "y = np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) # Set up plot plot =", "170 # Dataset #1 X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0],", "source = ColumnDataSource(data=dict(x=x, y=y)) # Set up plot plot = figure(plot_height=400, plot_width=400, title=\"my", "ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4) ### Set up", "end=5.1, step=0.1) # Set up callbacks def update_title(attrname, old, new): plot.title.text = text.value", "Set up data N = 200 x = np.linspace(0, 4*np.pi, N) y =", "dict(x=x, y=y) for w in [offset, amplitude, phase, freq]: w.on_change('value', update_data) # Set", "update the title of the plot. Use the ``bokeh serve`` command to run", "= ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2,", "= [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) X2, Y2 = X_aniso[:,0],", "print(source1, source2, source3, source4) ### Set up Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\",", "to run the example by executing: bokeh serve sliders.py at your command prompt.", "X_aniso[:,1] # Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3,", "= np.linspace(0, 4*np.pi, N) y = np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) # Set", "y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) # Set up widgets text =", "from sklearn.datasets import make_blobs ### SET UP THE DATA ### n_samples = 1500", "the sliders to change the properties of the ``sin`` curve, or type into", "4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' # 
Set", "random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered = np.vstack((X[y ==", "ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3,", "offset.value w = phase.value k = freq.value # Generate the new curve x", "= ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 =", "2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) # Set up widgets text = TextInput(title=\"title\",", "end=5.0, step=0.1) amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0,", "0.5], random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered = np.vstack((X[y", "= ColumnDataSource(data=dict(x=x, y=y)) # Set up plot plot = figure(plot_height=400, plot_width=400, title=\"my sine", "title text box to update the title of the plot. Use the ``bokeh", "KMeans from sklearn.datasets import make_blobs ### SET UP THE DATA ### n_samples =", "# Set up widgets text = TextInput(title=\"title\", value='my sine wave') offset = Slider(title=\"offset\",", "''' Present an interactive function explorer with slider widgets. 
Scrub the sliders to", "X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1]", "numpy as np from bokeh.io import curdoc from bokeh.layouts import column, row from", "import hdbscan import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import", "or type into the title text box to update the title of the", "#4 X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) X4,", "#2 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) X2, Y2", "box to update the title of the plot. Use the ``bokeh serve`` command", "Present an interactive function explorer with slider widgets. Scrub the sliders to change", "plot. Use the ``bokeh serve`` command to run the example by executing: bokeh", "np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x + w) + b source.data = dict(x=x,", "widgets text = TextInput(title=\"title\", value='my sine wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0,", "k = freq.value # Generate the new curve x = np.linspace(0, 4*np.pi, N)", "### n_samples = 1500 random_state = 170 # Dataset #1 X, y =", "plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3,", "Y3 = X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered = np.vstack((X[y == 0][:500], X[y", "source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4", "= np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) X4, Y4 =", "value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) # Set up", "source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' # Set up data N = 200", 
"cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered", "the ``bokeh serve`` command to run the example by executing: bokeh serve sliders.py", "figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source,", "update_data) # Set up layouts and add to document inputs = column(text, offset,", "for w in [offset, amplitude, phase, freq]: w.on_change('value', update_data) # Set up layouts", "4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) # Set up widgets text", "X[:,1] # Dataset #2 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X,", "[offset, amplitude, phase, freq]: w.on_change('value', update_data) # Set up layouts and add to", "the new curve x = np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x + w)", "sklearn import datasets import hdbscan import matplotlib.pyplot as plt from sklearn.cluster import KMeans", "X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state)", "N = 200 x = np.linspace(0, 4*np.pi, N) y = np.sin(x) source =", "plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' # Set up data N", "your command prompt. Then navigate to the URL http://localhost:5006/sliders in your browser. 
'''", "transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) X2, Y2 =", "== 0][:500], X[y == 1][:100], X[y == 2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1]", "SET UP THE DATA ### n_samples = 1500 random_state = 170 # Dataset", "the ``sin`` curve, or type into the title text box to update the", "y = a*np.sin(k*x + w) + b source.data = dict(x=x, y=y) for w", "value='my sine wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) amplitude = Slider(title=\"amplitude\",", "text box to update the title of the plot. Use the ``bokeh serve``", "step=0.1) # Set up callbacks def update_title(attrname, old, new): plot.title.text = text.value text.on_change('value',", "an interactive function explorer with slider widgets. Scrub the sliders to change the", "at your command prompt. Then navigate to the URL http://localhost:5006/sliders in your browser.", "new): plot.title.text = text.value text.on_change('value', update_title) def update_data(attrname, old, new): # Get the", "import numpy as np from bokeh.io import curdoc from bokeh.layouts import column, row", "properties of the ``sin`` curve, or type into the title text box to", "import KMeans from sklearn.datasets import make_blobs ### SET UP THE DATA ### n_samples", "from bokeh.layouts import column, row from bokeh.models import ColumnDataSource, Slider, TextInput from bokeh.plotting", "import make_blobs ### SET UP THE DATA ### n_samples = 1500 random_state =", "from bokeh.plotting import figure, output_file,show from sklearn import datasets import hdbscan import matplotlib.pyplot", "plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs ### SET UP THE", "2.5, 0.5], random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered =", "bokeh.plotting import figure, output_file,show from sklearn import datasets import hdbscan import matplotlib.pyplot as", "source=source, line_width=3, 
line_alpha=0.6) # Set up widgets text = TextInput(title=\"title\", value='my sine wave')", "b source.data = dict(x=x, y=y) for w in [offset, amplitude, phase, freq]: w.on_change('value',", "source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1,", "Set up plot plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi],", "DATA ### n_samples = 1500 random_state = 170 # Dataset #1 X, y", "# Dataset #1 X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0], X[:,1]", "import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs ###", "up Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X',", "line_alpha=0.6) # Set up widgets text = TextInput(title=\"title\", value='my sine wave') offset =", "= figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3,", "current slider values a = amplitude.value b = offset.value w = phase.value k", "plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#,", "np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) # Set up plot plot = figure(plot_height=400, plot_width=400,", "curve x = np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x + w) + b", "prompt. Then navigate to the URL http://localhost:5006/sliders in your browser. 
''' import numpy", "= ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4) ### Set", "command to run the example by executing: bokeh serve sliders.py at your command", "line_alpha=0.6) show(plot) output_file('clustering.html') ''' # Set up data N = 200 x =", "#3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3 = X_varied[:,0],", "output_file('clustering.html') ''' # Set up data N = 200 x = np.linspace(0, 4*np.pi,", "update_title(attrname, old, new): plot.title.text = text.value text.on_change('value', update_title) def update_data(attrname, old, new): #", "hdbscan import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs", "type into the title text box to update the title of the plot.", "np.linspace(0, 4*np.pi, N) y = np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) # Set up", "a*np.sin(k*x + w) + b source.data = dict(x=x, y=y) for w in [offset,", "tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) # Set up", "X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3,", "Dataset #1 X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0], X[:,1] #", "Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5,", "line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' # Set up data N = 200 x", "sliders.py at your command prompt. 
Then navigate to the URL http://localhost:5006/sliders in your", "a = amplitude.value b = offset.value w = phase.value k = freq.value #", "text.on_change('value', update_title) def update_data(attrname, old, new): # Get the current slider values a", "ColumnDataSource, Slider, TextInput from bokeh.plotting import figure, output_file,show from sklearn import datasets import", "tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') '''", "= X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered = np.vstack((X[y == 0][:500], X[y ==", "the properties of the ``sin`` curve, or type into the title text box", "browser. ''' import numpy as np from bokeh.io import curdoc from bokeh.layouts import", "phase, freq]: w.on_change('value', update_data) # Set up layouts and add to document inputs", "bokeh serve sliders.py at your command prompt. Then navigate to the URL http://localhost:5006/sliders", "slider widgets. 
Scrub the sliders to change the properties of the ``sin`` curve,", "layouts and add to document inputs = column(text, offset, amplitude, phase, freq) curdoc().add_root(row(inputs,", "np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) X4, Y4 = X_filtered[:,0],", "the current slider values a = amplitude.value b = offset.value w = phase.value", "= make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0], X[:,1] # Dataset #2 transformation =", "np from bokeh.io import curdoc from bokeh.layouts import column, row from bokeh.models import", "-0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1] #", "+ w) + b source.data = dict(x=x, y=y) for w in [offset, amplitude,", "Set up widgets text = TextInput(title=\"title\", value='my sine wave') offset = Slider(title=\"offset\", value=0.0,", "X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2))", "Set up callbacks def update_title(attrname, old, new): plot.title.text = text.value text.on_change('value', update_title) def", "freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) # Set up callbacks def update_title(attrname,", "random_state=random_state) X1, Y1 = X[:,0], X[:,1] # Dataset #2 transformation = [[0.60834549, -0.63667341],", "explorer with slider widgets. Scrub the sliders to change the properties of the", "= X[:,0], X[:,1] # Dataset #2 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso", "of the ``sin`` curve, or type into the title text box to update", "up layouts and add to document inputs = column(text, offset, amplitude, phase, freq)", "the title of the plot. 
Use the ``bokeh serve`` command to run the", "= column(text, offset, amplitude, phase, freq) curdoc().add_root(row(inputs, plot, width=800)) curdoc().title = \"Sliders\" '''", "[[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1]", "y=y) for w in [offset, amplitude, phase, freq]: w.on_change('value', update_data) # Set up", "= Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) # Set up callbacks def update_title(attrname, old,", "X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3))", "Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1) # Set up callbacks def update_title(attrname, old, new):", "plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x',", "sklearn.datasets import make_blobs ### SET UP THE DATA ### n_samples = 1500 random_state", "plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) # Set up widgets text = TextInput(title=\"title\", value='my", "old, new): plot.title.text = text.value text.on_change('value', update_title) def update_data(attrname, old, new): # Get", "and add to document inputs = column(text, offset, amplitude, phase, freq) curdoc().add_root(row(inputs, plot,", "figure, output_file,show from sklearn import datasets import hdbscan import matplotlib.pyplot as plt from", "bokeh.models import ColumnDataSource, Slider, TextInput from bokeh.plotting import figure, output_file,show from sklearn import", "Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4)", "x = np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x + w) + b source.data", "show(plot) output_file('clustering.html') ''' # Set up data N = 200 x = 
np.linspace(0,", "the title text box to update the title of the plot. Use the", "run the example by executing: bokeh serve sliders.py at your command prompt. Then", "''' # Set up data N = 200 x = np.linspace(0, 4*np.pi, N)", "make_blobs ### SET UP THE DATA ### n_samples = 1500 random_state = 170", "your browser. ''' import numpy as np from bokeh.io import curdoc from bokeh.layouts", "ColumnDataSource(data=dict(x=x, y=y)) # Set up plot plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\",", "to document inputs = column(text, offset, amplitude, phase, freq) curdoc().add_root(row(inputs, plot, width=800)) curdoc().title", "from sklearn.cluster import KMeans from sklearn.datasets import make_blobs ### SET UP THE DATA", "Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase", "= np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x + w) + b source.data =", "THE DATA ### n_samples = 1500 random_state = 170 # Dataset #1 X,", "# Set up layouts and add to document inputs = column(text, offset, amplitude,", "import datasets import hdbscan import matplotlib.pyplot as plt from sklearn.cluster import KMeans from", "= make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1] # Dataset", "values a = amplitude.value b = offset.value w = phase.value k = freq.value", "wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0,", "curdoc from bokeh.layouts import column, row from bokeh.models import ColumnDataSource, Slider, TextInput from", "start=0.1, end=5.1, step=0.1) # Set up callbacks def update_title(attrname, old, new): plot.title.text =", "old, new): # Get the current slider values a = amplitude.value b =", "widgets. 
Scrub the sliders to change the properties of the ``sin`` curve, or", "update_title) def update_data(attrname, old, new): # Get the current slider values a =", "= ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4) ### Set up Plot plot =", "X[:,0], X[:,1] # Dataset #2 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso =", "transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples,", "command prompt. Then navigate to the URL http://localhost:5006/sliders in your browser. ''' import", "Slider, TextInput from bokeh.plotting import figure, output_file,show from sklearn import datasets import hdbscan", "end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1,", "plot.title.text = text.value text.on_change('value', update_title) def update_data(attrname, old, new): # Get the current", "+ b source.data = dict(x=x, y=y) for w in [offset, amplitude, phase, freq]:", "up data N = 200 x = np.linspace(0, 4*np.pi, N) y = np.sin(x)", "= offset.value w = phase.value k = freq.value # Generate the new curve", "wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) # Set", "== 1][:100], X[y == 2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1,", "X[y == 2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2", "= np.dot(X, transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied, y_varied", "interactive function explorer with slider widgets. 
Scrub the sliders to change the properties", "y=y)) # Set up plot plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\",", "bokeh.layouts import column, row from bokeh.models import ColumnDataSource, Slider, TextInput from bokeh.plotting import", "serve`` command to run the example by executing: bokeh serve sliders.py at your", "N) y = a*np.sin(k*x + w) + b source.data = dict(x=x, y=y) for", "add to document inputs = column(text, offset, amplitude, phase, freq) curdoc().add_root(row(inputs, plot, width=800))", "### Set up Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5,", "example by executing: bokeh serve sliders.py at your command prompt. Then navigate to", "= 200 x = np.linspace(0, 4*np.pi, N) y = np.sin(x) source = ColumnDataSource(data=dict(x=x,", "b = offset.value w = phase.value k = freq.value # Generate the new", "# Set up callbacks def update_title(attrname, old, new): plot.title.text = text.value text.on_change('value', update_title)", "# Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3", "freq]: w.on_change('value', update_data) # Set up layouts and add to document inputs =", "= dict(x=x, y=y) for w in [offset, amplitude, phase, freq]: w.on_change('value', update_data) #", "Get the current slider values a = amplitude.value b = offset.value w =", "Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4) ### Set up Plot", "1][:100], X[y == 2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1))", "y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html') ''' # Set up", "offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) 
amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0,", "X2, Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0,", "ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4,", "#1 X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0], X[:,1] # Dataset", "figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6)", "source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4) ### Set up Plot plot", "= Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq", "text = TextInput(title=\"title\", value='my sine wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1)", "''' import numpy as np from bokeh.io import curdoc from bokeh.layouts import column,", "amplitude.value b = offset.value w = phase.value k = freq.value # Generate the", "Use the ``bokeh serve`` command to run the example by executing: bokeh serve", "``bokeh serve`` command to run the example by executing: bokeh serve sliders.py at", "line_width=3, line_alpha=0.6) # Set up widgets text = TextInput(title=\"title\", value='my sine wave') offset", "phase.value k = freq.value # Generate the new curve x = np.linspace(0, 4*np.pi,", "the plot. 
Use the ``bokeh serve`` command to run the example by executing:", "title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)", "update_data(attrname, old, new): # Get the current slider values a = amplitude.value b", "# Dataset #2 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation)", "# Get the current slider values a = amplitude.value b = offset.value w", "Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4))", "data N = 200 x = np.linspace(0, 4*np.pi, N) y = np.sin(x) source", "start=-5.0, end=5.0, step=0.1) amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\",", "X[y == 1][:100], X[y == 2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1 =", "up widgets text = TextInput(title=\"title\", value='my sine wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0,", "source3, source4) ### Set up Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0,", "phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1, step=0.1)", "Y4 = X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2, Y=Y2)) source3", "plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot)", "= X_aniso[:,0], X_aniso[:,1] # Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5],", "value=0.0, start=-5.0, end=5.0, step=0.1) amplitude = 
Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase =", "[-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset", "x = np.linspace(0, 4*np.pi, N) y = np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) #", "value=1.0, start=0.1, end=5.1, step=0.1) # Set up callbacks def update_title(attrname, old, new): plot.title.text", "callbacks def update_title(attrname, old, new): plot.title.text = text.value text.on_change('value', update_title) def update_data(attrname, old,", "w.on_change('value', update_data) # Set up layouts and add to document inputs = column(text,", "0][:500], X[y == 1][:100], X[y == 2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1", "random_state = 170 # Dataset #1 X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1", "w) + b source.data = dict(x=x, y=y) for w in [offset, amplitude, phase,", "``sin`` curve, or type into the title text box to update the title", "x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) # Set up widgets", "X_varied[:,1] # Dataset #4 X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y", "to the URL http://localhost:5006/sliders in your browser. ''' import numpy as np from", "import ColumnDataSource, Slider, TextInput from bokeh.plotting import figure, output_file,show from sklearn import datasets", "source3 = ColumnDataSource(data=dict(X=X3, Y=Y3)) source4 = ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4) ###", "0.85253229]] X_aniso = np.dot(X, transformation) X2, Y2 = X_aniso[:,0], X_aniso[:,1] # Dataset #3", "N) y = np.sin(x) source = ColumnDataSource(data=dict(x=x, y=y)) # Set up plot plot", "navigate to the URL http://localhost:5006/sliders in your browser. 
''' import numpy as np", "X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0], X[:,1] # Dataset #2", "as np from bokeh.io import curdoc from bokeh.layouts import column, row from bokeh.models", "# Set up plot plot = figure(plot_height=400, plot_width=400, title=\"my sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0,", "step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\", value=1.0, start=0.1, end=5.1,", "sine wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) amplitude = Slider(title=\"amplitude\", value=1.0,", "bokeh.io import curdoc from bokeh.layouts import column, row from bokeh.models import ColumnDataSource, Slider,", "# Dataset #4 X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y ==", "== 2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 =", "1500 random_state = 170 # Dataset #1 X, y = make_blobs(n_samples=n_samples, random_state=random_state) X1,", "= amplitude.value b = offset.value w = phase.value k = freq.value # Generate", "URL http://localhost:5006/sliders in your browser. ''' import numpy as np from bokeh.io import", "step=0.1) amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0,", "output_file,show from sklearn import datasets import hdbscan import matplotlib.pyplot as plt from sklearn.cluster", "Y1 = X[:,0], X[:,1] # Dataset #2 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]", "the URL http://localhost:5006/sliders in your browser. 
''' import numpy as np from bokeh.io", "TextInput from bokeh.plotting import figure, output_file,show from sklearn import datasets import hdbscan import", "title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6) show(plot) output_file('clustering.html')", "TextInput(title=\"title\", value='my sine wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) amplitude =", "# Set up data N = 200 x = np.linspace(0, 4*np.pi, N) y", "inputs = column(text, offset, amplitude, phase, freq) curdoc().add_root(row(inputs, plot, width=800)) curdoc().title = \"Sliders\"", "Then navigate to the URL http://localhost:5006/sliders in your browser. ''' import numpy as", "Dataset #3 X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3 =", "X3, Y3 = X_varied[:,0], X_varied[:,1] # Dataset #4 X_filtered = np.vstack((X[y == 0][:500],", "Generate the new curve x = np.linspace(0, 4*np.pi, N) y = a*np.sin(k*x +", "the example by executing: bokeh serve sliders.py at your command prompt. 
Then navigate", "amplitude, phase, freq]: w.on_change('value', update_data) # Set up layouts and add to document", "change the properties of the ``sin`` curve, or type into the title text", "### SET UP THE DATA ### n_samples = 1500 random_state = 170 #", "make_blobs(n_samples=n_samples, random_state=random_state) X1, Y1 = X[:,0], X[:,1] # Dataset #2 transformation = [[0.60834549,", "y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) X3, Y3 = X_varied[:,0], X_varied[:,1] #", "amplitude = Slider(title=\"amplitude\", value=1.0, start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi)", "datasets import hdbscan import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets", "ColumnDataSource(data=dict(X=X4, Y=Y4)) print(source1, source2, source3, source4) ### Set up Plot plot = figure(plot_height=400,", "2][:10])) X4, Y4 = X_filtered[:,0], X_filtered[:,1] source1 = ColumnDataSource(data=dict(X=X1, Y=Y1)) source2 = ColumnDataSource(data=dict(X=X2,", "source2, source3, source4) ### Set up Plot plot = figure(plot_height=400, plot_width=400, title=\"Clusters\", tools=\"crosshair,pan,reset,save,wheel_zoom\",", "value=1.0, start=-5.0, end=5.0, step=0.1) phase = Slider(title=\"phase\", value=0.0, start=0.0, end=2*np.pi) freq = Slider(title=\"frequency\",", "source.data = dict(x=x, y=y) for w in [offset, amplitude, phase, freq]: w.on_change('value', update_data)", "sine wave\", tools=\"crosshair,pan,reset,save,wheel_zoom\", x_range=[0, 4*np.pi], y_range=[-2.5, 2.5]) plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) #", "row from bokeh.models import ColumnDataSource, Slider, TextInput from bokeh.plotting import figure, output_file,show from", "= TextInput(title=\"title\", value='my sine wave') offset = Slider(title=\"offset\", value=0.0, start=-5.0, end=5.0, step=0.1) amplitude" ]
[ "in debugging mode.') return parser def TypePageXML(value): \"\"\"Parse Page XML request type. Args:", "def get(self): \"\"\"Endpoint to get the help for the running service.\"\"\" rc, out", "*x: None # type: ignore ## Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource):", "= os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not", "request type. Args: value: The raw type value. Returns: dict[str, {str,PageXML}]: Dictionary including", "\"\"\"Makes a flask_restplus.Resource method expect a page xml and/or respond with a page", "Help for pagexml field in swagger documentation. options_help (str): Help for config field", "Popen, PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask,", "raise BadRequest('Expected to receive all images referenced in the Page XML ('+str(len(images_xml))+') but", ":: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+'", "tmpdir = None for thread in range(cfg.threads): threading.Thread(target=start_processing, args=(thread+1, process_queue)).start() app.run(host=cfg.host, port=cfg.port, debug=cfg.debug)", "parser def __call__(self, method): \"\"\"Makes a flask_restplus.Resource method expect a page xml and/or", ":: opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except", "images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command using given arguments.\"\"\" cmd = ['tesseract-recognize']", "json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception as", "pagexml pagexml.set_omnius_schema() from time import time from 
functools import wraps from subprocess import", "= time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests, pxml", "in swagger documentation. options_help (str): Help for config field in swagger documentation. response_help", "found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc !=", "the running service.\"\"\" rc, out = run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems", "FileStorage from werkzeug.exceptions import BadRequest from prance.util import url from prance.convert import convert_url", "'+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected to receive all images referenced in", "@api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self, req_dict): \"\"\"Endpoint for", "is not None: fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string'])", "= ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help)", "None: fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images']", "thread, num_requests, pxml = done_queue.get(True, 0.05) break except queue.Empty: continue if isinstance(pxml, Exception):", "if fname not in images_xml: raise BadRequest('Received image not referenced in the Page", "= Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc =", "req_dict['pagexml'] is not None: fxml = 
os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f:", "'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests,", "re import sys import json import shutil import queue import threading import tempfile", "help='Maximum number of tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string", "execution failed :: opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests,", "<<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements", "pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for", "shutil import queue import threading import tempfile import pagexml pagexml.set_omnius_schema() from time import", "= super().parse_args(**kwargs) if req_dict['pagexml'] is not None and req_dict['images'] is not None: pxml", "Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner", "in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for", "only got a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images", "continue except json.decoder.JSONDecodeError as ex: 
done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except", "as ex: done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug and tmpdir is not", "__init__(self, api, images_help='Images with file names as referenced in the Page XML if", "None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found in", "default=4, help='Maximum number of tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix", "os.path.basename(image.filename))) else: raise KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out", "command using given arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE,", "return pxml process_queue = queue.Queue() # type: ignore ## Processor thread function ##", "help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo,", "= write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) == 1 and opts[0][0] == '[':", "import url from prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser object for", "import Flask, Response, request, abort from flask_restplus import Api, Resource, reqparse from werkzeug.datastructures", "from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask, Response, request, abort", "for fname in images_received: if fname not in images_xml: raise BadRequest('Received image not", "flask_restplus.Resource method expect a page xml and/or respond with a page xml.\"\"\" method", "parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a Flask WSGI application ##", "def post(self, 
req_dict): \"\"\"Endpoint for running tesseract-recognize on given images or page xml", "num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests, ex))", "endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint to get the OpenAPI json.\"\"\"", "Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. ::", "tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for", "(str): Help for images field in swagger documentation. pagexml_help (str): Help for pagexml", "finally: if not cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir = None", "replace by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int,", "wraps from subprocess import Popen, PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo", "to get the help for the running service.\"\"\" rc, out = run_tesseract_recognize('--help') if", "on given images or page xml file.\"\"\" start_time = time() done_queue = queue.Queue()", "class OpenAPI(Resource): def get(self): \"\"\"Endpoint to get the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path,", "parser.parse_args(env=True) ## Create a Flask WSGI application ## app = Flask(__name__) # pylint:", "## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running", "valid Page XML file.', options_help='Optional configuration options to be used for processing.', response_help='Resulting", "@api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the running service.') 
@api.produces(['text/plain']) def get(self): \"\"\"Endpoint", "req_dict['images'] is not None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class", "## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint to get the OpenAPI json.\"\"\" absurl", "pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x", "# type: ignore ## Processor thread function ## def start_processing(thread, process_queue): num_requests =", "ignore ## Processor thread function ## def start_processing(thread, process_queue): num_requests = 0 tmpdir", "and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir = None for thread in range(cfg.threads):", "help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in debugging mode.')", "raise ValueError('Expected pagexml to be of type FileStorage.') spxml = value.read().decode('utf-8') pxml =", "debugging mode.') return parser def TypePageXML(value): \"\"\"Parse Page XML request type. Args: value:", "queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0])))", "spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including a Page XML.\"\"\" def parse_args(self,", "a request to a temporal directory. 
Args: req_dict (dict): Parsed Page XML request.", "if given.', pagexml_help='Optional valid Page XML file.', options_help='Optional configuration options to be used", "from prance.util import url from prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser", "stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out", "import json import shutil import queue import threading import tempfile import pagexml pagexml.set_omnius_schema()", "options_help='Optional configuration options to be used for processing.', response_help='Resulting Page XML after processing.'):", "def start_processing(thread, process_queue): num_requests = 0 tmpdir = None while True: try: done_queue,", "pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while", "\"\"\"Returns the parser object for the command line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler',", "a Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An", "set() for page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received", "num_requests, pxml = done_queue.get(True, 0.05) break except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request", "req_dict = process_queue.get(True, 0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options'])", "\"\"\"Endpoint to get the help for the running service.\"\"\" rc, out = run_tesseract_recognize('--help')", "cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out if __name__ ==", "not None: fxml = 
os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if", "failed :: opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml))", "and the PageXML 'object'. \"\"\" if type(value) != FileStorage: raise ValueError('Expected pagexml to", "def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page xml from a request to", "prance>=0.15.0 \"\"\" import os import re import sys import json import shutil import", "\"\"\"Endpoint to get the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ =", "req_dict (dict): Parsed Page XML request. prefix (str): Prefix for temporal directory name.", "default=False, help='Whether to run in debugging mode.') return parser def TypePageXML(value): \"\"\"Parse Page", "werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest from prance.util import url from prance.convert", "parser.add_argument('--port', type=int, default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run", "\"\"\" if type(value) != FileStorage: raise ValueError('Expected pagexml to be of type FileStorage.')", "== 1 and opts[0][0] == '[': opts = json.loads(opts[0]) if req_dict['pagexml'] is not", "the command line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile,", "PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask, Response,", "= run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+'", "in req_dict['images']: 
opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir,", "in the Page XML if given.', pagexml_help='Optional valid Page XML file.', options_help='Optional configuration", "getting version from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource):", "Help for pagexml response in swagger documentation. \"\"\" self.api = api self.response_help =", "service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the help for the running service.\"\"\"", "parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API endpoints. Use \"%%s\" in string", "$Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements", "'string' representation and the PageXML 'object'. \"\"\" if type(value) != FileStorage: raise ValueError('Expected", "config field in swagger documentation. 
response_help (str): Help for pagexml response in swagger", "location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str,", "receive all images referenced in the Page XML ('+str(len(images_xml))+') but only got a", "close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out if __name__", "ex: done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug and tmpdir is not None:", "ex)) finally: if not cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir =", "parse_args that additionally does some Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml']", "self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml',", "'+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+' sec.')", "sec. :: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread", "images or page xml file.\"\"\" start_time = time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict))", "or page xml file.\"\"\" start_time = time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while", "directory where saved. 
\"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None:", "ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception as ex: done_queue.put((thread,", "is not None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request:", "proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out if __name__ == '__main__': ## Parse", "0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self, req_dict): \"\"\"Endpoint", "tesseract-recognize API server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present,", "proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out if __name__ == '__main__': ##", "for the tesseract-recognize API server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright", "\"\"\"Extension of parse_args that additionally does some Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs)", "to the temporal directory where saved. \"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml']", "parsing '+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug", "for config field in swagger documentation. 
response_help (str): Help for pagexml response in", "out = run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems getting version from tesseract-recognize", "list(req_dict['options']) if len(opts) == 1 and opts[0][0] == '[': opts = json.loads(opts[0]) if", "from subprocess import Popen, PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from", "and opts[0][0] == '[': opts = json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir,", "== '[': opts = json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif", "page xml and responding with a page xml.\"\"\" def __init__(self, api, images_help='Images with", "the Page XML if given.', pagexml_help='Optional valid Page XML file.', options_help='Optional configuration options", "= lambda *x: None # type: ignore ## Definition of endpoints ## @api.route('/openapi.json')", "None: shutil.rmtree(tmpdir) tmpdir = None for thread in range(cfg.threads): threading.Thread(target=start_processing, args=(thread+1, process_queue)).start() app.run(host=cfg.host,", "the version of the running service.\"\"\" rc, out = run_tesseract_recognize('--version') if rc !=", "req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not None and req_dict['images'] is not None:", "in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator class for endpoints receiving", "type=int, default=4, help='Maximum number of tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize',", "with optionally a page xml and responding with a page xml.\"\"\" def __init__(self,", "response in swagger documentation. 
\"\"\" self.api = api self.response_help = response_help parser =", "jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore ## Definition of endpoints", "abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, '", "a tesseract-recognize command using given arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd,", "the 'string' representation and the PageXML 'object'. \"\"\" if type(value) != FileStorage: raise", "temporal directory where saved. \"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not", "method expect a page xml and/or respond with a page xml.\"\"\" method =", "continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g' %", "configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize instances to run in", "xml from a request to a temporal directory. 
Args: req_dict (dict): Parsed Page", "request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc != 0: raise", "images_xml: raise BadRequest('Received image not referenced in the Page XML: '+fname) if len(images_xml)", "return cmd_rc, cmd_out if __name__ == '__main__': ## Parse config ## parser =", "pagexml_help='Optional valid Page XML file.', options_help='Optional configuration options to be used for processing.',", "out = run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems getting help from tesseract-recognize", "command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api)", "Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\"", "be of type FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename':", "Page XML file.', options_help='Optional configuration options to be used for processing.', response_help='Resulting Page", "convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the running service.') @api.produces(['text/plain'])", "@wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func, req_dict) return Response( pxml.toString(True),", "= proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out if __name__ == '__main__':", "Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger ## Create a Flask-RESTPlus API ##", "werkzeug.exceptions import BadRequest from prance.util import url from 
prance.convert import convert_url def get_cli_parser(logger=True):", "ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self, req_dict): \"\"\"Endpoint for running tesseract-recognize", "action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append',", "\"\"\" @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements", "fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in", "and/or respond with a page xml.\"\"\" method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method)", "xml 'filename', the 'string' representation and the PageXML 'object'. \"\"\" if type(value) !=", "else: raise KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out =", "0: abort(500, 'problems getting version from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain')", "response_help='Resulting Page XML after processing.'): \"\"\"Initializer for images_pagexml_request class. Args: api (flask_restplus.Api): The", "run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API endpoints. Use \"%%s\"", "help from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process')", "'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g'", "documentation. response_help (str): Help for pagexml response in swagger documentation. 
\"\"\" self.api =", "to a temporal directory. Args: req_dict (dict): Parsed Page XML request. prefix (str):", "Help for images field in swagger documentation. pagexml_help (str): Help for pagexml field", "num_requests = 0 tmpdir = None while True: try: done_queue, req_dict = process_queue.get(True,", "len(images_received): raise BadRequest('Expected to receive all images referenced in the Page XML ('+str(len(images_xml))+')", "process_queue = queue.Queue() # type: ignore ## Processor thread function ## def start_processing(thread,", "Exception as ex: done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug and tmpdir is", "' +('%.4g' % (time()-start_time))+' sec.') return pxml process_queue = queue.Queue() # type: ignore", "yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize instances to run", "ActionConfigFile, ActionYesNo from flask import Flask, Response, request, abort from flask_restplus import Api,", "!= len(images_received): raise BadRequest('Expected to receive all images referenced in the Page XML", "['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\")", "Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint to get the", "Args: req_dict (dict): Parsed Page XML request. 
prefix (str): Prefix for temporal directory", "the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in debugging mode.') return parser", "https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os import re import", "try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts", "is not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images", "!= 0: raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out)) pxml =", "a flask_restplus.Resource method expect a page xml and/or respond with a page xml.\"\"\"", "page xml.\"\"\" def __init__(self, api, images_help='Images with file names as referenced in the", "'+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the running service.')", "req_dict['images']] for fname in images_received: if fname not in images_xml: raise BadRequest('Received image", "time from functools import wraps from subprocess import Popen, PIPE, STDOUT from jsonargparse", "mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command using", "self.parser.parse_args() pxml = method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return", "XML request type. Args: value: The raw type value. 
Returns: dict[str, {str,PageXML}]: Dictionary", "tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore ## Definition of", "flask_restplus import Api, Resource, reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest", "\"\"\"Endpoint for running tesseract-recognize on given images or page xml file.\"\"\" start_time =", "def get(self): \"\"\"Endpoint to get the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content,", "process_queue.get(True, 0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts)", "XML if given.', pagexml_help='Optional valid Page XML file.', options_help='Optional configuration options to be", "after processing.'): \"\"\"Initializer for images_pagexml_request class. Args: api (flask_restplus.Api): The flask_restplus Api instance.", "a temporal directory. Args: req_dict (dict): Parsed Page XML request. prefix (str): Prefix", "+('%.4g' % (time()-start_time))+' sec.') return pxml process_queue = queue.Queue() # type: ignore ##", "server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in debugging mode.') return parser def", "import re import sys import json import shutil import queue import threading import", "Returns: The path to the temporal directory where saved. \"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix,", "'', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in req_dict['images']] for fname", "name. basedir (str): Base temporal directory. Returns: The path to the temporal directory", "fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for image in", "API endpoints. Use \"%%s\" in string to replace by the API version.') parser.add_argument('--host',", "request to a temporal directory. 
Args: req_dict (dict): Parsed Page XML request. prefix", "images with optionally a page xml and responding with a page xml.\"\"\" def", "tmpdir class images_pagexml_request: \"\"\"Decorator class for endpoints receiving images with optionally a page", "ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask, Response, request, abort from flask_restplus import", "self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml =", "'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize execution failed", "saved. \"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None: fxml =", "json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the running service.') @api.produces(['text/plain']) def get(self):", "'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command using given arguments.\"\"\"", "!= 0: abort(500, 'problems getting help from tesseract-recognize command :: '+str(out)) return Response(out,", "= tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None: fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir,", "API server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME>", "\"\"\"Class for parsing requests including a Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of", "not None and req_dict['images'] is not None: pxml = req_dict['pagexml']['object'] images_xml = set()", "for endpoints receiving images with optionally a page xml and responding with a", "location='files', type=TypePageXML, required=False, 
help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser =", "by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000,", "= api self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append',", "\"\"\"Runs a tesseract-recognize command using given arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc =", "os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator class for endpoints receiving images with optionally", "default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration file.') parser.add_argument('--threads', type=int, default=4,", "self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func, req_dict) return Response(", "@requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os import re import sys", "os import re import sys import json import shutil import queue import threading", "all API endpoints. 
Use \"%%s\" in string to replace by the API version.')", "running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the help for the running", "done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError:", "[os.path.basename(x.filename) for x in req_dict['images']] for fname in images_received: if fname not in", "type=str, required=False, default=[], action='append', help=options_help) self.parser = parser def __call__(self, method): \"\"\"Makes a", "pylint: disable=invalid-name app.logger = parser.logger ## Create a Flask-RESTPlus API ## api =", "num_requests, ex)) finally: if not cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir", "the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the help for the", "else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+' sec.') return", "BadRequest('Received image not referenced in the Page XML: '+fname) if len(images_xml) != len(images_received):", "pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in req_dict['images']] for fname in", "tool for the tesseract-recognize API server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>>", "a page xml.\"\"\" method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method)", "app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. 
:: '+str(pxml))", "None # type: ignore ## Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def", "= ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out =", "import sys import json import shutil import queue import threading import tempfile import", "temporal directory. Returns: The path to the temporal directory where saved. \"\"\" tmpdir", "page xml from a request to a temporal directory. Args: req_dict (dict): Parsed", "dir=basedir) if req_dict['pagexml'] is not None: fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w')", "method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func, req_dict)", ":: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+'", "except Exception as ex: done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug and tmpdir", "string for all API endpoints. Use \"%%s\" in string to replace by the", "(str): Prefix for temporal directory name. basedir (str): Base temporal directory. Returns: The", "request, abort from flask_restplus import Api, Resource, reqparse from werkzeug.datastructures import FileStorage from", "'__main__': ## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create", "super().parse_args(**kwargs) if req_dict['pagexml'] is not None and req_dict['images'] is not None: pxml =", "running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore ## Definition", "for images_pagexml_request class. Args: api (flask_restplus.Api): The flask_restplus Api instance. 
images_help (str): Help", "FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml,", "ValueError('Expected pagexml to be of type FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML()", "for pagexml response in swagger documentation. \"\"\" self.api = api self.response_help = response_help", "page xml 'filename', the 'string' representation and the PageXML 'object'. \"\"\" if type(value)", "Response, request, abort from flask_restplus import Api, Resource, reqparse from werkzeug.datastructures import FileStorage", "TypePageXML(value): \"\"\"Parse Page XML request type. Args: value: The raw type value. Returns:", "fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is", "queue.Queue() # type: ignore ## Processor thread function ## def start_processing(thread, process_queue): num_requests", "break except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful,", "in swagger documentation. \"\"\" self.api = api self.response_help = response_help parser = ParserPageXML(bundle_errors=True)", "to receive all images referenced in the Page XML ('+str(len(images_xml))+') but only got", "num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+'", "pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread,", "STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask, Response, request,", "type. Args: value: The raw type value. 
Returns: dict[str, {str,PageXML}]: Dictionary including the", "xml and responding with a page xml.\"\"\" def __init__(self, api, images_help='Images with file", "cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out", "parser def TypePageXML(value): \"\"\"Parse Page XML request type. Args: value: The raw type", "import os import re import sys import json import shutil import queue import", "import pagexml pagexml.set_omnius_schema() from time import time from functools import wraps from subprocess", "basedir='/tmp'): \"\"\"Writes images and page xml from a request to a temporal directory.", "Page XML if given.', pagexml_help='Optional valid Page XML file.', options_help='Optional configuration options to", "temporal directory name. basedir (str): Base temporal directory. Returns: The path to the", "not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found", "command line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path", "cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate()", "from time import time from functools import wraps from subprocess import Popen, PIPE,", "@api.response(200, description='Help for the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the", "'+str(out)) return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize", "app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g' % 
(time()-start_time))+' sec.') return pxml", "some Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not None and", "pagexml_help (str): Help for pagexml field in swagger documentation. options_help (str): Help for", "= parser.parse_args(env=True) ## Create a Flask WSGI application ## app = Flask(__name__) #", "given images or page xml file.\"\"\" start_time = time() done_queue = queue.Queue() process_queue.put((done_queue,", "path to the temporal directory where saved. \"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if", "import tempfile import pagexml pagexml.set_omnius_schema() from time import time from functools import wraps", "pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser):", "Returns: dict[str, {str,PageXML}]: Dictionary including the page xml 'filename', the 'string' representation and", "Page XML request. prefix (str): Prefix for temporal directory name. basedir (str): Base", "def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml',", "Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API", "in req_dict['images']] for fname in images_received: if fname not in images_xml: raise BadRequest('Received", "return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page xml from a", "% (time()-start_time))+' sec.') return pxml process_queue = queue.Queue() # type: ignore ## Processor", "arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)", "value: The raw type value. 
Returns: dict[str, {str,PageXML}]: Dictionary including the page xml", "image not referenced in the Page XML: '+fname) if len(images_xml) != len(images_received): raise", "re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in req_dict['images']] for", "return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the running service.') @api.produces(['text/plain']) def", "cfg = parser.parse_args(env=True) ## Create a Flask WSGI application ## app = Flask(__name__)", "of the running service.\"\"\" rc, out = run_tesseract_recognize('--version') if rc != 0: abort(500,", "python3 \"\"\"Command line tool for the tesseract-recognize API server.\"\"\" \"\"\" @version $Version: 2020.01.13$", "= pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class", "write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) == 1 and opts[0][0] == '[': opts", "Dictionary including the page xml 'filename', the 'string' representation and the PageXML 'object'.", "on thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing", "service.\"\"\" rc, out = run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems getting version", "for parsing requests including a Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of parse_args", "help for the running service.\"\"\" rc, out = run_tesseract_recognize('--help') if rc != 0:", "endpoints. 
Use \"%%s\" in string to replace by the API version.') parser.add_argument('--host', default='127.0.0.1',", "value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including a", "app = Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger ## Create a Flask-RESTPlus", "tesseract-recognize on given images or page xml file.\"\"\" start_time = time() done_queue =", "done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests,", "flask_restplus Api instance. images_help (str): Help for images field in swagger documentation. pagexml_help", "cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of", "time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests, pxml =", "\"\"\"Initializer for images_pagexml_request class. Args: api (flask_restplus.Api): The flask_restplus Api instance. images_help (str):", "response_help (str): Help for pagexml response in swagger documentation. 
\"\"\" self.api = api", "value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string': spxml} class", "opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc,", "from werkzeug.exceptions import BadRequest from prance.util import url from prance.convert import convert_url def", "= [os.path.basename(x.filename) for x in req_dict['images']] for fname in images_received: if fname not", "## Create a Flask WSGI application ## app = Flask(__name__) # pylint: disable=invalid-name", "f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename)))", "def __init__(self, api, images_help='Images with file names as referenced in the Page XML", "= run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems getting help from tesseract-recognize command", "queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g'", "!= FileStorage: raise ValueError('Expected pagexml to be of type FileStorage.') spxml = value.read().decode('utf-8')", "Page XML after processing.'): \"\"\"Initializer for images_pagexml_request class. 
Args: api (flask_restplus.Api): The flask_restplus", "self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict =", "class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including a Page XML.\"\"\" def parse_args(self, **kwargs):", "API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for", "doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner =", "shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return", "<NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1", "XML: '+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected to receive all images referenced", "basedir (str): Base temporal directory. Returns: The path to the temporal directory where", "running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the version of the running", "parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser = parser def __call__(self, method):", "is not None: pxml = req_dict['pagexml']['object'] images_xml = set() for page in pxml.select('//_:Page'):", "mode.') return parser def TypePageXML(value): \"\"\"Parse Page XML request type. 
Args: value: The", "method): \"\"\"Makes a flask_restplus.Resource method expect a page xml and/or respond with a", "ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration file.')", "## Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint to get", "images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in req_dict['images']] for fname in images_received: if", "get(self): \"\"\"Endpoint to get the help for the running service.\"\"\" rc, out =", "if rc != 0: raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out))", "images_xml = set() for page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename'))", "API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore", "+= 1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) == 1 and", "import BadRequest from prance.util import url from prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns", "parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml", "type(value) != FileStorage: raise ValueError('Expected pagexml to be of type FileStorage.') spxml =", "for pagexml field in swagger documentation. options_help (str): Help for config field in", "convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser object for the command line tool.\"\"\" parser", "import Popen, PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask import", "options_help (str): Help for config field in swagger documentation. 
response_help (str): Help for", "stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return cmd_rc,", "= url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200,", "#!/usr/bin/env python3 \"\"\"Command line tool for the tesseract-recognize API server.\"\"\" \"\"\" @version $Version:", "try: thread, num_requests, pxml = done_queue.get(True, 0.05) break except queue.Empty: continue if isinstance(pxml,", "instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API endpoints.", "tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) == 1 and opts[0][0] ==", "return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the running service.') @api.produces(['text/plain'])", "version from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200,", "spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string':", "to get the version of the running service.\"\"\" rc, out = run_tesseract_recognize('--version') if", "pxml = done_queue.get(True, 0.05) break except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+'", "(str): Base temporal directory. 
Returns: The path to the temporal directory where saved.", "= set() for page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname)", "get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a Flask WSGI application ## app =", "parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix',", "__call__(self, method): \"\"\"Makes a flask_restplus.Resource method expect a page xml and/or respond with", "Page XML request type. Args: value: The raw type value. Returns: dict[str, {str,PageXML}]:", "__name__ == '__main__': ## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True)", "application ## app = Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger ## Create", "action=ActionConfigFile, help='Path to a yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of", "if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+'", "jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os import re import sys import", "for all API endpoints. 
Use \"%%s\" in string to replace by the API", "type: ignore ## Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint", "abort from flask_restplus import Api, Resource, reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions", "to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False,", "to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API endpoints. Use", "+('%.4g' % (time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request", "ServiceVersion(Resource): @api.response(200, description='Version of the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get", "parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False,", "thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing failed", "= self.parser.parse_args() pxml = method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'})", "cmd_out if __name__ == '__main__': ## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg", "= ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration", "= run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems getting version from tesseract-recognize command", "unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. 
:: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml))", "a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page", "a Flask WSGI application ## app = Flask(__name__) # pylint: disable=invalid-name app.logger =", "RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests, ex)) finally:", "elif req_dict['images'] is not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise", "class images_pagexml_request: \"\"\"Decorator class for endpoints receiving images with optionally a page xml", "@api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint to get the OpenAPI json.\"\"\" absurl =", "= self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml", "default='/tesseract-recognize', help='Prefix string for all API endpoints. Use \"%%s\" in string to replace", "default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in debugging", "temporal directory. Args: req_dict (dict): Parsed Page XML request. prefix (str): Prefix for", "## Create a Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize", "the page xml 'filename', the 'string' representation and the PageXML 'object'. 
\"\"\" if", "execution failed.'}) def post(self, req_dict): \"\"\"Endpoint for running tesseract-recognize on given images or", "options to be used for processing.', response_help='Resulting Page XML after processing.'): \"\"\"Initializer for", "Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode", "API', description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None #", "2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import", "parsing requests including a Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of parse_args that", "Page XML: '+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected to receive all images", "the parser object for the command line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger,", "page xml.\"\"\" method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method)", "json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return json.loads(content) @api.route('/version') class", "pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing", "cmd_rc = proc.returncode return cmd_rc, cmd_out if __name__ == '__main__': ## Parse config", "the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the version of the", "' +('%.4g' % 
(time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else:", "where saved. \"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None: fxml", "def parse_args(self, **kwargs): \"\"\"Extension of parse_args that additionally does some Page XML checks.\"\"\"", "tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a", "help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help)", "API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for", "os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize execution", "json import shutil import queue import threading import tempfile import pagexml pagexml.set_omnius_schema() from", "sys import json import shutil import queue import threading import tempfile import pagexml", "write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page xml from a request to a", "for images field in swagger documentation. 
pagexml_help (str): Help for pagexml field in", "description='Help for the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the help", "<<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os import", "abort(500, 'problems getting help from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests", "app.logger = parser.logger ## Create a Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger',", "\"\"\"Decorator class for endpoints receiving images with optionally a page xml and responding", "for the running service.\"\"\" rc, out = run_tesseract_recognize('--help') if rc != 0: abort(500,", "and page xml from a request to a temporal directory. Args: req_dict (dict):", "configuration options to be used for processing.', response_help='Resulting Page XML after processing.'): \"\"\"Initializer", "(flask_restplus.Api): The flask_restplus Api instance. 
images_help (str): Help for images field in swagger", "opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename)))", "help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser = parser def __call__(self,", "images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc", "got a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and", "= Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger ## Create a Flask-RESTPlus API", "\"\"\"Writes images and page xml from a request to a temporal directory. Args:", "= parser.logger ## Create a Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0',", "get the help for the running service.\"\"\" rc, out = run_tesseract_recognize('--help') if rc", "not None: shutil.rmtree(tmpdir) tmpdir = None for thread in range(cfg.threads): threading.Thread(target=start_processing, args=(thread+1, process_queue)).start()", "location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser = parser def __call__(self, method): \"\"\"Makes", "for temporal directory name. basedir (str): Base temporal directory. Returns: The path to", "field in swagger documentation. response_help (str): Help for pagexml response in swagger documentation.", "(str): Help for config field in swagger documentation. 
response_help (str): Help for pagexml", "get the version of the running service.\"\"\" rc, out = run_tesseract_recognize('--version') if rc", ":: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except", "for page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received =", "parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in debugging mode.') return parser def TypePageXML(value):", "return parser def TypePageXML(value): \"\"\"Parse Page XML request type. Args: value: The raw", "charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command using given arguments.\"\"\" cmd", "tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API", "of the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the version of", "images and page xml from a request to a temporal directory. Args: req_dict", "swagger documentation. 
\"\"\" self.api = api self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images',", "receiving images with optionally a page xml and responding with a page xml.\"\"\"", "response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML,", "parser object for the command line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True,", "images_received = [os.path.basename(x.filename) for x in req_dict['images']] for fname in images_received: if fname", "not None: pxml = req_dict['pagexml']['object'] images_xml = set() for page in pxml.select('//_:Page'): fname", "Create a Flask WSGI application ## app = Flask(__name__) # pylint: disable=invalid-name app.logger", "url from prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser object for the", "image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator class for endpoints receiving images with", "mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def", "api self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help)", "pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including a Page XML.\"\"\"", "## Processor thread function ## def start_processing(thread, process_queue): num_requests = 0 tmpdir =", "run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command using given arguments.\"\"\" cmd = 
['tesseract-recognize'] cmd.extend(list(args)) proc", "in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc != 0:", "type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser = parser", "{str,PageXML}]: Dictionary including the page xml 'filename', the 'string' representation and the PageXML", "\"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None: fxml = os.path.basename(req_dict['pagexml']['filename'])", "Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of parse_args that additionally does some Page", "image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found in request.') opts.extend(['-o',", "raise KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts)", "'+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue", "type value. Returns: dict[str, {str,PageXML}]: Dictionary including the page xml 'filename', the 'string'", "'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in req_dict['images']] for fname in images_received:", "ServiceHelp(Resource): @api.response(200, description='Help for the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get", "parse_args(self, **kwargs): \"\"\"Extension of parse_args that additionally does some Page XML checks.\"\"\" req_dict", "request. prefix (str): Prefix for temporal directory name. basedir (str): Base temporal directory.", "documentation. 
\"\"\" self.api = api self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files',", "!= 0: abort(500, 'problems getting version from tesseract-recognize command :: '+str(out)) return Response(out,", "dict[str, {str,PageXML}]: Dictionary including the page xml 'filename', the 'string' representation and the", "in swagger documentation. pagexml_help (str): Help for pagexml field in swagger documentation. options_help", "tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource):", "file.', options_help='Optional configuration options to be used for processing.', response_help='Resulting Page XML after", "xml.\"\"\" method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def", "referenced in the Page XML if given.', pagexml_help='Optional valid Page XML file.', options_help='Optional", "opts = list(req_dict['options']) if len(opts) == 1 and opts[0][0] == '[': opts =", "= queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests, pxml = done_queue.get(True, 0.05)", "@api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint", "'+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. :: '+str(pxml)) abort(400,", "version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for the", "Args: api (flask_restplus.Api): The flask_restplus Api instance. 
images_help (str): Help for images field", "is not None: shutil.rmtree(tmpdir) tmpdir = None for thread in range(cfg.threads): threading.Thread(target=start_processing, args=(thread+1,", "to be of type FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return", "Flask WSGI application ## app = Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger", "def TypePageXML(value): \"\"\"Parse Page XML request type. Args: value: The raw type value.", "req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs", "images_pagexml_request: \"\"\"Decorator class for endpoints receiving images with optionally a page xml and", "for the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the help for", "checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not None and req_dict['images'] is not", "= 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self, req_dict):", "'+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+' sec.') return pxml process_queue", "num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) == 1", "parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form',", "running service.\"\"\" rc, out = run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems getting", "directory. Returns: The path to the temporal directory where saved. 
\"\"\" tmpdir =", "file.\"\"\" start_time = time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread,", "as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception as ex:", "Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a Flask", "run in debugging mode.') return parser def TypePageXML(value): \"\"\"Parse Page XML request type.", "out = run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize execution failed :: opts:", "0.05) break except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+'", "% (time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+'", "line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to", "len(images_xml) != len(images_received): raise BadRequest('Expected to receive all images referenced in the Page", "images field in swagger documentation. pagexml_help (str): Help for pagexml field in swagger", "cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir = None for thread in", "def get(self): \"\"\"Endpoint to get the version of the running service.\"\"\" rc, out", "including the page xml 'filename', the 'string' representation and the PageXML 'object'. 
\"\"\"", "not cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir = None for thread", "while True: try: thread, num_requests, pxml = done_queue.get(True, 0.05) break except queue.Empty: continue", "start_processing(thread, process_queue): num_requests = 0 tmpdir = None while True: try: done_queue, req_dict", "to a yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize instances", "responding with a page xml.\"\"\" def __init__(self, api, images_help='Images with file names as", "images_help (str): Help for images field in swagger documentation. pagexml_help (str): Help for", "mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the running service.') @api.produces(['text/plain']) def get(self):", "respond with a page xml.\"\"\" method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method", "1 and opts[0][0] == '[': opts = json.loads(opts[0]) if req_dict['pagexml'] is not None:", "from werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest from prance.util import url from", "done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests, pxml = done_queue.get(True,", "function ## def start_processing(thread, process_queue): num_requests = 0 tmpdir = None while True:", "return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command using given arguments.\"\"\" cmd =", "be used for processing.', response_help='Resulting Page XML after processing.'): \"\"\"Initializer for images_pagexml_request class.", "class. Args: api (flask_restplus.Api): The flask_restplus Api instance. 
images_help (str): Help for images", "images_help='Images with file names as referenced in the Page XML if given.', pagexml_help='Optional", "# type: ignore ## Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self):", "rc, out = run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems getting help from", "True: try: thread, num_requests, pxml = done_queue.get(True, 0.05) break except queue.Empty: continue if", "= list(req_dict['options']) if len(opts) == 1 and opts[0][0] == '[': opts = json.loads(opts[0])", "'object'. \"\"\" if type(value) != FileStorage: raise ValueError('Expected pagexml to be of type", "def get_cli_parser(logger=True): \"\"\"Returns the parser object for the command line tool.\"\"\" parser =", "None and req_dict['images'] is not None: pxml = req_dict['pagexml']['object'] images_xml = set() for", "method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args):", "ActionYesNo from flask import Flask, Response, request, abort from flask_restplus import Api, Resource,", "f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return", "content, _ = convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the", "import queue import threading import tempfile import pagexml pagexml.set_omnius_schema() from time import time", "if not cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir = None for", "BadRequest from prance.util import url from prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns the", "required=False, default=[], action='append', help=options_help) self.parser = parser def __call__(self, 
method): \"\"\"Makes a flask_restplus.Resource", "listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether", "version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda", "except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, '", "get(self): \"\"\"Endpoint to get the version of the running service.\"\"\" rc, out =", "required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[],", "rc != 0: abort(500, 'problems getting version from tesseract-recognize command :: '+str(out)) return", "functools import wraps from subprocess import Popen, PIPE, STDOUT from jsonargparse import ArgumentParser,", "api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running tesseract-recognition", "(str): Help for pagexml field in swagger documentation. options_help (str): Help for config", "Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize", "parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API endpoints. 
Use \"%%s\" in string to", "required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser = parser def", "os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else:", "= value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string': spxml}", "return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a", "start_time = time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests,", "if len(opts) == 1 and opts[0][0] == '[': opts = json.loads(opts[0]) if req_dict['pagexml']", "thread '+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+' sec.') return pxml process_queue = queue.Queue()", "prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x:", "KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if", "in the Page XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')') return req_dict", "= process_queue.get(True, 0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if", "does some Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not None", "and responding with a page xml.\"\"\" def __init__(self, api, images_help='Images with file names", "parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, 
help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser", "raise BadRequest('Received image not referenced in the Page XML: '+fname) if len(images_xml) !=", "req_dict['images'] is not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No", "a page xml.\"\"\" def __init__(self, api, images_help='Images with file names as referenced in", "the running service.\"\"\" rc, out = run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems", "from a request to a temporal directory. Args: req_dict (dict): Parsed Page XML", "service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the version of the running service.\"\"\"", "jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask, Response, request, abort from", "= response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files',", "using given arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE,", "Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not None and req_dict['images']", "in images_xml: raise BadRequest('Received image not referenced in the Page XML: '+fname) if", "from functools import wraps from subprocess import Popen, PIPE, STDOUT from jsonargparse import", "tmpdir is not None: shutil.rmtree(tmpdir) tmpdir = None for thread in range(cfg.threads): threading.Thread(target=start_processing,", "representation and the PageXML 'object'. \"\"\" if type(value) != FileStorage: raise ValueError('Expected pagexml", "process_queue): num_requests = 0 tmpdir = None while True: try: done_queue, req_dict =", "value. 
Returns: dict[str, {str,PageXML}]: Dictionary including the page xml 'filename', the 'string' representation", "0 tmpdir = None while True: try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests", "description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None # type:", "req_dict): \"\"\"Endpoint for running tesseract-recognize on given images or page xml file.\"\"\" start_time", "0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) ==", "string to replace by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.')", "for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found in request.')", "if type(value) != FileStorage: raise ValueError('Expected pagexml to be of type FileStorage.') spxml", "= self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict", "instance. images_help (str): Help for images field in swagger documentation. pagexml_help (str): Help", "raw type value. 
Returns: dict[str, {str,PageXML}]: Dictionary including the page xml 'filename', the", "parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number", "import wraps from subprocess import Popen, PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile,", "post(self, req_dict): \"\"\"Endpoint for running tesseract-recognize on given images or page xml file.\"\"\"", "all images referenced in the Page XML ('+str(len(images_xml))+') but only got a subset", "self.parser = parser def __call__(self, method): \"\"\"Makes a flask_restplus.Resource method expect a page", "## app = Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger ## Create a", "(time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on", "service.\"\"\" rc, out = run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems getting help", "directory name. basedir (str): Base temporal directory. 
Returns: The path to the temporal", "## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a", "type: ignore ## Processor thread function ## def start_processing(thread, process_queue): num_requests = 0", "x in req_dict['images']] for fname in images_received: if fname not in images_xml: raise", "'+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+' sec.') return pxml process_queue = queue.Queue() #", "error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration file.') parser.add_argument('--threads',", "= convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the running service.')", "\"\"\"Endpoint to get the version of the running service.\"\"\" rc, out = run_tesseract_recognize('--version')", "OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return json.loads(content) @api.route('/version')", "prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page xml from a request to a temporal", "given.', pagexml_help='Optional valid Page XML file.', options_help='Optional configuration options to be used for", "the tesseract-recognize API server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c)", "for x in req_dict['images']] for fname in images_received: if fname not in images_xml:", "a Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of parse_args that additionally does some", "with a page xml.\"\"\" def __init__(self, api, images_help='Images with file names as referenced", "req_dict = self.parser.parse_args() pxml = method(func, req_dict) return Response( pxml.toString(True), 
mimetype='application/xml', headers={'Content-type': 'application/xml;", "time import time from functools import wraps from subprocess import Popen, PIPE, STDOUT", "Args: value: The raw type value. Returns: dict[str, {str,PageXML}]: Dictionary including the page", "req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page xml from a request", "\"\"\"Parse Page XML request type. Args: value: The raw type value. Returns: dict[str,", "@api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self, req_dict): \"\"\"Endpoint for running tesseract-recognize on given", "@api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the version of the running service.\"\"\" rc,", "swagger documentation. pagexml_help (str): Help for pagexml field in swagger documentation. options_help (str):", "= pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as ex:", "pxml = req_dict['pagexml']['object'] images_xml = set() for page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$',", "action=ActionYesNo, default=False, help='Whether to run in debugging mode.') return parser def TypePageXML(value): \"\"\"Parse", "open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for image", "as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for image in req_dict['images']: image.save(os.path.join(tmpdir,", "a page xml and responding with a page xml.\"\"\" def __init__(self, api, images_help='Images", "tempfile import pagexml pagexml.set_omnius_schema() from time import time from functools import wraps from", "if __name__ == '__main__': ## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg =", "command :: '+str(out)) return Response(out, mimetype='text/plain') 
@api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the", "tmpdir = None while True: try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests +=", "while True: try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests += 1 tmpdir =", "run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems getting help from tesseract-recognize command ::", "with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for", "req_dict['images'] is not None: pxml = req_dict['pagexml']['object'] images_xml = set() for page in", "if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None: for", "api (flask_restplus.Api): The flask_restplus Api instance. images_help (str): Help for images field in", "failed.'}) def post(self, req_dict): \"\"\"Endpoint for running tesseract-recognize on given images or page", "'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including a Page XML.\"\"\" def", "The path to the temporal directory where saved. \"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir)", "@author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements", "ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including a Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension", "Base temporal directory. Returns: The path to the temporal directory where saved. 
\"\"\"", "of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint to get the OpenAPI", "isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec.", "pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as", "FileStorage: raise ValueError('Expected pagexml to be of type FileStorage.') spxml = value.read().decode('utf-8') pxml", "number of tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for", "0: abort(500, 'problems getting help from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain')", "Help for config field in swagger documentation. response_help (str): Help for pagexml response", "opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize", "XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not None and req_dict['images'] is", "import threading import tempfile import pagexml pagexml.set_omnius_schema() from time import time from functools", "rc, out = run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize execution failed ::", "subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page xml", "logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration file.') parser.add_argument('--threads', type=int,", "pagexml.set_omnius_schema() from time import time from functools import wraps from subprocess import Popen,", "proc = Popen(cmd, shell=False, 
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc", "(time()-start_time))+' sec.') return pxml process_queue = queue.Queue() # type: ignore ## Processor thread", "is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None: for image in", "None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir,", "('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes images and page xml from", "class ServiceHelp(Resource): @api.response(200, description='Help for the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to", "with a page xml.\"\"\" method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method =", "## def start_processing(thread, process_queue): num_requests = 0 tmpdir = None while True: try:", "field in swagger documentation. 
pagexml_help (str): Help for pagexml field in swagger documentation.", "help='Path to a yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize", "xml.\"\"\" def __init__(self, api, images_help='Images with file names as referenced in the Page", "to be used for processing.', response_help='Resulting Page XML after processing.'): \"\"\"Initializer for images_pagexml_request", "= self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func, req_dict) return", "version of the running service.\"\"\" rc, out = run_tesseract_recognize('--version') if rc != 0:", "os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None:", "Flask, Response, request, abort from flask_restplus import Api, Resource, reqparse from werkzeug.datastructures import", "**kwargs): \"\"\"Extension of parse_args that additionally does some Page XML checks.\"\"\" req_dict =", "type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False,", "on thread '+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+' sec.') return pxml process_queue =", "the PageXML 'object'. 
\"\"\" if type(value) != FileStorage: raise ValueError('Expected pagexml to be", "2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0", "Create a Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API',", "server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>>", "= get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a Flask WSGI application ## app", "Parsed Page XML request. prefix (str): Prefix for temporal directory name. basedir (str):", "'+str(ex)+' while parsing '+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests, ex)) finally: if", "get(self): \"\"\"Endpoint to get the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _", "req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator class for endpoints receiving images", "get_cli_parser(logger=True): \"\"\"Returns the parser object for the command line tool.\"\"\" parser = ArgumentParser(", "given arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT,", "'tesseract-recognize execution failed.'}) def post(self, req_dict): \"\"\"Endpoint for running tesseract-recognize on given images", "Page XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict,", "req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is 
not None: for image", "req_dict['pagexml']['object'] images_xml = set() for page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page,", "queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests, pxml = done_queue.get(True, 0.05) break", "= re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in req_dict['images']]", "not referenced in the Page XML: '+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected", "WSGI application ## app = Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger ##", "the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port", "for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore ##", "of type FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename,", "Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the running service.') @api.produces(['text/plain']) def", "requests including a Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of parse_args that additionally", "opts[0][0] == '[': opts = json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename'])))", "sec.') return pxml process_queue = queue.Queue() # type: ignore ## Processor thread function", "The raw type value. Returns: dict[str, {str,PageXML}]: Dictionary including the page xml 'filename',", "processing.', response_help='Resulting Page XML after processing.'): \"\"\"Initializer for images_pagexml_request class. 
Args: api (flask_restplus.Api):", "(dict): Parsed Page XML request. prefix (str): Prefix for temporal directory name. basedir", "= done_queue.get(True, 0.05) break except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on", "return tmpdir class images_pagexml_request: \"\"\"Decorator class for endpoints receiving images with optionally a", "return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution", "not None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator", "prefix (str): Prefix for temporal directory name. basedir (str): Base temporal directory. Returns:", "queue import threading import tempfile import pagexml pagexml.set_omnius_schema() from time import time from", "for processing.', response_help='Resulting Page XML after processing.'): \"\"\"Initializer for images_pagexml_request class. Args: api", "== '__main__': ## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ##", "(str): Help for pagexml response in swagger documentation. 
\"\"\" self.api = api self.response_help", "method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func):", "run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems getting version from tesseract-recognize command ::", "@requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os import re import sys import json", "\"\"\"Command line tool for the tesseract-recognize API server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author", "tesseract-recognize command using given arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False,", "from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help", "BadRequest('Expected to receive all images referenced in the Page XML ('+str(len(images_xml))+') but only", "not in images_xml: raise BadRequest('Received image not referenced in the Page XML: '+fname)", "action='append', help=options_help) self.parser = parser def __call__(self, method): \"\"\"Makes a flask_restplus.Resource method expect", "and req_dict['images'] is not None: pxml = req_dict['pagexml']['object'] images_xml = set() for page", "names as referenced in the Page XML if given.', pagexml_help='Optional valid Page XML", "api, images_help='Images with file names as referenced in the Page XML if given.',", "Resource, reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest from prance.util import", "is not None and req_dict['images'] is not None: pxml = req_dict['pagexml']['object'] images_xml =", "True: try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict)", "def run_tesseract_recognize(*args): \"\"\"Runs a 
tesseract-recognize command using given arguments.\"\"\" cmd = ['tesseract-recognize'] cmd.extend(list(args))", "req_dict['pagexml'] is not None and req_dict['images'] is not None: pxml = req_dict['pagexml']['object'] images_xml", "swagger documentation. response_help (str): Help for pagexml response in swagger documentation. \"\"\" self.api", "pagexml field in swagger documentation. options_help (str): Help for config field in swagger", "swagger documentation. options_help (str): Help for config field in swagger documentation. response_help (str):", "for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator class for", "self.api = api self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True,", "images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type':", "opts = json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is", "= json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not", "on.') parser.add_argument('--port', type=int, default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to", "if rc != 0: abort(500, 'problems getting help from tesseract-recognize command :: '+str(out))", "_ = convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the running", "run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize execution failed :: 
opts: '+str(opts)+' ::", "done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir)", "# pylint: disable=invalid-name app.logger = parser.logger ## Create a Flask-RESTPlus API ## api", "from prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser object for the command", "file names as referenced in the Page XML if given.', pagexml_help='Optional valid Page", "of parse_args that additionally does some Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if", "if len(images_xml) != len(images_received): raise BadRequest('Expected to receive all images referenced in the", "help='Whether to run in debugging mode.') return parser def TypePageXML(value): \"\"\"Parse Page XML", "for the command line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg',", "Prefix for temporal directory name. basedir (str): Base temporal directory. Returns: The path", "of tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all", "type=int, default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in", "done_queue.get(True, 0.05) break except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread", "'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for image in req_dict['images']:", "= None while True: try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests += 1", "'problems getting version from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class", "the temporal directory where saved. 
\"\"\" tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is", "raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml'))", "opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty:", "shutil.rmtree(tmpdir) tmpdir = None for thread in range(cfg.threads): threading.Thread(target=start_processing, args=(thread+1, process_queue)).start() app.run(host=cfg.host, port=cfg.port,", "in images_received: if fname not in images_xml: raise BadRequest('Received image not referenced in", "pxml = method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper", "subprocess import Popen, PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask", "to get the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl)", "process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests, pxml = done_queue.get(True, 0.05) break except", "xml and/or respond with a page xml.\"\"\" method = self.api.expect(self.parser)(method) method = self.api.response(200,", "import FileStorage from werkzeug.exceptions import BadRequest from prance.util import url from prance.convert import", "pagexml to be of type FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml)", "flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os import re import sys import json import", "a page xml and/or respond with a page xml.\"\"\" method = self.api.expect(self.parser)(method) method", "PageXML 'object'. 
\"\"\" if type(value) != FileStorage: raise ValueError('Expected pagexml to be of", "\"\"\" self.api = api self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage,", "def __call__(self, method): \"\"\"Makes a flask_restplus.Resource method expect a page xml and/or respond", "= req_dict['pagexml']['object'] images_xml = set() for page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '',", "stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode(\"utf-8\") proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out if", "import ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask, Response, request, abort from flask_restplus", "The flask_restplus Api instance. images_help (str): Help for images field in swagger documentation.", "running tesseract-recognize on given images or page xml file.\"\"\" start_time = time() done_queue", "images_pagexml_request class. Args: api (flask_restplus.Api): The flask_restplus Api instance. 
images_help (str): Help for", ":: '+str(out)) return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400:", "None: pxml = req_dict['pagexml']['object'] images_xml = set() for page in pxml.select('//_:Page'): fname =", "pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command", "@api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the help for the running service.\"\"\" rc,", "threading import tempfile import pagexml pagexml.set_omnius_schema() from time import time from functools import", "tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None: fxml = os.path.basename(req_dict['pagexml']['filename']) with", "@requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os import re", "ignore ## Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): \"\"\"Endpoint to", "prance.util import url from prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser object", "image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator class for endpoints", "images_received: if fname not in images_xml: raise BadRequest('Received image not referenced in the", "page xml file.\"\"\" start_time = time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True:", "class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def 
post(self, req_dict): \"\"\"Endpoint for running", "if req_dict['pagexml'] is not None: fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as", "0: raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir,", "running service.\"\"\" rc, out = run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems getting", "help='Prefix string for all API endpoints. Use \"%%s\" in string to replace by", ":: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the running", "successful, ' +('%.4g' % (time()-start_time))+' sec.') return pxml process_queue = queue.Queue() # type:", "while parsing '+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests, ex)) finally: if not", "config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a Flask WSGI", "rc != 0: abort(500, 'problems getting help from tesseract-recognize command :: '+str(out)) return", "proc.returncode return cmd_rc, cmd_out if __name__ == '__main__': ## Parse config ## parser", "disable=invalid-name app.logger = parser.logger ## Create a Flask-RESTPlus API ## api = Api(app,", "additionally does some Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not", "images referenced in the Page XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')')", "directory. Args: req_dict (dict): Parsed Page XML request. 
prefix (str): Prefix for temporal", "json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None:", "import time from functools import wraps from subprocess import Popen, PIPE, STDOUT from", "absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource):", "except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception", "used for processing.', response_help='Resulting Page XML after processing.'): \"\"\"Initializer for images_pagexml_request class. Args:", "= proc.returncode return cmd_rc, cmd_out if __name__ == '__main__': ## Parse config ##", "default=[], action='append', help=options_help) self.parser = parser def __call__(self, method): \"\"\"Makes a flask_restplus.Resource method", "XML request. prefix (str): Prefix for temporal directory name. 
basedir (str): Base temporal", "XML file.', options_help='Optional configuration options to be used for processing.', response_help='Resulting Page XML", "xml file.\"\"\" start_time = time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True: try:", "description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func,", "done_queue, req_dict = process_queue.get(True, 0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts =", "not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None: for image in req_dict['images']:", "the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return json.loads(content)", "= Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running tesseract-recognition jobs.')", "\"\"\" import os import re import sys import json import shutil import queue", "class ServiceVersion(Resource): @api.response(200, description='Version of the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to", "'+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError", "{'filename': value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including", "import shutil import queue import threading import tempfile import pagexml pagexml.set_omnius_schema() from time", "type FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object':", "processing.'): 
\"\"\"Initializer for images_pagexml_request class. Args: api (flask_restplus.Api): The flask_restplus Api instance. images_help", "from flask import Flask, Response, request, abort from flask_restplus import Api, Resource, reqparse", "import Api, Resource, reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest from", "page in pxml.select('//_:Page'): fname = re.sub(r'\\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename)", "if rc != 0: abort(500, 'problems getting version from tesseract-recognize command :: '+str(out))", "abort(500, 'problems getting version from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help')", "None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: \"\"\"Decorator class", "if req_dict['pagexml'] is not None and req_dict['images'] is not None: pxml = req_dict['pagexml']['object']", "RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread,", "num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self,", "fname not in images_xml: raise BadRequest('Received image not referenced in the Page XML:", "OpenAPI(Resource): def get(self): \"\"\"Endpoint to get the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json'))", "'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests including a Page", "XML after processing.'): \"\"\"Initializer for images_pagexml_request class. 
Args: api (flask_restplus.Api): The flask_restplus Api", "field in swagger documentation. options_help (str): Help for config field in swagger documentation.", "Api, Resource, reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest from prance.util", "cmd_rc, cmd_out if __name__ == '__main__': ## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__))", "'+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful,", "parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for the server.')", "XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of parse_args that additionally does some Page XML", "in swagger documentation. response_help (str): Help for pagexml response in swagger documentation. \"\"\"", "@copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0", "a yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize instances to", "except queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing", "Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'})", "Use \"%%s\" in string to replace by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname", "sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore ## Definition of endpoints 
##", "description='Version of the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the version", "len(opts) == 1 and opts[0][0] == '[': opts = json.loads(opts[0]) if req_dict['pagexml'] is", "class for endpoints receiving images with optionally a page xml and responding with", "description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum", "import convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser object for the command line tool.\"\"\"", "Processor thread function ## def start_processing(thread, process_queue): num_requests = 0 tmpdir = None", "to replace by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port',", "that additionally does some Page XML checks.\"\"\" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is", "tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None: fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml),", "('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'):", "the help for the running service.\"\"\" rc, out = run_tesseract_recognize('--help') if rc !=", "= queue.Queue() # type: ignore ## Processor thread function ## def start_processing(thread, process_queue):", "headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): \"\"\"Runs a tesseract-recognize command using given", "optionally a page xml and responding with a page xml.\"\"\" def __init__(self, api,", "@images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self, req_dict): \"\"\"Endpoint for 
running tesseract-recognize on", "Api instance. images_help (str): Help for images field in swagger documentation. pagexml_help (str):", "getting help from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests = 0", "<NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 \"\"\" import os", "= 0 tmpdir = None while True: try: done_queue, req_dict = process_queue.get(True, 0.05)", "in string to replace by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen", "if req_dict['images'] is not None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir", "line tool for the tesseract-recognize API server.\"\"\" \"\"\" @version $Version: 2020.01.13$ @author <NAME>", "@api.response(200, description='Version of the running service.') @api.produces(['text/plain']) def get(self): \"\"\"Endpoint to get the", "reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest from prance.util import url", "flask import Flask, Response, request, abort from flask_restplus import Api, Resource, reqparse from", "in the Page XML: '+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected to receive", "= parser def __call__(self, method): \"\"\"Makes a flask_restplus.Resource method expect a page xml", "'problems getting help from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests =", "for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in debugging mode.') return", "@requirements prance>=0.15.0 \"\"\" import os import re import sys import json import shutil", "pagexml response in swagger documentation. 
\"\"\" self.api = api self.response_help = response_help parser", "req_dict)) while True: try: thread, num_requests, pxml = done_queue.get(True, 0.05) break except queue.Empty:", "get the OpenAPI json.\"\"\" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return", "with file names as referenced in the Page XML if given.', pagexml_help='Optional valid", "default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for the server.') parser.add_argument('--debug',", "object for the command line tool.\"\"\" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__)", "'+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing failed ::", "including a Page XML.\"\"\" def parse_args(self, **kwargs): \"\"\"Extension of parse_args that additionally does", "'+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug and", "expect a page xml and/or respond with a page xml.\"\"\" method = self.api.expect(self.parser)(method)", "for running tesseract-recognize on given images or page xml file.\"\"\" start_time = time()", "1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) == 1 and opts[0][0]", "@version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl", "help=options_help) self.parser = parser def __call__(self, method): \"\"\"Makes a flask_restplus.Resource method expect a", "documentation. options_help (str): Help for config field in swagger documentation. 
response_help (str): Help", "lambda *x: None # type: ignore ## Definition of endpoints ## @api.route('/openapi.json') class", "from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class", "in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API endpoints. Use \"%%s\" in", "\"%%s\" in string to replace by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to", "'filename', the 'string' representation and the PageXML 'object'. \"\"\" if type(value) != FileStorage:", "return {'filename': value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): \"\"\"Class for parsing requests", "the Page XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')') return req_dict def", "documentation. pagexml_help (str): Help for pagexml field in swagger documentation. options_help (str): Help", "rc, out = run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems getting version from", "failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g' %", "title='tesseract-recognize API', description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None", "referenced in the Page XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')') return", "## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a Flask WSGI application", "from flask_restplus import Api, Resource, reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions import", "the Page XML: '+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected to receive all", "pxml process_queue = queue.Queue() # type: ignore ## Processor thread function ## 
def", "page xml and/or respond with a page xml.\"\"\" method = self.api.expect(self.parser)(method) method =", "thread function ## def start_processing(thread, process_queue): num_requests = 0 tmpdir = None while", "rc != 0: raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out)) pxml", "url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version", "to run in debugging mode.') return parser def TypePageXML(value): \"\"\"Parse Page XML request", "ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options',", "fname in images_received: if fname not in images_xml: raise BadRequest('Received image not referenced", "method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args()", "as referenced in the Page XML if given.', pagexml_help='Optional valid Page XML file.',", "req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')])", "'[': opts = json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images']", "referenced in the Page XML: '+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected to", "file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize instances to run in 
parallel.')", "but only got a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): \"\"\"Writes", "XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_',", "endpoints receiving images with optionally a page xml and responding with a page", "None while True: try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests += 1 tmpdir", "parser.logger ## Create a Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix,", "= method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def", "prance.convert import convert_url def get_cli_parser(logger=True): \"\"\"Returns the parser object for the command line" ]
[ "Dict, Any from models.location import Location class Entity: \"\"\"Class that represents an abstract", "@classmethod def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to instantiate an Entity from a", "k in entity_attributes if k != 'location' } if 'location' in entity_attributes: location", "from models.location import Location class Entity: \"\"\"Class that represents an abstract Entity with", "k: entity_dict[k] for k in entity_attributes if k != 'location' } if 'location'", "entity_attributes if k != 'location' } if 'location' in entity_attributes: location = Location(lat=entity_dict['lat'],", "<filename>models/entity.py from typing import Dict, Any from models.location import Location class Entity: \"\"\"Class", "attributes_dict = { k: entity_dict[k] for k in entity_attributes if k != 'location'", "k != 'location' } if 'location' in entity_attributes: location = Location(lat=entity_dict['lat'], lng=entity_dict['lng']) return", "= { k: entity_dict[k] for k in entity_attributes if k != 'location' }", "Dict[str, Any]): \"\"\"Method to instantiate an Entity from a Dict (JSON)\"\"\" entity_attributes =", "Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = { k: entity_dict[k] for k in", "entity_dict[k] for k in entity_attributes if k != 'location' } if 'location' in", "instantiate an Entity from a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = {", "class Entity: \"\"\"Class that represents an abstract Entity with standard methods\"\"\" @classmethod def", "in entity_attributes if k != 'location' } if 'location' in entity_attributes: location =", "standard methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to instantiate an Entity", "= cls.__dataclass_fields__.keys() attributes_dict = { k: entity_dict[k] for k in entity_attributes if k", "Entity from a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = { 
k: entity_dict[k]", "models.location import Location class Entity: \"\"\"Class that represents an abstract Entity with standard", "Location class Entity: \"\"\"Class that represents an abstract Entity with standard methods\"\"\" @classmethod", "import Location class Entity: \"\"\"Class that represents an abstract Entity with standard methods\"\"\"", "(JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = { k: entity_dict[k] for k in entity_attributes", "{ k: entity_dict[k] for k in entity_attributes if k != 'location' } if", "represents an abstract Entity with standard methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str, Any]):", "for k in entity_attributes if k != 'location' } if 'location' in entity_attributes:", "if k != 'location' } if 'location' in entity_attributes: location = Location(lat=entity_dict['lat'], lng=entity_dict['lng'])", "'location' in entity_attributes: location = Location(lat=entity_dict['lat'], lng=entity_dict['lng']) return cls(**{**attributes_dict, **{'location': location}}) return cls(**attributes_dict)", "from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to instantiate an Entity from a Dict (JSON)\"\"\"", "if 'location' in entity_attributes: location = Location(lat=entity_dict['lat'], lng=entity_dict['lng']) return cls(**{**attributes_dict, **{'location': location}}) return", "an Entity from a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = { k:", "entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = { k: entity_dict[k] for k in entity_attributes if", "an abstract Entity with standard methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method", "from typing import Dict, Any from models.location import Location class Entity: \"\"\"Class that", "that represents an abstract Entity with standard methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str,", "typing import Dict, Any from 
models.location import Location class Entity: \"\"\"Class that represents", "!= 'location' } if 'location' in entity_attributes: location = Location(lat=entity_dict['lat'], lng=entity_dict['lng']) return cls(**{**attributes_dict,", "import Dict, Any from models.location import Location class Entity: \"\"\"Class that represents an", "entity_dict: Dict[str, Any]): \"\"\"Method to instantiate an Entity from a Dict (JSON)\"\"\" entity_attributes", "'location' } if 'location' in entity_attributes: location = Location(lat=entity_dict['lat'], lng=entity_dict['lng']) return cls(**{**attributes_dict, **{'location':", "with standard methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to instantiate an", "\"\"\"Class that represents an abstract Entity with standard methods\"\"\" @classmethod def from_dict(cls, entity_dict:", "cls.__dataclass_fields__.keys() attributes_dict = { k: entity_dict[k] for k in entity_attributes if k !=", "Entity: \"\"\"Class that represents an abstract Entity with standard methods\"\"\" @classmethod def from_dict(cls,", "Any from models.location import Location class Entity: \"\"\"Class that represents an abstract Entity", "from a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = { k: entity_dict[k] for", "a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict = { k: entity_dict[k] for k", "abstract Entity with standard methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to", "methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to instantiate an Entity from", "\"\"\"Method to instantiate an Entity from a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict", "} if 'location' in entity_attributes: location = Location(lat=entity_dict['lat'], lng=entity_dict['lng']) return cls(**{**attributes_dict, **{'location': location}})", "Any]): 
\"\"\"Method to instantiate an Entity from a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys()", "def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to instantiate an Entity from a Dict", "to instantiate an Entity from a Dict (JSON)\"\"\" entity_attributes = cls.__dataclass_fields__.keys() attributes_dict =", "Entity with standard methods\"\"\" @classmethod def from_dict(cls, entity_dict: Dict[str, Any]): \"\"\"Method to instantiate" ]
[ "not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) answer=0 for i in name:", "range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not in", "name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2", "not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) for j in range(c): for", "in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) answer=0 for i in name: answer=max(answer,len(name[i]))", "in range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not in", "if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) for j in", "range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2]", "ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1] if ce1", "if ce1 not in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1] if ce1 not", "ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) answer=0 for i in", "in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) for j in range(c): for i", "line,name=infile[1:],{} for i in range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if", "name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) for j", "not in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2)", "not in name[ce1]:name[ce1].append(ce2) for j in range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if", "if ce2 not in name[ce1]:name[ce1].append(ce2) for j in range(c): for i in range(r-1):", "ce2 not in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in", "in range(c-1): 
ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not", "name[ce1]:name[ce1].append(ce2) for j in range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if", "r,c=map(int,infile[0].split()) line,name=infile[1:],{} for i in range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2:", "if ce2 not in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not", "j in range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not", "i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2", "if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) answer=0 for i", "for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if", "range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2]", "in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) for", "ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) for j in range(c):", "<filename>AIO/invasion/test.py<gh_stars>1-10 infile=open('invin.txt','r').readlines() r,c=map(int,infile[0].split()) line,name=infile[1:],{} for i in range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1]", "ce2 not in name[ce1]:name[ce1].append(ce2) for j in range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j]", "in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if", "ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1]", "for j in range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1", "in range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not in", "in range(r-1): 
ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not", "not in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1)", "infile=open('invin.txt','r').readlines() r,c=map(int,infile[0].split()) line,name=infile[1:],{} for i in range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if", "if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1] if", "for i in range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1", "range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not in", "for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if", "ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1]", "ce1 not in name:name[ce1]=[ce2] if ce2 not in name:name[ce2]=[ce1] if ce1 not in", "in name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) answer=0", "name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) answer=0 for i in name: answer=max(answer,len(name[i])) open('invout.txt','w').write(str(answer))", "i in range(r): for j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not", "j in range(c-1): ce1,ce2=line[i][j],line[i][j+1] if ce1!=ce2: if ce1 not in name:name[ce1]=[ce2] if ce2", "name:name[ce2]=[ce1] if ce1 not in name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) answer=0 for", "in name[ce1]:name[ce1].append(ce2) for j in range(c): for i in range(r-1): ce1,ce2=line[i][j],line[i+1][j] if ce1!=ce2:", "name[ce2]:name[ce2].append(ce1) if ce2 not in name[ce1]:name[ce1].append(ce2) for j in range(c): for i in" ]
[ "= paddle self.id = canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed", "if pos[0] <= 0: self.xspeed = 0 if pos[2] >= 500: self.xspeed =", "class Paddle: def __init__(self, canvas, color): self.canvas = canvas self.id = canvas.create_rectangle(0,0, 100,", "= self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed = 0 if pos[2] >= 500:", "draw(self): self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed =", "ball properties and functions class Ball: def __init__(self, canvas, color, size, paddle): self.canvas", "1 def hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and pos[0]", "don't let the ball hit the bottom! # KidsCanCode - Intro to Programming", "== True: self.yspeed = -3 self.xspeed = random.randrange(-3,3) self.score += 1 def hit_paddle(self,", "__init__(self, canvas, color): self.canvas = canvas self.id = canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id,", "<= 0: self.xspeed = 0 if pos[2] >= 500: self.xspeed = 0 def", "canvas to draw on tk = Tk() tk.title(\"Ball Game\") canvas = Canvas(tk, width=500,", "200, 300) self.xspeed = 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>',", "Canvas(tk, width=500, height=400, bd=0, bg='papaya whip') canvas.pack() label = canvas.create_text(5, 5, anchor=NW, text=\"Score:", "to start') # Animation loop while ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score:", "pos[3] <= paddle_pos[3]: return True return False # Define paddle properties and functions", "= False self.score = 0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id)", "0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) 
self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>',", "self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0)", "speed_down(self, evt): self.speed_factor /= 1.2 def main(): # Create window and canvas to", "import random import time # Define ball properties and functions class Ball: def", "time # Define ball properties and functions class Ball: def __init__(self, canvas, color,", "= 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up)", "self.speed_factor /= 1.2 def main(): # Create window and canvas to draw on", "Paddle(canvas, 'blue') ball = Ball(canvas, 'red', 25, paddle) input('hit any key to start')", "paddle_pos = self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]: if pos[3]", "'red', 25, paddle) input('hit any key to start') # Animation loop while ball.hit_bottom", "# pass just does nothing pass def speed_up(self, evt): self.speed_factor *= 1.2 def", "pos[2] >= 500: self.xspeed = -3 if self.hit_paddle(pos) == True: self.yspeed = -3", "and functions class Ball: def __init__(self, canvas, color, size, paddle): self.canvas = canvas", "10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed = 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left)", "to draw on tk = Tk() tk.title(\"Ball Game\") canvas = Canvas(tk, width=500, height=400,", "input('hit any key to start') # Animation loop while ball.hit_bottom == False: 
ball.draw()", "self.hit_paddle(pos) == True: self.yspeed = -3 self.xspeed = random.randrange(-3,3) self.score += 1 def", "any key to start') # Animation loop while ball.hit_bottom == False: ball.draw() paddle.draw()", "# Simple pong game - don't let the ball hit the bottom! #", "height=400, bd=0, bg='papaya whip') canvas.pack() label = canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update()", "the bottom! # KidsCanCode - Intro to Programming from tkinter import * import", "def hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and pos[0] <=", "-3 self.xspeed = random.randrange(-3,3) self.score += 1 def hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id)", "# Create window and canvas to draw on tk = Tk() tk.title(\"Ball Game\")", "fill=color) self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3) self.yspeed = -1 self.hit_bottom = False", "Ball: def __init__(self, canvas, color, size, paddle): self.canvas = canvas self.paddle = paddle", "self.yspeed = 3 if pos[3] >= 400: self.hit_bottom = True if pos[0] <=", "evt): # pass just does nothing pass def speed_up(self, evt): self.speed_factor *= 1.2", "def main(): # Create window and canvas to draw on tk = Tk()", "paddle) input('hit any key to start') # Animation loop while ball.hit_bottom == False:", "'blue') ball = Ball(canvas, 'red', 25, paddle) input('hit any key to start') #", "self.xspeed = -2 * self.speed_factor def move_right(self, evt): self.xspeed = 0 * self.speed_factor", "ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game", "self.yspeed = -1 self.hit_bottom = False self.score = 0 def draw(self): self.canvas.move(self.id, self.xspeed,", "300) self.xspeed = 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) 
self.canvas.bind_all('<KeyPress-Down>', self.speed_down)", "self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id)", "pos[2] >= 500: self.xspeed = 0 def move_left(self, evt): self.xspeed = -2 *", ">= paddle_pos[1] and pos[3] <= paddle_pos[3]: return True return False # Define paddle", "pong game - don't let the ball hit the bottom! # KidsCanCode -", "= canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3) self.yspeed", "key to start') # Animation loop while ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label,", "functions class Ball: def __init__(self, canvas, color, size, paddle): self.canvas = canvas self.paddle", "and canvas to draw on tk = Tk() tk.title(\"Ball Game\") canvas = Canvas(tk,", "let the ball hit the bottom! # KidsCanCode - Intro to Programming from", "1.2 def speed_down(self, evt): self.speed_factor /= 1.2 def main(): # Create window and", "ball hit the bottom! 
# KidsCanCode - Intro to Programming from tkinter import", "ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over go_label =", "color, size, paddle): self.canvas = canvas self.paddle = paddle self.id = canvas.create_oval(10, 10,", "random.randrange(-3,3) self.yspeed = -1 self.hit_bottom = False self.score = 0 def draw(self): self.canvas.move(self.id,", "Game\") canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya whip') canvas.pack() label = canvas.create_text(5,", "canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya whip') canvas.pack() label = canvas.create_text(5, 5,", "evt): self.speed_factor /= 1.2 def main(): # Create window and canvas to draw", "tk.title(\"Ball Game\") canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya whip') canvas.pack() label =", "canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3) self.yspeed =", "Define paddle properties and functions class Paddle: def __init__(self, canvas, color): self.canvas =", "self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed = 0", "pass def speed_up(self, evt): self.speed_factor *= 1.2 def speed_down(self, evt): self.speed_factor /= 1.2", "0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if pos[1] <= 0:", "pos[0] <= 0: self.xspeed = 3 if pos[2] >= 500: self.xspeed = -3", "self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop)", "bd=0, bg='papaya whip') canvas.pack() label = canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update() paddle", "canvas.pack() label = 
canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update() paddle = Paddle(canvas, 'blue')", "# Define paddle properties and functions class Paddle: def __init__(self, canvas, color): self.canvas", "pos[3] >= 400: self.hit_bottom = True if pos[0] <= 0: self.xspeed = 3", "# Define ball properties and functions class Ball: def __init__(self, canvas, color, size,", "paddle_pos[3]: return True return False # Define paddle properties and functions class Paddle:", "self.canvas = canvas self.paddle = paddle self.id = canvas.create_oval(10, 10, size, size, fill=color)", "Define ball properties and functions class Ball: def __init__(self, canvas, color, size, paddle):", "on tk = Tk() tk.title(\"Ball Game\") canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya", "Programming from tkinter import * import random import time # Define ball properties", "5, anchor=NW, text=\"Score: 0\") tk.update() paddle = Paddle(canvas, 'blue') ball = Ball(canvas, 'red',", "500: self.xspeed = -3 if self.hit_paddle(pos) == True: self.yspeed = -3 self.xspeed =", "3 if pos[3] >= 400: self.hit_bottom = True if pos[0] <= 0: self.xspeed", "= 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def", "functions class Paddle: def __init__(self, canvas, color): self.canvas = canvas self.id = canvas.create_rectangle(0,0,", "def stop(self, evt): # pass just does nothing pass def speed_up(self, evt): self.speed_factor", "self.xspeed = random.randrange(-3,3) self.yspeed = -1 self.hit_bottom = False self.score = 0 def", "-3 if self.hit_paddle(pos) == True: self.yspeed = -3 self.xspeed = random.randrange(-3,3) self.score +=", "self.canvas = canvas self.id = canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed", "paddle): 
self.canvas = canvas self.paddle = paddle self.id = canvas.create_oval(10, 10, size, size,", "self.speed_factor *= 1.2 def speed_down(self, evt): self.speed_factor /= 1.2 def main(): # Create", "def speed_up(self, evt): self.speed_factor *= 1.2 def speed_down(self, evt): self.speed_factor /= 1.2 def", "1.2 def main(): # Create window and canvas to draw on tk =", "to Programming from tkinter import * import random import time # Define ball", "self.paddle = paddle self.id = canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id, 245, 100)", "pos = self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed = 0 if pos[2] >=", "*= 1.2 def speed_down(self, evt): self.speed_factor /= 1.2 def main(): # Create window", "def speed_down(self, evt): self.speed_factor /= 1.2 def main(): # Create window and canvas", "0 def move_left(self, evt): self.xspeed = -2 * self.speed_factor def move_right(self, evt): self.xspeed", "False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over go_label", "self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed = 3", "= Ball(canvas, 'red', 25, paddle) input('hit any key to start') # Animation loop", "self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos =", "-2 * self.speed_factor def move_right(self, evt): self.xspeed = 0 * self.speed_factor def stop(self,", "and pos[0] <= paddle_pos[2]: if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: return", "0) pos = self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed = 0 if pos[2]", "pos[1] <= 0: self.yspeed = 3 if pos[3] >= 400: self.hit_bottom = True", "= -2 * self.speed_factor def move_right(self, evt): self.xspeed = 0 * self.speed_factor def", "ball = 
Ball(canvas, 'red', 25, paddle) input('hit any key to start') # Animation", "self.xspeed, 0) pos = self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed = 0 if", "if pos[2] >= 500: self.xspeed = -3 if self.hit_paddle(pos) == True: self.yspeed =", "= canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update() paddle = Paddle(canvas, 'blue') ball =", "Ball(canvas, 'red', 25, paddle) input('hit any key to start') # Animation loop while", "text=\"Score: 0\") tk.update() paddle = Paddle(canvas, 'blue') ball = Ball(canvas, 'red', 25, paddle)", "= self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]: if pos[3] >=", "import time # Define ball properties and functions class Ball: def __init__(self, canvas,", "__init__(self, canvas, color, size, paddle): self.canvas = canvas self.paddle = paddle self.id =", "def move_right(self, evt): self.xspeed = 0 * self.speed_factor def stop(self, evt): # pass", "<= 0: self.yspeed = 3 if pos[3] >= 400: self.hit_bottom = True if", "self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]: if pos[3] >= paddle_pos[1]", "tkinter import * import random import time # Define ball properties and functions", "def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed", "== False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over", ">= paddle_pos[0] and pos[0] <= paddle_pos[2]: if pos[3] >= paddle_pos[1] and pos[3] <=", "self.xspeed = 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>',", "bg='papaya whip') canvas.pack() label = canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update() paddle =", "# KidsCanCode - Intro to 
Programming from tkinter import * import random import", "canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed = 0 self.speed_factor = 1", "does nothing pass def speed_up(self, evt): self.speed_factor *= 1.2 def speed_down(self, evt): self.speed_factor", "\"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over go_label = canvas.create_text(250,200,text=\"GAME OVER\",font=(\"Helvetica\",30)) tk.update() if", "<= paddle_pos[3]: return True return False # Define paddle properties and functions class", "0: self.xspeed = 0 if pos[2] >= 500: self.xspeed = 0 def move_left(self,", "self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id) if", "self.score = 0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if pos[1]", "properties and functions class Ball: def __init__(self, canvas, color, size, paddle): self.canvas =", "True: self.yspeed = -3 self.xspeed = random.randrange(-3,3) self.score += 1 def hit_paddle(self, pos):", "bottom! 
# KidsCanCode - Intro to Programming from tkinter import * import random", "self.xspeed = 0 def move_left(self, evt): self.xspeed = -2 * self.speed_factor def move_right(self,", "= self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed = 3 if pos[3] >= 400:", "<filename>pong_game.py # Simple pong game - don't let the ball hit the bottom!", "= canvas self.id = canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed =", "draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed =", "= -3 self.xspeed = random.randrange(-3,3) self.score += 1 def hit_paddle(self, pos): paddle_pos =", "- Intro to Programming from tkinter import * import random import time #", "self.id = canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3)", "1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self):", "True return False # Define paddle properties and functions class Paddle: def __init__(self,", "self.hit_bottom = False self.score = 0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos =", "if pos[0] <= 0: self.xspeed = 3 if pos[2] >= 500: self.xspeed =", "self.hit_bottom = True if pos[0] <= 0: self.xspeed = 3 if pos[2] >=", "= Canvas(tk, width=500, height=400, bd=0, bg='papaya whip') canvas.pack() label = canvas.create_text(5, 5, anchor=NW,", ">= 500: self.xspeed = -3 if self.hit_paddle(pos) == True: self.yspeed = -3 self.xspeed", "if pos[3] >= 400: self.hit_bottom = True if pos[0] <= 0: self.xspeed =", "0 * self.speed_factor def stop(self, evt): # pass just does nothing pass def", "random import time # Define ball properties and functions class Ball: def 
__init__(self,", "= 0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if pos[1] <=", "self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed,", "False self.score = 0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if", "paddle_pos[0] and pos[0] <= paddle_pos[2]: if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:", "canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over go_label = canvas.create_text(250,200,text=\"GAME OVER\",font=(\"Helvetica\",30))", "def __init__(self, canvas, color, size, paddle): self.canvas = canvas self.paddle = paddle self.id", "-1 self.hit_bottom = False self.score = 0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos", "return True return False # Define paddle properties and functions class Paddle: def", "self.xspeed = 0 if pos[2] >= 500: self.xspeed = 0 def move_left(self, evt):", "3 if pos[2] >= 500: self.xspeed = -3 if self.hit_paddle(pos) == True: self.yspeed", "whip') canvas.pack() label = canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update() paddle = Paddle(canvas,", "self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id) if pos[0] <= 0:", "pos[0] <= paddle_pos[2]: if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: return True", "* self.speed_factor def move_right(self, evt): self.xspeed = 0 * self.speed_factor def stop(self, evt):", "= 0 def move_left(self, evt): self.xspeed = -2 * self.speed_factor def move_right(self, evt):", "paddle = Paddle(canvas, 'blue') ball = Ball(canvas, 'red', 25, paddle) input('hit any key", "500: self.xspeed = 
0 def move_left(self, evt): self.xspeed = -2 * self.speed_factor def", ">= 500: self.xspeed = 0 def move_left(self, evt): self.xspeed = -2 * self.speed_factor", "the ball hit the bottom! # KidsCanCode - Intro to Programming from tkinter", "/= 1.2 def main(): # Create window and canvas to draw on tk", "self.score += 1 def hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0]", "= 3 if pos[2] >= 500: self.xspeed = -3 if self.hit_paddle(pos) == True:", "pos = self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed = 3 if pos[3] >=", "canvas self.id = canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed = 0", "self.xspeed = 0 * self.speed_factor def stop(self, evt): # pass just does nothing", "tk = Tk() tk.title(\"Ball Game\") canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya whip')", "= 3 if pos[3] >= 400: self.hit_bottom = True if pos[0] <= 0:", "evt): self.xspeed = -2 * self.speed_factor def move_right(self, evt): self.xspeed = 0 *", "size, paddle): self.canvas = canvas self.paddle = paddle self.id = canvas.create_oval(10, 10, size,", "move_right(self, evt): self.xspeed = 0 * self.speed_factor def stop(self, evt): # pass just", "pos): paddle_pos = self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]: if", "self.speed_factor def stop(self, evt): # pass just does nothing pass def speed_up(self, evt):", "tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over go_label = canvas.create_text(250,200,text=\"GAME OVER\",font=(\"Helvetica\",30)) tk.update() if __name__", "canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update() paddle = Paddle(canvas, 'blue') ball = Ball(canvas,", "+= 1 def hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and", "Animation loop while ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) 
tk.update_idletasks() tk.update()", "hit the bottom! # KidsCanCode - Intro to Programming from tkinter import *", "400: self.hit_bottom = True if pos[0] <= 0: self.xspeed = 3 if pos[2]", "self.xspeed = -3 if self.hit_paddle(pos) == True: self.yspeed = -3 self.xspeed = random.randrange(-3,3)", "self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed = 3 if pos[3] >= 400: self.hit_bottom", "if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]: if pos[3] >= paddle_pos[1] and", "0: self.yspeed = 3 if pos[3] >= 400: self.hit_bottom = True if pos[0]", "= random.randrange(-3,3) self.score += 1 def hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id) if pos[2]", "size, size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3) self.yspeed = -1 self.hit_bottom", "self.yspeed = -3 self.xspeed = random.randrange(-3,3) self.score += 1 def hit_paddle(self, pos): paddle_pos", "text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over go_label = canvas.create_text(250,200,text=\"GAME OVER\",font=(\"Helvetica\",30)) tk.update()", "Simple pong game - don't let the ball hit the bottom! 
# KidsCanCode", "self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3) self.yspeed = -1 self.hit_bottom = False self.score", "label = canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\") tk.update() paddle = Paddle(canvas, 'blue') ball", "25, paddle) input('hit any key to start') # Animation loop while ball.hit_bottom ==", "Tk() tk.title(\"Ball Game\") canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya whip') canvas.pack() label", "start') # Animation loop while ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score))", "loop while ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01)", "if pos[1] <= 0: self.yspeed = 3 if pos[3] >= 400: self.hit_bottom =", "fill=color) self.canvas.move(self.id, 200, 300) self.xspeed = 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>',", "pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: return True return False # Define", "and functions class Paddle: def __init__(self, canvas, color): self.canvas = canvas self.id =", "pos[0] <= 0: self.xspeed = 0 if pos[2] >= 500: self.xspeed = 0", "self.canvas.coords(self.id) if pos[0] <= 0: self.xspeed = 0 if pos[2] >= 500: self.xspeed", "anchor=NW, text=\"Score: 0\") tk.update() paddle = Paddle(canvas, 'blue') ball = Ball(canvas, 'red', 25,", "while ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) #", "False # Define paddle properties and functions class Paddle: def __init__(self, canvas, color):", "paddle properties and functions class Paddle: def __init__(self, canvas, color): self.canvas = canvas", "tk.update() time.sleep(0.01) # Game Over go_label = canvas.create_text(250,200,text=\"GAME OVER\",font=(\"Helvetica\",30)) 
tk.update() if __name__ ==", "class Ball: def __init__(self, canvas, color, size, paddle): self.canvas = canvas self.paddle =", "self.xspeed = 3 if pos[2] >= 500: self.xspeed = -3 if self.hit_paddle(pos) ==", "KidsCanCode - Intro to Programming from tkinter import * import random import time", "0 if pos[2] >= 500: self.xspeed = 0 def move_left(self, evt): self.xspeed =", "window and canvas to draw on tk = Tk() tk.title(\"Ball Game\") canvas =", "def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed", "self.id = canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed = 0 self.speed_factor", "- don't let the ball hit the bottom! # KidsCanCode - Intro to", "size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3) self.yspeed = -1 self.hit_bottom =", "= canvas self.paddle = paddle self.id = canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id,", "paddle_pos[1] and pos[3] <= paddle_pos[3]: return True return False # Define paddle properties", "Paddle: def __init__(self, canvas, color): self.canvas = canvas self.id = canvas.create_rectangle(0,0, 100, 10,", "0\") tk.update() paddle = Paddle(canvas, 'blue') ball = Ball(canvas, 'red', 25, paddle) input('hit", "tk.update() paddle = Paddle(canvas, 'blue') ball = Ball(canvas, 'red', 25, paddle) input('hit any", "if pos[2] >= 500: self.xspeed = 0 def move_left(self, evt): self.xspeed = -2", "self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos", "# Animation loop while ball.hit_bottom == False: ball.draw() paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks()", "canvas, color, size, paddle): self.canvas = canvas self.paddle = 
paddle self.id = canvas.create_oval(10,", "canvas self.paddle = paddle self.id = canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id, 245,", "100) self.xspeed = random.randrange(-3,3) self.yspeed = -1 self.hit_bottom = False self.score = 0", "True if pos[0] <= 0: self.xspeed = 3 if pos[2] >= 500: self.xspeed", "game - don't let the ball hit the bottom! # KidsCanCode - Intro", "if self.hit_paddle(pos) == True: self.yspeed = -3 self.xspeed = random.randrange(-3,3) self.score += 1", "self.xspeed = random.randrange(-3,3) self.score += 1 def hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id) if", "if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: return True return False #", "paddle self.id = canvas.create_oval(10, 10, size, size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed =", "pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]: if pos[3] >= paddle_pos[1] and pos[3]", "self.xspeed, self.yspeed) pos = self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed = 3 if", "Intro to Programming from tkinter import * import random import time # Define", "evt): self.xspeed = 0 * self.speed_factor def stop(self, evt): # pass just does", "* self.speed_factor def stop(self, evt): # pass just does nothing pass def speed_up(self,", "pass just does nothing pass def speed_up(self, evt): self.speed_factor *= 1.2 def speed_down(self,", "= 0 if pos[2] >= 500: self.xspeed = 0 def move_left(self, evt): self.xspeed", "self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right) self.canvas.bind_all('<KeyPress-Down>', self.speed_down) self.canvas.bind_all('<KeyPress-Up>', self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id,", "= Paddle(canvas, 'blue') ball = Ball(canvas, 'red', 25, paddle) input('hit any key to", "random.randrange(-3,3) self.score += 1 def hit_paddle(self, pos): paddle_pos = 
self.canvas.coords(self.paddle.id) if pos[2] >=", "= random.randrange(-3,3) self.yspeed = -1 self.hit_bottom = False self.score = 0 def draw(self):", "self.speed_factor def move_right(self, evt): self.xspeed = 0 * self.speed_factor def stop(self, evt): #", "= 0 * self.speed_factor def stop(self, evt): # pass just does nothing pass", "just does nothing pass def speed_up(self, evt): self.speed_factor *= 1.2 def speed_down(self, evt):", "# Game Over go_label = canvas.create_text(250,200,text=\"GAME OVER\",font=(\"Helvetica\",30)) tk.update() if __name__ == '__main__': main()", "= canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed = 0 self.speed_factor =", "= -3 if self.hit_paddle(pos) == True: self.yspeed = -3 self.xspeed = random.randrange(-3,3) self.score", "return False # Define paddle properties and functions class Paddle: def __init__(self, canvas,", "100, 10, fill=color) self.canvas.move(self.id, 200, 300) self.xspeed = 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>',", "import * import random import time # Define ball properties and functions class", "move_left(self, evt): self.xspeed = -2 * self.speed_factor def move_right(self, evt): self.xspeed = 0", "Create window and canvas to draw on tk = Tk() tk.title(\"Ball Game\") canvas", "10, size, size, fill=color) self.canvas.move(self.id, 245, 100) self.xspeed = random.randrange(-3,3) self.yspeed = -1", "<= paddle_pos[2]: if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: return True return", "= Tk() tk.title(\"Ball Game\") canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya whip') canvas.pack()", "evt): self.speed_factor *= 1.2 def speed_down(self, evt): self.speed_factor /= 1.2 def main(): #", "paddle.draw() canvas.itemconfig(label, text=\"Score: \"+str(ball.score)) tk.update_idletasks() tk.update() time.sleep(0.01) # Game Over go_label = canvas.create_text(250,200,text=\"GAME", "draw on tk = Tk() tk.title(\"Ball Game\") canvas = 
Canvas(tk, width=500, height=400, bd=0,", "width=500, height=400, bd=0, bg='papaya whip') canvas.pack() label = canvas.create_text(5, 5, anchor=NW, text=\"Score: 0\")", ">= 400: self.hit_bottom = True if pos[0] <= 0: self.xspeed = 3 if", "self.yspeed) pos = self.canvas.coords(self.id) if pos[1] <= 0: self.yspeed = 3 if pos[3]", "hit_paddle(self, pos): paddle_pos = self.canvas.coords(self.paddle.id) if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:", "paddle_pos[2]: if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: return True return False", "def __init__(self, canvas, color): self.canvas = canvas self.id = canvas.create_rectangle(0,0, 100, 10, fill=color)", "time.sleep(0.01) # Game Over go_label = canvas.create_text(250,200,text=\"GAME OVER\",font=(\"Helvetica\",30)) tk.update() if __name__ == '__main__':", "= -1 self.hit_bottom = False self.score = 0 def draw(self): self.canvas.move(self.id, self.xspeed, self.yspeed)", "self.canvas.move(self.id, 200, 300) self.xspeed = 0 self.speed_factor = 1 self.canvas.bind_all('<KeyPress-Left>', self.move_left) self.canvas.bind_all('<KeyPress-Right>', self.move_right)", "stop(self, evt): # pass just does nothing pass def speed_up(self, evt): self.speed_factor *=", "nothing pass def speed_up(self, evt): self.speed_factor *= 1.2 def speed_down(self, evt): self.speed_factor /=", "canvas, color): self.canvas = canvas self.id = canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200,", "245, 100) self.xspeed = random.randrange(-3,3) self.yspeed = -1 self.hit_bottom = False self.score =", "self.speed_up) self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id) if pos[0]", "self.canvas.bind_all('<KeyPress-space>', self.stop) def draw(self): self.canvas.move(self.id, self.xspeed, 0) pos = self.canvas.coords(self.id) if pos[0] <=", "properties and functions class Paddle: def __init__(self, canvas, color): 
self.canvas = canvas self.id", "speed_up(self, evt): self.speed_factor *= 1.2 def speed_down(self, evt): self.speed_factor /= 1.2 def main():", "= True if pos[0] <= 0: self.xspeed = 3 if pos[2] >= 500:", "0: self.xspeed = 3 if pos[2] >= 500: self.xspeed = -3 if self.hit_paddle(pos)", "main(): # Create window and canvas to draw on tk = Tk() tk.title(\"Ball", "from tkinter import * import random import time # Define ball properties and", "color): self.canvas = canvas self.id = canvas.create_rectangle(0,0, 100, 10, fill=color) self.canvas.move(self.id, 200, 300)", "and pos[3] <= paddle_pos[3]: return True return False # Define paddle properties and", "* import random import time # Define ball properties and functions class Ball:", "def move_left(self, evt): self.xspeed = -2 * self.speed_factor def move_right(self, evt): self.xspeed =", "<= 0: self.xspeed = 3 if pos[2] >= 500: self.xspeed = -3 if" ]
[ "the OnBoard class functionality. \"\"\" def setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler'", "sensor data messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5'", "import ( patch, MagicMock ) from client.communication.messages import MessageCommand from client.communication.on_board import OnBoard,", "= patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test the start", "= 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board =", "client.communication.messages import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite", "b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) #", "def test_incoming_msg_handled(self): \"\"\" Test incoming message is handled correctly. \"\"\" test_msg = 'test:message'", "it will exhaust the side_effect list and cause # a StopIteration error. This", "on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests surrounding the IncomingMessageHandler class", "handled correctly. 
\"\"\" test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board", "This is a messy solution but it works to # test for now.", "'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link)", "MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test", "properly. \"\"\" # Create some test sensor data messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1',", "suite of tests surrounding the IncomingMessageHandler class functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\" Test", "start streaming data send message. \"\"\" mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link", "'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board,", "b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. 
mock_comm_link =", "OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def", "'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from the", "'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [", "-> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\"", "= OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode())", ") from client.communication.messages import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\"", "'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from the bike is", ") class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests surrounding the IncomingMessageHandler class functionality.", "= MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the test messages.", "mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in a", "mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() 
mock_comm_link.send = MagicMock() on_board", "# Verify all messages are handled correctly. expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1',", "\"\"\" Test incoming message is handled correctly. \"\"\" test_msg = 'test:message' mock_comm_link =", "dealt with properly. \"\"\" # Create some test sensor data messages. test_sensor_data_messages =", "is a messy solution but it works to # test for now. try:", "class functionality. \"\"\" def setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall)", "surrounding the OnBoard class functionality. \"\"\" def setUp(self) -> None: self._mock_message_handler = patch(", "mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming", "Create OnBoard object. mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call", "and ignore StopIteration errors. It # will error because it will exhaust the", "StopIteration error. This is a messy solution but it works to # test", "messy solution but it works to # test for now. try: msg_handler.run() except", "messages. 
for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are handled correctly.", "b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual(", "mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from the bike is dealt with", "mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link)", "= OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode())", "error. This is a messy solution but it works to # test for", "= MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2',", "in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are handled correctly. expected_data = {", "def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message. \"\"\" mock_message", "test messages. 
for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are handled", "], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual(", "], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE')", "A suite of tests surrounding the IncomingMessageHandler class functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\"", "'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data", "test sensor data messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:'", "the side_effect list and cause # a StopIteration error. This is a messy", "on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\"", "exhaust the side_effect list and cause # a StopIteration error. This is a", "error because it will exhaust the side_effect list and cause # a StopIteration", "catch and ignore StopIteration errors. 
It # will error because it will exhaust", "from unittest.mock import ( patch, MagicMock ) from client.communication.messages import MessageCommand from client.communication.on_board", "mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1',", "expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of", "\"\"\" # Create some test sensor data messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2',", "correctly. \"\"\" test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board =", "on_board.handle_incoming_message(message) # Verify all messages are handled correctly. expected_data = { 'AIR_TEMPERATURE': [", "on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests", "mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1',", "data send message. 
\"\"\" mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock()", "= mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2',", ") self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests surrounding", "= 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler =", "messages are handled correctly. expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'),", "errors. It # will error because it will exhaust the side_effect list and", "'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\"", "= IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in a try catch and ignore", "def test_start_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message. \"\"\" mock_message", "from the bike is dealt with properly. \"\"\" # Create some test sensor", "StopIteration errors. It # will error because it will exhaust the side_effect list", "side_effect list and cause # a StopIteration error. This is a messy solution", "( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. 
mock_comm_link", "mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3'])", "surrounding the IncomingMessageHandler class functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming message is", "OnBoard object. mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call for", "] # Create OnBoard object. mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) # Simulate", "call for the test messages. for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all", "IncomingMessageHandler class functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming message is handled correctly.", "the thread in a try catch and ignore StopIteration errors. It # will", "test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message. \"\"\" mock_message =", "data from the bike is dealt with properly. \"\"\" # Create some test", "= [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] #", "def setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self,", "the test messages. 
for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are", "MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3']", "\"\"\" A suite of tests surrounding the IncomingMessageHandler class functionality. \"\"\" def test_incoming_msg_handled(self):", "all messages are handled correctly. expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2',", "patch, MagicMock ) from client.communication.messages import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class", "(b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') )", "# will error because it will exhaust the side_effect list and cause #", "'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data", "on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] )", "OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args')", "\"\"\" def setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) 
@patch('client.communication.on_board.build_command_message_with_args') def", "test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from the bike is dealt with properly. \"\"\"", "a messy solution but it works to # test for now. try: msg_handler.run()", "(b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE':", "# a StopIteration error. This is a messy solution but it works to", "@patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message. \"\"\"", "mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the test", "OnBoardTest(unittest.TestCase): \"\"\" A suite of tests surrounding the OnBoard class functionality. \"\"\" def", "mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message.", "{ 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR':", "b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'],", "of tests surrounding the IncomingMessageHandler class functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming", "send message. \"\"\" mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send", "streaming data send message. 
\"\"\" mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link =", "from client.communication.messages import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A", "are handled correctly. expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3',", "(b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5')", "message. \"\"\" mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send =", "because it will exhaust the side_effect list and cause # a StopIteration error.", "self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite", "'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start streaming", "OnBoard class functionality. 
\"\"\" def setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start()", "['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the", "'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test", "self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class", "MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the test messages. for", "is dealt with properly. \"\"\" # Create some test sensor data messages. test_sensor_data_messages", "'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from the bike", "Test incoming message is handled correctly. \"\"\" test_msg = 'test:message' mock_comm_link = MagicMock()", "solution but it works to # test for now. try: msg_handler.run() except StopIteration:", "try catch and ignore StopIteration errors. It # will error because it will", "for the test messages. 
for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages", "( patch, MagicMock ) from client.communication.messages import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler", "msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in a try catch and", "'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start", "messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6',", "unittest from unittest.mock import ( patch, MagicMock ) from client.communication.messages import MessageCommand from", "b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [", "a try catch and ignore StopIteration errors. It # will error because it", "test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ]", "= mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2',", "with properly. \"\"\" # Create some test sensor data messages. test_sensor_data_messages = [", "tests surrounding the IncomingMessageHandler class functionality. 
\"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming message", "bike is dealt with properly. \"\"\" # Create some test sensor data messages.", "mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the", "on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self):", "def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from the bike is dealt with properly.", "= [test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread", "test_incoming_msg_handled(self): \"\"\" Test incoming message is handled correctly. \"\"\" test_msg = 'test:message' mock_comm_link", "suite of tests surrounding the OnBoard class functionality. \"\"\" def setUp(self) -> None:", "tests surrounding the OnBoard class functionality. \"\"\" def setUp(self) -> None: self._mock_message_handler =", "MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop',", "= { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ],", "= MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in a try", "\"\"\" Test incoming data from the bike is dealt with properly. 
\"\"\" #", "IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite of tests surrounding the OnBoard class functionality.", ") self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A", "self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests surrounding the", "MagicMock ) from client.communication.messages import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase):", "for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are handled correctly. expected_data", "it works to # test for now. try: msg_handler.run() except StopIteration: pass mock_on_board.handle_incoming_message.assert_called_with(test_msg)", "is handled correctly. \"\"\" test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg]", "handled correctly. 
expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'),", "MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite of tests", "[ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') )", "expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase):", "test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler", "[ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create", "IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests surrounding the IncomingMessageHandler class functionality. \"\"\" def", "will exhaust the side_effect list and cause # a StopIteration error. This is", "import unittest from unittest.mock import ( patch, MagicMock ) from client.communication.messages import MessageCommand", "[test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in", "# Create OnBoard object. 
mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message", "on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] )", "['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from", "b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object.", "Simulate handle_incoming_message call for the test messages. for message in test_sensor_data_messages: on_board.handle_incoming_message(message) #", "\"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming message is handled correctly. \"\"\" test_msg =", "], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'],", "but it works to # test for now. try: msg_handler.run() except StopIteration: pass", "= MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA,", "object. mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the", "Create some test sensor data messages. 
test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:'", "test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are handled correctly. expected_data = { 'AIR_TEMPERATURE':", ") mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send", "Verify all messages are handled correctly. expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'),", "mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3'])", "ignore StopIteration errors. It # will error because it will exhaust the side_effect", "from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite of tests surrounding", "'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message):", "functionality. \"\"\" def setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args')", "mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1',", "\"\"\" A suite of tests surrounding the OnBoard class functionality. \"\"\" def setUp(self)", "Test the start streaming data send message. 
\"\"\" mock_message = 'mock_message' mock_build_message.return_value =", "Test incoming data from the bike is dealt with properly. \"\"\" # Create", "a StopIteration error. This is a messy solution but it works to #", "(b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual(", "MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data", "test_start_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message. \"\"\" mock_message =", "mock_comm_link) # Run the thread in a try catch and ignore StopIteration errors.", "the bike is dealt with properly. \"\"\" # Create some test sensor data", "patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test the start streaming", "handle_incoming_message call for the test messages. 
for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify", "(b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ],", "b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ],", "It # will error because it will exhaust the side_effect list and cause", "expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0')", "} self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') )", "mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with(", "class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests surrounding the IncomingMessageHandler class functionality. \"\"\"", "class OnBoardTest(unittest.TestCase): \"\"\" A suite of tests surrounding the OnBoard class functionality. \"\"\"", "mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1',", "client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite of tests surrounding the", "of tests surrounding the OnBoard class functionality. 
\"\"\" def setUp(self) -> None: self._mock_message_handler", "None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test", "b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard", "# Simulate handle_incoming_message call for the test messages. for message in test_sensor_data_messages: on_board.handle_incoming_message(message)", "\"\"\" mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock()", "MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run", "MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start',", "correctly. expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6',", "[ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4',", "functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming message is handled correctly. 
\"\"\" test_msg", "self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message.", "import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite of", "b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. mock_comm_link = MagicMock() on_board", "# Run the thread in a try catch and ignore StopIteration errors. It", "expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): \"\"\" A suite of tests surrounding the IncomingMessageHandler", "on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def", "b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. mock_comm_link = MagicMock()", "Run the thread in a try catch and ignore StopIteration errors. It #", "OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the test messages. 
for message in test_sensor_data_messages:", "import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite of tests surrounding the OnBoard", ").start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send", "= MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2',", ") mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): \"\"\" Test incoming data from the bike is dealt", "and cause # a StopIteration error. This is a messy solution but it", "'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self,", "= MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) #", "will error because it will exhaust the side_effect list and cause # a", "@patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\" Test the start streaming data send message. 
\"\"\"", "'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR')", "= MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA,", "incoming data from the bike is dealt with properly. \"\"\" # Create some", "\"\"\" test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock()", "some test sensor data messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:'", "A suite of tests surrounding the OnBoard class functionality. \"\"\" def setUp(self) ->", "message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are handled correctly. expected_data =", "in a try catch and ignore StopIteration errors. 
It # will error because", "b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], }", "mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): \"\"\"", "setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message):", "b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. mock_comm_link = MagicMock() on_board =", "[ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE')", "'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'],", "the IncomingMessageHandler class functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming message is handled", "list and cause # a StopIteration error. This is a messy solution but", "\"\"\" Test the start streaming data send message. \"\"\" mock_message = 'mock_message' mock_build_message.return_value", "= OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the test messages. 
for message in", "OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): \"\"\" A suite of tests surrounding the OnBoard class", "unittest.mock import ( patch, MagicMock ) from client.communication.messages import MessageCommand from client.communication.on_board import", "MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3']", "# Create some test sensor data messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', (", "on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the test messages. for message", "mock_build_message): \"\"\" Test the start streaming data send message. \"\"\" mock_message = 'mock_message'", "class functionality. \"\"\" def test_incoming_msg_handled(self): \"\"\" Test incoming message is handled correctly. \"\"\"", "incoming message is handled correctly. \"\"\" test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect", "thread in a try catch and ignore StopIteration errors. It # will error", "cause # a StopIteration error. This is a messy solution but it works", "), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link)", "data messages. 
test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ),", "IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in a try catch and ignore StopIteration", "mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with(", "(b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5',", "MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in a try catch", "b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50')", "message is handled correctly. \"\"\" test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect =", "self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): \"\"\" Test the", "the start streaming data send message. \"\"\" mock_message = 'mock_message' mock_build_message.return_value = mock_message" ]
[ "# All rights reserved. # Contains utilities for working with HTML form data.", "in the POST parameters. \"\"\" values = {} ok = True for field", "= request.POST.get(field, '') if val == '' and field not in blanks: ok", "All rights reserved. # Contains utilities for working with HTML form data. def", "blanks=[]): \"\"\" Extremely rudimentary validation simply checks whether the fields are present and", "the fields are present and non-empty in the POST parameters. \"\"\" values =", "simply checks whether the fields are present and non-empty in the POST parameters.", "if val == '' and field not in blanks: ok = False values[field]", "form data. def fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary validation simply checks whether", "field not in blanks: ok = False values[field] = val return ok, values", "whether the fields are present and non-empty in the POST parameters. \"\"\" values", "HTML form data. def fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary validation simply checks", "8 Studios, LLC. # All rights reserved. # Contains utilities for working with", "= True for field in fields: val = request.POST.get(field, '') if val ==", "field in fields: val = request.POST.get(field, '') if val == '' and field", "and field not in blanks: ok = False values[field] = val return ok,", "= {} ok = True for field in fields: val = request.POST.get(field, '')", "Copyright (c) 2010-2011 Lazy 8 Studios, LLC. # All rights reserved. # Contains", "Studios, LLC. # All rights reserved. # Contains utilities for working with HTML", "val == '' and field not in blanks: ok = False values[field] =", "# Contains utilities for working with HTML form data. 
def fetch(request, fields, blanks=[]):", "for field in fields: val = request.POST.get(field, '') if val == '' and", "in fields: val = request.POST.get(field, '') if val == '' and field not", "fields: val = request.POST.get(field, '') if val == '' and field not in", "fields, blanks=[]): \"\"\" Extremely rudimentary validation simply checks whether the fields are present", "are present and non-empty in the POST parameters. \"\"\" values = {} ok", "checks whether the fields are present and non-empty in the POST parameters. \"\"\"", "{} ok = True for field in fields: val = request.POST.get(field, '') if", "the POST parameters. \"\"\" values = {} ok = True for field in", "parameters. \"\"\" values = {} ok = True for field in fields: val", "def fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary validation simply checks whether the fields", "True for field in fields: val = request.POST.get(field, '') if val == ''", "values = {} ok = True for field in fields: val = request.POST.get(field,", "with HTML form data. def fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary validation simply", "data. def fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary validation simply checks whether the", "Extremely rudimentary validation simply checks whether the fields are present and non-empty in", "non-empty in the POST parameters. \"\"\" values = {} ok = True for", "fields are present and non-empty in the POST parameters. \"\"\" values = {}", "ok = True for field in fields: val = request.POST.get(field, '') if val", "for working with HTML form data. def fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary", "# Copyright (c) 2010-2011 Lazy 8 Studios, LLC. # All rights reserved. 
#", "rudimentary validation simply checks whether the fields are present and non-empty in the", "validation simply checks whether the fields are present and non-empty in the POST", "val = request.POST.get(field, '') if val == '' and field not in blanks:", "request.POST.get(field, '') if val == '' and field not in blanks: ok =", "Lazy 8 Studios, LLC. # All rights reserved. # Contains utilities for working", "rights reserved. # Contains utilities for working with HTML form data. def fetch(request,", "utilities for working with HTML form data. def fetch(request, fields, blanks=[]): \"\"\" Extremely", "\"\"\" values = {} ok = True for field in fields: val =", "and non-empty in the POST parameters. \"\"\" values = {} ok = True", "'') if val == '' and field not in blanks: ok = False", "reserved. # Contains utilities for working with HTML form data. def fetch(request, fields,", "present and non-empty in the POST parameters. \"\"\" values = {} ok =", "'' and field not in blanks: ok = False values[field] = val return", "== '' and field not in blanks: ok = False values[field] = val", "working with HTML form data. def fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary validation", "fetch(request, fields, blanks=[]): \"\"\" Extremely rudimentary validation simply checks whether the fields are", "2010-2011 Lazy 8 Studios, LLC. # All rights reserved. # Contains utilities for", "POST parameters. \"\"\" values = {} ok = True for field in fields:", "LLC. # All rights reserved. # Contains utilities for working with HTML form", "(c) 2010-2011 Lazy 8 Studios, LLC. # All rights reserved. # Contains utilities", "Contains utilities for working with HTML form data. def fetch(request, fields, blanks=[]): \"\"\"", "\"\"\" Extremely rudimentary validation simply checks whether the fields are present and non-empty" ]
[ "level self.exception_list = exs self.pollable_list = pollables def __str__(self): PollableException.__str__(self) s = \"[\"", "ConfigException(Exception): def __init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex = ex class PollableException(CloudInitDException): def", "str(self._base_ex) except Exception, ex: s = str(s) return s class ProcessException(PollableException): def __init__(self,", "s + os.linesep + str(self._base_ex) except Exception, ex: s = str(s) return s", "pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level = level self.exception_list = exs self.pollable_list =", "s + d + str(ex) + \":\" + str(type(ex)) d = \",\" s", "ex): self._base_ex = ex exc_type, exc_value, exc_traceback = sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback) def", "+ os.linesep + \"stderr : %s\" % (str(self.stderr)) s = s + os.linesep", "s = s + os.linesep + \"stderr : %s\" % (str(self.stderr)) s =", "s = str(s) return s class ProcessException(PollableException): def __init__(self, pollable, ex, stdout, stderr,", "os.linesep + self.msg try: s = s + os.linesep + \"stdout : %s\"", "self.pollable_list = pollables def __str__(self): PollableException.__str__(self) s = \"[\" d = \"\" for", "stderr self.msg = msg def __str__(self): s = \"Error while processing the service:", "def __str__(self): PollableException.__str__(self) s = \"[\" d = \"\" for ex in self.exception_list:", "stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc = svc self.stdout = stdout self.stderr = stderr", "= s + os.linesep + \"stderr : %s\" % (str(self.stderr)) s = s", "in self.exception_list: s = s + d + str(ex) + \":\" + str(type(ex))", "class APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self,", "(str(self.stderr)) s = s + os.linesep + str(self._base_ex) except Exception, ex: s =", "= sys.exc_info() 
self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def get_stack(self): return str(self._base_stack)", "self.exception_list: s = s + d + str(ex) + \":\" + str(type(ex)) d", "def __init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class", "msg) class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self, msg):", "s = \"\" if self.msg: s = s + os.linesep + self.msg try:", "return CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc,", "msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc = svc self.stdout = stdout self.stderr", "= s + os.linesep + \"stdout : %s\" % (str(self.stdout)) s = s", "def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg = msg def __str__(self): return str(self.msg) class", "stdout self.stderr = stderr self.exit_code = rc class MultilevelException(PollableException): def __init__(self, exs, pollables,", "level): PollableException.__init__(self, pollables[0], exs[0]) self.level = level self.exception_list = exs self.pollable_list = pollables", "os.linesep + \"stderr : %s\" % (str(self.stderr)) s = s + os.linesep +", "msg) class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg = msg def __str__(self):", "self._svc = svc self.stdout = stdout self.stderr = stderr self.msg = msg def", "self.stdout = stdout self.stderr = stderr self.exit_code = rc class MultilevelException(PollableException): def __init__(self,", "class ProcessException(PollableException): def __init__(self, pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout", "= pollables 
def __str__(self): PollableException.__str__(self) s = \"[\" d = \"\" for ex", "msg) self.msg = msg def __str__(self): return str(self.msg) class ConfigException(Exception): def __init__(self, msg,", "Exception.__init__(self, msg) self._source_ex = ex class PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self, ex)", "+ self.msg try: s = s + os.linesep + \"stdout : %s\" %", "self.msg = msg def __str__(self): return str(self.msg) class ConfigException(Exception): def __init__(self, msg, ex=None):", "CloudInitDException(Exception): def __init__(self, ex): self._base_ex = ex exc_type, exc_value, exc_traceback = sys.exc_info() self._base_stack", "msg, ex=None): Exception.__init__(self, msg) self._source_ex = ex class PollableException(CloudInitDException): def __init__(self, p, ex):", "__init__(self, ex): self._base_ex = ex exc_type, exc_value, exc_traceback = sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback)", "msg) self._source_ex = ex class PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable", "CloudInitDException.__init__(self, msg) self.msg = msg def __str__(self): return str(self.msg) class ConfigException(Exception): def __init__(self,", "traceback import sys import os class CloudInitDException(Exception): def __init__(self, ex): self._base_ex = ex", "os.linesep + str(self._base_ex) except Exception, ex: s = str(s) return s class ProcessException(PollableException):", "msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg = msg", "svc, ex) self._svc = svc self.stdout = stdout self.stderr = stderr self.msg =", "+ str(ex) + \":\" + str(type(ex)) d = \",\" s = s +", "PollableException.__init__(self, pollable, ex) self.stdout = stdout self.stderr = stderr self.exit_code = rc class", "return s def get_output(self): s = \"\" if self.msg: s 
= s +", "return str(self._base_ex) def get_stack(self): return str(self._base_stack) class APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg)", "= rc class MultilevelException(PollableException): def __init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level", "msg): CloudInitDException.__init__(self, msg) self.msg = msg def __str__(self): return str(self.msg) class ConfigException(Exception): def", "stdout self.stderr = stderr self.msg = msg def __str__(self): s = \"Error while", "if self.msg: s = s + os.linesep + self.msg try: s = s", "__str__(self): PollableException.__str__(self) s = \"[\" d = \"\" for ex in self.exception_list: s", "+ os.linesep + str(self._base_ex) except Exception, ex: s = str(s) return s class", "svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc = svc self.stdout = stdout", "= stderr self.exit_code = rc class MultilevelException(PollableException): def __init__(self, exs, pollables, level): PollableException.__init__(self,", "= p def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self, ex, svc, msg=None,", "pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout = stdout self.stderr =", "self._source_ex = ex class PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable =", "+ os.linesep + \"stdout : %s\" % (str(self.stdout)) s = s + os.linesep", "exc_type, exc_value, exc_traceback = sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def", "stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout = stdout self.stderr = stderr self.exit_code =", "__str__(self): return str(self._base_ex) def get_stack(self): return str(self._base_stack) class APIUsageException(Exception): 
def __init__(self, msg): Exception.__init__(self,", "ex) self._svc = svc self.stdout = stdout self.stderr = stderr self.msg = msg", "\"stdout : %s\" % (str(self.stdout)) s = s + os.linesep + \"stderr :", "ProcessException(PollableException): def __init__(self, pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout =", "= \"[\" d = \"\" for ex in self.exception_list: s = s +", "Exception, ex: s = str(s) return s class ProcessException(PollableException): def __init__(self, pollable, ex,", "traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def get_stack(self): return str(self._base_stack) class APIUsageException(Exception): def __init__(self,", "exc_value, exc_traceback = sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def get_stack(self):", "except Exception, ex: s = str(s) return s class ProcessException(PollableException): def __init__(self, pollable,", "return s class ProcessException(PollableException): def __init__(self, pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable,", "rc class MultilevelException(PollableException): def __init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level =", "d = \"\" for ex in self.exception_list: s = s + d +", "def __str__(self): return str(self._base_ex) def get_stack(self): return str(self._base_stack) class APIUsageException(Exception): def __init__(self, msg):", ": %s\" % (str(self.stderr)) s = s + os.linesep + str(self._base_ex) except Exception,", "ex class PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable = p def", "ex in self.exception_list: s = s + d + str(ex) + \":\" +", "(self._svc.name) return s def get_output(self): s = \"\" if self.msg: s = s", "def __init__(self, ex): self._base_ex = ex exc_type, exc_value, exc_traceback = 
sys.exc_info() self._base_stack =", "exc_traceback = sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def get_stack(self): return", "= ex class PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable = p", "__init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException):", "self.msg = msg def __str__(self): s = \"Error while processing the service: %s\"", "stderr self.exit_code = rc class MultilevelException(PollableException): def __init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0],", "MultilevelException(PollableException): def __init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level = level self.exception_list", "def __str__(self): return str(self.msg) class ConfigException(Exception): def __init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex", "s + os.linesep + \"stdout : %s\" % (str(self.stdout)) s = s +", "os class CloudInitDException(Exception): def __init__(self, ex): self._base_ex = ex exc_type, exc_value, exc_traceback =", "p, ex): CloudInitDException.__init__(self, ex) self.pollable = p def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException):", "self.msg: s = s + os.linesep + self.msg try: s = s +", "\"stderr : %s\" % (str(self.stderr)) s = s + os.linesep + str(self._base_ex) except", "self.stderr = stderr self.exit_code = rc class MultilevelException(PollableException): def __init__(self, exs, pollables, level):", "__init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex = ex class PollableException(CloudInitDException): def __init__(self, p,", "% (str(self.stderr)) s = s + os.linesep + str(self._base_ex) except Exception, ex: s", "\"Error while 
processing the service: %s\" % (self._svc.name) return s def get_output(self): s", "(str(self.stdout)) s = s + os.linesep + \"stderr : %s\" % (str(self.stderr)) s", "sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def get_stack(self): return str(self._base_stack) class", "__init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc = svc self.stdout", "__init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level = level self.exception_list = exs", "self.exception_list = exs self.pollable_list = pollables def __str__(self): PollableException.__str__(self) s = \"[\" d", "exs[0]) self.level = level self.exception_list = exs self.pollable_list = pollables def __str__(self): PollableException.__str__(self)", "PollableException.__str__(self) s = \"[\" d = \"\" for ex in self.exception_list: s =", "s = s + os.linesep + str(self._base_ex) except Exception, ex: s = str(s)", "__init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg =", "s = s + d + str(ex) + \":\" + str(type(ex)) d =", "class MultilevelException(PollableException): def __init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level = level", "% (self._svc.name) return s def get_output(self): s = \"\" if self.msg: s =", "get_stack(self): return str(self._base_stack) class APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception): def", "= stdout self.stderr = stderr self.msg = msg def __str__(self): s = \"Error", "+ str(self._base_ex) except Exception, ex: s = str(s) return s class ProcessException(PollableException): def", "%s\" % (str(self.stdout)) s = s + os.linesep + \"stderr : %s\" %", "def get_output(self): s = \"\" if self.msg: s = s + 
os.linesep +", "self._base_ex = ex exc_type, exc_value, exc_traceback = sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback) def __str__(self):", "get_output(self): s = \"\" if self.msg: s = s + os.linesep + self.msg", "= exs self.pollable_list = pollables def __str__(self): PollableException.__str__(self) s = \"[\" d =", "s class ProcessException(PollableException): def __init__(self, pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex)", "pollables[0], exs[0]) self.level = level self.exception_list = exs self.pollable_list = pollables def __str__(self):", "import os class CloudInitDException(Exception): def __init__(self, ex): self._base_ex = ex exc_type, exc_value, exc_traceback", "return str(self._base_stack) class APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self,", "__str__(self): return str(self.msg) class ConfigException(Exception): def __init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex =", "s = s + os.linesep + \"stdout : %s\" % (str(self.stdout)) s =", "+ \":\" + str(type(ex)) d = \",\" s = s + \"]\" return", "= svc self.stdout = stdout self.stderr = stderr self.msg = msg def __str__(self):", "self.stderr = stderr self.msg = msg def __str__(self): s = \"Error while processing", "msg def __str__(self): s = \"Error while processing the service: %s\" % (self._svc.name)", "def __init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable = p def __str__(self): return CloudInitDException.__str__(self)", "\"\" if self.msg: s = s + os.linesep + self.msg try: s =", "pollables def __str__(self): PollableException.__str__(self) s = \"[\" d = \"\" for ex in", "str(self._base_stack) class APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self, msg):", "ex exc_type, exc_value, exc_traceback = sys.exc_info() 
self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex)", "try: s = s + os.linesep + \"stdout : %s\" % (str(self.stdout)) s", "def __init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level = level self.exception_list =", "exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0]) self.level = level self.exception_list = exs self.pollable_list", "msg): Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def", "for ex in self.exception_list: s = s + d + str(ex) + \":\"", "= msg def __str__(self): s = \"Error while processing the service: %s\" %", "pollable, ex) self.stdout = stdout self.stderr = stderr self.exit_code = rc class MultilevelException(PollableException):", "= msg def __str__(self): return str(self.msg) class ConfigException(Exception): def __init__(self, msg, ex=None): Exception.__init__(self,", "+ d + str(ex) + \":\" + str(type(ex)) d = \",\" s =", "def __init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc = svc", "+ os.linesep + self.msg try: s = s + os.linesep + \"stdout :", "= s + os.linesep + str(self._base_ex) except Exception, ex: s = str(s) return", "p def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self, ex, svc, msg=None, stdout=\"\",", "self.msg try: s = s + os.linesep + \"stdout : %s\" % (str(self.stdout))", ": %s\" % (str(self.stdout)) s = s + os.linesep + \"stderr : %s\"", "= level self.exception_list = exs self.pollable_list = pollables def __str__(self): PollableException.__str__(self) s =", "s = \"[\" d = \"\" for ex in self.exception_list: s = s", "PollableException.__init__(self, svc, ex) self._svc = svc self.stdout = stdout self.stderr = stderr self.msg", "class ConfigException(Exception): def 
__init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex = ex class PollableException(CloudInitDException):", "= ex exc_type, exc_value, exc_traceback = sys.exc_info() self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return", "<gh_stars>0 import traceback import sys import os class CloudInitDException(Exception): def __init__(self, ex): self._base_ex", "%s\" % (str(self.stderr)) s = s + os.linesep + str(self._base_ex) except Exception, ex:", "d + str(ex) + \":\" + str(type(ex)) d = \",\" s = s", "__init__(self, pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout = stdout self.stderr", "CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex)", "def __str__(self): s = \"Error while processing the service: %s\" % (self._svc.name) return", "PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable = p def __str__(self): return", "%s\" % (self._svc.name) return s def get_output(self): s = \"\" if self.msg: s", "self.pollable = p def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self, ex, svc,", "import traceback import sys import os class CloudInitDException(Exception): def __init__(self, ex): self._base_ex =", "self.stdout = stdout self.stderr = stderr self.msg = msg def __str__(self): s =", "s + os.linesep + self.msg try: s = s + os.linesep + \"stdout", "self._base_stack = traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def get_stack(self): return str(self._base_stack) class APIUsageException(Exception):", "% (str(self.stdout)) s = s + os.linesep + \"stderr : %s\" % (str(self.stderr))", "s + os.linesep + \"stderr : %s\" % (str(self.stderr)) s = s +", "ex) self.stdout = stdout self.stderr = stderr 
self.exit_code = rc class MultilevelException(PollableException): def", "= \"\" for ex in self.exception_list: s = s + d + str(ex)", "\"\" for ex in self.exception_list: s = s + d + str(ex) +", "__init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable = p def __str__(self): return CloudInitDException.__str__(self) class", "svc self.stdout = stdout self.stderr = stderr self.msg = msg def __str__(self): s", "def get_stack(self): return str(self._base_stack) class APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception):", "service: %s\" % (self._svc.name) return s def get_output(self): s = \"\" if self.msg:", "TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg)", "__str__(self): s = \"Error while processing the service: %s\" % (self._svc.name) return s", "class PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self, ex) self.pollable = p def __str__(self):", "str(self._base_ex) def get_stack(self): return str(self._base_stack) class APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class", "\"[\" d = \"\" for ex in self.exception_list: s = s + d", "the service: %s\" % (self._svc.name) return s def get_output(self): s = \"\" if", "= stderr self.msg = msg def __str__(self): s = \"Error while processing the", "__init__(self, msg): CloudInitDException.__init__(self, msg) self.msg = msg def __str__(self): return str(self.msg) class ConfigException(Exception):", "ex) self.pollable = p def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self, ex,", "class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg = msg def __str__(self): return", 
"Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self,", "s = s + os.linesep + self.msg try: s = s + os.linesep", "ex: s = str(s) return s class ProcessException(PollableException): def __init__(self, pollable, ex, stdout,", "ServiceException(PollableException): def __init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc =", "stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc = svc self.stdout = stdout self.stderr =", "= s + d + str(ex) + \":\" + str(type(ex)) d = \",\"", "= s + os.linesep + self.msg try: s = s + os.linesep +", "= stdout self.stderr = stderr self.exit_code = rc class MultilevelException(PollableException): def __init__(self, exs,", "import sys import os class CloudInitDException(Exception): def __init__(self, ex): self._base_ex = ex exc_type,", "ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc = svc self.stdout =", "+ \"stdout : %s\" % (str(self.stdout)) s = s + os.linesep + \"stderr", "= \"\" if self.msg: s = s + os.linesep + self.msg try: s", "str(self.msg) class ConfigException(Exception): def __init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex = ex class", "exs self.pollable_list = pollables def __str__(self): PollableException.__str__(self) s = \"[\" d = \"\"", "def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"):", "str(s) return s class ProcessException(PollableException): def __init__(self, pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self,", "rc=None): PollableException.__init__(self, pollable, ex) self.stdout = stdout self.stderr = stderr self.exit_code = rc", "self.exit_code = rc class 
MultilevelException(PollableException): def __init__(self, exs, pollables, level): PollableException.__init__(self, pollables[0], exs[0])", "def __init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg", "CloudInitDException.__init__(self, ex) self.pollable = p def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException): def __init__(self,", "processing the service: %s\" % (self._svc.name) return s def get_output(self): s = \"\"", "= str(s) return s class ProcessException(PollableException): def __init__(self, pollable, ex, stdout, stderr, rc=None):", "msg def __str__(self): return str(self.msg) class ConfigException(Exception): def __init__(self, msg, ex=None): Exception.__init__(self, msg)", "PollableException.__init__(self, pollables[0], exs[0]) self.level = level self.exception_list = exs self.pollable_list = pollables def", "class CloudInitDException(Exception): def __init__(self, ex): self._base_ex = ex exc_type, exc_value, exc_traceback = sys.exc_info()", "s = \"Error while processing the service: %s\" % (self._svc.name) return s def", "APIUsageException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg)", "= traceback.format_tb(exc_traceback) def __str__(self): return str(self._base_ex) def get_stack(self): return str(self._base_stack) class APIUsageException(Exception): def", "Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg = msg def", "ex): CloudInitDException.__init__(self, ex) self.pollable = p def __str__(self): return CloudInitDException.__str__(self) class ServiceException(PollableException): def", "\":\" + str(type(ex)) d = \",\" s = s + \"]\" return s", "= \"Error while processing the service: %s\" 
% (self._svc.name) return s def get_output(self):", "def __init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex = ex class PollableException(CloudInitDException): def __init__(self,", "class TimeoutException(Exception): def __init__(self, msg): Exception.__init__(self, msg) class IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self,", "ex=None): Exception.__init__(self, msg) self._source_ex = ex class PollableException(CloudInitDException): def __init__(self, p, ex): CloudInitDException.__init__(self,", "class ServiceException(PollableException): def __init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self, svc, ex) self._svc", "IaaSException(CloudInitDException): def __init__(self, msg): CloudInitDException.__init__(self, msg) self.msg = msg def __str__(self): return str(self.msg)", "s def get_output(self): s = \"\" if self.msg: s = s + os.linesep", "os.linesep + \"stdout : %s\" % (str(self.stdout)) s = s + os.linesep +", "+ \"stderr : %s\" % (str(self.stderr)) s = s + os.linesep + str(self._base_ex)", "ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout = stdout self.stderr = stderr", "self.level = level self.exception_list = exs self.pollable_list = pollables def __str__(self): PollableException.__str__(self) s", "str(ex) + \":\" + str(type(ex)) d = \",\" s = s + \"]\"", "def __init__(self, pollable, ex, stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout = stdout", "return str(self.msg) class ConfigException(Exception): def __init__(self, msg, ex=None): Exception.__init__(self, msg) self._source_ex = ex", "stdout, stderr, rc=None): PollableException.__init__(self, pollable, ex) self.stdout = stdout self.stderr = stderr self.exit_code", "while processing the service: %s\" % (self._svc.name) return s def get_output(self): s =", "__str__(self): return CloudInitDException.__str__(self) class 
ServiceException(PollableException): def __init__(self, ex, svc, msg=None, stdout=\"\", stderr=\"\"): PollableException.__init__(self,", "sys import os class CloudInitDException(Exception): def __init__(self, ex): self._base_ex = ex exc_type, exc_value," ]
[ "-> User: with django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0]", "superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True,", "is_superuser=True, )[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User) -> Client:", "django.test import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock(): user: User", "django_client(django_db_blocker, superuser: User) -> Client: client = Client() with django_db_blocker.unblock(): client.force_login(superuser) return client", "is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User) ->", "User from django.test import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock():", "@pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\",", "with django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save()", "user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User) -> Client: client =", "return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User) -> Client: client = Client() with", "pytest from django.contrib.auth.models import User from django.test import Client @pytest.fixture(scope=\"session\") def 
superuser(django_db_setup, django_db_blocker)", "def django_client(django_db_blocker, superuser: User) -> Client: client = Client() with django_db_blocker.unblock(): client.force_login(superuser) return", "import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock(): user: User =", "from django.test import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock(): user:", "django_db_blocker) -> User: with django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True,", "User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\")", "user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User) -> Client: client = Client()", "email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User)", "user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return user", "= User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def", "user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User) -> Client: client = Client() with django_db_blocker.unblock():", "import User from django.test import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User: with", "@pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: 
User) -> Client: client = Client() with django_db_blocker.unblock(): client.force_login(superuser)", "Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock(): user: User = User.objects.get_or_create(", "import pytest from django.contrib.auth.models import User from django.test import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup,", ")[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser: User) -> Client: client", "django.contrib.auth.models import User from django.test import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) -> User:", "username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker, superuser:", "def superuser(django_db_setup, django_db_blocker) -> User: with django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\",", "User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return user @pytest.fixture(scope=\"session\") def django_client(django_db_blocker,", "django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\") user.save() return", "User: with django_db_blocker.unblock(): user: User = User.objects.get_or_create( username=\"x\", email=\"<EMAIL>\", is_staff=True, is_superuser=True, )[0] user.set_password(\"x\")", "from django.contrib.auth.models import User from django.test import Client @pytest.fixture(scope=\"session\") def superuser(django_db_setup, django_db_blocker) ->" ]
[ "url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python :: 3', 'License :: OSI Approved", ":: Python :: 3', 'License :: OSI Approved :: GNU General Public License", "description='Some very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language ::", "'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Operating System", "Python :: 3', 'License :: OSI Approved :: GNU General Public License v3", "open('README.md', 'r') as fh: long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some", "long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python :: 3', 'License :: OSI", "setuptools with open('README.md', 'r') as fh: long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>',", "= fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame tools.', long_description=long_description,", "version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(),", 
"packages=setuptools.find_packages(), classifiers=['Programming Language :: Python :: 3', 'License :: OSI Approved :: GNU", ":: 3', 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',", "'r') as fh: long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very", ":: GNU General Public License v3 (GPLv3)', 'Operating System :: OS Independent'], )", "classifiers=['Programming Language :: Python :: 3', 'License :: OSI Approved :: GNU General", ":: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Operating System ::", "author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming", "OSI Approved :: GNU General Public License v3 (GPLv3)', 'Operating System :: OS", "author_email='<EMAIL>', description='Some very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language", "setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url=", "'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python :: 3', 'License :: OSI Approved ::", "as fh: long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', 
version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic", "basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python ::", "with open('README.md', 'r') as fh: long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>',", "long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python :: 3', 'License ::", "long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame tools.',", "Approved :: GNU General Public License v3 (GPLv3)', 'Operating System :: OS Independent'],", "tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python :: 3', 'License", "very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python", "fh: long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame", "3', 'License :: OSI Approved :: 
GNU General Public License v3 (GPLv3)', 'Operating", "name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews',", "fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4', author='<NAME>', author_email='<EMAIL>', description='Some very basic DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown',", "import setuptools with open('README.md', 'r') as fh: long_description = fh.read() setuptools.setup( name='lambdata-pkutrich', version='0.0.4',", "Language :: Python :: 3', 'License :: OSI Approved :: GNU General Public", "DataFrame tools.', long_description=long_description, long_description_content_type='text/markdown', url= 'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews', packages=setuptools.find_packages(), classifiers=['Programming Language :: Python :: 3'," ]
[ "Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\")", "= Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist):", "from django.shortcuts import render # Create your views here. from django.http import HttpResponse,", "= loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request)) return render(request,", "loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request)) return render(request, 'polls/index.html',", "F from .models import Question, Choice class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name =", "raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question': question}) \"\"\" class ResultsView(generic.DetailView): model = Question", "ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question", "\"\"\"def detail(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request,", "pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output = '<br />", "的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def", "render #, get_object_or_404 from django.urls import reverse 
from django.utils import timezone from django.views", "'question': question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes = F('votes') + 1 # F", "默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回:", "Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question': question}) \"\"\" class ResultsView(generic.DetailView):", "from django.template import loader from django.shortcuts import render #, get_object_or_404 from django.urls import", "# 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): #", "question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question':", "'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def", "try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html', {", "return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise", "import generic # 通用视图 from django.db.models import F from .models import Question, Choice", "# 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try: question =", "# from django.template import loader from django.shortcuts import render #, get_object_or_404 from django.urls", 
"'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes = F('votes') + 1", "Question, Choice class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\"", "= Question template_name = 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now())", "\"请选择一个选项\", }) else: selected_choice.votes = F('votes') + 1 # F 防止竞争条件,多个同事投票,解决:下边的save丢失的问题。 selected_choice.save() return", "timezone from django.views import generic # 通用视图 from django.db.models import F from .models", "in latest_question_list]) # template = loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list } #", "<gh_stars>0 from django.shortcuts import render # Create your views here. from django.http import", "# 通用视图 from django.db.models import F from .models import Question, Choice class IndexView(generic.ListView):", "render(request, 'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes = F('votes') +", "'polls/results.html', {'question': question}) \"\"\" #...处理投票表单 def vote(request, question_id): try: question = Question.objects.get(pk=question_id) except", "render(request, 'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model = Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView", "name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now())", "from django.db.models import F from .models import Question, Choice class IndexView(generic.ListView): template_name =", "template_name = 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): 
\"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5]", "Choice class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return", "latest_question_list = Question.objects.order_by('-pub_date')[:5] # output = '<br /> '.join([q.question_text for q in latest_question_list])", "question}) \"\"\" #...处理投票表单 def vote(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise", "\"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output = '<br /> '.join([q.question_text for q", "模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id):", "django.shortcuts import render #, get_object_or_404 from django.urls import reverse from django.utils import timezone", "return render(request, 'polls/detail.html', {'question': question}) \"\"\" class ResultsView(generic.DetailView): model = Question template_name =", "import HttpResponse, HttpResponseRedirect, Http404 # from django.template import loader from django.shortcuts import render", "\"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output", "try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': question})", "django.urls import reverse from django.utils import timezone from django.views import generic # 通用视图", "# 有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes", 
"latest_question_list } # HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model", "发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try: question = Question.objects.get(pk=question_id)", "except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\",", "import render # Create your views here. from django.http import HttpResponse, HttpResponseRedirect, Http404", "get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] #", "detail(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html',", "Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question': question}) \"\"\" class ResultsView(generic.DetailView): model =", "results(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html',", "template = loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request)) return", "Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return", "'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context) \"\"\" class 
DetailView(generic.DetailView):", "{'question': question}) \"\"\" class ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html' def get_queryset(self):", "# Create your views here. from django.http import HttpResponse, HttpResponseRedirect, Http404 # from", "使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question", "/> '.join([q.question_text for q in latest_question_list]) # template = loader.get_template('polls/index.html') context = {", "template_name = 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def", "没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try: question = Question.objects.get(pk=question_id) except", "question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': question}) \"\"\"", "没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try: question = Question.objects.get(pk=question_id) except", "class ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的", "import render #, get_object_or_404 from django.urls import reverse from django.utils import timezone from", "q in latest_question_list]) # template = loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list }", "here. 
from django.http import HttpResponse, HttpResponseRedirect, Http404 # from django.template import loader from", "class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter(", "loader from django.shortcuts import render #, get_object_or_404 from django.urls import reverse from django.utils", "your views here. from django.http import HttpResponse, HttpResponseRedirect, Http404 # from django.template import", ").order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output = '<br /> '.join([q.question_text", "django.utils import timezone from django.views import generic # 通用视图 from django.db.models import F", "return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise", "= 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list", "Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\", }) else:", "get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try: question", "try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except", "request)) return render(request, 'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model = Question template_name='polls/detail.html' #", "HttpResponseRedirect, Http404 # from django.template 
import loader from django.shortcuts import render #, get_object_or_404", "get_object_or_404 from django.urls import reverse from django.utils import timezone from django.views import generic", "django.views import generic # 通用视图 from django.db.models import F from .models import Question,", "index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output = '<br /> '.join([q.question_text for q in", "\"\"\" #...处理投票表单 def vote(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\")", "template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self):", "通用视图 from django.db.models import F from .models import Question, Choice class IndexView(generic.ListView): template_name", "= Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板", "DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的", "DetailView(generic.DetailView): model = Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用", "Question template_name = 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页", "reverse from django.utils import timezone from django.views import generic # 通用视图 from django.db.models", "\"\"\" class DetailView(generic.DetailView): model = Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model", "vote(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: 
selected_choice =", "django.http import HttpResponse, HttpResponseRedirect, Http404 # from django.template import loader from django.shortcuts import", "} # HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model =", "'error_message': \"请选择一个选项\", }) else: selected_choice.votes = F('votes') + 1 # F 防止竞争条件,多个同事投票,解决:下边的save丢失的问题。 selected_choice.save()", "'<br /> '.join([q.question_text for q in latest_question_list]) # template = loader.get_template('polls/index.html') context =", "import loader from django.shortcuts import render #, get_object_or_404 from django.urls import reverse from", "\"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request,", "return render(request, 'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes = F('votes')", "import F from .models import Question, Choice class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name", "question}) \"\"\" class ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html' def get_queryset(self): #", "django.db.models import F from .models import Question, Choice class IndexView(generic.ListView): template_name = 'polls/index.html'", "django.shortcuts import render # Create your views here. 
from django.http import HttpResponse, HttpResponseRedirect,", "Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def", "except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页", "class DetailView(generic.DetailView): model = Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html", "IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now()", "question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question':", "question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist:", "Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html',", "from django.utils import timezone from django.views import generic # 通用视图 from django.db.models import", "model = Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\"", "Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output = '<br", "def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return 
Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try:", "# HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model = Question", "render(request, 'polls/detail.html', {'question': question}) \"\"\" class ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html'", "{'question': question}) \"\"\" #...处理投票表单 def vote(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist:", "= 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request,", "django.template import loader from django.shortcuts import render #, get_object_or_404 from django.urls import reverse", "(KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\", })", "= '<br /> '.join([q.question_text for q in latest_question_list]) # template = loader.get_template('polls/index.html') context", "Question.objects.order_by('-pub_date')[:5] # output = '<br /> '.join([q.question_text for q in latest_question_list]) # template", "question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError,", "Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': question}) \"\"\" #...处理投票表单 def", "\"\"\" class ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回:", "except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', 
{'question': question}) \"\"\" class ResultsView(generic.DetailView): model", "Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\")", "#...处理投票表单 def vote(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try:", "else: selected_choice.votes = F('votes') + 1 # F 防止竞争条件,多个同事投票,解决:下边的save丢失的问题。 selected_choice.save() return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))", "= Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': question}) \"\"\" #...处理投票表单", "#...详情页 \"\"\"def detail(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return", "<app name>/<model name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return", "def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try:", "\"\"\"def results(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request,", "import Question, Choice class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self):", "raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': question}) \"\"\" #...处理投票表单 def vote(request, question_id): try:", "= Question.objects.order_by('-pub_date')[:5] # output = '<br /> '.join([q.question_text for q in latest_question_list]) #", 
"context_object_name = 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request):", "render(request, 'polls/results.html', {'question': question}) \"\"\" #...处理投票表单 def vote(request, question_id): try: question = Question.objects.get(pk=question_id)", "# output = '<br /> '.join([q.question_text for q in latest_question_list]) # template =", "'.join([q.question_text for q in latest_question_list]) # template = loader.get_template('polls/index.html') context = { 'latest_question_list':", "from django.http import HttpResponse, HttpResponseRedirect, Http404 # from django.template import loader from django.shortcuts", "output = '<br /> '.join([q.question_text for q in latest_question_list]) # template = loader.get_template('polls/index.html')", "for q in latest_question_list]) # template = loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list", "{ 'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context) \"\"\" class", "get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try: question", "HttpResponse, HttpResponseRedirect, Http404 # from django.template import loader from django.shortcuts import render #,", "= Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question': question}) \"\"\" class", "'polls/detail.html', {'question': question}) \"\"\" class ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html' def", "context) \"\"\" class DetailView(generic.DetailView): model = Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做 <app", 
"question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice'])", "{ 'question': question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes = F('votes') + 1 #", "try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question': question})", "question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist:", "# 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try: question =", "return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output =", "Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': question}) \"\"\" #...处理投票表单 def vote(request, question_id): try: question", "from django.shortcuts import render #, get_object_or_404 from django.urls import reverse from django.utils import", "from django.views import generic # 通用视图 from django.db.models import F from .models import", "return render(request, 'polls/results.html', {'question': question}) \"\"\" #...处理投票表单 def vote(request, question_id): try: question =", "# template = loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request))", "from django.urls import reverse from django.utils import timezone from django.views import generic #", "def vote(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise 
Http404(\"未找到!o(╯□╰)o\") try: selected_choice", "#, get_object_or_404 from django.urls import reverse from django.utils import timezone from django.views import", "latest_question_list]) # template = loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list } # HttpResponse(template.render(context,", "Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): #", "question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question': question, 'error_message':", "context = { 'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context)", "Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': question}) \"\"\" #...处理投票表单 def vote(request, question_id):", "import reverse from django.utils import timezone from django.views import generic # 通用视图 from", "= 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页", "Create your views here. 
from django.http import HttpResponse, HttpResponseRedirect, Http404 # from django.template", "'latest_question_list' def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list =", "= { 'latest_question_list': latest_question_list } # HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context) \"\"\"", "model = Question template_name = 'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return", "Http404 # from django.template import loader from django.shortcuts import render #, get_object_or_404 from", "generic # 通用视图 from django.db.models import F from .models import Question, Choice class", "'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model = Question template_name='polls/detail.html' # 默认情况下,通用视图 DetailView 使用一个叫做", "HttpResponse(template.render(context, request)) return render(request, 'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model = Question template_name='polls/detail.html'", "name>_detail.html 的模板。在我们的例子中,它将使用 \"polls/question_detail.html\" 模板 def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页", "question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes = F('votes') + 1 # F 防止竞争条件,多个同事投票,解决:下边的save丢失的问题。", "'polls/results.html' def get_queryset(self): # 发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...投票结果页 \"\"\"def results(request, question_id):", "#...投票结果页 \"\"\"def results(request, question_id): try: question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return", "raise Http404(\"未找到!o(╯□╰)o\") try: selected_choice = question.choice_set.get(pk=request.POST['choice']) 
except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request,", "question = Question.objects.get(pk=question_id) except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question': question}) \"\"\"", "views here. from django.http import HttpResponse, HttpResponseRedirect, Http404 # from django.template import loader", ".models import Question, Choice class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list' def", "有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question': question, 'error_message': \"请选择一个选项\", }) else: selected_choice.votes =", "render # Create your views here. from django.http import HttpResponse, HttpResponseRedirect, Http404 #", "def get_queryset(self): \"\"\"返回最近发布的5个问题\"\"\" return Question.objects.filter( pub_date__lte=timezone.now() ).order_by('-pub_date')[:5] #...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5]", "#...主页 \"\"\"def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] # output = '<br /> '.join([q.question_text for", "Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/detail.html', {'question': question}) \"\"\" class ResultsView(generic.DetailView): model = Question template_name", "from .models import Question, Choice class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list'", "return render(request, 'polls/index.html', context) \"\"\" class DetailView(generic.DetailView): model = Question template_name='polls/detail.html' # 默认情况下,通用视图", "发布时间未到,不展示任何问题详情。如果没有,返回: 没有找到符合查询的 question return Question.objects.filter(pub_date__lte=timezone.now()) #...详情页 \"\"\"def detail(request, question_id): try: question = Question.objects.get(pk=question_id)", "except Question.DoesNotExist: raise Http404(\"未找到!o(╯□╰)o\") return render(request, 'polls/results.html', {'question': 
question}) \"\"\" #...处理投票表单 def vote(request,", "selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question':", "= question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # 有错误就返回到上一页 return render(request, 'polls/detail.html', { 'question': question,", "}) else: selected_choice.votes = F('votes') + 1 # F 防止竞争条件,多个同事投票,解决:下边的save丢失的问题。 selected_choice.save() return HttpResponseRedirect(reverse('polls:results',", "import timezone from django.views import generic # 通用视图 from django.db.models import F from" ]
[ "out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs: f.write(item", "= [] for line in f: line = line.strip() if not ('[' in", "ValueError: print('Skipped invalid json object: %s' % line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs =", "dialogs: %d' % cnt) # TODO split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with", "= '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs: f.write(item +", "= dlg.strip() dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg) buf = [] cnt +=", "line): check_json(line) buf.append(line) if ']' in line: dlg = ''.join(buf) dlg = dlg.strip()", "('[' in line or ']' in line): check_json(line) buf.append(line) if ']' in line:", "json.loads(line.strip()[:-1]) except ValueError: print('Skipped invalid json object: %s' % line.strip()) in_file = '../data/raw_data/TrainingFinal.txt'", "print('Skipped invalid json object: %s' % line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs = []", "with open(out_file, 'w') as f: for item in dialogs: f.write(item + '\\n') out_file", "= ''.join(buf) dlg = dlg.strip() dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg) buf =", "[] for line in f: line = line.strip() if not ('[' in line", "% cnt) # TODO split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w')", "'\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs:", "check_json(line): try: json.loads(line.strip()[:-1]) except ValueError: print('Skipped invalid json object: %s' % line.strip()) in_file", "buf = [] cnt += 1 print('Processed dialogs: %d' % cnt) # TODO", "']' in line): check_json(line) buf.append(line) if ']' in line: dlg = ''.join(buf) dlg", "[] cnt = 0 with open(in_file) as f: buf = [] for line", "except ValueError: print('Skipped invalid json object: %s' % 
line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs", "# TODO split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f:", "1 print('Processed dialogs: %d' % cnt) # TODO split the data out_file =", "line or ']' in line): check_json(line) buf.append(line) if ']' in line: dlg =", "dialogs: f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f: for", "'../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs: f.write(item + '\\n')", "= '../data/raw_data/TrainingFinal.txt' dialogs = [] cnt = 0 with open(in_file) as f: buf", "buf.append(line) if ']' in line: dlg = ''.join(buf) dlg = dlg.strip() dlg =", "in_file = '../data/raw_data/TrainingFinal.txt' dialogs = [] cnt = 0 with open(in_file) as f:", "f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f: for item", "if ']' in line: dlg = ''.join(buf) dlg = dlg.strip() dlg = dlg[:-2]", "print('Processed dialogs: %d' % cnt) # TODO split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt'", "import json def check_json(line): try: json.loads(line.strip()[:-1]) except ValueError: print('Skipped invalid json object: %s'", "%d' % cnt) # TODO split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file,", "0 with open(in_file) as f: buf = [] for line in f: line", "line in f: line = line.strip() if not ('[' in line or ']'", "%s' % line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs = [] cnt = 0 with", "dlg[-1] dialogs.append(dlg) buf = [] cnt += 1 print('Processed dialogs: %d' % cnt)", "+ '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f: for item in", "buf = [] for line in f: line = line.strip() if not ('['", "= line.strip() if not ('[' in line or ']' in 
line): check_json(line) buf.append(line)", "f: for item in dialogs: f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file,", "= '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs: f.write(item +", "TODO split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f: for", "split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f: for item", "as f: for item in dialogs: f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with", "in line): check_json(line) buf.append(line) if ']' in line: dlg = ''.join(buf) dlg =", "data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs:", "def check_json(line): try: json.loads(line.strip()[:-1]) except ValueError: print('Skipped invalid json object: %s' % line.strip())", "dlg = ''.join(buf) dlg = dlg.strip() dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg) buf", "object: %s' % line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs = [] cnt = 0", "cnt = 0 with open(in_file) as f: buf = [] for line in", "+= 1 print('Processed dialogs: %d' % cnt) # TODO split the data out_file", "line.strip() if not ('[' in line or ']' in line): check_json(line) buf.append(line) if", "= dlg[:-2] + dlg[-1] dialogs.append(dlg) buf = [] cnt += 1 print('Processed dialogs:", "'../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs: f.write(item + '\\n')", "dlg = dlg.strip() dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg) buf = [] cnt", "as f: buf = [] for line in f: line = line.strip() if", "[] cnt += 1 print('Processed dialogs: %d' % cnt) # TODO split the", "= [] cnt = 0 with open(in_file) as f: buf = [] for", "f: buf = [] for line in f: line = line.strip() if not", "in line: dlg = 
''.join(buf) dlg = dlg.strip() dlg = dlg[:-2] + dlg[-1]", "dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg) buf = [] cnt += 1 print('Processed", "cnt) # TODO split the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as", "for item in dialogs: f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w')", "']' in line: dlg = ''.join(buf) dlg = dlg.strip() dlg = dlg[:-2] +", "line: dlg = ''.join(buf) dlg = dlg.strip() dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg)", "line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs = [] cnt = 0 with open(in_file) as", "dialogs = [] cnt = 0 with open(in_file) as f: buf = []", "open(out_file, 'w') as f: for item in dialogs: f.write(item + '\\n') out_file =", "% line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs = [] cnt = 0 with open(in_file)", "with open(in_file) as f: buf = [] for line in f: line =", "check_json(line) buf.append(line) if ']' in line: dlg = ''.join(buf) dlg = dlg.strip() dlg", "dlg.strip() dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg) buf = [] cnt += 1", "in dialogs: f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as f:", "dialogs.append(dlg) buf = [] cnt += 1 print('Processed dialogs: %d' % cnt) #", "cnt += 1 print('Processed dialogs: %d' % cnt) # TODO split the data", "for line in f: line = line.strip() if not ('[' in line or", "line = line.strip() if not ('[' in line or ']' in line): check_json(line)", "try: json.loads(line.strip()[:-1]) except ValueError: print('Skipped invalid json object: %s' % line.strip()) in_file =", "in line or ']' in line): check_json(line) buf.append(line) if ']' in line: dlg", "dlg[:-2] + dlg[-1] dialogs.append(dlg) buf = [] cnt += 1 print('Processed dialogs: %d'", "= [] cnt += 1 print('Processed dialogs: %d' % cnt) # TODO split", "invalid json object: %s' % line.strip()) in_file = 
'../data/raw_data/TrainingFinal.txt' dialogs = [] cnt", "'../data/raw_data/TrainingFinal.txt' dialogs = [] cnt = 0 with open(in_file) as f: buf =", "f: line = line.strip() if not ('[' in line or ']' in line):", "+ dlg[-1] dialogs.append(dlg) buf = [] cnt += 1 print('Processed dialogs: %d' %", "the data out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f: for item in", "json object: %s' % line.strip()) in_file = '../data/raw_data/TrainingFinal.txt' dialogs = [] cnt =", "= 0 with open(in_file) as f: buf = [] for line in f:", "open(in_file) as f: buf = [] for line in f: line = line.strip()", "if not ('[' in line or ']' in line): check_json(line) buf.append(line) if ']'", "or ']' in line): check_json(line) buf.append(line) if ']' in line: dlg = ''.join(buf)", "''.join(buf) dlg = dlg.strip() dlg = dlg[:-2] + dlg[-1] dialogs.append(dlg) buf = []", "'w') as f: for item in dialogs: f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt'", "in f: line = line.strip() if not ('[' in line or ']' in", "json def check_json(line): try: json.loads(line.strip()[:-1]) except ValueError: print('Skipped invalid json object: %s' %", "out_file = '../data/corpora_processed/train_processed_dialogs.txt' with open(out_file, 'w') as f: for item in dialogs: f.write(item", "not ('[' in line or ']' in line): check_json(line) buf.append(line) if ']' in", "item in dialogs: f.write(item + '\\n') out_file = '../data/corpora_processed/val_processed_dialogs.txt' with open(out_file, 'w') as" ]
[ "[ ('resources', '0027_comments_verbose_name'), ] operations = [ migrations.AddField( model_name='purpose', name='public', field=models.BooleanField(default=True, verbose_name='Public'), ),", "-*- # Generated by Django 1.9 on 2016-01-07 12:18 from __future__ import unicode_literals", "# Generated by Django 1.9 on 2016-01-07 12:18 from __future__ import unicode_literals from", "unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('resources', '0027_comments_verbose_name'),", "<reponame>suutari-ai/respa<filename>resources/migrations/0028_purpose_public.py # -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-01-07", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('resources', '0027_comments_verbose_name'), ] operations =", "class Migration(migrations.Migration): dependencies = [ ('resources', '0027_comments_verbose_name'), ] operations = [ migrations.AddField( model_name='purpose',", "2016-01-07 12:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration):", "1.9 on 2016-01-07 12:18 from __future__ import unicode_literals from django.db import migrations, models", "by Django 1.9 on 2016-01-07 12:18 from __future__ import unicode_literals from django.db import", "models class Migration(migrations.Migration): dependencies = [ ('resources', '0027_comments_verbose_name'), ] operations = [ migrations.AddField(", "Django 1.9 on 2016-01-07 12:18 from __future__ import unicode_literals from django.db import migrations,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('resources', '0027_comments_verbose_name'), ]", "Migration(migrations.Migration): dependencies = [ ('resources', '0027_comments_verbose_name'), ] operations = [ migrations.AddField( model_name='purpose', name='public',", "migrations, models class Migration(migrations.Migration): dependencies = [ 
('resources', '0027_comments_verbose_name'), ] operations = [", "# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-01-07 12:18", "= [ ('resources', '0027_comments_verbose_name'), ] operations = [ migrations.AddField( model_name='purpose', name='public', field=models.BooleanField(default=True, verbose_name='Public'),", "12:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies", "import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('resources',", "Generated by Django 1.9 on 2016-01-07 12:18 from __future__ import unicode_literals from django.db", "coding: utf-8 -*- # Generated by Django 1.9 on 2016-01-07 12:18 from __future__", "('resources', '0027_comments_verbose_name'), ] operations = [ migrations.AddField( model_name='purpose', name='public', field=models.BooleanField(default=True, verbose_name='Public'), ), ]", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('resources', '0027_comments_verbose_name'), ] operations", "from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "dependencies = [ ('resources', '0027_comments_verbose_name'), ] operations = [ migrations.AddField( model_name='purpose', name='public', field=models.BooleanField(default=True,", "-*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-01-07 12:18 from", "on 2016-01-07 12:18 from __future__ import unicode_literals from django.db import migrations, models class", "__future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "utf-8 -*- # Generated by Django 1.9 on 2016-01-07 12:18 from __future__ import" ]
[ "continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for i in range(4): if i ==", "if i == 2: print(\"Skipping: %d\" % i) # the continue keyword lets", "i in range(4): if i == 2: print(\"break\") # the break breaks out", "i in range(4): if i == 2: print(\"Skipping: %d\" % i) # the", "the continue keyword lets you skip an iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak", "i == 2: print(\"break\") # the break breaks out of the loop break", "lets you skip an iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for i", "iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for i in range(4): if i", "range(4): if i == 2: print(\"break\") # the break breaks out of the", "== 2: print(\"break\") # the break breaks out of the loop break print(i)", "in range(4): if i == 2: print(\"break\") # the break breaks out of", "loop\") for i in range(4): if i == 2: print(\"break\") # the break", "else: print(\"Done...\") print(\"\\nBreak loop\") for i in range(4): if i == 2: print(\"break\")", "# the continue keyword lets you skip an iteration continue print(i) else: print(\"Done...\")", "% i) # the continue keyword lets you skip an iteration continue print(i)", "print(\"Done...\") print(\"\\nBreak loop\") for i in range(4): if i == 2: print(\"break\") #", "you skip an iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for i in", "continue keyword lets you skip an iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\")", "for i in range(4): if i == 2: print(\"break\") # the break breaks", "#!/usr/bin/env python # START OMIT for i in range(4): if i == 2:", "2: print(\"Skipping: %d\" % i) # the continue keyword lets you skip an", "print(\"break\") # the break breaks out of the loop break print(i) # END", "an iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for i in range(4): if", "# START OMIT for i in range(4): if i == 2: 
print(\"Skipping: %d\"", "OMIT for i in range(4): if i == 2: print(\"Skipping: %d\" % i)", "2: print(\"break\") # the break breaks out of the loop break print(i) #", "print(\"Skipping: %d\" % i) # the continue keyword lets you skip an iteration", "for i in range(4): if i == 2: print(\"Skipping: %d\" % i) #", "in range(4): if i == 2: print(\"Skipping: %d\" % i) # the continue", "keyword lets you skip an iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for", "%d\" % i) # the continue keyword lets you skip an iteration continue", "== 2: print(\"Skipping: %d\" % i) # the continue keyword lets you skip", "# the break breaks out of the loop break print(i) # END OMIT", "i == 2: print(\"Skipping: %d\" % i) # the continue keyword lets you", "START OMIT for i in range(4): if i == 2: print(\"Skipping: %d\" %", "range(4): if i == 2: print(\"Skipping: %d\" % i) # the continue keyword", "print(\"\\nBreak loop\") for i in range(4): if i == 2: print(\"break\") # the", "python # START OMIT for i in range(4): if i == 2: print(\"Skipping:", "if i == 2: print(\"break\") # the break breaks out of the loop", "i) # the continue keyword lets you skip an iteration continue print(i) else:", "print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for i in range(4): if i == 2:", "skip an iteration continue print(i) else: print(\"Done...\") print(\"\\nBreak loop\") for i in range(4):" ]
[ "open.core.betterself.serializers.activity_log_serializers import ( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, )", "read_serializer_class = ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class = ActivityLog read_serializer_class =", "ActivityLog from open.core.betterself.serializers.activity_log_serializers import ( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView,", "( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class", "open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class =", "import ActivityLog from open.core.betterself.serializers.activity_log_serializers import ( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import (", "ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class", "from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class", "import ( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class", "= ActivityLogCreateUpdateSerializer class 
ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer update_serializer_class = ActivityLogCreateUpdateSerializer", "ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class = ActivityLog read_serializer_class", "ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer update_serializer_class", "ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class =", "class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView):", "from open.core.betterself.models.activity_log import ActivityLog from open.core.betterself.serializers.activity_log_serializers import ( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins", "create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer update_serializer_class =", "( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView):", "open.core.betterself.models.activity_log import ActivityLog from open.core.betterself.serializers.activity_log_serializers import ( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from 
open.core.betterself.views.mixins import", "BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer", "BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class =", "ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class", ") from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog", "= ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer", ") class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class", "from open.core.betterself.serializers.activity_log_serializers import ( ActivityLogReadSerializer, ActivityLogCreateUpdateSerializer, ) from open.core.betterself.views.mixins import ( BaseGetUpdateDeleteView, BaseCreateListView,", "model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class =", "import ( BaseGetUpdateDeleteView, BaseCreateListView, ) class ActivityLogCreateListView(BaseCreateListView): model_class = ActivityLog read_serializer_class = ActivityLogReadSerializer", "= ActivityLog read_serializer_class = ActivityLogReadSerializer 
create_serializer_class = ActivityLogCreateUpdateSerializer class ActivityLogGetUpdateView(BaseGetUpdateDeleteView): model_class = ActivityLog" ]
[ "<gh_stars>0 from marshmallow import Schema, fields class OrderInfoSchema(Schema): name = fields.Str() phone_number =", "import Schema, fields class OrderInfoSchema(Schema): name = fields.Str() phone_number = fields.Str() email =", "fields class OrderInfoSchema(Schema): name = fields.Str() phone_number = fields.Str() email = fields.Str() shipping_address", "OrderInfoSchema(Schema): name = fields.Str() phone_number = fields.Str() email = fields.Str() shipping_address = fields.Nested('ShippingAddressSchema')", "class OrderInfoSchema(Schema): name = fields.Str() phone_number = fields.Str() email = fields.Str() shipping_address =", "Schema, fields class OrderInfoSchema(Schema): name = fields.Str() phone_number = fields.Str() email = fields.Str()", "from marshmallow import Schema, fields class OrderInfoSchema(Schema): name = fields.Str() phone_number = fields.Str()", "marshmallow import Schema, fields class OrderInfoSchema(Schema): name = fields.Str() phone_number = fields.Str() email" ]
[ "return a mask pass def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp", "} alg = LocalOutlierFactor elif self.method == \"ee\": param = { 'contamination': 0.1,", "log: print(\"with %d detected outliers, precision is: %.4f\"%(len(outliers), prec)) return prec, tp def", "self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not in self.structured_info: continue prec, tp = self.compute_precision(outlier,", "ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50,", "for attr, dtype in self.attributes.items(): if dtype == CATEGORICAL or (dtype == TEXT", "num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width = 0.35 rects1 =", "self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, ))) for i, row in", "self.param = { 'm1': 3, 'm2': 5, } def get_outliers(self, data, right=None, m='m1'):", "+ distances continue # validate type and calculate cosine distance if self.attributes[attr] ==", "return has_same_left @abstractmethod def get_outliers(self, data, right=None): # return a mask pass def", "outlier is found and no outlier is present in the ground truth as", "outliers = set(outliers) tp = 0.0 # precision if len(outliers) == 0: if", "= tp / len(self.gt_idx) if log: print(\"with %d detected outliers, recall is: %.4f\"%(len(outliers),", "self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance", "no outliers in the groud truth, recall is: 1\"%(len(self.gt_idx))) return 1 recall =", "self.overall_info[right] = { 'avg_neighbors': self.df.shape[0], 'total_outliers': 
len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False)", "TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine", "self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall = [] for", "import sklearn import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__)", "== CATEGORICAL or self.attributes[attr] == TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr]", "TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers)", "ax2.set_ylabel('count') fig, ax = plt.subplots() width = 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for", "gt_idx self.overall = None self.structured = None self.combined = None self.workers=workers self.t =", "distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn(self, left): X =", "num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers def run_structured(self, parent_sets):", "%s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax = plt.subplots() width = 0.35 rects1", "import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn import", "ax.text(i - 0.25, v + .03, \"%.2f\"%v) 
ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s", "self.min_neighbors: return mask == -1 y = model.fit_predict(clean) mask[~row_has_nan] = y mask =", "method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs):", "if log: print(\"no outlier is found and no outlier is present in the", "= (score < thred) #if it is going to remove all, then remove", "embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method,", "dis = dis / maxdis self.neighbors[attr] = (dis <= self.tol)*1 distances = self.neighbors[attr]", "attr in enumerate(left): # check if saved if attr in self.neighbors: data.append(self.neighbors[attr]) continue", "self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr]", "if len(clean) <= self.min_neighbors: return mask == -1 y = model.fit_predict(clean) mask[~row_has_nan] =", "type and calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: embedded =", "= [] if len(left) == 0: return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors =", "indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i,", "else: data = X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean') # find knn indicies =", "score = np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100) mask = (score < thred)", "log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') 
self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall')", "BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for i in", "* from sklearn.neighbors import BallTree from tqdm import tqdm import numpy as np", "/ maxdis self.neighbors[attr] = (dis <= self.tol)*1 distances = self.neighbors[attr] + distances has_same_left", "else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance maxdis = max(self.tol, np.nanmax(dis))", "recall = tp / len(self.gt_idx) if log: print(\"with %d detected outliers, recall is:", "attr in enumerate(left): # check if saved if attr in self.neighbors: distances =", "import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class", "data[-1] data = np.hstack(data) if data.shape[0] != X.shape[0]: print(data.shape) raise Exception kdt =", "naive with structured\", log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False) def", "== X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate", "compute_recall(self, tp, outliers, log=True): if tp == 0: if log: print(\"with %d outliers", "np.ndarray): data = data.values if len(data.shape) == 1: data = data.reshape(-1, 1) encoders[attr]", "= self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured =", "distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left): #", "= None 
self.structured = None self.combined = None def get_outliers(self, gradient, right=None): size", "if tp == 0: if log: print(\"with %d outliers in gt, recall is:", "/ np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data =", "logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self, df,", "return_distance=False) for i in range(len(indicies)): distances[i, indicies[i, :]] = 1 has_same_left = (distances", "= { 'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) }", "as np import sklearn import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger", "type and calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: data =", "data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width =", "= np.linalg.svd(G) S = decompose[1] V = decompose[2] top_right_v = V[np.argmax(S)].T score =", "prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured return self.timer.time_end(\"structured\") def filter(self,", "name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param", "len(nbr) == 0: continue if self.method != \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)]", "self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data = np.hstack(data) if data.shape[0] !=", "ax.set_title(\"average number 
of neighbors for every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def", "neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer() self.method = method self.df = df self.gt_idx", "ground truth as well, f1 is 1\") return 1, 0 if log: print(\"no", "= self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of", "ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param =", "{ 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return", "{} self.overall_info = {} self.eval = {} self.neighbors = {} self.neighbor_size = neighbor_size", "is 1\") return 1, 0 if log: print(\"no outlier is found, f1: 0\")", "CATEGORICAL or self.attributes[attr] == TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] =", "nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) # save", "distance if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data)", "tp / len(outliers) if log: print(\"with %d detected outliers, precision is: %.4f\"%(len(outliers), prec))", "for each attribute distances = np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left): # check", "remove none if np.all(~mask): return ~mask return mask class ScikitDetector(OutlierDetector): def __init__(self, df,", "self.combined = None def get_outliers(self, gradient, right=None): size = 
gradient.shape[0] gradient_avg = np.sum(gradient,", "get_outliers(self, data, right=None, m='m1'): return abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data) #", "print(\"no outlier is found, f1: 0\") return 0, 0 for i in outliers:", "self.df: overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\") def run_attr_structured(self,", "= self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean') # find knn", "if i in self.gt_idx: tp += 1 prec = tp / len(outliers) if", "= [] for i, child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if", "= dis / maxdis self.neighbors[attr] = (dis <= self.tol)*1 distances = self.neighbors[attr] +", "if rec*prec == 0: f1 = 0 else: f1 = 2 * (prec", "* rec) / (prec + rec) if log: print(\"f1: %.4f\" % f1) return", "gradient_avg decompose = np.linalg.svd(G) S = decompose[1] V = decompose[2] top_right_v = V[np.argmax(S)].T", "import svm from profiler.utility import GlobalTimer from profiler.data.embedding import OneHotModel import matplotlib.pyplot as", "= self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self): if self.method", "prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False)", "= self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive", "is found and no outlier is present in the ground truth as well,", "TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt = 
BallTree(data, metric='euclidean') #", "logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05,", "1\") return 1, 0 if log: print(\"no outlier is found, f1: 0\") return", "log: print(\"since no outliers in the groud truth, recall is: 1\"%(len(self.gt_idx))) return 1", "high_dim=False): self.timer = GlobalTimer() self.method = method self.df = df self.gt_idx = gt_idx", "child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not in self.structured_info:", "= max(self.tol, np.nanmax(dis)) dis = dis / maxdis self.neighbors[attr] = (dis <= self.tol)*1", "import ABCMeta, abstractmethod from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors", "neighbors nbr = self.df.index.values[row] if len(nbr) == 0: continue if self.method != \"std\":", "return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, )))", ".03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name, stat)) def", "data = data.values if len(data.shape) == 1: data = data.reshape(-1, 1) encoders[attr] =", "self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self, left):", "find low frequency items class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx,", "self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: # take one hot encoding data = self.encoder[right].get_embedding(data)", "def run_combined(self, structured): combined = list(structured) combined.extend(self.overall) return combined def 
compute_precision(self, outliers, log=True):", "= prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured return self.timer.time_end(\"structured\") def", "(distances == X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left)) #", "param = { 'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto' } alg = svm.OneClassSVM", "= len(outlier) # save info self.structured_info[right] = { 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers':", "tol self.structured_info = {} self.overall_info = {} self.eval = {} self.neighbors = {}", "outliers = [] if len(left) == 0: return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors", "column\"%(name, stat)) def evaluate(self, t=None, log=True): structured = self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall,", "in enumerate(has_same_neighbors): # indicies of neighbors nbr = self.df.index.values[row] if len(nbr) == 0:", "pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left):", "kdt.query(data, k=self.neighbor_size, return_distance=False) for i in range(len(indicies)): distances[i, indicies[i, :]] = 1 has_same_left", "= self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0]))", "self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, outliers, log=log) if rec*prec == 0: f1 =", "__init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False,", "if not high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors =", "dis / maxdis self.neighbors[attr] = (dis <= self.tol)*1 distances = self.neighbors[attr] + distances", 
"import BallTree from tqdm import tqdm import numpy as np import sklearn import", "'m1': 3, 'm2': 5, } def get_outliers(self, data, right=None, m='m1'): return abs(data -", "'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors for every column\") ax.set_xlabel('column name') ax.set_ylabel('count')", "gt_idx, \"std\") self.param = { 'm1': 3, 'm2': 5, } def get_outliers(self, data,", "continue if self.method != \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier =", "def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer =", "class OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6,", "= df[attr] if not isinstance(data, np.ndarray): data = data.values if len(data.shape) == 1:", "width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0 for right", "tp = 0.0 # precision if len(outliers) == 0: if len(self.gt_idx) == 0:", "np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left): # check if saved if attr in", "class STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param = {", "-1 y = model.fit_predict(clean) mask[~row_has_nan] = y mask = mask.astype(int) return mask ==", "= 0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and", "= [] for j, attr in enumerate(left): # check if saved if attr", "found, f1: 0\") return 0, 0 for i in outliers: if i in", 
"ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0 for right in self.overall_info], width)", "or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt =", "__init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer()", "self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self):", "# indicies of neighbors nbr = self.df.index.values[row] if len(nbr) == 0: continue if", "<reponame>rekords-uw/Profiler-public<filename>profiler/app/od.py from abc import ABCMeta, abstractmethod from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import", "0 for right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number", "v + .03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name,", "outliers=attr_outliers, log=False) } return attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def", "return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured = [] for i, child in", "= 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width) rects2 =", "from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from", "= attr 
self.embed_txt = embed_txt self.overall = None self.structured = None self.combined =", "def get_outliers(self, data, right=None): # return a mask pass def run_attr(self, right): attr_outliers", "S = decompose[1] V = decompose[2] top_right_v = V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2", "kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for", "== 0: if len(self.gt_idx) == 0: if log: print(\"no outlier is found and", "{ 'contamination': 0.1, } alg = EllipticEnvelope return param, alg def create_one_hot_encoder(self, df):", "self.eval['structured'] = self.compute_f1(structured, \"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if", "\"sever\") self.param = { } self.overall = None self.structured = None self.combined =", "self.timer.time_start(\"structured\") structured = [] for i, child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child)", "elif self.method == \"ee\": param = { 'contamination': 0.1, } alg = EllipticEnvelope", "= 1 distances = self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return", "{} self.eval = {} self.neighbors = {} self.neighbor_size = neighbor_size if knn: if", "encoders = {} for attr, dtype in self.attributes.items(): if dtype == CATEGORICAL or", "= list(unique[count > t*self.df.shape[0]]) return outliers def run_combined(self, structured): combined = list(structured) combined.extend(self.overall)", "column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx,", "if len(self.gt_idx) == 0: if log: print(\"since no outliers in the groud truth,", "a mask pass def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp =", "return param, alg def 
create_one_hot_encoder(self, df): encoders = {} for attr, dtype in", "as plt from profiler.globalvar import * from sklearn.neighbors import BallTree from tqdm import", "for j, attr in enumerate(left): # check if saved if attr in self.neighbors:", "for i, child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not", "= { 'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto' } alg = svm.OneClassSVM elif", "only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False) def evaluate_overall(self): self.eval['overall']", "ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right)", "mask == -1 y = model.fit_predict(clean) mask[~row_has_nan] = y mask = mask.astype(int) return", "= self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\")", "abstractmethod from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor", "in self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1))", "else, categorical, find low frequency items class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector,", "{} self.neighbors = {} self.neighbor_size = neighbor_size if knn: if not high_dim: self.get_neighbors", "self.method == \"ee\": param = { 'contamination': 0.1, } alg = EllipticEnvelope return", "= data.reshape(-1, 1) encoders[attr] = 
OneHotModel(data) return encoders def get_outliers(self, data, right=None): mask", "right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors", "# return a mask pass def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec,", "self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive", "= LocalOutlierFactor elif self.method == \"ee\": param = { 'contamination': 0.1, } alg", "= np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100) mask = (score < thred) #if", "parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall =", "import LocalOutlierFactor from sklearn import svm from profiler.utility import GlobalTimer from profiler.data.embedding import", "self.neighbor_size = neighbor_size if knn: if not high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors", "= min_neighbors def get_default_setting(self): if self.method == \"isf\": param = { 'contamination': 0.1,", "abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data) # else, categorical, find low frequency", "separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall = []", "i in range(len(indicies)): distances[i, indicies[i, :]] = 1 has_same_left = (distances == 1)", "if len(outliers) == 0: if len(self.gt_idx) == 0: if log: print(\"no outlier is", "v in enumerate(data): ax.text(i - 0.25, v + .03, \"%.2f\"%v) 
ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name')", "def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured = [] for i, child in enumerate(tqdm(parent_sets)): outlier", "f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp, outliers, log=True): if tp ==", "nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) # save outlier info num_neighbors[i] = len(nbr) num_outliers[i]", "= sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance maxdis = max(self.tol, np.nanmax(dis)) dis =", "outlier is found, f1: 0\") return 0, 0 for i in outliers: if", "title is not None: print(\"Results for %s:\"%title) prec, tp = self.compute_precision(outliers, log=log) rec", "or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis =", "} alg = EllipticEnvelope return param, alg def create_one_hot_encoder(self, df): encoders = {}", "attr in self.df: overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\")", "self.df.index.values[row] if len(nbr) == 0: continue if self.method != \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr,", "def __init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param = { 'm1': 3,", "self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self,", "attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector,", "if len(nbr) == 0: continue if self.method != \"std\": outlier = 
nbr[self.get_outliers(self.df.loc[nbr, right],", "outlier info num_neighbors[i] = len(nbr) num_outliers[i] = len(outlier) # save info self.structured_info[right] =", "len(outlier) # save info self.structured_info[right] = { 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers,", "# normalize each vector to take cosine distance data = data / np.linalg.norm(data,", "to remove all, then remove none if np.all(~mask): return ~mask return mask class", "category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self,", "dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data =", "0.1, 'kernel': \"rbf\", 'gamma': 'auto' } alg = svm.OneClassSVM elif self.method == \"lof\":", "data = data.reshape(-1, 1) encoders[attr] = OneHotModel(data) return encoders def get_outliers(self, data, right=None):", "ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of tuple')", "enumerate(left): # check if saved if attr in self.neighbors: data.append(self.neighbors[attr]) continue # validate", "self.t = t self.tol = tol self.structured_info = {} self.overall_info = {} self.eval", "data = np.hstack(data) if data.shape[0] != X.shape[0]: print(data.shape) raise Exception kdt = BallTree(data,", "ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for", "2 * (prec * rec) / (prec + rec) if log: print(\"f1: %.4f\"", "info num_neighbors[i] = len(nbr) num_outliers[i] = len(outlier) # save info self.structured_info[right] = {", "gt_idx=None, 
method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer() self.method =", "def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param = { } self.overall", "np.isnan(data).any(axis=1) clean = data[~row_has_nan] model = self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return mask", "create_one_hot_encoder(self, df): encoders = {} for attr, dtype in self.attributes.items(): if dtype ==", "%s for every column\"%(name, stat)) def evaluate(self, t=None, log=True): structured = self.filter(self.structured, t)", "data.values if len(data.shape) == 1: data = data.reshape(-1, 1) if self.attributes[right] == TEXT:", "= self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance", "elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis =", "%d detected outliers, recall is: %.4f\"%(len(outliers), recall)) return recall def visualize_stat(self, dict, name,", "stat='recall') def evaluate_structured(self, t): structured = self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure only\",", "== 0: if log: print(\"no outlier is found and no outlier is present", "= self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data = data", "for each attribute distances = np.zeros((X.shape[0],X.shape[0])) data = [] for j, attr in", "== \"ee\": param = { 'contamination': 0.1, } alg = EllipticEnvelope return param,", "self.structured_info = {} self.overall_info = {} self.eval = {} self.neighbors = {} self.neighbor_size", "== TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take", "= 
self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data.append(embedded / np.linalg.norm(embedded,", "indicies[i, :]] = 1 distances = self.neighbors[attr] + distances has_same_left = (distances ==", "1) if self.attributes[right] == TEXT: if self.embed_txt: # take embedding data = self.embed[right].get_embedding(data)", "workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers,", "EllipticEnvelope return param, alg def create_one_hot_encoder(self, df): encoders = {} for attr, dtype", "present in the ground truth as well, f1 is 1\") return 1, 0", "compute_precision(self, outliers, log=True): outliers = set(outliers) tp = 0.0 # precision if len(outliers)", "found and no outlier is present in the ground truth as well, f1", "set(outliers) tp = 0.0 # precision if len(outliers) == 0: if len(self.gt_idx) ==", "return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1", "size = gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0)", "= np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, ))) for i, row in enumerate(has_same_neighbors): #", "knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)):", "= data.values if len(data.shape) == 1: data = data.reshape(-1, 1) encoders[attr] = OneHotModel(data)", "if data.shape[0] != X.shape[0]: print(data.shape) raise Exception kdt = BallTree(data, metric='euclidean') # find", "return ~mask return mask class ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None, embed=None, gt_idx=None,", "if log: print(\"with %d detected outliers, recall is: 
%.4f\"%(len(outliers), recall)) return recall def", "j, attr in enumerate(left): # check if saved if attr in self.neighbors: distances", "knn=knn, high_dim=high_dim) self.embed = embed self.attributes = attr self.embed_txt = embed_txt self.overall =", "width = 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width) rects2", "get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute", "def compute_recall(self, tp, outliers, log=True): if tp == 0: if log: print(\"with %d", "in self.neighbors: data.append(self.neighbors[attr]) continue # validate type and calculate cosine distance if self.attributes[attr]", "self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self): for right in self.structured_info: fig,", "\"isf\": param = { 'contamination': 0.1, 'n_jobs': self.workers } alg = IsolationForest elif", "self.overall = None self.structured = None self.combined = None def get_outliers(self, gradient, right=None):", "super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param = { } self.overall = None self.structured =", "# find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i", "= np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G = gradient - gradient_avg decompose = np.linalg.svd(G)", "gradient_avg = np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G = gradient", "'structured', stat='recall') def evaluate_structured(self, t): structured = self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure", "combined = list(structured) combined.extend(self.overall) return combined def compute_precision(self, outliers, log=True): outliers = set(outliers)", "in self.df: 
overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\") def", "GlobalTimer() self.method = method self.df = df self.gt_idx = gt_idx self.overall = None", "fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data):", "None self.structured = None self.combined = None def get_outliers(self, gradient, right=None): size =", "kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr]", "distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) data = [] for j, attr", "if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif", "if dtype == CATEGORICAL or (dtype == TEXT and (not self.embed_txt)): data =", "ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors for every column\") ax.set_xlabel('column", "= data[-1] data = np.hstack(data) if data.shape[0] != X.shape[0]: print(data.shape) raise Exception kdt", "!= X.shape[0]: print(data.shape) raise Exception kdt = BallTree(data, metric='euclidean') # find knn indicies", "print(\"Results for %s:\"%title) prec, tp = self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, outliers, log=log)", "data / np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data", "run_combined(self, structured): combined = list(structured) combined.extend(self.overall) return combined def compute_precision(self, outliers, log=True): outliers", "num_neighbors[i] = len(nbr) num_outliers[i] = len(outlier) # save info 
self.structured_info[right] = { 'determined_by':", "np.linalg.svd(G) S = decompose[1] V = decompose[2] top_right_v = V[np.argmax(S)].T score = np.matmul(G,", "log: print(\"with %d detected outliers, recall is: %.4f\"%(len(outliers), recall)) return recall def visualize_stat(self,", "model = self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return mask == -1 y =", "log=False) self.structured = structured return self.timer.time_end(\"structured\") def filter(self, structured, t=None): if t is", "OneHotModel import matplotlib.pyplot as plt from profiler.globalvar import * from sklearn.neighbors import BallTree", "== \"lof\": param = { 'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1, }", "distances = np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left): # check if saved if", "warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ =", "logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self, df, gt_idx=None,", "class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param = {", "= np.percentile(score, 100-p*100) mask = (score < thred) #if it is going to", "# save info self.structured_info[right] = { 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors':", "= tp / len(outliers) if log: print(\"with %d detected outliers, precision is: %.4f\"%(len(outliers),", "'total_outliers': len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers def run_all(self,", "for i, v in enumerate(data): ax.text(i - 0.25, v + .03, \"%.2f\"%v) 
ax.set_xticklabels(list(self.overall_info.keys()))", "profiler.data.embedding import OneHotModel import matplotlib.pyplot as plt from profiler.globalvar import * from sklearn.neighbors", "i in outliers: if i in self.gt_idx: tp += 1 prec = tp", "TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL", "def view_neighbor_info(self): for right in self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2) data =", "ax.set_title(\"[%s] %s for every column\"%(name, stat)) def evaluate(self, t=None, log=True): structured = self.filter(self.structured,", "self.neighbors[attr] = data[-1] data = np.hstack(data) if data.shape[0] != X.shape[0]: print(data.shape) raise Exception", "in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not in self.structured_info: continue", "0: if log: print(\"no outlier is found and no outlier is present in", "(dis <= self.tol)*1 distances = self.neighbors[attr] + distances has_same_left = (distances == X.shape[1])", "log=log) rec = self.compute_recall(tp, outliers, log=log) if rec*prec == 0: f1 = 0", "save outlier info num_neighbors[i] = len(nbr) num_outliers[i] = len(outlier) # save info self.structured_info[right]", "groud truth, recall is: 1\"%(len(self.gt_idx))) return 1 recall = tp / len(self.gt_idx) if", "max(self.tol, np.nanmax(dis)) dis = dis / maxdis self.neighbors[attr] = (dis <= self.tol)*1 distances", "t is None: t = self.t unique, count = np.unique(structured, return_counts=True) outliers =", "outliers, log=log) if rec*prec == 0: f1 = 0 else: f1 = 2", "self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1))", "metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for i in 
range(len(indicies)):", "stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t): structured = self.filter(self.structured, t) self.eval['structured'] =", "t=None, log=True): structured = self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] =", "has_same_left def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for", "t=None): if t is None: t = self.t unique, count = np.unique(structured, return_counts=True)", "= [] for attr in self.df: overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values) self.overall =", "param = { 'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1, } alg =", "plt from profiler.globalvar import * from sklearn.neighbors import BallTree from tqdm import tqdm", "\"enhance naive with structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info,", "gradient - gradient_avg decompose = np.linalg.svd(G) S = decompose[1] V = decompose[2] top_right_v", "only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision')", "if t is None: t = self.t unique, count = np.unique(structured, return_counts=True) outliers", "if self.method == \"isf\": param = { 'contamination': 0.1, 'n_jobs': self.workers } alg", "rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors for every column\") ax.set_xlabel('column name')", "self.algorithm = None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) 
self.encoder = self.create_one_hot_encoder(df) self.min_neighbors =", "np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, ))) for i, row in enumerate(has_same_neighbors): # indicies", "TEXT and (not self.embed_txt)): data = df[attr] if not isinstance(data, np.ndarray): data =", "encoders def get_outliers(self, data, right=None): mask = np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data", "def evaluate_structured(self, t): structured = self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False)", "i, row in enumerate(has_same_neighbors): # indicies of neighbors nbr = self.df.index.values[row] if len(nbr)", "= self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn(self,", "== \"ocsvm\": param = { 'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto' } alg", "= kdt.query(data, k=self.neighbor_size, return_distance=False) for i in range(len(indicies)): distances[i, indicies[i, :]] = 1", "%.4f\"%(len(outliers), prec)) return prec, tp def compute_f1(self, outliers, title=None, log=True): if title is", "ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width = 0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width)", "= self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left):", "int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1, } alg = LocalOutlierFactor elif self.method ==", "self.tol)*1 distances = self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return has_same_left", "rec) if log: print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self,", "= logging.getLogger(__name__) 
logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self, df, gt_idx=None, method='std',", "def compute_f1(self, outliers, title=None, log=True): if title is not None: print(\"Results for %s:\"%title)", "ABCMeta, abstractmethod from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import", "/ np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: embedded =", "distances = np.zeros((X.shape[0],X.shape[0])) data = [] for j, attr in enumerate(left): # check", "right=None): size = gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size,", "super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed", "high_dim=high_dim) self.embed = embed self.attributes = attr self.embed_txt = embed_txt self.overall = None", "sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance maxdis = max(self.tol, np.nanmax(dis)) dis = dis", "t): structured = self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] =", "and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or", "self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured),", "self.embed_txt = embed_txt self.overall = None self.structured = None self.combined = None self.algorithm", "self.eval = {} self.neighbors = {} self.neighbor_size = neighbor_size if knn: if not", 
"bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width", "len(self.gt_idx) if log: print(\"with %d detected outliers, recall is: %.4f\"%(len(outliers), recall)) return recall", "'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets)", "self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall = [] for attr", "self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) data", "self.overall_info = {} self.eval = {} self.neighbors = {} self.neighbor_size = neighbor_size if", "np.zeros((len(has_same_neighbors, ))) for i, row in enumerate(has_same_neighbors): # indicies of neighbors nbr =", "validate type and calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: embedded", "self.gt_idx = gt_idx self.overall = None self.structured = None self.combined = None self.workers=workers", "np.nanstd(data) # else, categorical, find low frequency items class SEVERDetector(OutlierDetector): def __init__(self, df,", "print(\"with %d detected outliers, precision is: %.4f\"%(len(outliers), prec)) return prec, tp def compute_f1(self,", "CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis", "%s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width = 0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 =", "self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data = data /", 
"self.attributes[attr] == TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data", "= np.unique(structured, return_counts=True) outliers = list(unique[count > t*self.df.shape[0]]) return outliers def run_combined(self, structured):", "stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t): structured = self.filter(self.structured,", "= (distances == X.shape[1]) return has_same_left def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left))", "if right in self.structured_info else 0 for right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall',", "tp / len(self.gt_idx) if log: print(\"with %d detected outliers, recall is: %.4f\"%(len(outliers), recall))", "dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance maxdis = max(self.tol, np.nanmax(dis)) dis", "vector to take cosine distance data = data / np.linalg.norm(data, axis=1) elif self.attributes[attr]", "2)), 'contamination': 0.1, } alg = LocalOutlierFactor elif self.method == \"ee\": param =", "if log: print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp,", "overall = self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers", "in self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v", "# save outlier info num_neighbors[i] = len(nbr) num_outliers[i] = len(outlier) # save info", "enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not in 
self.structured_info: continue prec,", "'contamination': 0.1, } alg = EllipticEnvelope return param, alg def create_one_hot_encoder(self, df): encoders", "\"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if log: self.visualize_stat(self.overall_info, 'overall',", "top_right_v = V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100) mask =", "recall def visualize_stat(self, dict, name, stat='precision'): data = [dict[right][stat] if right in dict", "def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each", "tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol,", "ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name, stat)) def evaluate(self, t=None,", "distance if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each", "sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn import svm from profiler.utility", "== 1) return has_same_left @abstractmethod def get_outliers(self, data, right=None): # return a mask", "self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured return self.timer.time_end(\"structured\")", "calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis", "alg = IsolationForest elif self.method == \"ocsvm\": param = { 'nu': 0.1, 'kernel':", "== TEXT: if self.embed_txt: # take embedding data = self.embed[right].get_embedding(data) else: data 
=", "None self.workers=workers self.t = t self.tol = tol self.structured_info = {} self.overall_info =", "well, f1 is 1\") return 1, 0 if log: print(\"no outlier is found,", "None: t = self.t unique, count = np.unique(structured, return_counts=True) outliers = list(unique[count >", "find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for i in range(len(indicies)): distances[i, indicies[i,", "None def get_outliers(self, gradient, right=None): size = gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg", "mask class ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4,", "structured = [] for i, child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier)", "self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left): X", "else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate", "return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers = [] if len(left) == 0:", "encoding data = self.encoder[right].get_embedding(data) # remove nan: row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan]", "distance data = data / np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr]", "right in self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(),", "gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G =", "ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, 
df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\")", "outliers in the groud truth, recall is: 1\"%(len(self.gt_idx))) return 1 recall = tp", "rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0 for right in", "= np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data = data.values if len(data.shape) == 1:", "else: data = self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: # take one hot encoding", "0 for i in outliers: if i in self.gt_idx: tp += 1 prec", "evaluate(self, t=None, log=True): structured = self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured']", "np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1 distances = self.neighbors[attr]", "encoders[attr] = OneHotModel(data) return encoders def get_outliers(self, data, right=None): mask = np.zeros((data.shape[0])) if", "self.t unique, count = np.unique(structured, return_counts=True) outliers = list(unique[count > t*self.df.shape[0]]) return outliers", "ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax =", "def get_outliers(self, data, right=None, m='m1'): return abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data)", "f1 = 0 else: f1 = 2 * (prec * rec) / (prec", "= np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1 distances =", "cosine distance if self.attributes[attr] == TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize", "np.zeros((X.shape[0],X.shape[0])) data = [] for j, attr in enumerate(left): # check if saved", "= 2 * (prec * rec) / (prec + rec) if log: print(\"f1:", "n_jobs=self.workers) # normalize distance maxdis = 
max(self.tol, np.nanmax(dis)) dis = dis / maxdis", "k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] =", "in self.gt_idx: tp += 1 prec = tp / len(outliers) if log: print(\"with", "def run_attr_structured(self, left, right): outliers = [] if len(left) == 0: return outliers", "gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param = { } self.overall = None self.structured", "m='m1'): return abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data) # else, categorical, find", "and calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1))", "def visualize_stat(self, dict, name, stat='precision'): data = [dict[right][stat] if right in dict else", "self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t): structured", "warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object):", "len(data.shape) == 1: data = data.reshape(-1, 1) encoders[attr] = OneHotModel(data) return encoders def", "get_outliers(self, data, right=None): mask = np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data = data.values", "list(unique[count > t*self.df.shape[0]]) return outliers def run_combined(self, structured): combined = list(structured) combined.extend(self.overall) return", "= (dis <= self.tol)*1 distances = self.neighbors[attr] + distances has_same_left = (distances ==", "IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn 
import svm from profiler.utility import GlobalTimer", "dtype == CATEGORICAL or (dtype == TEXT and (not self.embed_txt)): data = df[attr]", "= V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100) mask = (score", "self.encoder[right].get_embedding(data) # remove nan: row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan] model = self.algorithm(**self.param)", "np.nanmax(dis)) dis = dis / maxdis self.neighbors[attr] = (dis <= self.tol)*1 distances =", "<= self.min_neighbors: return mask == -1 y = model.fit_predict(clean) mask[~row_has_nan] = y mask", "== TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean')", "0.1, 'n_jobs': self.workers } alg = IsolationForest elif self.method == \"ocsvm\": param =", "in dict else 0 for right in self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)),", "self.attributes.items(): if dtype == CATEGORICAL or (dtype == TEXT and (not self.embed_txt)): data", "== 1: data = data.reshape(-1, 1) if self.attributes[right] == TEXT: if self.embed_txt: #", "self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in", "0: if log: print(\"since no outliers in the groud truth, recall is: 1\"%(len(self.gt_idx)))", "prec, tp = self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, outliers, log=log) if rec*prec ==", "# normalize each vector to take cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif", "# calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) for j, attr", "tp = self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, outliers, log=log) if rec*prec == 0:", "column %s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax = plt.subplots() width = 0.35", "== 
CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else:", "take cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr]", "= {} self.overall_info = {} self.eval = {} self.neighbors = {} self.neighbor_size =", "0 for right in self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1))", "return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp, outliers, log=True): if tp == 0:", "(prec + rec) if log: print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1)", "= embed self.attributes = attr self.embed_txt = embed_txt self.overall = None self.structured =", "= None self.workers=workers self.t = t self.tol = tol self.structured_info = {} self.overall_info", "len(left) == 0: return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers", "right in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info else", "IsolationForest elif self.method == \"ocsvm\": param = { 'nu': 0.1, 'kernel': \"rbf\", 'gamma':", "= None self.structured = None self.combined = None self.workers=workers self.t = t self.tol", "== \"isf\": param = { 'contamination': 0.1, 'n_jobs': self.workers } alg = IsolationForest", "for right in self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data,", "TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data = np.hstack(data)", "f1 is 1\") return 1, 0 if log: print(\"no outlier is found, f1:", "Name') 
ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name, stat)) def evaluate(self, t=None, log=True): structured", "== 1: data = data.reshape(-1, 1) encoders[attr] = OneHotModel(data) return encoders def get_outliers(self,", "outliers def run_combined(self, structured): combined = list(structured) combined.extend(self.overall) return combined def compute_precision(self, outliers,", "neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed self.attributes = attr self.embed_txt = embed_txt self.overall", "tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed self.attributes = attr self.embed_txt = embed_txt", "saved if attr in self.neighbors: distances = self.neighbors[attr] + distances continue # validate", "one hot encoding data = self.encoder[right].get_embedding(data) # remove nan: row_has_nan = np.isnan(data).any(axis=1) clean", "== TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] ==", "i in self.gt_idx: tp += 1 prec = tp / len(outliers) if log:", "= GlobalTimer() self.method = method self.df = df self.gt_idx = gt_idx self.overall =", "svm.OneClassSVM elif self.method == \"lof\": param = { 'n_neighbors': int(max(self.neighbor_size / 2, 2)),", "+ distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn(self, left): X", "np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data = data.values if len(data.shape) == 1: data", "self.method == \"lof\": param = { 'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1,", "%.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp, outliers, log=True): if", "self.neighbors[attr][i, indicies[i, :]] = 1 distances = self.neighbors[attr] + distances has_same_left = (distances", "self.visualize_stat(self.overall_info, 'overall', stat='recall') 
self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t): structured = self.filter(self.structured, t)", "def get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each", "t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed self.attributes = attr self.embed_txt", "ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax = plt.subplots() width = 0.35 rects1 =", "dict, name, stat='precision'): data = [dict[right][stat] if right in dict else 0 for", "svm from profiler.utility import GlobalTimer from profiler.data.embedding import OneHotModel import matplotlib.pyplot as plt", "self.timer.time_start(\"naive\") if separate: overall = [] for attr in self.df: overall.extend(list(self.run_attr(attr))) else: overall", "'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self,", "tp == 0: if log: print(\"with %d outliers in gt, recall is: 0\"%(len(self.gt_idx)))", "0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n", "else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data = np.hstack(data) if data.shape[0] != X.shape[0]: print(data.shape)", "'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t): structured = self.filter(self.structured, t) self.eval['structured']", "of tuple') ax2.set_ylabel('count') fig, ax = plt.subplots() width = 0.35 
rects1 = ax.bar(np.arange(len(self.overall_info))+width,", "dtype in self.attributes.items(): if dtype == CATEGORICAL or (dtype == TEXT and (not", "remove all, then remove none if np.all(~mask): return ~mask return mask class ScikitDetector(OutlierDetector):", "if self.attributes[attr] == TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector", "self.timer.time_end(\"structured\") def filter(self, structured, t=None): if t is None: t = self.t unique,", "detected outliers, recall is: %.4f\"%(len(outliers), recall)) return recall def visualize_stat(self, dict, name, stat='precision'):", "tuple') ax2.set_ylabel('count') fig, ax = plt.subplots() width = 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors']", "high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def", "self.gt_idx: tp += 1 prec = tp / len(outliers) if log: print(\"with %d", "{ 'contamination': 0.1, 'n_jobs': self.workers } alg = IsolationForest elif self.method == \"ocsvm\":", "right in self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i,", "'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured =", "return 0, 0 for i in outliers: if i in self.gt_idx: tp +=", "elif self.method == \"lof\": param = { 'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination':", "in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0", "self.attributes = attr self.embed_txt = embed_txt self.overall = None self.structured = None self.combined", 
"'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1, } alg = LocalOutlierFactor elif self.method", "thred) #if it is going to remove all, then remove none if np.all(~mask):", "f1 = 2 * (prec * rec) / (prec + rec) if log:", "structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info,", "get_outliers(self, data, right=None): # return a mask pass def run_attr(self, right): attr_outliers =", "has_same_left @abstractmethod def get_outliers(self, data, right=None): # return a mask pass def run_attr(self,", "raise Exception kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size,", "self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive", "self.structured = None self.combined = None self.algorithm = None self.param, self.algorithm = self.get_default_setting()", "count = np.unique(structured, return_counts=True) outliers = list(unique[count > t*self.df.shape[0]]) return outliers def run_combined(self,", "self.workers=workers self.t = t self.tol = tol self.structured_info = {} self.overall_info = {}", "GlobalTimer from profiler.data.embedding import OneHotModel import matplotlib.pyplot as plt from profiler.globalvar import *", "naive with structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall',", "data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] ==", 
"1\"%(len(self.gt_idx))) return 1 recall = tp / len(self.gt_idx) if log: print(\"with %d detected", "== -1 y = model.fit_predict(clean) mask[~row_has_nan] = y mask = mask.astype(int) return mask", "= BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] =", "data[~row_has_nan] model = self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return mask == -1 y", "outlier is present in the ground truth as well, f1 is 1\") return", "= self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, ))) for i, row", "def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each", "= gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G", "== 0: if log: print(\"since no outliers in the groud truth, recall is:", "if log: print(\"with %d outliers in gt, recall is: 0\"%(len(self.gt_idx))) return 0 if", "= data.values if len(data.shape) == 1: data = data.reshape(-1, 1) if self.attributes[right] ==", "data = self.encoder[right].get_embedding(data) # remove nan: row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan] model", "and no outlier is present in the ground truth as well, f1 is", "= None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors", "for every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(STDDetector,", "= self.compute_f1(structured, \"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if log:", "decompose[1] V = decompose[2] top_right_v = V[np.argmax(S)].T score = np.matmul(G, 
top_right_v)**2 thred =", "column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width = 0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2", "= { 'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1, } alg = LocalOutlierFactor", "log: print(\"with %d outliers in gt, recall is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx)", "print(\"since no outliers in the groud truth, recall is: 1\"%(len(self.gt_idx))) return 1 recall", "data.append(self.neighbors[attr]) continue # validate type and calculate cosine distance if self.attributes[attr] == TEXT", "data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data): ax.text(i - 0.25, v +", "len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers def run_all(self, parent_sets,", "from abc import ABCMeta, abstractmethod from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest", "in the ground truth as well, f1 is 1\") return 1, 0 if", "k=self.neighbor_size, return_distance=False) for i in range(len(indicies)): distances[i, indicies[i, :]] = 1 has_same_left =", "cosine distance data = data / np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL or", "outliers in gt, recall is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) == 0: if", "= embed_txt self.overall = None self.structured = None self.combined = None self.algorithm =", "m='m2')] outliers.extend(outlier) # save outlier info num_neighbors[i] = len(nbr) num_outliers[i] = len(outlier) #", "with structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall')", "[] for attr in self.df: overall.extend(list(self.run_attr(attr))) else: 
overall = self.run_attr(self.df.columns.values) self.overall = overall", "or (dtype == TEXT and (not self.embed_txt)): data = df[attr] if not isinstance(data,", "it is going to remove all, then remove none if np.all(~mask): return ~mask", "sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn", "logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__", "is: %.4f\"%(len(outliers), prec)) return prec, tp def compute_f1(self, outliers, title=None, log=True): if title", "np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100) mask = (score < thred) #if it", "in outliers: if i in self.gt_idx: tp += 1 prec = tp /", "outliers, log=True): outliers = set(outliers) tp = 0.0 # precision if len(outliers) ==", "for %s:\"%title) prec, tp = self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, outliers, log=log) if", "self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr]", "fig, (ax1, ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of", "self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data)", "self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to", "distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self, left): X =", "if knn: if not 
high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else:", "{ } self.overall = None self.structured = None self.combined = None def get_outliers(self,", "print(\"with %d detected outliers, recall is: %.4f\"%(len(outliers), recall)) return recall def visualize_stat(self, dict,", "self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram", "= { 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) }", "'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers", "check if saved if attr in self.neighbors: distances = self.neighbors[attr] + distances continue", "= nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) #", "t*self.df.shape[0]]) return outliers def run_combined(self, structured): combined = list(structured) combined.extend(self.overall) return combined def", "= None self.structured = None self.combined = None self.algorithm = None self.param, self.algorithm", "log: print(\"no outlier is found and no outlier is present in the ground", "super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param = { 'm1': 3, 'm2': 5, } def", "data, right=None): mask = np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data = data.values if", "decompose[2] top_right_v = V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100) mask", "return has_same_left def get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance", 
"self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1 distances", "None: print(\"Results for %s:\"%title) prec, tp = self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, outliers,", "rec, f1) def compute_recall(self, tp, outliers, log=True): if tp == 0: if log:", "then remove none if np.all(~mask): return ~mask return mask class ScikitDetector(OutlierDetector): def __init__(self,", "data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data = np.hstack(data) if data.shape[0] != X.shape[0]: print(data.shape) raise", "= self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self,", "len(np.unique(outliers)) } return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured = [] for i,", "def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right]", "normalize distance maxdis = max(self.tol, np.nanmax(dis)) dis = dis / maxdis self.neighbors[attr] =", "== 0: return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers =", "attr in self.neighbors: data.append(self.neighbors[attr]) continue # validate type and calculate cosine distance if", "is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) == 0: if log: print(\"since no outliers", "truth, recall is: 1\"%(len(self.gt_idx))) return 1 recall = tp / len(self.gt_idx) if log:", "def filter(self, structured, t=None): if t is None: t = self.t unique, count", "#if it is going to remove all, then remove none if np.all(~mask): return", "embed_txt self.overall = None self.structured = None self.combined = None self.algorithm = None", "distance maxdis = max(self.tol, np.nanmax(dis)) dis = dis / maxdis 
self.neighbors[attr] = (dis", "= 0 else: f1 = 2 * (prec * rec) / (prec +", "# take one hot encoding data = self.encoder[right].get_embedding(data) # remove nan: row_has_nan =", "= self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not in self.structured_info: continue prec, tp =", "structured): combined = list(structured) combined.extend(self.overall) return combined def compute_precision(self, outliers, log=True): outliers =", "going to remove all, then remove none if np.all(~mask): return ~mask return mask", "0.0 # precision if len(outliers) == 0: if len(self.gt_idx) == 0: if log:", "))) num_outliers = np.zeros((len(has_same_neighbors, ))) for i, row in enumerate(has_same_neighbors): # indicies of", "__init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param = { } self.overall =", "none if np.all(~mask): return ~mask return mask class ScikitDetector(OutlierDetector): def __init__(self, df, method,", "df): encoders = {} for attr, dtype in self.attributes.items(): if dtype == CATEGORICAL", "sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance maxdis = max(self.tol,", "self.method = method self.df = df self.gt_idx = gt_idx self.overall = None self.structured", ":]] = 1 has_same_left = (distances == 1) return has_same_left @abstractmethod def get_outliers(self,", "\"rbf\", 'gamma': 'auto' } alg = svm.OneClassSVM elif self.method == \"lof\": param =", "self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision')", "alg = EllipticEnvelope return param, alg def create_one_hot_encoder(self, df): encoders = {} for", "and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # 
normalize each vector to take cosine distance", "0: if log: print(\"with %d outliers in gt, recall is: 0\"%(len(self.gt_idx))) return 0", "= None def get_outliers(self, gradient, right=None): size = gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size", "\"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name, stat)) def evaluate(self,", "prec, tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision':", "== TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take", "attribute distances = np.zeros((X.shape[0],X.shape[0])) data = [] for j, attr in enumerate(left): #", "data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number", "recall is: 1\"%(len(self.gt_idx))) return 1 recall = tp / len(self.gt_idx) if log: print(\"with", "self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1,", "**kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed =", "calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) data = [] for", "return mask == -1 y = model.fit_predict(clean) mask[~row_has_nan] = y mask = mask.astype(int)", "OneHotModel(data) return encoders def get_outliers(self, data, right=None): mask = np.zeros((data.shape[0])) if not isinstance(data,", "combined.extend(self.overall) return combined def compute_precision(self, outliers, log=True): outliers = set(outliers) tp = 
0.0", "tp, outliers, log=True): if tp == 0: if log: print(\"with %d outliers in", "0: if len(self.gt_idx) == 0: if log: print(\"no outlier is found and no", "combined def compute_precision(self, outliers, log=True): outliers = set(outliers) tp = 0.0 # precision", "left, right): outliers = [] if len(left) == 0: return outliers has_same_neighbors =", "method self.df = df self.gt_idx = gt_idx self.overall = None self.structured = None", "1 prec = tp / len(outliers) if log: print(\"with %d detected outliers, precision", "dict else 0 for right in self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data)", "neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn,", "self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self): if self.method == \"isf\":", "= 0.0 # precision if len(outliers) == 0: if len(self.gt_idx) == 0: if", "if self.attributes[right] == TEXT: if self.embed_txt: # take embedding data = self.embed[right].get_embedding(data) else:", "def __init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100,", "= self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers =", "for every column\"%(name, stat)) def evaluate(self, t=None, log=True): structured = self.filter(self.structured, t) self.eval['overall']", "\"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False) def evaluate_overall(self):", "recall is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) == 0: if log: print(\"since no", "def evaluate(self, t=None, log=True): structured = self.filter(self.structured, t) 
self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\")", "'contamination': 0.1, 'n_jobs': self.workers } alg = IsolationForest elif self.method == \"ocsvm\": param", "separate=True): self.timer.time_start(\"naive\") if separate: overall = [] for attr in self.df: overall.extend(list(self.run_attr(attr))) else:", "import matplotlib.pyplot as plt from profiler.globalvar import * from sklearn.neighbors import BallTree from", "> self.param[m] * np.nanstd(data) # else, categorical, find low frequency items class SEVERDetector(OutlierDetector):", "and calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1))", "= { 'm1': 3, 'm2': 5, } def get_outliers(self, data, right=None, m='m1'): return", "knn=False, high_dim=False): self.timer = GlobalTimer() self.method = method self.df = df self.gt_idx =", "of neighbors nbr = self.df.index.values[row] if len(nbr) == 0: continue if self.method !=", "{ 'm1': 3, 'm2': 5, } def get_outliers(self, data, right=None, m='m1'): return abs(data", "data.reshape(-1, 1) encoders[attr] = OneHotModel(data) return encoders def get_outliers(self, data, right=None): mask =", "outliers, title=None, log=True): if title is not None: print(\"Results for %s:\"%title) prec, tp", "= data[~row_has_nan] model = self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return mask == -1", "self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data =", "top_right_v)**2 thred = np.percentile(score, 100-p*100) mask = (score < thred) #if it is", "data.values if len(data.shape) == 1: data = data.reshape(-1, 1) encoders[attr] = OneHotModel(data) return", "data = self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: # take", "right, m='m2')] outliers.extend(outlier) # save outlier info 
num_neighbors[i] = len(nbr) num_outliers[i] = len(outlier)", "3, 'm2': 5, } def get_outliers(self, data, right=None, m='m1'): return abs(data - np.nanmean(data))", "= OneHotModel(data) return encoders def get_outliers(self, data, right=None): mask = np.zeros((data.shape[0])) if not", "knn: if not high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors", "return attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\")", "from profiler.utility import GlobalTimer from profiler.data.embedding import OneHotModel import matplotlib.pyplot as plt from", "num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, ))) for i, row in enumerate(has_same_neighbors):", "category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta", "len(nbr) num_outliers[i] = len(outlier) # save info self.structured_info[right] = { 'determined_by': left, 'num_neighbors':", "log: print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp, outliers,", "from sklearn.neighbors import LocalOutlierFactor from sklearn import svm from profiler.utility import GlobalTimer from", "workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer() self.method = method self.df", "(distances == 1) return has_same_left @abstractmethod def get_outliers(self, data, right=None): # return a", "ax = plt.subplots() width = 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in", "embedded = 
self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data.append(embedded /", "+ rec) if log: print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def", "i, v in enumerate(data): ax.text(i - 0.25, v + .03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column", "self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def", "workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed self.attributes = attr self.embed_txt =", "= structured return self.timer.time_end(\"structured\") def filter(self, structured, t=None): if t is None: t", "run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall", "data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) #", "precision is: %.4f\"%(len(outliers), prec)) return prec, tp def compute_f1(self, outliers, title=None, log=True): if", "'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\")", "BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0]))", "2, 2)), 'contamination': 0.1, } alg = LocalOutlierFactor elif self.method == \"ee\": param", "= {} for attr, dtype in 
self.attributes.items(): if dtype == CATEGORICAL or (dtype", "= EllipticEnvelope return param, alg def create_one_hot_encoder(self, df): encoders = {} for attr,", "self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise", "not high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold", "None self.algorithm = None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors", "self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\",", "0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) == 0: if log: print(\"since no outliers in", "X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute distances =", "distances = self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return has_same_left def", "enumerate(left): # check if saved if attr in self.neighbors: distances = self.neighbors[attr] +", "check if saved if attr in self.neighbors: data.append(self.neighbors[attr]) continue # validate type and", "self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self): if self.method ==", "is not None: print(\"Results for %s:\"%title) prec, tp = self.compute_precision(outliers, log=log) rec =", "in gt, recall is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) == 0: if log:", "mask = np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data = data.values if len(data.shape) ==", "%s:\"%title) prec, tp = self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, 
outliers, log=log) if rec*prec", "self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat())", "# calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) data = []", "for column %s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax = plt.subplots() width =", "= 1 has_same_left = (distances == 1) return has_same_left @abstractmethod def get_outliers(self, data,", "def get_default_setting(self): if self.method == \"isf\": param = { 'contamination': 0.1, 'n_jobs': self.workers", "right=None): # return a mask pass def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)]", "return prec, tp def compute_f1(self, outliers, title=None, log=True): if title is not None:", "1) encoders[attr] = OneHotModel(data) return encoders def get_outliers(self, data, right=None): mask = np.zeros((data.shape[0]))", "t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method, t=t,", "- np.nanmean(data)) > self.param[m] * np.nanstd(data) # else, categorical, find low frequency items", "method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed self.attributes = attr", "for i in range(len(indicies)): distances[i, indicies[i, :]] = 1 has_same_left = (distances ==", "elif self.attributes[right] == CATEGORICAL: # take one hot encoding data = self.encoder[right].get_embedding(data) #", "0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)),", "run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured = [] for i, 
child in enumerate(tqdm(parent_sets)): outlier =", "self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean') # find knn indicies", "right): outliers = [] if len(left) == 0: return outliers has_same_neighbors = self.get_neighbors(left)", "ABCMeta def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer", "attribute distances = np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left): # check if saved", "evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self): for right in self.structured_info:", "info self.structured_info[right] = { 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers':", "# normalize distance maxdis = max(self.tol, np.nanmax(dis)) dis = dis / maxdis self.neighbors[attr]", "= self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured',", "in range(len(indicies)): distances[i, indicies[i, :]] = 1 has_same_left = (distances == 1) return", "\"naive approach\", log=False) def view_neighbor_info(self): for right in self.structured_info: fig, (ax1, ax2) =", "== 0: f1 = 0 else: f1 = 2 * (prec * rec)", "None self.structured = None self.combined = None self.algorithm = None self.param, self.algorithm =", "self).__init__(df, gt_idx, \"sever\") self.param = { } self.overall = None self.structured = None", "every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df,", "self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: embedded = 
self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1))", "right], right, m='m2')] outliers.extend(outlier) # save outlier info num_neighbors[i] = len(nbr) num_outliers[i] =", "[] if len(left) == 0: return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors,", "% f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp, outliers, log=True): if tp", "gt, recall is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) == 0: if log: print(\"since", "= np.isnan(data).any(axis=1) clean = data[~row_has_nan] model = self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return", "data = [dict[right][stat] if right in dict else 0 for right in self.overall_info]", "if np.all(~mask): return ~mask return mask class ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None,", "= ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of", "def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self): for right in", "get_outliers(self, gradient, right=None): size = gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1,", "data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data = np.hstack(data) if data.shape[0] != X.shape[0]:", "normalize each vector to take cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr]", "num_outliers[i] = len(outlier) # save info self.structured_info[right] = { 'determined_by': left, 'num_neighbors': num_neighbors,", "outliers has_same_neighbors = self.get_neighbors(left) 
num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, ))) for", "None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def", "len(left)) # calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) for j,", "visualize_stat(self, dict, name, stat='precision'): data = [dict[right][stat] if right in dict else 0", "clean = data[~row_has_nan] model = self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return mask ==", "high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed", "unique, count = np.unique(structured, return_counts=True) outliers = list(unique[count > t*self.df.shape[0]]) return outliers def", "not isinstance(data, np.ndarray): data = data.values if len(data.shape) == 1: data = data.reshape(-1,", "parent_sets): self.timer.time_start(\"structured\") structured = [] for i, child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child],", "right in dict else 0 for right in self.overall_info] fig, ax = plt.subplots()", "/ len(outliers) if log: print(\"with %d detected outliers, precision is: %.4f\"%(len(outliers), prec)) return", "items class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param =", "= X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size,", "min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size,", "from profiler.globalvar import * from sklearn.neighbors import BallTree from tqdm import tqdm 
import", "filter(self, structured, t=None): if t is None: t = self.t unique, count =", "and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax = plt.subplots()", "ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count')", "return_counts=True) outliers = list(unique[count > t*self.df.shape[0]]) return outliers def run_combined(self, structured): combined =", "# else, categorical, find low frequency items class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None):", "'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax", "= self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize", "0.1, } alg = EllipticEnvelope return param, alg def create_one_hot_encoder(self, df): encoders =", "CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt", "self.method == \"isf\": param = { 'contamination': 0.1, 'n_jobs': self.workers } alg =", "import * from sklearn.neighbors import BallTree from tqdm import tqdm import numpy as", "= self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: # take one hot encoding data =", "each attribute distances = np.zeros((X.shape[0],X.shape[0])) data = [] for j, attr in enumerate(left):", "calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) for j, attr in", "df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer() self.method", 
"self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn(self, left):", "elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data", "self.tol = tol self.structured_info = {} self.overall_info = {} self.eval = {} self.neighbors", "distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT:", "Exception kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False)", "X.shape[1]) return has_same_left def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise", "self.overall = overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers = [] if", "f1) def compute_recall(self, tp, outliers, log=True): if tp == 0: if log: print(\"with", "take one hot encoding data = self.encoder[right].get_embedding(data) # remove nan: row_has_nan = np.isnan(data).any(axis=1)", "len(self.gt_idx) == 0: if log: print(\"no outlier is found and no outlier is", "enumerate(has_same_neighbors): # indicies of neighbors nbr = self.df.index.values[row] if len(nbr) == 0: continue", "return 1, 0 if log: print(\"no outlier is found, f1: 0\") return 0,", "== TEXT and (not self.embed_txt)): data = df[attr] if not isinstance(data, np.ndarray): data", "stat)) def evaluate(self, t=None, log=True): structured = self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall, \"naive", "self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self): if self.method == \"isf\": param = {", "self.neighbors = {} self.neighbor_size = neighbor_size if knn: if not high_dim: self.get_neighbors =", "# find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for i in 
range(len(indicies)): distances[i,", "is present in the ground truth as well, f1 is 1\") return 1,", "structured\", log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self): for", "run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] =", "self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers def", "recall)) return recall def visualize_stat(self, dict, name, stat='precision'): data = [dict[right][stat] if right", "len(outliers) == 0: if len(self.gt_idx) == 0: if log: print(\"no outlier is found", "get_default_setting(self): if self.method == \"isf\": param = { 'contamination': 0.1, 'n_jobs': self.workers }", "self.get_neighbors_threshold def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for", "embed self.attributes = attr self.embed_txt = embed_txt self.overall = None self.structured = None", "< thred) #if it is going to remove all, then remove none if", "data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: embedded", "hot encoding data = self.encoder[right].get_embedding(data) # remove nan: row_has_nan = np.isnan(data).any(axis=1) clean =", "100-p*100) mask = (score < thred) #if it is going to remove all,", "+ distances has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self, left): X", "= None self.algorithm = None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df)", "self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = 
prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured", "for right in self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for", "__init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param = { 'm1': 3, 'm2':", "from tqdm import tqdm import numpy as np import sklearn import warnings, logging", "is: 1\"%(len(self.gt_idx))) return 1 recall = tp / len(self.gt_idx) if log: print(\"with %d", "cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] ==", "self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data", "if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured',", "self.structured_info else 0 for right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys()))", "= self.neighbors[attr] + distances continue # validate type and calculate cosine distance if", "of neighbors for every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, df,", "= self.encoder[right].get_embedding(data) # remove nan: row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan] model =", ":]] = 1 distances = self.neighbors[attr] + distances has_same_left = (distances == X.shape[1])", "def get_outliers(self, gradient, right=None): size = gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg =", "or 
self.attributes[attr] == TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1]", "data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data =", "self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False) def", "= decompose[2] top_right_v = V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100)", "name, stat='precision'): data = [dict[right][stat] if right in dict else 0 for right", "if separate: overall = [] for attr in self.df: overall.extend(list(self.run_attr(attr))) else: overall =", "indicies[i, :]] = 1 has_same_left = (distances == 1) return has_same_left @abstractmethod def", "self.overall = None self.structured = None self.combined = None self.workers=workers self.t = t", "prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate)", "if saved if attr in self.neighbors: distances = self.neighbors[attr] + distances continue #", "self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0 for", "'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return attr_outliers def run_all(self, parent_sets, separate=True):", "gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param = { 'm1': 3, 'm2': 5, }", "rec*prec == 0: f1 = 0 else: f1 = 2 * (prec *", "ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count')", "log=False) self.overall_info[right] = 
{ 'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers,", "each vector to take cosine distance data = data / np.linalg.norm(data, axis=1) elif", "= {} self.neighbor_size = neighbor_size if knn: if not high_dim: self.get_neighbors = self.get_neighbors_knn", "len(left)) # calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) data =", "for column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width = 0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width)", "/ 2, 2)), 'contamination': 0.1, } alg = LocalOutlierFactor elif self.method == \"ee\":", "import numpy as np import sklearn import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning)", "prec = tp / len(outliers) if log: print(\"with %d detected outliers, precision is:", "# take embedding data = self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data) elif self.attributes[right] ==", "= self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self): for right in self.structured_info: fig, (ax1,", "= plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column", "log=True): if title is not None: print(\"Results for %s:\"%title) prec, tp = self.compute_precision(outliers,", "has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1,", "if log: print(\"with %d detected outliers, precision is: %.4f\"%(len(outliers), prec)) return prec, tp", "profiler.utility import GlobalTimer from profiler.data.embedding import OneHotModel import matplotlib.pyplot as plt from 
profiler.globalvar", "rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index", "gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx,", "1) return has_same_left @abstractmethod def get_outliers(self, data, right=None): # return a mask pass", "log: print(\"no outlier is found, f1: 0\") return 0, 0 for i in", "logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self, df, gt_idx=None, method='std', workers=4,", "return recall def visualize_stat(self, dict, name, stat='precision'): data = [dict[right][stat] if right in", "return 0 if len(self.gt_idx) == 0: if log: print(\"since no outliers in the", "np.percentile(score, 100-p*100) mask = (score < thred) #if it is going to remove", "self.neighbors[attr] + distances continue # validate type and calculate cosine distance if self.attributes[attr]", "self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\") self.eval['combined']", "} return attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True):", "= (distances == 1) return has_same_left @abstractmethod def get_outliers(self, data, right=None): # return", "@abstractmethod def get_outliers(self, data, right=None): # return a mask pass def run_attr(self, right):", "1 recall = tp / len(self.gt_idx) if log: print(\"with %d detected outliers, recall", "width = 0.35 rects1 = 
ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors", "* (prec * rec) / (prec + rec) if log: print(\"f1: %.4f\" %", "alg def create_one_hot_encoder(self, df): encoders = {} for attr, dtype in self.attributes.items(): if", "%.4f\"%(len(outliers), recall)) return recall def visualize_stat(self, dict, name, stat='precision'): data = [dict[right][stat] if", "t self.tol = tol self.structured_info = {} self.overall_info = {} self.eval = {}", "if attr in self.neighbors: data.append(self.neighbors[attr]) continue # validate type and calculate cosine distance", "size, axis=0) G = gradient - gradient_avg decompose = np.linalg.svd(G) S = decompose[1]", "structured, t=None): if t is None: t = self.t unique, count = np.unique(structured,", "= t self.tol = tol self.structured_info = {} self.overall_info = {} self.eval =", "self.workers } alg = IsolationForest elif self.method == \"ocsvm\": param = { 'nu':", "row in enumerate(has_same_neighbors): # indicies of neighbors nbr = self.df.index.values[row] if len(nbr) ==", "\\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of tuple') ax2.set_ylabel('count') fig, ax = plt.subplots() width", "> t*self.df.shape[0]]) return outliers def run_combined(self, structured): combined = list(structured) combined.extend(self.overall) return combined", "def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall = [] for attr in self.df:", "has_same_left = (distances == X.shape[1]) return has_same_left def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1,", "right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = {", "abc import ABCMeta, 
abstractmethod from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from", "self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall,", "0: return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors,", "self.structured = structured return self.timer.time_end(\"structured\") def filter(self, structured, t=None): if t is None:", "= len(nbr) num_outliers[i] = len(outlier) # save info self.structured_info[right] = { 'determined_by': left,", "= np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left): # check if saved if attr", "np.nanmean(data)) > self.param[m] * np.nanstd(data) # else, categorical, find low frequency items class", "\"std\") self.param = { 'm1': 3, 'm2': 5, } def get_outliers(self, data, right=None,", "= neighbor_size if knn: if not high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors =", "is None: t = self.t unique, count = np.unique(structured, return_counts=True) outliers = list(unique[count", "for i in outliers: if i in self.gt_idx: tp += 1 prec =", "self.embed = embed self.attributes = attr self.embed_txt = embed_txt self.overall = None self.structured", "axis=1) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else:", "neighbors') ax1.set_ylabel('count') width = 0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors',", "rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column %s\"%right) ax2.set_xlabel('index of 
tuple') ax2.set_ylabel('count') fig,", "nan: row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan] model = self.algorithm(**self.param) if len(clean) <=", "right=None): mask = np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data = data.values if len(data.shape)", "} return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured = [] for i, child", "calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) #", "np.all(~mask): return ~mask return mask class ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None, embed=None,", "self.structured_info[right] = { 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers))", "TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine", "self).__init__(df, gt_idx, \"std\") self.param = { 'm1': 3, 'm2': 5, } def get_outliers(self,", "in enumerate(data): ax.text(i - 0.25, v + .03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat)", "CATEGORICAL: # take one hot encoding data = self.encoder[right].get_embedding(data) # remove nan: row_has_nan", "metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for", "log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured return", "if log: print(\"since no outliers in the groud truth, recall is: 1\"%(len(self.gt_idx))) return", "gradient, right=None): size = gradient.shape[0] gradient_avg = np.sum(gradient, axis=0)/size gradient_avg = 
np.repeat(gradient_avg.reshape(1, -1),", "self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers = [] if len(left) == 0: return", "'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t): structured =", "[] for i, child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child", "sklearn.neighbors import LocalOutlierFactor from sklearn import svm from profiler.utility import GlobalTimer from profiler.data.embedding", "number of neighbors for every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self,", "stat='precision') self.visualize_stat(self.structured_info, 'structured', stat='precision') self.visualize_stat(self.overall_info, 'overall', stat='recall') self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t):", "t) self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\") self.eval['combined'] =", "SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param = { }", "else: f1 = 2 * (prec * rec) / (prec + rec) if", "self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) for", "for i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1 distances = self.neighbors[attr] +", "sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis", "knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df, gt_idx, 
method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim)", "range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1 distances = self.neighbors[attr] + distances has_same_left =", "__metaclass__ = ABCMeta def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False,", "else: self.get_neighbors = self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left): X =", "maxdis = max(self.tol, np.nanmax(dis)) dis = dis / maxdis self.neighbors[attr] = (dis <=", "if len(data.shape) == 1: data = data.reshape(-1, 1) encoders[attr] = OneHotModel(data) return encoders", "import OneHotModel import matplotlib.pyplot as plt from profiler.globalvar import * from sklearn.neighbors import", "* np.nanstd(data) # else, categorical, find low frequency items class SEVERDetector(OutlierDetector): def __init__(self,", "if len(data.shape) == 1: data = data.reshape(-1, 1) if self.attributes[right] == TEXT: if", "continue # validate type and calculate cosine distance if self.attributes[attr] == TEXT and", "each attribute distances = np.zeros((X.shape[0],X.shape[0])) for j, attr in enumerate(left): # check if", "= overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers = [] if len(left)", "f1: 0\") return 0, 0 for i in outliers: if i in self.gt_idx:", "is: %.4f\"%(len(outliers), recall)) return recall def visualize_stat(self, dict, name, stat='precision'): data = [dict[right][stat]", "LocalOutlierFactor elif self.method == \"ee\": param = { 'contamination': 0.1, } alg =", "left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers def", "all, then remove none if np.all(~mask): return ~mask return mask class ScikitDetector(OutlierDetector): def", "= np.hstack(data) if data.shape[0] 
!= X.shape[0]: print(data.shape) raise Exception kdt = BallTree(data, metric='euclidean')", "self.method == \"ocsvm\": param = { 'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto' }", "self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured return self.timer.time_end(\"structured\") def filter(self, structured,", "for right in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info", "self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers = []", "= np.zeros((X.shape[0],X.shape[0])) data = [] for j, attr in enumerate(left): # check if", "warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class OutlierDetector(object): __metaclass__ = ABCMeta def", "left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute distances", "else 0 for right in self.overall_info] fig, ax = plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data)))", "log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False) def evaluate_overall(self): self.eval['overall'] =", "= plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data): ax.text(i -", "{} for attr, dtype in self.attributes.items(): if dtype == CATEGORICAL or (dtype ==", "np.hstack(data) if data.shape[0] != X.shape[0]: print(data.shape) raise Exception kdt = BallTree(data, metric='euclidean') #", "len(clean) <= self.min_neighbors: return mask == -1 y = model.fit_predict(clean) mask[~row_has_nan] = y", "ax = plt.subplots() ax.bar(np.arange(len(data)), data) 
ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data): ax.text(i", "saved if attr in self.neighbors: data.append(self.neighbors[attr]) continue # validate type and calculate cosine", "gt_idx, \"sever\") self.param = { } self.overall = None self.structured = None self.combined", "V = decompose[2] top_right_v = V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2 thred = np.percentile(score,", "} alg = IsolationForest elif self.method == \"ocsvm\": param = { 'nu': 0.1,", "cosine distance if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis =", "- gradient_avg decompose = np.linalg.svd(G) S = decompose[1] V = decompose[2] top_right_v =", "list(structured) combined.extend(self.overall) return combined def compute_precision(self, outliers, log=True): outliers = set(outliers) tp =", "= self.t unique, count = np.unique(structured, return_counts=True) outliers = list(unique[count > t*self.df.shape[0]]) return", "tp += 1 prec = tp / len(outliers) if log: print(\"with %d detected", "self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self): if self.method == \"isf\": param", "maxdis self.neighbors[attr] = (dis <= self.tol)*1 distances = self.neighbors[attr] + distances has_same_left =", "from sklearn.neighbors import BallTree from tqdm import tqdm import numpy as np import", "for right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of", "param, alg def create_one_hot_encoder(self, df): encoders = {} for attr, dtype in self.attributes.items():", "!= \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right,", "import tqdm import numpy as np import sklearn import 
warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning)", "self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured return self.timer.time_end(\"structured\") def filter(self, structured, t=None): if", "if log: print(\"no outlier is found, f1: 0\") return 0, 0 for i", "remove nan: row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan] model = self.algorithm(**self.param) if len(clean)", "calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) #", "prec)) return prec, tp def compute_f1(self, outliers, title=None, log=True): if title is not", "ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width = 0.35", "sklearn import svm from profiler.utility import GlobalTimer from profiler.data.embedding import OneHotModel import matplotlib.pyplot", "plt.subplots() ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data): ax.text(i - 0.25,", "in self.structured_info else 0 for right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info)))", "self.combined = None self.algorithm = None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder =", "V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2 thred = np.percentile(score, 100-p*100) mask = (score <", "return abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data) # else, categorical, find low", "(prec * rec) / (prec + rec) if log: print(\"f1: %.4f\" % f1)", "child not in self.structured_info: continue prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec", "None self.combined = None def get_outliers(self, gradient, right=None): size = 
gradient.shape[0] gradient_avg =", "to take cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL or", "alg = svm.OneClassSVM elif self.method == \"lof\": param = { 'n_neighbors': int(max(self.neighbor_size /", "import IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn import svm from profiler.utility import", "right in self.structured_info else 0 for right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured'])", "df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param = { 'm1': 3, 'm2': 5,", "X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False)", "= ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if", "truth as well, f1 is 1\") return 1, 0 if log: print(\"no outlier", "self.attributes[attr] == TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to", "LocalOutlierFactor from sklearn import svm from profiler.utility import GlobalTimer from profiler.data.embedding import OneHotModel", "vector to take cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL", "None self.combined = None self.workers=workers self.t = t self.tol = tol self.structured_info =", "is found, f1: 0\") return 0, 0 for i in outliers: if i", "self.param = { } self.overall = None self.structured = None self.combined = None", "%d outliers in gt, recall is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) == 0:", "min_neighbors def get_default_setting(self): if self.method == \"isf\": param = { 'contamination': 0.1, 'n_jobs':", "ax1.set_ylabel('count') width = 
0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers'])", "self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if log: self.visualize_stat(self.overall_info, 'overall', stat='precision') self.visualize_stat(self.structured_info,", "isinstance(data, np.ndarray): data = data.values if len(data.shape) == 1: data = data.reshape(-1, 1)", "range(len(indicies)): distances[i, indicies[i, :]] = 1 has_same_left = (distances == 1) return has_same_left", "right=None, m='m1'): return abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data) # else, categorical,", "= self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec, 'recall':", "outliers, precision is: %.4f\"%(len(outliers), prec)) return prec, tp def compute_f1(self, outliers, title=None, log=True):", "log=True): structured = self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured,", "return outliers def run_combined(self, structured): combined = list(structured) combined.extend(self.overall) return combined def compute_precision(self,", "= ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0 for right in self.overall_info],", "alg = LocalOutlierFactor elif self.method == \"ee\": param = { 'contamination': 0.1, }", "print(\"no outlier is found and no outlier is present in the ground truth", "attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, 
separate=True): self.timer.time_start(\"naive\") if", "STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(STDDetector, self).__init__(df, gt_idx, \"std\") self.param = { 'm1':", "self.method != \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right],", "data = data.values if len(data.shape) == 1: data = data.reshape(-1, 1) if self.attributes[right]", "= self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: # take one", "return mask class ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05,", "of neighbors') ax1.set_ylabel('count') width = 0.35 rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0],", "find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in", "elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else:", "= self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT:", "/ len(self.gt_idx) if log: print(\"with %d detected outliers, recall is: %.4f\"%(len(outliers), recall)) return", "# check if saved if attr in self.neighbors: data.append(self.neighbors[attr]) continue # validate type", "{} self.neighbor_size = neighbor_size if knn: if not high_dim: self.get_neighbors = self.get_neighbors_knn else:", "t = self.t unique, count = np.unique(structured, return_counts=True) outliers = list(unique[count > t*self.df.shape[0]])", 
"method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer() self.method = method", "data = data / np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] ==", "self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of neighbors')", "if title is not None: print(\"Results for %s:\"%title) prec, tp = self.compute_precision(outliers, log=log)", "'n_jobs': self.workers } alg = IsolationForest elif self.method == \"ocsvm\": param = {", "self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left)) #", "with structured\", log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self):", "= None self.combined = None def get_outliers(self, gradient, right=None): size = gradient.shape[0] gradient_avg", "j, attr in enumerate(left): # check if saved if attr in self.neighbors: data.append(self.neighbors[attr])", "'contamination': 0.1, } alg = LocalOutlierFactor elif self.method == \"ee\": param = {", "and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance", "ax.bar(np.arange(len(data)), data) ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data): ax.text(i - 0.25, v", "in self.attributes.items(): if dtype == CATEGORICAL or (dtype == TEXT and (not self.embed_txt)):", "precision if len(outliers) == 0: if len(self.gt_idx) == 0: if log: print(\"no outlier", "data, right=None): # return a mask pass def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right],", "{ 'avg_neighbors': 
self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp, outliers=attr_outliers, log=False) } return", "num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured", "- 0.25, v + .03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for", "ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right", "i, child in enumerate(tqdm(parent_sets)): outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not in", "(ax1, ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n", "from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn import svm from", "neighbors for every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector): def __init__(self, df, gt_idx=None):", "if self.embed_txt: # take embedding data = self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data) elif", "profiler.globalvar import * from sklearn.neighbors import BallTree from tqdm import tqdm import numpy", "pass def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = self.compute_precision(outliers=attr_outliers, log=False)", "\"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp, outliers, log=True): if tp == 0: if", "ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average 
number of neighbors for every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class STDDetector(OutlierDetector):", "1 distances = self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return has_same_left", "every column\"%(name, stat)) def evaluate(self, t=None, log=True): structured = self.filter(self.structured, t) self.eval['overall'] =", "))) for i, row in enumerate(has_same_neighbors): # indicies of neighbors nbr = self.df.index.values[row]", "overall = [] for attr in self.df: overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values) self.overall", "self.min_neighbors = min_neighbors def get_default_setting(self): if self.method == \"isf\": param = { 'contamination':", "(score < thred) #if it is going to remove all, then remove none", "self.attributes[right] == CATEGORICAL: # take one hot encoding data = self.encoder[right].get_embedding(data) # remove", "sklearn import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO)", "= {} self.eval = {} self.neighbors = {} self.neighbor_size = neighbor_size if knn:", "title=None, log=True): if title is not None: print(\"Results for %s:\"%title) prec, tp =", "BallTree from tqdm import tqdm import numpy as np import sklearn import warnings,", "0 if log: print(\"no outlier is found, f1: 0\") return 0, 0 for", "= df self.gt_idx = gt_idx self.overall = None self.structured = None self.combined =", "attr in self.neighbors: distances = self.neighbors[attr] + distances continue # validate type and", "def compute_precision(self, outliers, log=True): outliers = set(outliers) tp = 0.0 # precision if", "axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G = gradient - gradient_avg decompose", "outliers def run_structured(self, parent_sets): 
self.timer.time_start(\"structured\") structured = [] for i, child in enumerate(tqdm(parent_sets)):", "outliers, recall is: %.4f\"%(len(outliers), recall)) return recall def visualize_stat(self, dict, name, stat='precision'): data", "if not isinstance(data, np.ndarray): data = data.values if len(data.shape) == 1: data =", "= set(outliers) tp = 0.0 # precision if len(outliers) == 0: if len(self.gt_idx)", "and (not self.embed_txt)): data = df[attr] if not isinstance(data, np.ndarray): data = data.values", "1: data = data.reshape(-1, 1) encoders[attr] = OneHotModel(data) return encoders def get_outliers(self, data,", "} def get_outliers(self, data, right=None, m='m1'): return abs(data - np.nanmean(data)) > self.param[m] *", "attr, dtype in self.attributes.items(): if dtype == CATEGORICAL or (dtype == TEXT and", "plt.subplots() width = 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width)", "# validate type and calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt:", "mask = (score < thred) #if it is going to remove all, then", "numpy as np import sklearn import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig()", "tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec,", "np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G = gradient - gradient_avg", "\"enhance naive with structured\", log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False)", "ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name, stat)) def evaluate(self, t=None, log=True):", "= 
self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance", "child) structured.extend(outlier) if child not in self.structured_info: continue prec, tp = self.compute_precision(outlier, log=False)", "self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return mask == -1 y = model.fit_predict(clean) mask[~row_has_nan]", "of num_neighbors\\n for column %s\"%right) ax1.set_xlabel('number of neighbors') ax1.set_ylabel('count') width = 0.35 rects1", "/ (prec + rec) if log: print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec,", "in enumerate(left): # check if saved if attr in self.neighbors: data.append(self.neighbors[attr]) continue #", "right)] prec, tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers),", "compute_f1(self, outliers, title=None, log=True): if title is not None: print(\"Results for %s:\"%title) prec,", "log=False) def view_neighbor_info(self): for right in self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2) data", "return combined def compute_precision(self, outliers, log=True): outliers = set(outliers) tp = 0.0 #", "print(data.shape) raise Exception kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data,", "self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors for every", "if self.method != \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr,", "X.shape[0]: print(data.shape) raise Exception kdt = BallTree(data, metric='euclidean') # find knn indicies =", "self.embed[right].get_embedding(data) else: data = 
self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: # take one hot", "each vector to take cosine distance data.append(embedded / np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] ==", "'auto' } alg = svm.OneClassSVM elif self.method == \"lof\": param = { 'n_neighbors':", "data.reshape(-1, 1) if self.attributes[right] == TEXT: if self.embed_txt: # take embedding data =", "{ 'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1, } alg = LocalOutlierFactor elif", "t) self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with", "df self.gt_idx = gt_idx self.overall = None self.structured = None self.combined = None", "gt_idx, method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed self.attributes =", "enumerate(data): ax.text(i - 0.25, v + .03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s]", "self.df = df self.gt_idx = gt_idx self.overall = None self.structured = None self.combined", "normalize each vector to take cosine distance data = data / np.linalg.norm(data, axis=1)", "== CATEGORICAL or (dtype == TEXT and (not self.embed_txt)): data = df[attr] if", "decompose = np.linalg.svd(G) S = decompose[1] V = decompose[2] top_right_v = V[np.argmax(S)].T score", "as well, f1 is 1\") return 1, 0 if log: print(\"no outlier is", "# check if saved if attr in self.neighbors: distances = self.neighbors[attr] + distances", "rec) / (prec + rec) if log: print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec,", "= ABCMeta def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False):", "'kernel': \"rbf\", 'gamma': 'auto' } alg = svm.OneClassSVM elif self.method == \"lof\": param", "~mask return mask 
class ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False,", "has_same_left def get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for", "np import sklearn import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\", category=FutureWarning) logging.basicConfig() logger =", "data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean') # find", "self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector to take cosine distance data.append(embedded", "stat='precision'): data = [dict[right][stat] if right in dict else 0 for right in", "df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param = { } self.overall = None", "= list(structured) combined.extend(self.overall) return combined def compute_precision(self, outliers, log=True): outliers = set(outliers) tp", "= self.compute_recall(tp, outliers, log=log) if rec*prec == 0: f1 = 0 else: f1", "1 has_same_left = (distances == 1) return has_same_left @abstractmethod def get_outliers(self, data, right=None):", "self.visualize_stat(self.structured_info, 'structured', stat='recall') def evaluate_structured(self, t): structured = self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured,", "= { } self.overall = None self.structured = None self.combined = None def", "axis=1)) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded)", "data, right=None, m='m1'): return abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data) # else,", "self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = 
self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors': self.df.shape[0], 'total_outliers':", "frequency items class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\") self.param", "= self.get_neighbors_knn_highdim else: self.get_neighbors = self.get_neighbors_threshold def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left))", "pairwise distance for each attribute distances = np.zeros((X.shape[0],X.shape[0])) data = [] for j,", "= gradient - gradient_avg decompose = np.linalg.svd(G) S = decompose[1] V = decompose[2]", "CATEGORICAL or (dtype == TEXT and (not self.embed_txt)): data = df[attr] if not", "1, 0 if log: print(\"no outlier is found, f1: 0\") return 0, 0", "[self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors'] if right in", "row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan] model = self.algorithm(**self.param) if len(clean) <= self.min_neighbors:", "run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall = [] for attr in self.df: overall.extend(list(self.run_attr(attr)))", "# precision if len(outliers) == 0: if len(self.gt_idx) == 0: if log: print(\"no", "import GlobalTimer from profiler.data.embedding import OneHotModel import matplotlib.pyplot as plt from profiler.globalvar import", "OutlierDetector(object): __metaclass__ = ABCMeta def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100,", "evaluate_structured(self, t): structured = self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False) self.eval['combined']", "elif self.method == \"ocsvm\": param = { 'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto'", "log=log) if rec*prec == 0: 
f1 = 0 else: f1 = 2 *", "from sklearn import svm from profiler.utility import GlobalTimer from profiler.data.embedding import OneHotModel import", "distances continue # validate type and calculate cosine distance if self.attributes[attr] == TEXT", "def create_one_hot_encoder(self, df): encoders = {} for attr, dtype in self.attributes.items(): if dtype", "= self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False)", "= data.reshape(-1, 1) if self.attributes[right] == TEXT: if self.embed_txt: # take embedding data", "data = X[:,j].reshape(-1,1) kdt = BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data,", "run_attr_structured(self, left, right): outliers = [] if len(left) == 0: return outliers has_same_neighbors", "rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width) rects2 = ax.bar(np.arange(len(self.overall_info)), [self.structured_info[right]['avg_neighbors']", "0.1, } alg = LocalOutlierFactor elif self.method == \"ee\": param = { 'contamination':", "outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) # save outlier info num_neighbors[i] =", "[self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0 for right in self.overall_info], width) ax.legend((rects1[0],", "= nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) # save outlier info num_neighbors[i] = len(nbr)", "return 1 recall = tp / len(self.gt_idx) if log: print(\"with %d detected outliers,", "tp def compute_f1(self, outliers, title=None, log=True): if title is not None: print(\"Results for", "gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G = gradient - gradient_avg decompose =", "the groud truth, recall is: 1\"%(len(self.gt_idx))) return 1 recall = tp / 
len(self.gt_idx)", "structured = self.filter(self.structured, t) self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure", "+= 1 prec = tp / len(outliers) if log: print(\"with %d detected outliers,", "== TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock',", "data = data.reshape(-1, 1) if self.attributes[right] == TEXT: if self.embed_txt: # take embedding", "len(outliers) if log: print(\"with %d detected outliers, precision is: %.4f\"%(len(outliers), prec)) return prec,", "ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name, stat)) def evaluate(self, t=None, log=True): structured =", "= decompose[1] V = decompose[2] top_right_v = V[np.argmax(S)].T score = np.matmul(G, top_right_v)**2 thred", "else 0 for right in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average", "rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for", "else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) # save outlier info num_neighbors[i]", "i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1 distances = self.neighbors[attr] + distances", "log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self): for right", "outliers.extend(outlier) # save outlier info num_neighbors[i] = 
len(nbr) num_outliers[i] = len(outlier) # save", "width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors for every column\")", "= (distances == X.shape[1]) return has_same_left def get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left))", "separate: overall = [] for attr in self.df: overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values)", "= None self.combined = None self.algorithm = None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs)", "} alg = svm.OneClassSVM elif self.method == \"lof\": param = { 'n_neighbors': int(max(self.neighbor_size", "df[attr] if not isinstance(data, np.ndarray): data = data.values if len(data.shape) == 1: data", "take embedding data = self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL:", "df, method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False,", "= self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data = np.hstack(data) if data.shape[0]", "= IsolationForest elif self.method == \"ocsvm\": param = { 'nu': 0.1, 'kernel': \"rbf\",", "np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G = gradient - gradient_avg decompose = np.linalg.svd(G) S", "self.combined = None self.workers=workers self.t = t self.tol = tol self.structured_info = {}", "else: overall = self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right):", "in the groud truth, recall is: 1\"%(len(self.gt_idx))) return 1 recall = tp /", "= [dict[right][stat] if 
right in dict else 0 for right in self.overall_info] fig,", "self.embed_txt)): data = df[attr] if not isinstance(data, np.ndarray): data = data.values if len(data.shape)", "np.ndarray): data = data.values if len(data.shape) == 1: data = data.reshape(-1, 1) if", "validate type and calculate cosine distance if self.attributes[attr] == TEXT and self.embed_txt: data", "metric='cityblock', n_jobs=self.workers) # normalize distance maxdis = max(self.tol, np.nanmax(dis)) dis = dis /", "'gamma': 'auto' } alg = svm.OneClassSVM elif self.method == \"lof\": param = {", "self.structured = None self.combined = None self.workers=workers self.t = t self.tol = tol", "embedding data = self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: #", "0\") return 0, 0 for i in outliers: if i in self.gt_idx: tp", "= self.df.index.values[row] if len(nbr) == 0: continue if self.method != \"std\": outlier =", "<= self.tol)*1 distances = self.neighbors[attr] + distances has_same_left = (distances == X.shape[1]) return", "def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate:", "data = [] for j, attr in enumerate(left): # check if saved if", "np.linalg.norm(embedded, axis=1)) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1))", "self.timer = GlobalTimer() self.method = method self.df = df self.gt_idx = gt_idx self.overall", "in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]] = 1 distances = self.neighbors[attr] + distances has_same_left", "= sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1))", "'total_outliers': 
len(np.unique(outliers)) } return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured = [] for", "embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data = np.hstack(data) if", "if saved if attr in self.neighbors: data.append(self.neighbors[attr]) continue # validate type and calculate", "self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1),", "} self.overall = None self.structured = None self.combined = None def get_outliers(self, gradient,", "tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False) self.structured", "== TEXT: embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) data.append(embedded) else: data.append(X[:,j].reshape(-1,1)) self.neighbors[attr] = data[-1] data =", "= tol self.structured_info = {} self.overall_info = {} self.eval = {} self.neighbors =", "in self.structured_info: continue prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] =", "'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto' } alg = svm.OneClassSVM elif self.method ==", "ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors for every column\") ax.set_xlabel('column name') ax.set_ylabel('count') class", "right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) # save outlier info", "data.shape[0] != X.shape[0]: print(data.shape) raise Exception kdt = BallTree(data, metric='euclidean') # find knn", 
"self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors': self.df.shape[0], 'total_outliers': len(attr_outliers), 'precision': prec, 'recall': self.compute_recall(tp,", "[dict[right][stat] if right in dict else 0 for right in self.overall_info] fig, ax", "distance if self.attributes[attr] == TEXT and self.embed_txt: embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each", "axis=0) G = gradient - gradient_avg decompose = np.linalg.svd(G) S = decompose[1] V", "{ 'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto' } alg = svm.OneClassSVM elif self.method", "self.embed_txt: # take embedding data = self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data) elif self.attributes[right]", "= self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors': self.df.shape[0],", "= ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width) rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width) ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers']) ax2.set_title(\"num_neighbors and \\nnum_outliers\\n for column", "= self.get_neighbors_threshold def get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance", "overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left, right): outliers = [] if len(left) ==", "len(self.gt_idx) == 0: if log: print(\"since no outliers in the groud truth, recall", "'m2': 5, } def get_outliers(self, data, right=None, m='m1'): return abs(data - np.nanmean(data)) >", "data = self.encoder[right].get_embedding(data) elif self.attributes[right] == CATEGORICAL: # take one hot encoding data", "# remove nan: row_has_nan = np.isnan(data).any(axis=1) clean = data[~row_has_nan] model = 
self.algorithm(**self.param) if", "= kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i, indicies[i,", "not in self.structured_info: continue prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall']", "y = model.fit_predict(clean) mask[~row_has_nan] = y mask = mask.astype(int) return mask == -1", "0, 0 for i in outliers: if i in self.gt_idx: tp += 1", "== X.shape[1]) return has_same_left def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate", "param = { 'contamination': 0.1, 'n_jobs': self.workers } alg = IsolationForest elif self.method", "is going to remove all, then remove none if np.all(~mask): return ~mask return", "kdt.query(data, k=self.neighbor_size, return_distance=False) self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0])) for i in range(len(indicies)): self.neighbors[attr][i, indicies[i, :]]", "'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers", "self.compute_f1(self.overall, \"naive approach\", log=False) def view_neighbor_info(self): for right in self.structured_info: fig, (ax1, ax2)", "data = df[attr] if not isinstance(data, np.ndarray): data = data.values if len(data.shape) ==", "tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer() self.method = method self.df = df", "self.compute_f1(structured, \"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\") if log: self.visualize_stat(self.overall_info,", "EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn import svm", "rec = self.compute_recall(tp, 
outliers, log=log) if rec*prec == 0: f1 = 0 else:", "print(\"with %d outliers in gt, recall is: 0\"%(len(self.gt_idx))) return 0 if len(self.gt_idx) ==", "distances[i, indicies[i, :]] = 1 has_same_left = (distances == 1) return has_same_left @abstractmethod", "print(\"f1: %.4f\" % f1) return \"%.4f,%.4f,%.4f\"%(prec, rec, f1) def compute_recall(self, tp, outliers, log=True):", "return self.timer.time_end(\"structured\") def filter(self, structured, t=None): if t is None: t = self.t", "recall is: %.4f\"%(len(outliers), recall)) return recall def visualize_stat(self, dict, name, stat='precision'): data =", "= self.algorithm(**self.param) if len(clean) <= self.min_neighbors: return mask == -1 y = model.fit_predict(clean)", "cosine distance if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize", "= { 'contamination': 0.1, 'n_jobs': self.workers } alg = IsolationForest elif self.method ==", "= BallTree(data, metric='euclidean') # find knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for i", "np.unique(structured, return_counts=True) outliers = list(unique[count > t*self.df.shape[0]]) return outliers def run_combined(self, structured): combined", "neighbor_size if knn: if not high_dim: self.get_neighbors = self.get_neighbors_knn else: self.get_neighbors = self.get_neighbors_knn_highdim", "take cosine distance data = data / np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL", "dis = sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance maxdis", "1: data = data.reshape(-1, 1) if self.attributes[right] == TEXT: if self.embed_txt: # take", "mask pass def run_attr(self, right): attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = self.compute_precision(outliers=attr_outliers,", "self.neighbors: 
distances = self.neighbors[attr] + distances continue # validate type and calculate cosine", "ax.set_xticks(np.arange(len(data))) ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data): ax.text(i - 0.25, v + .03,", "plt.subplots(1,2) data = self.structured_info[right]['num_neighbors'] ax1.hist(data, bins=np.arange(data.min(), data.max()+1)) ax1.set_title(\"histogram of num_neighbors\\n for column %s\"%right)", "if len(self.gt_idx) == 0: if log: print(\"no outlier is found and no outlier", "outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier)", "prec, tp def compute_f1(self, outliers, title=None, log=True): if title is not None: print(\"Results", "structured = self.filter(self.structured, t) self.eval['structured'] = self.compute_f1(structured, \"structure only\", log=False) self.eval['combined'] = self.compute_f1(self.run_combined(structured),", "(dtype == TEXT and (not self.embed_txt)): data = df[attr] if not isinstance(data, np.ndarray):", "self.neighbors: data.append(self.neighbors[attr]) continue # validate type and calculate cosine distance if self.attributes[attr] ==", "outliers: if i in self.gt_idx: tp += 1 prec = tp / len(outliers)", "= None self.combined = None self.workers=workers self.t = t self.tol = tol self.structured_info", "if self.attributes[attr] == TEXT and self.embed_txt: data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) # normalize each vector", "len(data.shape) == 1: data = data.reshape(-1, 1) if self.attributes[right] == TEXT: if self.embed_txt:", "TEXT: if self.embed_txt: # take embedding data = self.embed[right].get_embedding(data) else: data = self.encoder[right].get_embedding(data)", "attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)] prec, tp = self.compute_precision(outliers=attr_outliers, log=False) self.overall_info[right] = { 'avg_neighbors':", "self.overall = None 
self.structured = None self.combined = None self.algorithm = None self.param,", "[] for j, attr in enumerate(left): # check if saved if attr in", "-1), size, axis=0) G = gradient - gradient_avg decompose = np.linalg.svd(G) S =", "(distances == X.shape[1]) return has_same_left def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left)) #", "fig, ax = plt.subplots() width = 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right", "= self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self): if self.method == \"isf\": param =", "outliers, log=True): if tp == 0: if log: print(\"with %d outliers in gt,", "self.embed[attr].get_embedding(X[:,j].reshape(-1,1)) dis = sklearn.metrics.pairwise.cosine_distances(data) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data", "num_outliers = np.zeros((len(has_same_neighbors, ))) for i, row in enumerate(has_same_neighbors): # indicies of neighbors", "tqdm import numpy as np import sklearn import warnings, logging warnings.filterwarnings(\"ignore\", category=DeprecationWarning) warnings.filterwarnings(\"ignore\",", "== CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1)", "0: f1 = 0 else: f1 = 2 * (prec * rec) /", "in self.overall_info], width) ax.legend((rects1[0], rects2[0]),['overall', 'structured']) ax.set_xticks(np.arange(len(self.overall_info))) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_title(\"average number of neighbors for", "0: continue if self.method != \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier", "'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers def run_structured(self,", "X.shape[1]) return has_same_left def 
get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise", "np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1))", "== 0: continue if self.method != \"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else:", "%d detected outliers, precision is: %.4f\"%(len(outliers), prec)) return prec, tp def compute_f1(self, outliers,", "to take cosine distance data = data / np.linalg.norm(data, axis=1) elif self.attributes[attr] ==", "\"naive approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\") self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with", "self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder = self.create_one_hot_encoder(df) self.min_neighbors = min_neighbors def get_default_setting(self): if", "continue prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier,", "view_neighbor_info(self): for right in self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2) data = self.structured_info[right]['num_neighbors']", "= self.compute_recall(tp, outliers=outlier, log=False) self.structured = structured return self.timer.time_end(\"structured\") def filter(self, structured, t=None):", "approach\", log=False) def view_neighbor_info(self): for right in self.structured_info: fig, (ax1, ax2) = plt.subplots(1,2)", "param = { 'contamination': 0.1, } alg = EllipticEnvelope return param, alg def", "= { 'contamination': 0.1, } alg = EllipticEnvelope return param, alg def create_one_hot_encoder(self,", "outliers = list(unique[count > t*self.df.shape[0]]) return outliers def run_combined(self, structured): combined = list(structured)", "log=True): if tp == 0: if log: print(\"with 
%d outliers in gt, recall", "= svm.OneClassSVM elif self.method == \"lof\": param = { 'n_neighbors': int(max(self.neighbor_size / 2,", "self.neighbors[attr] = (dis <= self.tol)*1 distances = self.neighbors[attr] + distances has_same_left = (distances", "for i, row in enumerate(has_same_neighbors): # indicies of neighbors nbr = self.df.index.values[row] if", "self.attributes[right] == TEXT: if self.embed_txt: # take embedding data = self.embed[right].get_embedding(data) else: data", "self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol, neighbor_size=neighbor_size, knn=knn, high_dim=high_dim) self.embed = embed self.attributes", "thred = np.percentile(score, 100-p*100) mask = (score < thred) #if it is going", "None self.structured = None self.combined = None self.workers=workers self.t = t self.tol =", "def get_outliers(self, data, right=None): mask = np.zeros((data.shape[0])) if not isinstance(data, np.ndarray): data =", "get_neighbors_threshold(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute", "\"std\": outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')]", "no outlier is present in the ground truth as well, f1 is 1\")", "class ScikitDetector(OutlierDetector): def __init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6,", "structured return self.timer.time_end(\"structured\") def filter(self, structured, t=None): if t is None: t =", "knn indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for i in range(len(indicies)): distances[i, indicies[i, :]]", "indicies = kdt.query(data, k=self.neighbor_size, return_distance=False) for i in range(len(indicies)): distances[i, indicies[i, :]] =", "not None: print(\"Results for %s:\"%title) prec, tp = self.compute_precision(outliers, log=log) rec = self.compute_recall(tp,", "nbr = 
self.df.index.values[row] if len(nbr) == 0: continue if self.method != \"std\": outlier", "distances = self.neighbors[attr] + distances continue # validate type and calculate cosine distance", "np.nanmean(num_neighbors), 'total_outliers': len(np.unique(outliers)) } return outliers def run_structured(self, parent_sets): self.timer.time_start(\"structured\") structured = []", "self.structured = None self.combined = None def get_outliers(self, gradient, right=None): size = gradient.shape[0]", "= np.sum(gradient, axis=0)/size gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0) G = gradient -", "0.25, v + .03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every", "in self.neighbors: distances = self.neighbors[attr] + distances continue # validate type and calculate", "indicies of neighbors nbr = self.df.index.values[row] if len(nbr) == 0: continue if self.method", "if len(left) == 0: return outliers has_same_neighbors = self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, )))", "detected outliers, precision is: %.4f\"%(len(outliers), prec)) return prec, tp def compute_f1(self, outliers, title=None,", "right], right)] else: outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')] outliers.extend(outlier) # save outlier", "if child not in self.structured_info: continue prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] =", "= self.compute_precision(outliers, log=log) rec = self.compute_recall(tp, outliers, log=log) if rec*prec == 0: f1", "outlier = self.run_attr_structured(parent_sets[child], child) structured.extend(outlier) if child not in self.structured_info: continue prec, tp", "outliers=outlier, log=False) self.structured = structured return self.timer.time_end(\"structured\") def filter(self, structured, t=None): if t", "if right in dict else 0 for right in self.overall_info] 
fig, ax =", "= np.zeros((len(has_same_neighbors, ))) for i, row in enumerate(has_same_neighbors): # indicies of neighbors nbr", "low frequency items class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df, gt_idx, \"sever\")", "self.compute_f1(self.run_combined(structured), \"enhance naive with structured\", log=False) def evaluate_overall(self): self.eval['overall'] = self.compute_f1(self.overall, \"naive approach\",", "overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values) self.overall = overall return self.timer.time_end(\"naive\") def run_attr_structured(self, left,", "= data / np.linalg.norm(data, axis=1) elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT:", "= plt.subplots() width = 0.35 rects1 = ax.bar(np.arange(len(self.overall_info))+width, [self.overall_info[right]['avg_neighbors'] for right in self.overall_info],", "structured.extend(outlier) if child not in self.structured_info: continue prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision']", "embed=None, gt_idx=None, embed_txt=False, t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100, knn=False, high_dim=False, **kwargs): super(ScikitDetector, self).__init__(df,", "+ .03, \"%.2f\"%v) ax.set_xticklabels(list(self.overall_info.keys())) ax.set_xlabel('Column Name') ax.set_ylabel(stat) ax.set_title(\"[%s] %s for every column\"%(name, stat))", "t=0.05, tol=1e-6, neighbor_size=100, knn=False, high_dim=False): self.timer = GlobalTimer() self.method = method self.df =", "sklearn.neighbors import BallTree from tqdm import tqdm import numpy as np import sklearn", "= gt_idx self.overall = None self.structured = None self.combined = None self.workers=workers self.t", "log=True): outliers = set(outliers) tp = 0.0 # precision if len(outliers) == 0:", "approach\") self.eval['structured'] = self.compute_f1(structured, \"structure only\") 
self.eval['combined'] = self.compute_f1(self.run_combined(structured), \"enhance naive with structured\")", "self.param[m] * np.nanstd(data) # else, categorical, find low frequency items class SEVERDetector(OutlierDetector): def", "== 0: if log: print(\"with %d outliers in gt, recall is: 0\"%(len(self.gt_idx))) return", "ax.set_yticks(np.arange(0,1,0.1)) for i, v in enumerate(data): ax.text(i - 0.25, v + .03, \"%.2f\"%v)", "return has_same_left def get_neighbors_knn(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance", "\"ocsvm\": param = { 'nu': 0.1, 'kernel': \"rbf\", 'gamma': 'auto' } alg =", "0 else: f1 = 2 * (prec * rec) / (prec + rec)", "get_neighbors_knn_highdim(self, left): X = self.df[left].values.reshape(-1, len(left)) # calculate pairwise distance for each attribute", "\"ee\": param = { 'contamination': 0.1, } alg = EllipticEnvelope return param, alg", "= sklearn.metrics.pairwise.cosine_distances(data) else: dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1), metric='cityblock', n_jobs=self.workers) # normalize distance maxdis =", "for attr in self.df: overall.extend(list(self.run_attr(attr))) else: overall = self.run_attr(self.df.columns.values) self.overall = overall return", "self.attributes[attr] == TEXT: data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1)) else: data = X[:,j].reshape(-1,1) kdt = BallTree(data,", "self.compute_recall(tp, outliers, log=log) if rec*prec == 0: f1 = 0 else: f1 =", "has_same_left = (distances == 1) return has_same_left @abstractmethod def get_outliers(self, data, right=None): #", "5, } def get_outliers(self, data, right=None, m='m1'): return abs(data - np.nanmean(data)) > self.param[m]", "(not self.embed_txt)): data = df[attr] if not isinstance(data, np.ndarray): data = data.values if", "categorical, find low frequency items class SEVERDetector(OutlierDetector): def __init__(self, df, gt_idx=None): super(SEVERDetector, self).__init__(df,", "attr self.embed_txt = 
embed_txt self.overall = None self.structured = None self.combined = None", "\"lof\": param = { 'n_neighbors': int(max(self.neighbor_size / 2, 2)), 'contamination': 0.1, } alg", "in enumerate(left): # check if saved if attr in self.neighbors: distances = self.neighbors[attr]", "matplotlib.pyplot as plt from profiler.globalvar import * from sklearn.neighbors import BallTree from tqdm", "G = gradient - gradient_avg decompose = np.linalg.svd(G) S = decompose[1] V =", "from profiler.data.embedding import OneHotModel import matplotlib.pyplot as plt from profiler.globalvar import * from", "if attr in self.neighbors: distances = self.neighbors[attr] + distances continue # validate type", "save info self.structured_info[right] = { 'determined_by': left, 'num_neighbors': num_neighbors, 'num_outliers': num_outliers, 'avg_neighbors': np.nanmean(num_neighbors),", "log=False) } return attr_outliers def run_all(self, parent_sets, separate=True): self.run_overall(separate) self.run_structured(parent_sets) print(self.timer.get_stat()) def run_overall(self,", "= {} self.neighbors = {} self.neighbor_size = neighbor_size if knn: if not high_dim:", "== CATEGORICAL: # take one hot encoding data = self.encoder[right].get_embedding(data) # remove nan:", "None self.combined = None self.algorithm = None self.param, self.algorithm = self.get_default_setting() self.param.update(kwargs) self.encoder", "tqdm import tqdm import numpy as np import sklearn import warnings, logging warnings.filterwarnings(\"ignore\",", "print(self.timer.get_stat()) def run_overall(self, separate=True): self.timer.time_start(\"naive\") if separate: overall = [] for attr in", "has_same_neighbors = self.get_neighbors(left) num_neighbors = np.zeros((len(has_same_neighbors, ))) num_outliers = np.zeros((len(has_same_neighbors, ))) for i,", "the ground truth as well, f1 is 1\") return 1, 0 if log:", "return encoders def get_outliers(self, data, right=None): mask = np.zeros((data.shape[0])) if not isinstance(data, 
np.ndarray):", "0 if len(self.gt_idx) == 0: if log: print(\"since no outliers in the groud", "= method self.df = df self.gt_idx = gt_idx self.overall = None self.structured =", "self.structured_info: continue prec, tp = self.compute_precision(outlier, log=False) self.structured_info[child]['precision'] = prec self.structured_info[child]['recall'] = self.compute_recall(tp," ]
[ "* b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find ideal rotation, unless it", "analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time *", "on ball location by aiming at a location, checking time to that location,", "intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}') # print(f'time now {car.time}')", "# or on_cieling # if not reachable: # return None return intercept @staticmethod", "# return intercept intercept.dodge = True #jump_time > 0.2 intercept.jump_time = car.time +", "predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error", "# Check for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <=", "intercept.dodge = True #jump_time > 0.2 intercept.jump_time = car.time + arrival_time - jump_time", "analysis.throttle import * from analysis.boost import * from analysis.jump import * from rlbot.agents.base_agent", "predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient # Base case (convergence) if abs(gradient)", "i += 1 fake_car = Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation =", "= None # rip self.dodge = False def simulate(self, bot) -> vec3: #", "= len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location", "= rotation_to_euler(optimal_rotation) # todo put some super precise trigonometry in here to find", "= (euler.yaw - car_euler.yaw) / 5.5 + 0.35 # disregarding angular acceleration #", "< norm(min_error)): min_error = error # Record trajectory 
bot.ball_predictions.append(vec3(b.location)) if not hit: return", "ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at a location, checking", "ball_location = ball_predictions[t] # Check for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p", "optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation =", "(to arrive perfectly on target) total_translation = intercept.location - get_car_front_center(car) total_translation[2] = 0", "# controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls", "case of gradient descent if horizontal_offset == last_horizontal_offset: gradient = 0 else: gradient", "# Find ideal rotation, unless it intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0,", "try to position the car's front center directly on top of the best", "but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I travel', frame.time,", "on_ground = intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius", "conversions and variable scope def get_controls(self, car_state: CarState, car: Car): controls = SimpleControllerState()", "None return min_error # warning: lazy conversions and variable scope def get_controls(self, car_state:", "return intercept intercept.dodge = True #jump_time > 0.2 intercept.jump_time = car.time + arrival_time", "intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error", "arrival if norm(fake_car.location - intercept.location) < ball.collision_radius / 2: intercept.location = ball_location break", "b.location if hit and (min_error == None or norm(error) < norm(min_error)): min_error =", "# print(c.location, b.location) # Simulate the 
collision and resulting for i in range(60*3):", "max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') # Intercept is only", "angle allowed at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation", "solve with motion equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch -", "# print(f'time intended to be in air {jump_time}') # print(f'distance travelled in air", "car_euler.pitch) / 5.5 + 0.35 # disregarding angular acceleration # jump_yaw_time = (euler.yaw", "range(60*3): c.location += c.velocity * dt b.step(dt, c) # Check if we hit", "> 100: # intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0 #", "elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error =", "60.0 # return intercept intercept.dodge = True #jump_time > 0.2 intercept.jump_time = car.time", "on top of the best hit vector euler = rotation_to_euler(optimal_rotation) # todo put", "* advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location", "analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance - (frame.distance -", "for ground paths (and walls/cieling are only indirectly supported) # collision_radius = c.hitbox().half_width[2]", "min_error = None # Drive towards intercept (moving in direction of c.forward()) c.rotation", "= len(ball_predictions) / 60.0 # return intercept intercept.dodge = True #jump_time > 0.2", "len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index]", "until jump {drive_analysis.distance}') # 
print(f'total distance to target {total_distance}') # print(f'horiz speed @", "air {jump_time}') # print(f'distance travelled in air {jump_time * drive_analysis.speed}') # print(f'distance remaining", "= boost self.time = None self.purpose = None # rip self.dodge = False", "# arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index", "arrive perfectly on target) total_translation = intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance", "0.0005: print(f'convergence in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction =", "b = Ball(ball) dt = 1.0 / 60.0 # Generate predictions of ball", "perfectly on target) total_translation = intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance =", "(and walls/cieling are only indirectly supported) # collision_radius = c.hitbox().half_width[2] * 2 +", "controls @staticmethod def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions = None): #", "= jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time", "with ground if fake_car.location[2] < 17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) #", "= self.location dt = 1.0 / 60.0 hit = False min_error = None", "+ 0.35 # disregarding angular acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time)", "# intercept.location = vec3(ball_location) # intercept.time = fake_car.time # return intercept # Now", "location and continue descending the gradient intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset", "- intercept.location) < 
ball.collision_radius / 2: intercept.location = ball_location break if i >=", "controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls @staticmethod def calculate_old(car: Car,", "converge on ball location by aiming at a location, checking time to that", "from the car faster than the car's max boost speed intercept = Intercept(b.location)", "to position the car's front center directly on top of the best hit", "= target - ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location direction = atan2(direction_vector[1],", "the car's front center directly on top of the best hit vector #", "controls.throttle = 1 return controls @staticmethod def calculate_old(car: Car, ball: Ball, target: vec3,", "0: horizontal_offset = 25 else: horizontal_offset = 25 intercept.location = ball_location - normalize(fake_car.left())", "{jump_time}') # print(f'distance travelled in air {jump_time * drive_analysis.speed}') # print(f'distance remaining to", "predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') #", "if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost = False controls.steer = steer_toward_target(car_state,", "c.location += translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time < c.time:", "vec3(b.location) i = 0 max_tries = 100 analyzer = BoostAnalysis() if intercept.boost else", "- to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost = False controls.handbrake = True", "0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda", "* 60)) if ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = 
len(ball_predictions) /", "= Ball(ball) dt = 1.0 / 60.0 # Generate predictions of ball path", "until jump {drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance until jump {drive_analysis.distance}') #", "# unless the ball is moving away from the car faster than the", "print(f'Intercept convergence in {i} iterations') # print(f'desired roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}')", "= [vec3(b.location)] for i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge", "the latest intercept location and continue descending the gradient intercept.location = ball_location -", "max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset # Return the", "1.0 / 60.0 hit = False min_error = None # Drive towards intercept", "= None # Drive towards intercept (moving in direction of c.forward()) c.rotation =", "car.forward()) > pi / 2: controls.boost = False controls.handbrake = True elif angle_between(self.location", "vec3(ball_location) # intercept.location[2] = 0 intercept.time = arrival_time i += 1 if i", "ball is moving away from the car faster than the car's max boost", "abs(gradient) < 0.0005: print(f'convergence in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}')", "to find the max angle allowed at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]:", "/ (horizontal_offset - last_horizontal_offset) if gradient == 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset", "# print(c.time, b.time) # print(c.location, b.location) # Simulate the collision and resulting for", "ball_predictions[t] # Check for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location)", "# print('hit') # Measure dist from target error = t - b.location if", "* 
from analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState", "b.step(dt, c) # Check if we hit the ball yet if norm(b.location -", "of the best hit vector euler = rotation_to_euler(optimal_rotation) # todo put some super", "= 0 intercept.time = arrival_time i += 1 if i >= max_tries: print(f'Warning:", "center directly on top of the best hit vector # Adjust vertical position", "intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find ideal rotation, unless", "gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient == 0:", "None: ball_predictions = [vec3(b.location)] for i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) #", "print(f'time intended to be in air {jump_time}') # print(f'distance travelled in air {jump_time", "sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location += translation", "5.5 + 0.35 # disregarding angular acceleration # jump_roll_time = (euler.roll - car_euler.roll)", "bot.ball_predictions.append(vec3(b.location)) if not hit: return None return min_error # warning: lazy conversions and", "if ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break", "of ball path if ball_predictions is None: ball_predictions = [vec3(b.location)] for i in", "/ 5.5 + 0.35 # disregarding angular acceleration # jump_roll_time = (euler.roll -", "+= 1 fake_car = Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation = look_at(direction,", "indirectly supported) # collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius *", "ThrottleAnalysis() while i < max_tries: i += 1 fake_car = Car(car) direction =", "= 
norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius translation = direction * advance_distance", "center of mass direction_vector[2] = 0 target_direction_vector = target - ball_location target_direction_vector[2] =", "descent if horizontal_offset == last_horizontal_offset: gradient = 0 else: gradient = (horizontal_error -", "(c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit = True # print('hit') # Measure dist", "fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location +=", "* car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3, boost = True): self.location =", "jump_time = jump_height_time # todo revisit rotation time # print('jump_time', jump_time) # Calculate", "max_tries = 100 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i <", "to target @ jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations')", "Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location dt = 1.0", "hit and (min_error == None or norm(error) < norm(min_error)): min_error = error #", "in here to find the max angle allowed at given height if fake_car.location[2]", "direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction # intercept.location =", "# and then aiming at the ball's NEW position. 
Guaranteed to converge (typically", "17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time #", "frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I travel', frame.time, frame.distance, frame.speed)", "util.drive import steer_toward_target from util.vec import Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb", "# try to position the car's front center directly on top of the", "= 25 else: horizontal_offset = 25 intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset", "for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <= ball.collision_radius: direction_vector", "to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost = False controls.handbrake = True elif", "CarState from util.drive import steer_toward_target from util.vec import Vec3 from util.rlutilities import to_vec3,", "= look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation = optimal_rotation", "{degrees(ideal_direction)}') break # Check for arrival if norm(fake_car.location - intercept.location) < ball.collision_radius /", "= analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time", "= SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) >", "== None or norm(error) < norm(min_error)): min_error = error # Record trajectory bot.ball_predictions.append(vec3(b.location))", "- (frame.distance - start_frame.distance) - frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, 
start_index) arrival_time", "with an arbitrary seed value if last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset", "or on_cieling # if not reachable: # return None return intercept @staticmethod def", "if abs(gradient) < 0.0005: print(f'convergence in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset =", "print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}') # print(f'time now", "Gradually converge on ball location by aiming at a location, checking time to", "ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction =", "# disregarding angular acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 +", "Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) i = 0", "intercept') # Init vars c = Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target)", "if last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset = 0 if horizontal_error >", "from analysis.throttle import * from analysis.boost import * from analysis.jump import * from", "equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5", "- get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame =", "with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong", "0.35 # disregarding angular acceleration # jump_roll_time = (euler.roll - car_euler.roll) / 5.5", "# print(f'ideal direction = {degrees(ideal_direction)}') break # Check for arrival if norm(fake_car.location -", 
"rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 + 0.35 # disregarding", "# print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}') if norm(ball_location", "revisit rotation time # print('jump_time', jump_time) # Calculate distance to drive before jumping", "print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal direction = {degrees(ideal_direction)}') break #", "c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0]", "to be in air {jump_time}') # print(f'distance travelled in air {jump_time * drive_analysis.speed}')", "= horizontal_error horizontal_offset = predicted_horizontal_offset # Return the latest intercept location and continue", "ball_predictions = None): # Init vars b = Ball(ball) dt = 1.0 /", "+ vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}') if norm(ball_location - intercept_ball_position) <= 1: #", "print(f'distance remaining to target @ jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence in", "{degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}') return intercept # Edge case exit:", "= True elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost =", "+= translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time < c.time: b.step(dt)", "norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions)", "car faster than the car's max boost speed intercept = Intercept(b.location) intercept.purpose =", "some super precise trigonometry in here to find the max angle allowed at", "Not super efficient but 
POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame", "hit = False min_error = None # Drive towards intercept (moving in direction", "Calculate distance to drive before jumping (to arrive perfectly on target) total_translation =", "intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position',", "intercept.time = arrival_time i += 1 if i >= max_tries: print(f'Warning: max tries", "Car, Ball from rlutilities.linear_algebra import * from analysis.throttle import * from analysis.boost import", "max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset", "analysis data # Not super efficient but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) #", "i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') return intercept", "def get_car_front_center(car: Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2]", "ball_location) <= ball.collision_radius: direction_vector = p - (fake_car.location - normalize(fake_car.forward()) * 13.88) #", "1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1] #", "= horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient # Base case", "= Ball(ball) # Generate predictions of ball path if ball_predictions is None: ball_predictions", "last_horizontal_error = None last_horizontal_offset = None i = 0 max_tries = 101 analyzer", "vec3(b.location) collision_achieved = False last_horizontal_error = None last_horizontal_offset = None i = 0", "car's front center 
directly on top of the best hit vector euler =", "c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius * 8 # on_ground = intercept.location[2]", "[vec3(b.location)] for i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on", "import SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target from util.vec import", "hit: return None return min_error # warning: lazy conversions and variable scope def", "error = t - b.location if hit and (min_error == None or norm(error)", "- to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost = False controls.handbrake = False", "True intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error = None last_horizontal_offset = None", "start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance,", "print(f'distance travelled in air {jump_time * drive_analysis.speed}') # print(f'distance remaining to target @", "ball_location - normalize(fake_car.left()) * horizontal_offset break # Recursive case of gradient descent if", "print('in 1 frame I travel', frame.time, frame.distance, frame.speed) fake_car.location += direction * frame.distance", "= ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction", "fake_car.rotation = look_at(direction, fake_car.up()) for t in range(60*5): # Step car location with", "car's front center directly on top of the best hit vector # Adjust", "= {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}') return intercept # Edge case", "5120 - collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius # #", "directly on top of the 
best hit vector euler = rotation_to_euler(optimal_rotation) # todo", "of c.forward()) c.rotation = look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance =", "normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius translation", "= analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance - (frame.distance", "# intercept.location[2] = 0 intercept.time = arrival_time i += 1 if i >=", "100: # intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0 # return", "intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error = None last_horizontal_offset = None i", "jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) #", "hit direction gradient # Kick off the gradient descent with an arbitrary seed", "{get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front", "on_side_wall # or on_cieling # if not reachable: # return None return intercept", "- c.location) - c.hitbox().half_width[0] - b.collision_radius translation = direction * advance_distance sim_start_state: ThrottleFrame", "if norm(fake_car.location - intercept.location) < ball.collision_radius / 2: intercept.location = ball_location break if", "acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo", "vars b = Ball(ball) dt = 1.0 / 60.0 # Generate predictions of", "< ball.collision_radius / 2: intercept.location = ball_location break if i >= max_tries: print(f'Warning:", "time to that location, # and then aiming at the ball's NEW 
position.", "off the gradient descent with an arbitrary seed value if last_horizontal_error is None:", "intercept.boost else ThrottleAnalysis() while i < max_tries: i += 1 fake_car = Car(car)", "< 0.0005: print(f'convergence in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction", "in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal", "fake_car.time += dt ball_location = ball_predictions[t] # Check for collision p = closest_point_on_obb(fake_car.hitbox(),", "direction of c.forward()) c.rotation = look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance", "abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius", "Recursive case of gradient descent if horizontal_offset == last_horizontal_offset: gradient = 0 else:", "- ball_location) <= ball.collision_radius: direction_vector = p - (fake_car.location - normalize(fake_car.forward()) * 13.88)", "controls.handbrake = True elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost", "max angle allowed at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0", "predictions of ball path if ball_predictions is None: ball_predictions = [] for i", "location, # and then aiming at the ball's NEW position. 
Guaranteed to converge", "get_car_front_center(fake_car) # try to position the car's front center directly on top of", "speed @ jump {drive_analysis.speed}') # print(f'time intended to be in air {jump_time}') #", "max tries ({max_tries}) exceeded for calculating intercept') # Intercept is only meant for", "Return the latest intercept location and continue descending the gradient intercept.location = ball_location", "and then aiming at the ball's NEW position. Guaranteed to converge (typically in", "print(f'convergence in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}')", "find the max angle allowed at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch", "= direction * sim_start_state.speed c.location += translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)]", "# Now descend the hit direction gradient # Kick off the gradient descent", "target) total_translation = intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation) start_index", "not reachable: # return None return intercept @staticmethod def calculate(car: Car, ball: Ball,", "25 intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset break # Recursive case of", "from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target from", "max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error", "euler = rotation_to_euler(optimal_rotation) # todo put some super precise trigonometry in here to", "- jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time 
intercept.dodge_direction = normalize(vec2(optimal_hit_vector))", "meant for ground paths (and walls/cieling are only indirectly supported) # collision_radius =", "= 17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time", "None return intercept @staticmethod def calculate(car: Car, ball: Ball, target: vec3, ball_predictions =", "else: horizontal_offset = 25 intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset break #", "self.location = location self.boost = boost self.time = None self.purpose = None #", "the gradient intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient =", "{euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location)", "* 2 + b.collision_radius + b.collision_radius * 8 # on_ground = intercept.location[2] <=", "range(60*5): # Step car location with throttle/boost analysis data # Not super efficient", "print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}') if norm(ball_location -", "Check for arrival if norm(fake_car.location - intercept.location) < ball.collision_radius / 2: intercept.location =", "= look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept - c.location)", "2: controls.boost = False controls.handbrake = True elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) >", "= intercept_ball_position - optimal_hit_vector # Find ideal rotation, unless it intersects with ground", "b.collision_radius translation = direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) 
c.velocity =", "start_frame.distance) - frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time -", "on_cieling = intercept.location[2] >= 2044 - collision_radius # reachable = on_ground # or", "def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init vars", "intercept.location[2] = 0 intercept.time = arrival_time i += 1 if i >= max_tries:", "/ 60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0],", "class Intercept(): def __init__(self, location: vec3, boost = True): self.location = location self.boost", "1.0 / 60.0 # Generate predictions of ball path if ball_predictions is None:", "faster than the car's max boost speed intercept = Intercept(b.location) intercept.purpose = 'ball'", "target: vec3, ball_predictions = None): # Init vars b = Ball(ball) dt =", "frame.distance, frame.speed) fake_car.location += direction * frame.distance fake_car.velocity = direction * frame.speed fake_car.time", "return None return min_error # warning: lazy conversions and variable scope def get_controls(self,", "intercept.location = vec3(ball_location) # intercept.time = fake_car.time # return intercept # Now descend", "controls.handbrake = False # Be smart about not using boost at max speed", "normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find ideal", "/ 2: controls.boost = False controls.handbrake = True elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward())", "(euler.roll - car_euler.roll) / 5.5 + 0.35 # disregarding angular acceleration # jump_time", "car.time + arrival_time - jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time", "in direction of c.forward()) c.rotation = 
look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward()", "vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location", "intercept.boost else ThrottleAnalysis() while i < max_tries: # Find optimal spot to hit", "/ 60.0 hit = False min_error = None # Drive towards intercept (moving", "fake_car.location += direction * frame.distance fake_car.velocity = direction * frame.speed fake_car.time += dt", "= atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction # intercept.location = vec3(ball_location) #", "motion equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch) /", "= vec3(b.location) collision_achieved = False last_horizontal_error = None last_horizontal_offset = None i =", "# jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 + 0.35 # disregarding angular", "= normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}')", "last_horizontal_offset) if gradient == 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset -", "todo put some super precise trigonometry in here to find the max angle", "for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by", "{drive_analysis.distance}') # print(f'total distance to target {total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}')", "1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') #", "= None i = 0 max_tries = 101 analyzer = BoostAnalysis() if intercept.boost", "fake_car.velocity = direction * frame.speed fake_car.time += dt ball_location = 
ball_predictions[t] # Check", "b = Ball(ball) # Generate predictions of ball path if ball_predictions is None:", "+ jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time", "= 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) i = 0 max_tries =", "gradient intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}')", "print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal direction =", "it intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might", "controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls @staticmethod", "intersects with ground if fake_car.location[2] < 17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car)", "smart about not using boost at max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed", "= True #jump_time > 0.2 intercept.jump_time = car.time + arrival_time - jump_time intercept.dodge_preorientation", "distance to drive before jumping (to arrive perfectly on target) total_translation = intercept.location", "optimal_hit_vector[1], 0) - intercept.location)}') if norm(ball_location - intercept_ball_position) <= 1: # if norm(intercept_ball_position", "ground if fake_car.location[2] < 17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) # Calculate", "None): # Init vars fake_car = Car(car) b = Ball(ball) # Generate predictions", "{drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance until jump {drive_analysis.distance}') # print(f'total distance", "= 
predicted_horizontal_offset # Return the latest intercept location and continue descending the gradient", "= Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) i =", "Generate predictions of ball path if ball_predictions is None: ball_predictions = [vec3(b.location)] for", "# if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost = False controls.steer =", "calculate(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init vars b", "get_car_front_center(car: Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class", "* from analysis.boost import * from analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState", "simulate(self, bot) -> vec3: # print('simulate intercept') # Init vars c = Car(bot.game.my_car)", "{last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location =", "controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward())", "jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 + 0.35 # disregarding angular acceleration", "jumping (to arrive perfectly on target) total_translation = intercept.location - get_car_front_center(car) total_translation[2] =", "the ball is moving away from the car faster than the car's max", "elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost = False controls.handbrake", "dt = 1.0 / 60.0 hit = False min_error = None # Drive", "/ 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at a", "* frame.speed fake_car.time += dt ball_location = ball_predictions[t] # Check 
for collision p", "= BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location += translation c.time +=", "import * from analysis.boost import * from analysis.jump import * from rlbot.agents.base_agent import", "rotation, unless it intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) #", "the car's front center directly on top of the best hit vector euler", "Find optimal spot to hit the ball optimal_hit_vector = normalize(target - intercept_ball_position) *", "by aiming at a location, checking time to that location, # and then", "with motion equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch)", "advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location +=", "front center directly on top of the best hit vector # Adjust vertical", "= int(round(arrival_time * 60)) if ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time =", "+ ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset:", "norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius translation = direction * advance_distance sim_start_state:", "return intercept @staticmethod def calculate(car: Car, ball: Ball, target: vec3, ball_predictions = None):", "= drive_analysis.time - start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) #", "101 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: i", "ball: Ball, target: vec3, ball_predictions = None): # Init vars b = Ball(ball)", "drive_analysis.time - start_frame.time + 
jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time", "normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}') #", "= analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if ball_index >=", "# Base case (convergence) if abs(gradient) < 0.0005: print(f'convergence in {i} iterations') print(f'gradient", "5.5 + 0.35 # disregarding angular acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time,", "# jump_roll_time = (euler.roll - car_euler.roll) / 5.5 + 0.35 # disregarding angular", "direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed", "norm(p - ball_location) <= ball.collision_radius: direction_vector = p - (fake_car.location - normalize(fake_car.forward()) *", "return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def", "for i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball", "normalize(fake_car.forward()) * 13.88) # octane center of mass direction_vector[2] = 0 target_direction_vector =", "intercept.location) < ball.collision_radius / 2: intercept.location = ball_location break if i >= max_tries:", "b.time) # print(c.location, b.location) # Simulate the collision and resulting for i in", "reachable = on_ground # or on_back_wall or on_side_wall # or on_cieling # if", "= None): # Init vars fake_car = Car(car) b = Ball(ball) # Generate", "c.velocity = direction * sim_start_state.speed c.location += translation c.time += sim_start_state.time bot.ball_predictions =", "0) - intercept.location)}') if 
norm(ball_location - intercept_ball_position) <= 1: # if norm(intercept_ball_position -", "# # on_cieling = intercept.location[2] >= 2044 - collision_radius # reachable = on_ground", "= euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position)", "trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None return min_error # warning: lazy conversions", "- c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit = True # print('hit')", "while i < max_tries: i += 1 fake_car = Car(car) direction = normalize(intercept.location", "ThrottleAnalysis() while i < max_tries: # Find optimal spot to hit the ball", "rlutilities.simulation import Car, Ball from rlutilities.linear_algebra import * from analysis.throttle import * from", "directly on top of the best hit vector # Adjust vertical position if", "False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls @staticmethod def calculate_old(car:", "60.0 hit = False min_error = None # Drive towards intercept (moving in", "[] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location", "collision and resulting for i in range(60*3): c.location += c.velocity * dt b.step(dt,", "{i}') print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal direction", "# reachable = on_ground # or on_back_wall or on_side_wall # or on_cieling #", "{drive_analysis.speed}') # print(f'time intended to be in air {jump_time}') # print(f'distance travelled in", "- car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t in range(60*5): # Step car", "ball_predictions is None: ball_predictions = [vec3(b.location)] for i in range(60*5): b.step(1.0 / 
60.0)", "b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find ideal rotation, unless it intersects", "in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at", "intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) #", "True elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost = False", "last_horizontal_offset = None i = 0 max_tries = 101 analyzer = BoostAnalysis() if", "= normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius", "start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance -", "is only meant for ground paths (and walls/cieling are only indirectly supported) #", "out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset", "@staticmethod def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init", "center directly on top of the best hit vector euler = rotation_to_euler(optimal_rotation) #", "on_back_wall or on_side_wall # or on_cieling # if not reachable: # return None", "= ball_predictions[t] # Check for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p -", "maxed out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset =", "return intercept # Now descend the hit direction gradient # Kick off the", "# intercept.time = fake_car.time # return intercept # Now descend the 
hit direction", "ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction # intercept.location = vec3(ball_location)", "Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t in", "car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset <", "precise trigonometry in here to find the max angle allowed at given height", "gradient descent if horizontal_offset == last_horizontal_offset: gradient = 0 else: gradient = (horizontal_error", "angular acceleration # jump_roll_time = (euler.roll - car_euler.roll) / 5.5 + 0.35 #", "from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math import pi, atan, atan2, degrees", "car's max boost speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True", "from rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target from util.vec import Vec3 from", "vec3(ball_location) # intercept.time = fake_car.time # return intercept # Now descend the hit", "0 intercept_ball_position = ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error", "direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction", "= horizontal_offset - horizontal_error / gradient # Base case (convergence) if abs(gradient) <", "look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation = optimal_rotation #", "False min_error = None # Drive towards intercept (moving in direction of c.forward())", "gradient descent 
with an arbitrary seed value if last_horizontal_error is None: last_horizontal_error =", "fake_car = Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up()) for", "i = 0 max_tries = 101 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis()", "only indirectly supported) # collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius", "ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation", "intercept.location = ball_location break if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded", "# jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo revisit", "the gradient descent with an arbitrary seed value if last_horizontal_error is None: last_horizontal_error", "1 if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept')", "analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive", "b.location) # Simulate the collision and resulting for i in range(60*3): c.location +=", "using boost at max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: #", "if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') return", "ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}')", "= jump_height_time # todo revisit rotation time # print('jump_time', jump_time) # Calculate distance", "= None last_horizontal_offset = None i = 0 max_tries = 101 analyzer =", "# car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 +", "start_index) # 
arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity))", "pi / 2: controls.boost = False controls.handbrake = True elif angle_between(self.location - to_vec3(car_state.physics.location),", "top of the best hit vector # Adjust vertical position if it (still)", "<10 iterations) # unless the ball is moving away from the car faster", "controls.boost = False controls.handbrake = False else: controls.boost = self.boost controls.handbrake = False", "= ball_location - normalize(fake_car.left()) * horizontal_offset break # Recursive case of gradient descent", "in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by", "# Drive towards intercept (moving in direction of c.forward()) c.rotation = look_at(intercept, c.up())", "i = 0 max_tries = 100 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis()", "if ball_predictions is None: ball_predictions = [] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location))", "octane center of mass direction_vector[2] = 0 target_direction_vector = target - ball_location target_direction_vector[2]", "# intercept.time = len(ball_predictions) / 60.0 # return intercept intercept.dodge = True #jump_time", "# Init vars fake_car = Car(car) b = Ball(ball) # Generate predictions of", "100 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: #", "- intercept_ball_position) <= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: # intercept.location", "- c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if ball_index >= len(ball_predictions): intercept.location", "jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations') # print(f'desired roll", "bot.ball_predictions = [vec3(b.location)] while b.time < 
c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) #", "* jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time + jump_time", "if hit and (min_error == None or norm(error) < norm(min_error)): min_error = error", "min_error # warning: lazy conversions and variable scope def get_controls(self, car_state: CarState, car:", "to hit the ball optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location =", "+= c.velocity * dt b.step(dt, c) # Check if we hit the ball", "JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time", "trigonometry in here to find the max angle allowed at given height if", "predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset =", "len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location +", "> self.boost_analysis.frames[-1].speed - 10: # controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle", "{jump_time * drive_analysis.speed}') # print(f'distance remaining to target @ jump {total_distance - drive_analysis.distance}')", "# todo put some super precise trigonometry in here to find the max", "sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time)", "direction * frame.distance fake_car.velocity = direction * frame.speed fake_car.time += dt ball_location =", "= False controls.handbrake = False else: controls.boost = self.boost controls.handbrake = False #", "- intercept.location)}') if norm(ball_location - 
intercept_ball_position) <= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car))", "roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location =", "Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location dt = 1.0 / 60.0 hit", "print(c.location, b.location) # Simulate the collision and resulting for i in range(60*3): c.location", "print(f'ideal direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}') return intercept #", "vec3, boost = True): self.location = location self.boost = boost self.time = None", "hit the ball optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position", "start_frame = analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance - (frame.distance - start_frame.distance)", "collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius # # on_cieling =", "Ball(ball) dt = 1.0 / 60.0 # Generate predictions of ball path if", "continue descending the gradient intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}')", "if intercept.boost else ThrottleAnalysis() while i < max_tries: i += 1 fake_car =", "# rip self.dodge = False def simulate(self, bot) -> vec3: # print('simulate intercept')", "- intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find ideal rotation,", "position. 
Guaranteed to converge (typically in <10 iterations) # unless the ball is", "collision_radius # reachable = on_ground # or on_back_wall or on_side_wall # or on_cieling", "- ideal_direction # intercept.location = vec3(ball_location) # intercept.time = fake_car.time # return intercept", "arbitrary seed value if last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset = 0", "- optimal_hit_vector # Find ideal rotation, unless it intersects with ground optimal_rotation =", "ball location by aiming at a location, checking time to that location, #", "towards intercept (moving in direction of c.forward()) c.rotation = look_at(intercept, c.up()) direction =", "{rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2] = 0", "0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try", "of ball path if ball_predictions is None: ball_predictions = [] for i in", "if gradient == 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error", "frame.distance fake_car.velocity = direction * frame.speed fake_car.time += dt ball_location = ball_predictions[t] #", "b.collision_radius * 8 # on_ground = intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1])", "i < max_tries: # Find optimal spot to hit the ball optimal_hit_vector =", "jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo revisit rotation time # print('jump_time', jump_time)", "# Edge case exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if", "be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location 
+=", "arrival_time = drive_analysis.time - start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index)", "from rlutilities.linear_algebra import * from analysis.throttle import * from analysis.boost import * from", "False def simulate(self, bot) -> vec3: # print('simulate intercept') # Init vars c", "if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset =", "target error = t - b.location if hit and (min_error == None or", "for arrival if norm(fake_car.location - intercept.location) < ball.collision_radius / 2: intercept.location = ball_location", "i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming", "= 1.0 / 60.0 hit = False min_error = None # Drive towards", "c.velocity * dt b.step(dt, c) # Check if we hit the ball yet", "ball_predictions = [vec3(b.location)] for i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually", "'ball' intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error = None", "bot) -> vec3: # print('simulate intercept') # Init vars c = Car(bot.game.my_car) b", "error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None return min_error #", "aiming at a location, checking time to that location, # and then aiming", "# print(f'distance until jump {drive_analysis.distance}') # print(f'total distance to target {total_distance}') # print(f'horiz", "print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location),", "in {i} iterations') # print(f'desired roll {euler.roll}') # print(f'actual roll 
{rotation_to_euler(c.rotation).roll}') break intercept_ball_position", "on_ground # or on_back_wall or on_side_wall # or on_cieling # if not reachable:", "dt b.step(dt, c) # Check if we hit the ball yet if norm(b.location", "(euler.yaw - car_euler.yaw) / 5.5 + 0.35 # disregarding angular acceleration # jump_roll_time", "print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') # Intercept is only meant", "= 101 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries:", "@staticmethod def calculate(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init", "norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60))", "get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0", "arrival_time i += 1 if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded", "location: vec3, boost = True): self.location = location self.boost = boost self.time =", "given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw,", "at a location, checking time to that location, # and then aiming at", "Guaranteed to converge (typically in <10 iterations) # unless the ball is moving", "travelled in air {jump_time * drive_analysis.speed}') # print(f'distance remaining to target @ jump", "ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) -", "location, checking time to that location, # and then aiming at the ball's", "# Return the latest intercept location and continue descending the gradient intercept.location =", "print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction = 
{degrees(ideal_direction)}') print(f'target = {target}')", "- start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time =", "intercept.time = fake_car.time # return intercept # Now descend the hit direction gradient", "intercept.location)}') if norm(ball_location - intercept_ball_position) <= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) >", "here to find the max angle allowed at given height if fake_car.location[2] <=", "self.time = None self.purpose = None # rip self.dodge = False def simulate(self,", "intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0 # return intercept intercept.dodge", "direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}') return intercept # Edge", "= analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance - (frame.distance - start_frame.distance) -", "travel', frame.time, frame.distance, frame.speed) fake_car.location += direction * frame.distance fake_car.velocity = direction *", "controls.handbrake = False else: controls.boost = self.boost controls.handbrake = False # Be smart", "steer_toward_target from util.vec import Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math", "# on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall = abs(intercept.location[0]) >=", "# Find optimal spot to hit the ball optimal_hit_vector = normalize(target - intercept_ball_position)", "60.0 # Generate predictions of ball path if ball_predictions is None: ball_predictions =", "ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}') if", "intended to be in air {jump_time}') # print(f'distance travelled in air {jump_time *", "{degrees(direction)}') print(f'ideal 
direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}') return intercept", "else ThrottleAnalysis() while i < max_tries: i += 1 fake_car = Car(car) direction", "<= ball.collision_radius: direction_vector = p - (fake_car.location - normalize(fake_car.forward()) * 13.88) # octane", "ground paths (and walls/cieling are only indirectly supported) # collision_radius = c.hitbox().half_width[2] *", "25 else: horizontal_offset = 25 intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset break", "horizontal_error = direction - ideal_direction # intercept.location = vec3(ball_location) # intercept.time = fake_car.time", "Car): controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location),", "vector # Adjust vertical position if it (still) intersects with ground if fake_car.location[2]", "drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if ball_index", "None: ball_predictions = [] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge", "jump_roll_time = (euler.roll - car_euler.roll) / 5.5 + 0.35 # disregarding angular acceleration", "Now descend the hit direction gradient # Kick off the gradient descent with", "c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if ball_index >= len(ball_predictions): intercept.location =", "rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target from util.vec import Vec3 from util.rlutilities", "jump_height_time # todo revisit rotation time # print('jump_time', jump_time) # Calculate distance to", "c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) #", "the 
ball's NEW position. Guaranteed to converge (typically in <10 iterations) # unless", "break if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept')", "intercept.location = get_car_front_center(fake_car) # Calculate jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or", "moving away from the car faster than the car's max boost speed intercept", "None i = 0 max_tries = 101 analyzer = BoostAnalysis() if intercept.boost else", "# Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None return min_error # warning:", "on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096", "boost = True): self.location = location self.boost = boost self.time = None self.purpose", "iterations') # print(f'desired roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location)", "= vec3(b.location) i = 0 max_tries = 100 analyzer = BoostAnalysis() if intercept.boost", "# print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location) #", "jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo revisit rotation", "= max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset", "= location self.boost = boost self.time = None self.purpose = None # rip", "= {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}') print(f'target", "rotation time # print('jump_time', jump_time) # Calculate distance to drive before jumping (to", "direction * frame.speed fake_car.time 
+= dt ball_location = ball_predictions[t] # Check for collision", "position if it (still) intersects with ground if fake_car.location[2] < 17.0: fake_car.location[2] =", "/ 60.0 # Generate predictions of ball path if ball_predictions is None: ball_predictions", "< max_tries: i += 1 fake_car = Car(car) direction = normalize(intercept.location - car.location)", "seed value if last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset = 0 if", "int(round(arrival_time * 60)) if ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions)", "horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient # Base case (convergence)", "+ normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self, location:", "self.location dt = 1.0 / 60.0 hit = False min_error = None #", "== 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient", "= fake_car.time # return intercept # Now descend the hit direction gradient #", "= {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal direction = {degrees(ideal_direction)}')", "ideal rotation, unless it intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector)", "(horizontal_offset - last_horizontal_offset) if gradient == 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset =", "print(f'time now {car.time}') # print(f'distance until jump {drive_analysis.distance}') # print(f'total distance to target", "= vec3(bot.target) intercept = self.location dt = 1.0 / 60.0 hit = False", "None: last_horizontal_error = horizontal_error last_horizontal_offset = 0 if horizontal_error > 0: 
horizontal_offset =", "= Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved =", "= {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location", "self.dodge = False def simulate(self, bot) -> vec3: # print('simulate intercept') # Init", "jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time + jump_time #", "frame I travel', frame.time, frame.distance, frame.speed) fake_car.location += direction * frame.distance fake_car.velocity =", "# octane center of mass direction_vector[2] = 0 target_direction_vector = target - ball_location", "0 else: gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient", "car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 + 0.35", "jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time #", "= steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls @staticmethod def calculate_old(car: Car, ball:", "calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init vars fake_car", "normalize(fake_car.left()) * horizontal_offset break # Recursive case of gradient descent if horizontal_offset ==", "import to_vec3, rotation_to_euler, closest_point_on_obb from math import pi, atan, atan2, degrees def get_car_front_center(car:", "for t in range(60*5): # Step car location with throttle/boost analysis data #", "that location, # and then aiming at the ball's NEW position. 
Guaranteed to", "convergence in {i} iterations') # print(f'desired roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break", "location by aiming at a location, checking time to that location, # and", "CarState, car: Car): controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location", "- b.location if hit and (min_error == None or norm(error) < norm(min_error)): min_error", "or on_side_wall # or on_cieling # if not reachable: # return None return", "break # Check for arrival if norm(fake_car.location - intercept.location) < ball.collision_radius / 2:", "= None self.purpose = None # rip self.dodge = False def simulate(self, bot)", "= Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2:", "c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit = True # print('hit') #", "reachable: # return None return intercept @staticmethod def calculate(car: Car, ball: Ball, target:", "> 0: horizontal_offset = 25 else: horizontal_offset = 25 intercept.location = ball_location -", "= max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo revisit rotation time", "# todo revisit rotation time # print('jump_time', jump_time) # Calculate distance to drive", "Init vars b = Ball(ball) dt = 1.0 / 60.0 # Generate predictions", "unless the ball is moving away from the car faster than the car's", "- car_euler.yaw) / 5.5 + 0.35 # disregarding angular acceleration # jump_roll_time =", "descending the gradient intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient", "spot to hit the ball optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location", "from math import pi, atan, atan2, degrees def 
get_car_front_center(car: Car): return car.location +", "collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <= ball.collision_radius: direction_vector =", "car location with throttle/boost analysis data # Not super efficient but POITROAE frame", "might be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location", "todo revisit rotation time # print('jump_time', jump_time) # Calculate distance to drive before", "- collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius # # on_cieling", "ball_location break if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating", "= ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration", "time # print('jump_time', jump_time) # Calculate distance to drive before jumping (to arrive", "yet if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit =", "def __init__(self, location: vec3, boost = True): self.location = location self.boost = boost", "# Init vars c = Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target) intercept", "print(f'distance until jump {drive_analysis.distance}') # print(f'total distance to target {total_distance}') # print(f'horiz speed", "+ normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3, boost = True):", "= norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda frame :", "Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math import pi, atan, atan2,", "False controls.handbrake = True elif angle_between(self.location - to_vec3(car_state.physics.location), 
car.forward()) > pi / 4:", "to drive before jumping (to arrive perfectly on target) total_translation = intercept.location -", "of gradient descent if horizontal_offset == last_horizontal_offset: gradient = 0 else: gradient =", "if horizontal_error > 0: horizontal_offset = 25 else: horizontal_offset = 25 intercept.location =", "2: intercept.location = ball_location break if i >= max_tries: print(f'Warning: max tries ({max_tries})", "angular acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 + 0.35 #", "Find ideal rotation, unless it intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0,", "# jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 + 0.35 # disregarding angular", "= 1 return controls @staticmethod def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions", "< max_tries: # Find optimal spot to hit the ball optimal_hit_vector = normalize(target", "euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) #", "SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi", "frame : abs(total_distance - (frame.distance - start_frame.distance) - frame.speed * jump_time) drive_analysis =", "the car's max boost speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost =", "custom_error_func = lambda frame : abs(total_distance - (frame.distance - start_frame.distance) - frame.speed *", "Ball, target: vec3, ball_predictions = None): # Init vars fake_car = Car(car) b", "= - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset #", "ball.collision_radius: direction_vector = p - (fake_car.location - 
normalize(fake_car.forward()) * 13.88) # octane center", "best hit vector # Adjust vertical position if it (still) intersects with ground", "if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1] # intercept.time =", "{i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction", "if horizontal_offset == last_horizontal_offset: gradient = 0 else: gradient = (horizontal_error - last_horizontal_error)", "advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius translation = direction *", "jump {drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance until jump {drive_analysis.distance}') # print(f'total", "frame.speed fake_car.time += dt ball_location = ball_predictions[t] # Check for collision p =", "optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front center directly on", "frame.time, frame.distance, frame.speed) fake_car.location += direction * frame.distance fake_car.velocity = direction * frame.speed", "print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance", "60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1],", "# print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the", "hit = True # print('hit') # Measure dist from target error = t", "Drive towards intercept (moving in direction of c.forward()) c.rotation = look_at(intercept, c.up()) direction", "c.location += c.velocity * dt b.step(dt, c) # Check if we hit the", "rlutilities.linear_algebra import * from analysis.throttle import * 
from analysis.boost import * from analysis.jump", "on top of the best hit vector # Adjust vertical position if it", "rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target from util.vec", "of the best hit vector # Adjust vertical position if it (still) intersects", "ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset", "from util.vec import Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math import", "True # print('hit') # Measure dist from target error = t - b.location", "descent with an arbitrary seed value if last_horizontal_error is None: last_horizontal_error = horizontal_error", "# print('simulate intercept') # Init vars c = Car(bot.game.my_car) b = Ball(bot.game.ball) t", "than the car's max boost speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost", "are only indirectly supported) # collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius +", "b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location) # Simulate the collision and", "controls.boost = self.boost controls.handbrake = False # Be smart about not using boost", "lambda frame : abs(total_distance - (frame.distance - start_frame.distance) - frame.speed * jump_time) drive_analysis", "target_direction_vector[2] = 0 intercept_ball_position = ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1],", "car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t in range(60*5): # Step car location", "Ball, target: vec3, ball_predictions = None): # Init vars b = Ball(ball) dt", "distance {norm(ball_location + vec3(optimal_hit_vector[0], 
optimal_hit_vector[1], 0) - intercept.location)}') if norm(ball_location - intercept_ball_position) <=", "= p - (fake_car.location - normalize(fake_car.forward()) * 13.88) # octane center of mass", "= False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls @staticmethod def", "analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: # Find", "data # Not super efficient but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in", "speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost = False controls.steer", "fake_car.location[2] < 17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump time", "{target}') print(f'ball_location = {ball_location}') return intercept # Edge case exit: offset maxed out", "intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame", "< -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset", "horizontal_error horizontal_offset = predicted_horizontal_offset # Return the latest intercept location and continue descending", "+= sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time,", "jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo revisit rotation time # print('jump_time',", "analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if ball_index >= len(ball_predictions):", "Be smart about not using boost at max speed # if Vec3(car.physics.velocity).length() >", "= 
{degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}') return", "= c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius * 8 # on_ground =", "- horizontal_error / gradient # Base case (convergence) if abs(gradient) < 0.0005: print(f'convergence", "hit the ball yet if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) *", "NEW position. Guaranteed to converge (typically in <10 iterations) # unless the ball", "= True): self.location = location self.boost = boost self.time = None self.purpose =", "normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t in range(60*5): # Step", "the ball optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position -", "ball optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector", "vec3, ball_predictions = None): # Init vars fake_car = Car(car) b = Ball(ball)", "closest_point_on_obb from math import pi, atan, atan2, degrees def get_car_front_center(car: Car): return car.location", "17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump time needed jump_height_time", "max_tries: # Find optimal spot to hit the ball optimal_hit_vector = normalize(target -", "c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location) # Simulate the collision", "None # Drive towards intercept (moving in direction of c.forward()) c.rotation = look_at(intercept,", "= 1.0 / 60.0 # Generate predictions of ball path if ball_predictions is", "= 25 intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset break # Recursive case", "= False controls.handbrake = True elif 
angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi /", "controls.boost = False controls.handbrake = True elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi", "norm(min_error)): min_error = error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None", "False controls.handbrake = False else: controls.boost = self.boost controls.handbrake = False # Be", "case exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset >", "put some super precise trigonometry in here to find the max angle allowed", "intercept.location) # print(f'time until jump {drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance until", "# on_ground = intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120 -", "in air {jump_time}') # print(f'distance travelled in air {jump_time * drive_analysis.speed}') # print(f'distance", "vertical position if it (still) intersects with ground if fake_car.location[2] < 17.0: fake_car.location[2]", "print(f'ideal direction = {degrees(ideal_direction)}') break # Check for arrival if norm(fake_car.location - intercept.location)", "# print(f'Intercept convergence in {i} iterations') # print(f'desired roll {euler.roll}') # print(f'actual roll", "rotation_to_euler(optimal_rotation) # todo put some super precise trigonometry in here to find the", "the collision and resulting for i in range(60*3): c.location += c.velocity * dt", "to that location, # and then aiming at the ball's NEW position. 
Guaranteed", "Ball(ball) # Generate predictions of ball path if ball_predictions is None: ball_predictions =", "euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car)", "not hit: return None return min_error # warning: lazy conversions and variable scope", "vec3(bot.target) intercept = self.location dt = 1.0 / 60.0 hit = False min_error", "rotation_to_euler, closest_point_on_obb from math import pi, atan, atan2, degrees def get_car_front_center(car: Car): return", "= lambda frame : abs(total_distance - (frame.distance - start_frame.distance) - frame.speed * jump_time)", "is None: ball_predictions = [vec3(b.location)] for i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location))", "/ 5.5 + 0.35 # disregarding angular acceleration # jump_time = max(jump_height_time, jump_pitch_time,", "checking time to that location, # and then aiming at the ball's NEW", "to converge (typically in <10 iterations) # unless the ball is moving away", "= analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time)", "paths (and walls/cieling are only indirectly supported) # collision_radius = c.hitbox().half_width[2] * 2", "fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location -", "# Kick off the gradient descent with an arbitrary seed value if last_horizontal_error", "None # rip self.dodge = False def simulate(self, bot) -> vec3: # print('simulate", "i < max_tries: i += 1 fake_car = Car(car) direction = normalize(intercept.location -", "boost at max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost", "# if norm(intercept_ball_position - 
get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1] # intercept.time", "analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I travel', frame.time, frame.distance, frame.speed) fake_car.location +=", "converge (typically in <10 iterations) # unless the ball is moving away from", "== last_horizontal_offset: gradient = 0 else: gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset", "break ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0)", "# return None return intercept @staticmethod def calculate(car: Car, ball: Ball, target: vec3,", "# print(f'distance remaining to target @ jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence", "translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location))", "break intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2] = 0 intercept.time", "predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient # Base", "vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2] = 0 intercept.time = arrival_time i", "= t - b.location if hit and (min_error == None or norm(error) <", "remaining to target @ jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence in {i}", "from analysis.boost import * from analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState from", "* drive_analysis.speed}') # print(f'distance remaining to target @ jump {total_distance - drive_analysis.distance}') #", "= 0 target_direction_vector = target - ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location", "print(f'target = {target}') 
print(f'ball_location = {ball_location}') return intercept # Edge case exit: offset", "target {total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time intended to be", "print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}')", "from analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState from", "intercept.time = len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance", "# Check for arrival if norm(fake_car.location - intercept.location) < ball.collision_radius / 2: intercept.location", "jump_roll_time) jump_time = jump_height_time # todo revisit rotation time # print('jump_time', jump_time) #", "# or on_back_wall or on_side_wall # or on_cieling # if not reachable: #", "Init vars c = Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target) intercept =", "> 0.2 intercept.jump_time = car.time + arrival_time - jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw,", "= 0 max_tries = 101 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while", "= False last_horizontal_error = None last_horizontal_offset = None i = 0 max_tries =", "gradient == 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error /", "predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset", "disregarding angular acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time", "{i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}') if 
norm(ball_location - intercept_ball_position)", "get_controls(self, car_state: CarState, car: Car): controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2])", "in range(60*3): c.location += c.velocity * dt b.step(dt, c) # Check if we", "# print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}') # print(f'time", "(horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient == 0: predicted_horizontal_offset =", "= True intercept_ball_position = vec3(b.location) i = 0 max_tries = 100 analyzer =", "direction = {degrees(ideal_direction)}') break # Check for arrival if norm(fake_car.location - intercept.location) <", "dist from target error = t - b.location if hit and (min_error ==", "horizontal_offset = 25 else: horizontal_offset = 25 intercept.location = ball_location - normalize(fake_car.left()) *", "drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations') # print(f'desired roll {euler.roll}') # print(f'actual", "= direction * frame.speed fake_car.time += dt ball_location = ball_predictions[t] # Check for", "jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until", "- max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset # Return", "intercept.boost = True intercept_ball_position = vec3(b.location) i = 0 max_tries = 100 analyzer", "# Gradually converge on ball location by aiming at a location, checking time", "and variable scope def get_controls(self, car_state: CarState, car: Car): controls = SimpleControllerState() target_Vec3", "if intercept.boost else ThrottleAnalysis() while i < max_tries: # Find optimal 
spot to", "= (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient == 0: predicted_horizontal_offset", "euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the", "ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration {i}", "boost self.time = None self.purpose = None # rip self.dodge = False def", "horizontal_offset = 25 intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset break # Recursive", "- get_car_front_center(fake_car) # try to position the car's front center directly on top", "horizontal_error / gradient # Base case (convergence) if abs(gradient) < 0.0005: print(f'convergence in", "= optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car)", "target_direction_vector[0]) horizontal_error = direction - ideal_direction # intercept.location = vec3(ball_location) # intercept.time =", "<= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location", "8 # on_ground = intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120", "to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost = False controls.handbrake = False else:", "c.forward()) c.rotation = look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept", "= BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: # Find optimal", "Check if we hit the ball yet if norm(b.location - c.location) < (c.hitbox().half_width[0]", "norm(ball_location - 
intercept_ball_position) <= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: #", "b.collision_radius) * 1.05: hit = True # print('hit') # Measure dist from target", "# print(f'time until jump {drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance until jump", "self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost =", "- start_frame.distance) - frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time", "direction * sim_start_state.speed c.location += translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while", "super precise trigonometry in here to find the max angle allowed at given", ">= 4096 - collision_radius # # on_cieling = intercept.location[2] >= 2044 - collision_radius", "Ball from rlutilities.linear_algebra import * from analysis.throttle import * from analysis.boost import *", "c = Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location dt", "{total_distance - drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations') # print(f'desired roll {euler.roll}')", "roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2] =", "None): # Init vars b = Ball(ball) dt = 1.0 / 60.0 #", "intercept') # Intercept is only meant for ground paths (and walls/cieling are only", "the car faster than the car's max boost speed intercept = Intercept(b.location) intercept.purpose", "this might be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}')", "{horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal direction = 
{degrees(ideal_direction)}') break # Check for", "horizontal_offset break # Recursive case of gradient descent if horizontal_offset == last_horizontal_offset: gradient", "- drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations') # print(f'desired roll {euler.roll}') #", "it (still) intersects with ground if fake_car.location[2] < 17.0: fake_car.location[2] = 17.0 intercept.location", "- c.hitbox().half_width[0] - b.collision_radius translation = direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance,", "= {degrees(horizontal_error)}') # print(f'ideal direction = {degrees(ideal_direction)}') break # Check for arrival if", "self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost = False", "- last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient == 0: predicted_horizontal_offset = horizontal_offset", "target_Vec3) controls.throttle = 1 return controls @staticmethod def calculate_old(car: Car, ball: Ball, target:", "break # Recursive case of gradient descent if horizontal_offset == last_horizontal_offset: gradient =", "= {target}') print(f'ball_location = {ball_location}') return intercept # Edge case exit: offset maxed", "{ball_location}') return intercept # Edge case exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1]", "= intercept.location[2] >= 2044 - collision_radius # reachable = on_ground # or on_back_wall", "# Check if we hit the ball yet if norm(b.location - c.location) <", "if ball_predictions is None: ball_predictions = [vec3(b.location)] for i in range(60*5): b.step(1.0 /", "print(c.time, b.time) # print(c.location, b.location) # Simulate the collision and resulting for i", "0 max_tries = 101 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i", "warning: lazy conversions and variable scope def get_controls(self, car_state: CarState, 
car: Car): controls", "(min_error == None or norm(error) < norm(min_error)): min_error = error # Record trajectory", "= self.boost controls.handbrake = False # Be smart about not using boost at", "# drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if", "= ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset =", "the best hit vector # Adjust vertical position if it (still) intersects with", "import * from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive import", "target - ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location direction = atan2(direction_vector[1], direction_vector[0])", "- 10: # controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1", "needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation # car_euler =", "+ 0.35 # disregarding angular acceleration # jump_roll_time = (euler.roll - car_euler.roll) /", "is moving away from the car faster than the car's max boost speed", "tries ({max_tries}) exceeded for calculating intercept') # Intercept is only meant for ground", "norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit = True #", "= normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t in range(60*5): #", "b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location) # Simulate", "allowed at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation =", "= get_car_front_center(fake_car) # Calculate jump time needed jump_height_time = 
JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve", "= atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction #", "direction = normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t in range(60*5):", "acceleration # jump_roll_time = (euler.roll - car_euler.roll) / 5.5 + 0.35 # disregarding", "/ 4: controls.boost = False controls.handbrake = False else: controls.boost = self.boost controls.handbrake", "look_at(direction, fake_car.up()) for t in range(60*5): # Step car location with throttle/boost analysis", "True #jump_time > 0.2 intercept.jump_time = car.time + arrival_time - jump_time intercept.dodge_preorientation =", "for i in range(60*3): c.location += c.velocity * dt b.step(dt, c) # Check", "(frame.distance - start_frame.distance) - frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time =", "arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index =", "self.purpose = None # rip self.dodge = False def simulate(self, bot) -> vec3:", "path if ball_predictions is None: ball_predictions = [] for i in range(60*5): b.step(dt)", "horizontal_offset == last_horizontal_offset: gradient = 0 else: gradient = (horizontal_error - last_horizontal_error) /", "# Be smart about not using boost at max speed # if Vec3(car.physics.velocity).length()", "return intercept # Edge case exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1] +", "Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept():", "# on_cieling = intercept.location[2] >= 2044 - collision_radius # reachable = on_ground 
#", "= Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location dt =", "# intercept.location = vec3(ball_location) # intercept.location[2] = 0 intercept.time = arrival_time i +=", "on_cieling # if not reachable: # return None return intercept @staticmethod def calculate(car:", "# Init vars b = Ball(ball) dt = 1.0 / 60.0 # Generate", "if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') #", "# collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius * 8 #", "exceeded for calculating intercept') # Intercept is only meant for ground paths (and", "car_state: CarState, car: Car): controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if", "jump_time) # Calculate distance to drive before jumping (to arrive perfectly on target)", "* from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target", "= closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <= ball.collision_radius: direction_vector = p -", "of mass direction_vector[2] = 0 target_direction_vector = target - ball_location target_direction_vector[2] = 0", "= intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity))", "= True intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error = None last_horizontal_offset =", "intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index] #", "supported) # collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius * 8", "closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <= ball.collision_radius: direction_vector = 
p - (fake_car.location", "euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location)", "disregarding angular acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 + 0.35", "= ball_location break if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for", "lazy conversions and variable scope def get_controls(self, car_state: CarState, car: Car): controls =", "print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2]", "direction gradient # Kick off the gradient descent with an arbitrary seed value", "2044 - collision_radius # reachable = on_ground # or on_back_wall or on_side_wall #", "gradient # Kick off the gradient descent with an arbitrary seed value if", "# print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location -", "c) # Check if we hit the ball yet if norm(b.location - c.location)", "min_error = error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None return", "# Calculate jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion", "10: # controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return", "max_tries = 101 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i <", "= 0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func =", "angular acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) 
jump_time = jump_height_time #", "dt = 1.0 / 60.0 # Generate predictions of ball path if ball_predictions", "if it (still) intersects with ground if fake_car.location[2] < 17.0: fake_car.location[2] = 17.0", "about not using boost at max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed -", "<= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1]", "pi, atan, atan2, degrees def get_car_front_center(car: Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0]", "iterations) # unless the ball is moving away from the car faster than", "horizontal_offset - horizontal_error / gradient # Base case (convergence) if abs(gradient) < 0.0005:", "print(f'desired roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location", "= False min_error = None # Drive towards intercept (moving in direction of", "if norm(ball_location - intercept_ball_position) <= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100:", "# return intercept # Now descend the hit direction gradient # Kick off", "total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda frame", "{total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time intended to be in", "ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break ball_location", "= 100 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries:", "intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error = None last_horizontal_offset", "vector euler = rotation_to_euler(optimal_rotation) # todo put some super precise 
trigonometry in here", "= [] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball", "print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to", ">= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break ball_location =", "Init vars fake_car = Car(car) b = Ball(ball) # Generate predictions of ball", "+= optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front center directly", "= direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction *", "ball_location) if norm(p - ball_location) <= ball.collision_radius: direction_vector = p - (fake_car.location -", "Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved = False", "= euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position", "sim_start_state.speed c.location += translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time <", "euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's", "or norm(error) < norm(min_error)): min_error = error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if not", "= vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2] = 0 intercept.time = arrival_time", "then aiming at the ball's NEW position. 
Guaranteed to converge (typically in <10", "- ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction", "{gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal direction = {degrees(ideal_direction)}') break", "= Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t", "# Measure dist from target error = t - b.location if hit and", "the max angle allowed at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch =", "norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1]", "# on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius # # on_cieling = intercept.location[2]", "jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 + 0.35 # disregarding angular acceleration", "normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error =", "0.2 intercept.jump_time = car.time + arrival_time - jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll))", "intercept_ball_position = ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error =", "i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') # Intercept", "util.vec import Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math import pi,", "direction_vector = p - (fake_car.location - normalize(fake_car.forward()) * 13.88) # octane center of", "euler.roll)) fake_car.location += 
optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front", "- b.collision_radius translation = direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity", "p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <= ball.collision_radius: direction_vector = p", "@ jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations') # print(f'desired", "print('jump_time', jump_time) # Calculate distance to drive before jumping (to arrive perfectly on", "vec3: # print('simulate intercept') # Init vars c = Car(bot.game.my_car) b = Ball(bot.game.ball)", "if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit = True", "= normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find", "# print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time intended to be in air", "from rlutilities.simulation import Car, Ball from rlutilities.linear_algebra import * from analysis.throttle import *", "get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index]", "predictions of ball path if ball_predictions is None: ball_predictions = [vec3(b.location)] for i", "= analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I travel', frame.time, frame.distance, frame.speed) fake_car.location", "# Recursive case of gradient descent if horizontal_offset == last_horizontal_offset: gradient = 0", "location self.boost = boost self.time = None self.purpose = None # rip self.dodge", "else: controls.boost = self.boost controls.handbrake = False # Be smart about not using", "(still) intersects with ground if fake_car.location[2] < 17.0: 
fake_car.location[2] = 17.0 intercept.location =", "and continue descending the gradient intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration", "import Car, Ball from rlutilities.linear_algebra import * from analysis.throttle import * from analysis.boost", "# print('jump_time', jump_time) # Calculate distance to drive before jumping (to arrive perfectly", "unless it intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this", "the ball yet if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05:", "intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved", "* frame.distance fake_car.velocity = direction * frame.speed fake_car.time += dt ball_location = ball_predictions[t]", "self.boost_analysis.frames[-1].speed - 10: # controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle =", "jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation #", "collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall = abs(intercept.location[0])", "= 0 max_tries = 100 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while", "self.boost controls.handbrake = False # Be smart about not using boost at max", "= ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}')", "POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I travel', frame.time, frame.distance,", "if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost = False controls.handbrake", "Intercept(): def 
__init__(self, location: vec3, boost = True): self.location = location self.boost =", "# print(f'desired roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) #", "len(ball_predictions) / 60.0 # return intercept intercept.dodge = True #jump_time > 0.2 intercept.jump_time", "with throttle/boost analysis data # Not super efficient but POITROAE frame = analyzer.travel_time(dt,", "if norm(p - ball_location) <= ball.collision_radius: direction_vector = p - (fake_car.location - normalize(fake_car.forward())", "exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset:", "collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius * 8 # on_ground", "angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost = False controls.handbrake =", "jump {drive_analysis.speed}') # print(f'time intended to be in air {jump_time}') # print(f'distance travelled", "= intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius #", "Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3)", "fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location -", "or on_back_wall or on_side_wall # or on_cieling # if not reachable: # return", "4096 - collision_radius # # on_cieling = intercept.location[2] >= 2044 - collision_radius #", "print(f'total distance to target {total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time", "ball_index = int(round(arrival_time * 60)) if ball_index >= len(ball_predictions): 
intercept.location = ball_predictions[-1] intercept.time", "hit vector # Adjust vertical position if it (still) intersects with ground if", "1 return controls @staticmethod def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions =", "ball_predictions = [] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on", "analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: i +=", "= Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location dt = 1.0 / 60.0", "horizontal_error > 0: horizontal_offset = 25 else: horizontal_offset = 25 intercept.location = ball_location", "optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector #", "before jumping (to arrive perfectly on target) total_translation = intercept.location - get_car_front_center(car) total_translation[2]", "= None): # Init vars b = Ball(ball) dt = 1.0 / 60.0", "1 fake_car = Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up())", "now {car.time}') # print(f'distance until jump {drive_analysis.distance}') # print(f'total distance to target {total_distance}')", "for calculating intercept') # Intercept is only meant for ground paths (and walls/cieling", "mass direction_vector[2] = 0 target_direction_vector = target - ball_location target_direction_vector[2] = 0 intercept_ball_position", "False else: controls.boost = self.boost controls.handbrake = False # Be smart about not", "* car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3, boost", "intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until jump", "print(f'iteration {i}') 
print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal", "norm(fake_car.velocity)) # print('in 1 frame I travel', frame.time, frame.distance, frame.speed) fake_car.location += direction", "= arrival_time i += 1 if i >= max_tries: print(f'Warning: max tries ({max_tries})", "from target error = t - b.location if hit and (min_error == None", "best hit vector euler = rotation_to_euler(optimal_rotation) # todo put some super precise trigonometry", "if fake_car.location[2] < 17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump", "air {jump_time * drive_analysis.speed}') # print(f'distance remaining to target @ jump {total_distance -", "b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at a location,", "and (min_error == None or norm(error) < norm(min_error)): min_error = error # Record", "ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0 # return intercept intercept.dodge = True", "# Adjust vertical position if it (still) intersects with ground if fake_car.location[2] <", "* dt b.step(dt, c) # Check if we hit the ball yet if", "variable scope def get_controls(self, car_state: CarState, car: Car): controls = SimpleControllerState() target_Vec3 =", "* 1.05: hit = True # print('hit') # Measure dist from target error", "= {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # print(f'ideal direction = {degrees(ideal_direction)}') break # Check", "# Step car location with throttle/boost analysis data # Not super efficient but", "a location, checking time to that location, # and then aiming at the", "predicted_horizontal_offset # Return the latest intercept location and continue descending the gradient intercept.location", "= error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None return 
min_error", "Simulate the collision and resulting for i in range(60*3): c.location += c.velocity *", "we hit the ball yet if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius)", "# print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}') # print(f'time now {car.time}') #", "intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) i = 0 max_tries", "return min_error # warning: lazy conversions and variable scope def get_controls(self, car_state: CarState,", "norm(fake_car.location - intercept.location) < ball.collision_radius / 2: intercept.location = ball_location break if i", "collision_achieved = False last_horizontal_error = None last_horizontal_offset = None i = 0 max_tries", "ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location += translation c.time", "{car.time}') # print(f'distance until jump {drive_analysis.distance}') # print(f'total distance to target {total_distance}') #", "None last_horizontal_offset = None i = 0 max_tries = 101 analyzer = BoostAnalysis()", "iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction =", "# or solve with motion equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time =", "norm(error) < norm(min_error)): min_error = error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit:", "jump {drive_analysis.distance}') # print(f'total distance to target {total_distance}') # print(f'horiz speed @ jump", "I travel', frame.time, frame.distance, frame.speed) fake_car.location += direction * frame.distance fake_car.velocity = direction", "Kick off the gradient descent with an arbitrary seed value if last_horizontal_error is", "- car_euler.roll) / 5.5 + 0.35 # disregarding angular acceleration 
# jump_time =", "= (euler.pitch - car_euler.pitch) / 5.5 + 0.35 # disregarding angular acceleration #", "(fake_car.location - normalize(fake_car.forward()) * 13.88) # octane center of mass direction_vector[2] = 0", "resulting for i in range(60*3): c.location += c.velocity * dt b.step(dt, c) #", "fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front center", "atan, atan2, degrees def get_car_front_center(car: Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0] +", "start_index) arrival_time = drive_analysis.time - start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis',", "bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location) # Simulate the collision and resulting", "jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation # car_euler = rotation_to_euler(car.rotation)", "at the ball's NEW position. 
Guaranteed to converge (typically in <10 iterations) #", "throttle/boost analysis data # Not super efficient but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity))", "# print(f'total distance to target {total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}') #", "> max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset", "intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset break # Recursive case of gradient", "({max_tries}) exceeded for calculating intercept') # Intercept is only meant for ground paths", "atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction # intercept.location = vec3(ball_location) # intercept.time", "{norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}') if norm(ball_location - intercept_ball_position) <= 1:", "p - (fake_car.location - normalize(fake_car.forward()) * 13.88) # octane center of mass direction_vector[2]", "else ThrottleAnalysis() while i < max_tries: # Find optimal spot to hit the", "is None: ball_predictions = [] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually", "+ 0.35 # disregarding angular acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw) /", "= 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car) #", "or solve with motion equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch", "top of the best hit vector euler = rotation_to_euler(optimal_rotation) # todo put some", "degrees def get_car_front_center(car: Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) *", "- car_euler.pitch) / 5.5 + 0.35 # 
disregarding angular acceleration # jump_yaw_time =", "- collision_radius # reachable = on_ground # or on_back_wall or on_side_wall # or", "an arbitrary seed value if last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset =", "intercept_ball_position - optimal_hit_vector # Find ideal rotation, unless it intersects with ground optimal_rotation", "euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location',", "13.88) # octane center of mass direction_vector[2] = 0 target_direction_vector = target -", "vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}') if norm(ball_location - intercept_ball_position) <= 1: # if", "<= collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall =", "# Intercept is only meant for ground paths (and walls/cieling are only indirectly", "ball.collision_radius / 2: intercept.location = ball_location break if i >= max_tries: print(f'Warning: max", "if we hit the ball yet if norm(b.location - c.location) < (c.hitbox().half_width[0] +", "def simulate(self, bot) -> vec3: # print('simulate intercept') # Init vars c =", "intercept (moving in direction of c.forward()) c.rotation = look_at(intercept, c.up()) direction = normalize(intercept", "= car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset", "Calculate jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation", "t - b.location if hit and (min_error == None or norm(error) < norm(min_error)):", "math import pi, atan, atan2, degrees def get_car_front_center(car: Car): return car.location + normalize(car.forward())", 
"BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location += translation c.time += sim_start_state.time", "calculating intercept') # Intercept is only meant for ground paths (and walls/cieling are", "2 + b.collision_radius + b.collision_radius * 8 # on_ground = intercept.location[2] <= collision_radius", "boost speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position =", "Adjust vertical position if it (still) intersects with ground if fake_car.location[2] < 17.0:", "while i < max_tries: # Find optimal spot to hit the ball optimal_hit_vector", "disregarding angular acceleration # jump_roll_time = (euler.roll - car_euler.roll) / 5.5 + 0.35", "ball's NEW position. Guaranteed to converge (typically in <10 iterations) # unless the", "0.35 # disregarding angular acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5", "case (convergence) if abs(gradient) < 0.0005: print(f'convergence in {i} iterations') print(f'gradient = {gradient}')", "= BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: i += 1", "optimal spot to hit the ball optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius", "total_translation = intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation) start_index =", "norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance", "fake_car = Car(car) b = Ball(ball) # Generate predictions of ball path if", "(typically in <10 iterations) # unless the ball is moving away from the", "ball: Ball, target: vec3, ball_predictions = None): # Init vars fake_car = Car(car)", "* 13.88) # octane center of mass direction_vector[2] = 0 target_direction_vector = target", "print(f'horizontal_error = 
{degrees(horizontal_error)}') # print(f'ideal direction = {degrees(ideal_direction)}') break # Check for arrival", "walls/cieling are only indirectly supported) # collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius", "gradient = 0 else: gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset)", "norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location += translation c.time += sim_start_state.time bot.ball_predictions", "return None return intercept @staticmethod def calculate(car: Car, ball: Ball, target: vec3, ball_predictions", "ideal_direction # intercept.location = vec3(ball_location) # intercept.time = fake_car.time # return intercept #", "last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset # Return the latest intercept location and", "abs(intercept.location[0]) >= 4096 - collision_radius # # on_cieling = intercept.location[2] >= 2044 -", "at max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost =", "= {degrees(ideal_direction)}') break # Check for arrival if norm(fake_car.location - intercept.location) < ball.collision_radius", "car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3, boost = True): self.location = location", "# print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try", "0 intercept.time = arrival_time i += 1 if i >= max_tries: print(f'Warning: max", "ball path if ball_predictions is None: ball_predictions = [] for i in range(60*5):", "print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis =", "None self.purpose = None # rip self.dodge = False def simulate(self, bot) 
->", "max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif", "acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 + 0.35 # disregarding", "intercept # Edge case exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius", "# Generate predictions of ball path if ball_predictions is None: ball_predictions = [vec3(b.location)]", "print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's", "time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation # car_euler", "util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math import pi, atan, atan2, degrees def", "0 if horizontal_error > 0: horizontal_offset = 25 else: horizontal_offset = 25 intercept.location", "optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) #", "target_direction_vector = target - ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location direction =", "optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find ideal rotation, unless it intersects with", "if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location", "angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost = False controls.handbrake =", "drive_analysis.speed}') # print(f'distance remaining to target @ jump {total_distance - drive_analysis.distance}') # print(f'Intercept", "5.5 + 
0.35 # disregarding angular acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw)", "direction_vector[2] = 0 target_direction_vector = target - ball_location target_direction_vector[2] = 0 intercept_ball_position =", "= horizontal_error last_horizontal_offset = 0 if horizontal_error > 0: horizontal_offset = 25 else:", "# if not reachable: # return None return intercept @staticmethod def calculate(car: Car,", "True intercept_ball_position = vec3(b.location) i = 0 max_tries = 100 analyzer = BoostAnalysis()", "path if ball_predictions is None: ball_predictions = [vec3(b.location)] for i in range(60*5): b.step(1.0", "Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost", "intercept @staticmethod def calculate(car: Car, ball: Ball, target: vec3, ball_predictions = None): #", "None or norm(error) < norm(min_error)): min_error = error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if", "(euler.pitch - car_euler.pitch) / 5.5 + 0.35 # disregarding angular acceleration # jump_yaw_time", "car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self,", "import pi, atan, atan2, degrees def get_car_front_center(car: Car): return car.location + normalize(car.forward()) *", "i in range(60*3): c.location += c.velocity * dt b.step(dt, c) # Check if", "i += 1 if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for", "* horizontal_offset break # Recursive case of gradient descent if horizontal_offset == last_horizontal_offset:", "[vec3(b.location)] while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location)", "direction - ideal_direction # intercept.location = vec3(ball_location) # intercept.time = fake_car.time # return", "Check for collision p = 
closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <= ball.collision_radius:", "target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi /", "# disregarding angular acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time =", ">= 5120 - collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius #", "get_car_front_center(fake_car) # Calculate jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with", "super efficient but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I", "+ b.collision_radius * 8 # on_ground = intercept.location[2] <= collision_radius # on_back_wall =", "/ 2: intercept.location = ball_location break if i >= max_tries: print(f'Warning: max tries", "max_tries: i += 1 fake_car = Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation", "max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost = False", "distance to target {total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time intended", "else: gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient ==", "0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}')", "last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset # Return the latest", "Edge case exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset", "intercept.jump_time = car.time + arrival_time - jump_time 
intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay", "car: Car): controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location -", "else: predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient # Base case (convergence) if", "- frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time", "range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at a", "(convergence) if abs(gradient) < 0.0005: print(f'convergence in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset", "= 0 else: gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if", "SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target from util.vec import Vec3", "{fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position", "+ b.collision_radius + b.collision_radius * 8 # on_ground = intercept.location[2] <= collision_radius #", "last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient == 0: predicted_horizontal_offset = horizontal_offset else:", "wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location", "ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction -", "print('hit') # Measure dist from target error = t - b.location if hit", 
"vars c = Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location", "normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3, boost = True): self.location", "car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3, boost =", "efficient but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I travel',", "location with throttle/boost analysis data # Not super efficient but POITROAE frame =", ">= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') # Intercept is", "= False # Be smart about not using boost at max speed #", "drive before jumping (to arrive perfectly on target) total_translation = intercept.location - get_car_front_center(car)", "= vec3(ball_location) # intercept.time = fake_car.time # return intercept # Now descend the", "= False else: controls.boost = self.boost controls.handbrake = False # Be smart about", "gradient # Base case (convergence) if abs(gradient) < 0.0005: print(f'convergence in {i} iterations')", "latest intercept location and continue descending the gradient intercept.location = ball_location - normalize(fake_car.left())", "= abs(intercept.location[0]) >= 4096 - collision_radius # # on_cieling = intercept.location[2] >= 2044", "> pi / 2: controls.boost = False controls.handbrake = True elif angle_between(self.location -", "import * from analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import", "< 17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump time needed", "frame.speed) fake_car.location += direction * frame.distance fake_car.velocity = direction * frame.speed fake_car.time +=", "predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif 
predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = -", "+ b.collision_radius) * 1.05: hit = True # print('hit') # Measure dist from", ": abs(total_distance - (frame.distance - start_frame.distance) - frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func,", "= vec3(ball_location) # intercept.location[2] = 0 intercept.time = arrival_time i += 1 if", "> pi / 4: controls.boost = False controls.handbrake = False else: controls.boost =", "- normalize(fake_car.forward()) * 13.88) # octane center of mass direction_vector[2] = 0 target_direction_vector", "= (euler.roll - car_euler.roll) / 5.5 + 0.35 # disregarding angular acceleration #", "= {ball_location}') return intercept # Edge case exit: offset maxed out max_horizontal_offset =", "analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time) #", "fake_car.time # return intercept # Now descend the hit direction gradient # Kick", "BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: # Find optimal spot", "on target) total_translation = intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation)", "* sim_start_state.speed c.location += translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time", "0.35 # disregarding angular acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time", "False last_horizontal_error = None last_horizontal_offset = None i = 0 max_tries = 101", "the best hit vector euler = rotation_to_euler(optimal_rotation) # todo put some super precise", "# intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0 # return intercept", "fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump 
time needed jump_height_time =", "max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo revisit rotation time #", "(moving in direction of c.forward()) c.rotation = look_at(intercept, c.up()) direction = normalize(intercept -", "Measure dist from target error = t - b.location if hit and (min_error", "scope def get_controls(self, car_state: CarState, car: Car): controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0],", "ball path if ball_predictions is None: ball_predictions = [vec3(b.location)] for i in range(60*5):", "self.boost = boost self.time = None self.purpose = None # rip self.dodge =", "ball_predictions is None: ball_predictions = [] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) #", "4: controls.boost = False controls.handbrake = False else: controls.boost = self.boost controls.handbrake =", "look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept - c.location) -", "{degrees(horizontal_error)}') # print(f'ideal direction = {degrees(ideal_direction)}') break # Check for arrival if norm(fake_car.location", "to target {total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time intended to", "0 target_direction_vector = target - ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location direction", "drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time + jump_time # print('drive_analysis.time',", "print('simulate intercept') # Init vars c = Car(bot.game.my_car) b = Ball(bot.game.ball) t =", "intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) i", "rip self.dodge = False def simulate(self, bot) -> vec3: # print('simulate intercept') #", "= 0 intercept_ball_position = ball_location direction = 
atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0])", "and resulting for i in range(60*3): c.location += c.velocity * dt b.step(dt, c)", "car.forward()) > pi / 4: controls.boost = False controls.handbrake = False else: controls.boost", "= look_at(direction, fake_car.up()) for t in range(60*5): # Step car location with throttle/boost", "arrival_time - jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction =", "# print(f'distance travelled in air {jump_time * drive_analysis.speed}') # print(f'distance remaining to target", "translation = direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction", "+= dt ball_location = ball_predictions[t] # Check for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location)", "last_horizontal_offset: gradient = 0 else: gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset -", "the hit direction gradient # Kick off the gradient descent with an arbitrary", "b = Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location dt = 1.0 /", "print(f'time until jump {drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance until jump {drive_analysis.distance}')", "from util.drive import steer_toward_target from util.vec import Vec3 from util.rlutilities import to_vec3, rotation_to_euler,", "analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance - (frame.distance - start_frame.distance) - frame.speed", "dt ball_location = ball_predictions[t] # Check for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if", "# warning: lazy conversions and variable scope def get_controls(self, car_state: CarState, car: Car):", "is None: last_horizontal_error = horizontal_error 
last_horizontal_offset = 0 if horizontal_error > 0: horizontal_offset", "{gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}') print(f'target =", "# disregarding angular acceleration # jump_roll_time = (euler.roll - car_euler.roll) / 5.5 +", "# print(f'time now {car.time}') # print(f'distance until jump {drive_analysis.distance}') # print(f'total distance to", "# this might be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car)", "total_translation[2] = 0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func", "import * from analysis.throttle import * from analysis.boost import * from analysis.jump import", "Car(car) b = Ball(ball) # Generate predictions of ball path if ball_predictions is", "t = vec3(bot.target) intercept = self.location dt = 1.0 / 60.0 hit =", "drive_analysis.time) # print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location", "= JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation # car_euler = rotation_to_euler(car.rotation) #", "import Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math import pi, atan,", "= car.time + arrival_time - jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay =", "fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to", "intercept_ball_position) <= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: # 
intercept.location =", "intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset", "= True # print('hit') # Measure dist from target error = t -", "def calculate(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init vars", "b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at", "horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset # Return the latest intercept location", "+= direction * frame.distance fake_car.velocity = direction * frame.speed fake_car.time += dt ball_location", "= horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset # Return the latest intercept", "direction = normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] -", "-max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset =", "away from the car faster than the car's max boost speed intercept =", "60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at a location,", "'ball' intercept.boost = True intercept_ball_position = vec3(b.location) i = 0 max_tries = 100", "hit vector euler = rotation_to_euler(optimal_rotation) # todo put some super precise trigonometry in", "Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None return min_error # warning: lazy", "- last_horizontal_offset) if gradient == 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset", "not using boost at max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10:", "+ arrival_time - 
jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction", "60)) if ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0", "car_euler.roll) / 5.5 + 0.35 # disregarding angular acceleration # jump_time = max(jump_height_time,", "be in air {jump_time}') # print(f'distance travelled in air {jump_time * drive_analysis.speed}') #", "value if last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset = 0 if horizontal_error", "intercept = self.location dt = 1.0 / 60.0 hit = False min_error =", "c.rotation = look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept -", "print(f'ball_location = {ball_location}') return intercept # Edge case exit: offset maxed out max_horizontal_offset", "1.05: hit = True # print('hit') # Measure dist from target error =", "car_euler.yaw) / 5.5 + 0.35 # disregarding angular acceleration # jump_roll_time = (euler.roll", "speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location)", "optimal_hit_vector # Find ideal rotation, unless it intersects with ground optimal_rotation = look_at(optimal_hit_vector,", "= 0 if horizontal_error > 0: horizontal_offset = 25 else: horizontal_offset = 25", "c.location)#c.forward() advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius translation = direction", "= on_ground # or on_back_wall or on_side_wall # or on_cieling # if not", "* predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}')", "Step car location with throttle/boost analysis data # Not super efficient but POITROAE", "offset maxed 
out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset", "- (fake_car.location - normalize(fake_car.forward()) * 13.88) # octane center of mass direction_vector[2] =", "if not hit: return None return min_error # warning: lazy conversions and variable", "at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch,", "= False def simulate(self, bot) -> vec3: # print('simulate intercept') # Init vars", "= 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error =", "= ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0 # return intercept intercept.dodge =", "collision_radius # # on_cieling = intercept.location[2] >= 2044 - collision_radius # reachable =", "0 max_tries = 100 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i", "- c.location)#c.forward() advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius translation =", "horizontal_error last_horizontal_offset = 0 if horizontal_error > 0: horizontal_offset = 25 else: horizontal_offset", "import CarState from util.drive import steer_toward_target from util.vec import Vec3 from util.rlutilities import", "# Simulate the collision and resulting for i in range(60*3): c.location += c.velocity", "Generate predictions of ball path if ball_predictions is None: ball_predictions = [] for", "Car, ball: Ball, target: vec3, ball_predictions = None): # Init vars fake_car =", "= rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 + 0.35 #", "@ jump {drive_analysis.speed}') # print(f'time intended to be in air {jump_time}') # print(f'distance", "intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2] 
= 0 intercept.time =", "fake_car.up()) for t in range(60*5): # Step car location with throttle/boost analysis data", "frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time +", "< c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location) # Simulate the", "c.location) - c.hitbox().half_width[0] - b.collision_radius translation = direction * advance_distance sim_start_state: ThrottleFrame =", "on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius # # on_cieling = intercept.location[2] >=", "ball yet if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit", "atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction # intercept.location", "to_vec3, rotation_to_euler, closest_point_on_obb from math import pi, atan, atan2, degrees def get_car_front_center(car: Car):", "= [vec3(b.location)] while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location,", "in <10 iterations) # unless the ball is moving away from the car", "vars fake_car = Car(car) b = Ball(ball) # Generate predictions of ball path", "1 frame I travel', frame.time, frame.distance, frame.speed) fake_car.location += direction * frame.distance fake_car.velocity", "target: vec3, ball_predictions = None): # Init vars fake_car = Car(car) b =", "# print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis", "= direction - ideal_direction # intercept.location = vec3(ball_location) # intercept.time = fake_car.time #", "intercept.location[2] >= 2044 - collision_radius # reachable = on_ground # or 
on_back_wall or", "print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time intended to be in air {jump_time}')", "target @ jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations') #", "# Not super efficient but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1", "normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3,", "= abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096 -", "height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll))", "# print('in 1 frame I travel', frame.time, frame.distance, frame.speed) fake_car.location += direction *", "horizontal_offset = predicted_horizontal_offset # Return the latest intercept location and continue descending the", "print(f'direction = {degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}')", "steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls @staticmethod def calculate_old(car: Car, ball: Ball,", "BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: i += 1 fake_car", "in range(60*5): # Step car location with throttle/boost analysis data # Not super", "only meant for ground paths (and walls/cieling are only indirectly supported) # collision_radius", "pi / 4: controls.boost = False controls.handbrake = False else: controls.boost = self.boost", "# Calculate distance to drive before jumping (to arrive perfectly on target) total_translation", "in air {jump_time * drive_analysis.speed}') # print(f'distance remaining to target @ jump {total_distance", "intercept location and continue descending the gradient intercept.location = 
ball_location - normalize(fake_car.left()) *", "descend the hit direction gradient # Kick off the gradient descent with an", "__init__(self, location: vec3, boost = True): self.location = location self.boost = boost self.time", "- normalize(fake_car.left()) * horizontal_offset break # Recursive case of gradient descent if horizontal_offset", "b.collision_radius + b.collision_radius * 8 # on_ground = intercept.location[2] <= collision_radius # on_back_wall", "i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location", "Base case (convergence) if abs(gradient) < 0.0005: print(f'convergence in {i} iterations') print(f'gradient =", "< (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit = True # print('hit') # Measure", "intercept.time = len(ball_predictions) / 60.0 # return intercept intercept.dodge = True #jump_time >", "position the car's front center directly on top of the best hit vector", "#jump_time > 0.2 intercept.jump_time = car.time + arrival_time - jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch,", "last_horizontal_error = horizontal_error last_horizontal_offset = 0 if horizontal_error > 0: horizontal_offset = 25", "max boost speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position", "Intercept is only meant for ground paths (and walls/cieling are only indirectly supported)", "return controls @staticmethod def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions = None):", "False # Be smart about not using boost at max speed # if", "aiming at the ball's NEW position. 
Guaranteed to converge (typically in <10 iterations)", "abs(total_distance - (frame.distance - start_frame.distance) - frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index)", "= Car(car) b = Ball(ball) # Generate predictions of ball path if ball_predictions", "/ 60.0 # return intercept intercept.dodge = True #jump_time > 0.2 intercept.jump_time =", "intercept intercept.dodge = True #jump_time > 0.2 intercept.jump_time = car.time + arrival_time -", "last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset = 0 if horizontal_error > 0:", "+= 1 if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating", "front center directly on top of the best hit vector euler = rotation_to_euler(optimal_rotation)", "c.hitbox().half_width[0] - b.collision_radius translation = direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity))", "0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient #", "- collision_radius # # on_cieling = intercept.location[2] >= 2044 - collision_radius # reachable", "/ gradient # Base case (convergence) if abs(gradient) < 0.0005: print(f'convergence in {i}", "intercept # Now descend the hit direction gradient # Kick off the gradient", "vec3, ball_predictions = None): # Init vars b = Ball(ball) dt = 1.0", "- normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error", "t in range(60*5): # Step car location with throttle/boost analysis data # Not", "-> vec3: # print('simulate intercept') # Init vars c = Car(bot.game.my_car) b =", "* 8 # on_ground = intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1]) >=", "Car, ball: Ball, target: vec3, 
ball_predictions = None): # Init vars b =", "import steer_toward_target from util.vec import Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from", "intercept_ball_position = vec3(b.location) i = 0 max_tries = 100 analyzer = BoostAnalysis() if", "True): self.location = location self.boost = boost self.time = None self.purpose = None", "if not reachable: # return None return intercept @staticmethod def calculate(car: Car, ball:", "range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming", "intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall", "last_horizontal_offset = 0 if horizontal_error > 0: horizontal_offset = 25 else: horizontal_offset =", "analysis.boost import * from analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util", "{i} iterations') # print(f'desired roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position =", "- get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions) /", "while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location) #", "* from analysis.throttle import * from analysis.boost import * from analysis.jump import *", "/ 5.5 + 0.35 # disregarding angular acceleration # jump_yaw_time = (euler.yaw -", "atan2, degrees def get_car_front_center(car: Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up())", ">= 2044 - collision_radius # reachable = on_ground # or on_back_wall or on_side_wall", "# Generate predictions of ball path if ball_predictions is None: ball_predictions = []", "def get_controls(self, car_state: CarState, car: Car): controls = SimpleControllerState() 
target_Vec3 = Vec3(self.location[0], self.location[1],", "ball_predictions = None): # Init vars fake_car = Car(car) b = Ball(ball) #", "intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be", "intercept.location = vec3(ball_location) # intercept.location[2] = 0 intercept.time = arrival_time i += 1" ]
[ "GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" +", "ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE,", "\"0\" DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint)", "= \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver", "\" + TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data =", "] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02,", "import time import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE = \"/dev/hc_sr040\"", "<reponame>Kyokko-OB-Team/deviceDriver_HC-SR04<gh_stars>0 #!/usr/bin/env python3 import ctypes import fcntl import os import time import linux", "DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE", "= [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE,", "#!/usr/bin/env python3 import ctypes import fcntl import os import time import linux TEST_HCSR04_MAJ_VER", "linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER +", "0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = 
linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\"", "= os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value = 0 data.status = 0 fcntl.ioctl(fd,", "DATA() data.value = 0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE,", "ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\" +", "time import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE = \"/dev/hc_sr040\" class", "0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE,", "\"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [ (\"value\",", "= 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get distance: \" +", "0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get distance: \" + str(data.value)", "+ TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data = DATA()", "import ctypes import fcntl import os import time import linux TEST_HCSR04_MAJ_VER = \"1\"", "(\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE =", "GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get distance: \" + str(data.value) + \"mm\")", "import fcntl import os import time import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER =", "0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get 
distance:", "ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR)", "linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd =", "(\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA))", "fd = os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value = 0 data.status = 0", "data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get distance: \"", "DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ]", "python3 import ctypes import fcntl import os import time import linux TEST_HCSR04_MAJ_VER =", "GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER)", "data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get distance: \" + str(data.value) + \"mm\") os.close(fd)", "TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value = 0 data.status =", "import os import time import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE", "data = DATA() data.value = 0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1)", "os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value = 0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE,", "+ TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value = 0 data.status", "\"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE =", "TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\" 
DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ =", "import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure):", "[ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01,", "class DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\"", "TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value", "= \"0\" DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint), (\"status\",", "= DATA() data.value = 0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd,", "ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE", "+ \".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value =", "data.value = 0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data)", "linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_", "= linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd", "os import time import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE =", "_fields_ = [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE =", "= \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint), (\"status\", ctypes.c_uint) ] GPIO_HCSR04_IOC_TYPE", "os.O_RDWR) 
data = DATA() data.value = 0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data)", "fcntl import os import time import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER = \"0\"", "GPIO_HCSR04_IOC_TYPE = \"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA))", "= \"1\" TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [", "\".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data = DATA() data.value = 0", "fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get distance: \" + str(data.value) +", "= 0 data.status = 0 fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data) time.sleep(1) fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data) print(\"Get", "print(\"ver \" + TEST_HCSR04_MAJ_VER + \".\" + TEST_HCSR04_MIN_VER) fd = os.open(DEVICE_FILE, os.O_RDWR) data", "= linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \" + TEST_HCSR04_MAJ_VER", "\"S\" GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA)) GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA)) print(\"ver \"", "TEST_HCSR04_MIN_VER = \"0\" DEVICE_FILE = \"/dev/hc_sr040\" class DATA(ctypes.Structure): _fields_ = [ (\"value\", ctypes.c_uint),", "ctypes import fcntl import os import time import linux TEST_HCSR04_MAJ_VER = \"1\" TEST_HCSR04_MIN_VER" ]
[ "django.test import TestCase from .. import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration", "class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration = factories.SiteConfigurationFactory() def test__str__(self): assert self.site_configuration.__str__() == self.site_configuration.short_description", "import TestCase from .. import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration =", "TestCase from .. import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration = factories.SiteConfigurationFactory()", ".. import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration = factories.SiteConfigurationFactory() def test__str__(self):", "import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration = factories.SiteConfigurationFactory() def test__str__(self): assert", "pytest from django.test import TestCase from .. import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def", "@pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration = factories.SiteConfigurationFactory() def test__str__(self): assert self.site_configuration.__str__() ==", "from .. import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration = factories.SiteConfigurationFactory() def", "import pytest from django.test import TestCase from .. 
import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase):", "factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self): self.site_configuration = factories.SiteConfigurationFactory() def test__str__(self): assert self.site_configuration.__str__()", "from django.test import TestCase from .. import factories @pytest.mark.django_db class SiteConfigurationTest(TestCase): def setUp(self):" ]
[ "ui.forecast.forecast cnt = len(forecast) for i1 in range(cnt): if i1 > 0: x1", "import const from config import temp class UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs,", "/ cnt temp_max = -273.0 for i1 in range(cnt): weather = forecast[i1] temp_max", "= forecast[i1] temp_max = max(weather.temp, weather.feel, temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space", "__init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min = 273.0 def draw(self, ui, d): #", "self.chart_max = self.dim.y - chart_space self.k_temp = (self.chart_max - chart_min) / (temp_max -", "c, w) if (th is None) or (f1 > th) or (f2 >", "super().__init__(ofs, dim) self.temp_min = 273.0 def draw(self, ui, d): # Pre-calculates some range", "import UiFrame, Vect, BLACK, WHITE, YELLOW from micropython import const from config import", "// 2) self.chart_max = self.dim.y - chart_space self.k_temp = (self.chart_max - chart_min) /", "= (self.chart_max - chart_min) / (temp_max - self.temp_min) # Draw charts self.chart_draw(ui, 3,", "temp class UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min = 273.0 def", "v1 = Vect(x1, self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if", "> th) or (f1 < tl) or (f2 < tl): v1 = Vect(x1,", "= const(chart_space // 2) self.chart_max = self.dim.y - chart_space self.k_temp = (self.chart_max -", "or (f2 > th) or (f1 < tl) or (f2 < tl): v1", "import temp class UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min = 273.0", "min(weather.temp, weather.feel, self.temp_min) chart_space = const(30) chart_min = const(chart_space // 2) self.chart_max =", "None) or (f1 > th) or (f2 > th) or (f1 < tl)", "class UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min = 273.0 def draw(self,", "WHITE, YELLOW from micropython import const from config import temp class UiTempGr(UiFrame): 
def", "= len(forecast) for i1 in range(cnt): if i1 > 0: x1 = int(self.block", "f2 = forecast[i2].feel if (th is None): v1 = Vect(x1, self.chart_y(f1)) v2 =", "(f2 > th) or (f1 < tl) or (f2 < tl): v1 =", "draw(self, ui, d): # Pre-calculates some range values forecast = ui.forecast.forecast cnt =", "th) or (f1 < tl) or (f2 < tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp))", "temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space = const(30) chart_min = const(chart_space //", "self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui, w, c,", "= int(x1 - self.block) i2 = i1 - 1 f1 = forecast[i1].feel f2", "= None): forecast = ui.forecast.forecast cnt = len(forecast) for i1 in range(cnt): if", "weather = forecast[i1] temp_max = max(weather.temp, weather.feel, temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min)", "from micropython import const from config import temp class UiTempGr(UiFrame): def __init__(self, ofs,", "self.block = ui.canvas.dim.x / cnt temp_max = -273.0 for i1 in range(cnt): weather", "< tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c,", "YELLOW from micropython import const from config import temp class UiTempGr(UiFrame): def __init__(self,", "(self.chart_max - chart_min) / (temp_max - self.temp_min) # Draw charts self.chart_draw(ui, 3, WHITE)", "* i1) x2 = int(x1 - self.block) i2 = i1 - 1 f1", "const(30) chart_min = const(chart_space // 2) self.chart_max = self.dim.y - chart_space self.k_temp =", "(f1 < tl) or (f2 < tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 =", "temp_max = -273.0 for i1 in range(cnt): weather = forecast[i1] temp_max = max(weather.temp,", "x2 = int(x1 - self.block) i2 = i1 - 1 f1 = forecast[i1].feel", "for i1 in range(cnt): weather = forecast[i1] temp_max = max(weather.temp, weather.feel, temp_max) self.temp_min", 
"forecast[i1].feel f2 = forecast[i2].feel if (th is None): v1 = Vect(x1, self.chart_y(f1)) v2", "= forecast[i1].feel f2 = forecast[i2].feel if (th is None): v1 = Vect(x1, self.chart_y(f1))", "> th) or (f2 > th) or (f1 < tl) or (f2 <", "v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w *", "v2, c, w * 2) def chart_y(self, temp): return int(self.chart_max - (temp -", "= int(self.block * i1) x2 = int(x1 - self.block) i2 = i1 -", "- chart_space self.k_temp = (self.chart_max - chart_min) / (temp_max - self.temp_min) # Draw", "= min(weather.temp, weather.feel, self.temp_min) chart_space = const(30) chart_min = const(chart_space // 2) self.chart_max", "= ui.canvas.dim.x / cnt temp_max = -273.0 for i1 in range(cnt): weather =", "= const(30) chart_min = const(chart_space // 2) self.chart_max = self.dim.y - chart_space self.k_temp", "w) if (th is None) or (f1 > th) or (f2 > th)", "BLACK, WHITE, YELLOW from micropython import const from config import temp class UiTempGr(UiFrame):", "= ui.forecast.forecast cnt = len(forecast) for i1 in range(cnt): if i1 > 0:", "(temp_max - self.temp_min) # Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high,", "tl = None): forecast = ui.forecast.forecast cnt = len(forecast) for i1 in range(cnt):", "int(x1 - self.block) i2 = i1 - 1 f1 = forecast[i1].feel f2 =", "(th is None): v1 = Vect(x1, self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2,", "if (th is None) or (f1 > th) or (f2 > th) or", "tl) or (f2 < tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp))", "ui import UiFrame, Vect, BLACK, WHITE, YELLOW from micropython import const from config", "Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK)", "range(cnt): weather = forecast[i1] temp_max = max(weather.temp, weather.feel, 
temp_max) self.temp_min = min(weather.temp, weather.feel,", "BLACK) def chart_draw(self, ui, w, c, th = None, tl = None): forecast", "3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui, w, c, th", "from ui import UiFrame, Vect, BLACK, WHITE, YELLOW from micropython import const from", "- chart_min) / (temp_max - self.temp_min) # Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui,", "self.temp_min) chart_space = const(30) chart_min = const(chart_space // 2) self.chart_max = self.dim.y -", "YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui, w, c, th =", "self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self,", "WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui, w,", "range(cnt): if i1 > 0: x1 = int(self.block * i1) x2 = int(x1", "dim): super().__init__(ofs, dim) self.temp_min = 273.0 def draw(self, ui, d): # Pre-calculates some", "- 1 f1 = forecast[i1].feel f2 = forecast[i2].feel if (th is None): v1", "self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if (th is None) or (f1 > th)", "self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w * 2) def chart_y(self, temp): return int(self.chart_max -", "ui.canvas.line(v1, v2, c, w * 2) def chart_y(self, temp): return int(self.chart_max - (temp", "temp_max = max(weather.temp, weather.feel, temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space = const(30)", "= Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w * 2) def chart_y(self, temp): return", "None): forecast = ui.forecast.forecast cnt = len(forecast) for i1 in range(cnt): if i1", "w * 2) def chart_y(self, temp): return int(self.chart_max - (temp - self.temp_min) *", "forecast[i2].feel if (th is None): v1 = Vect(x1, self.chart_y(f1)) v2 = 
Vect(x2, self.chart_y(f2))", "ui.forecast.forecast cnt = len(forecast) self.block = ui.canvas.dim.x / cnt temp_max = -273.0 for", "i1 - 1 f1 = forecast[i1].feel f2 = forecast[i2].feel if (th is None):", "ui.canvas.line(v1, v2, c, w) if (th is None) or (f1 > th) or", "dim) self.temp_min = 273.0 def draw(self, ui, d): # Pre-calculates some range values", "Vect(x1, self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if (th is", "int(self.block * i1) x2 = int(x1 - self.block) i2 = i1 - 1", "th) or (f2 > th) or (f1 < tl) or (f2 < tl):", "Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w * 2) def chart_y(self, temp): return int(self.chart_max", "self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w * 2) def chart_y(self,", "# Pre-calculates some range values forecast = ui.forecast.forecast cnt = len(forecast) self.block =", "d): # Pre-calculates some range values forecast = ui.forecast.forecast cnt = len(forecast) self.block", "charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def", "in range(cnt): if i1 > 0: x1 = int(self.block * i1) x2 =", "- self.block) i2 = i1 - 1 f1 = forecast[i1].feel f2 = forecast[i2].feel", "ui.canvas.dim.x / cnt temp_max = -273.0 for i1 in range(cnt): weather = forecast[i1]", "cnt = len(forecast) self.block = ui.canvas.dim.x / cnt temp_max = -273.0 for i1", "# Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1,", "for i1 in range(cnt): if i1 > 0: x1 = int(self.block * i1)", "v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if (th is None) or", "c, th = None, tl = None): forecast = ui.forecast.forecast cnt = len(forecast)", "if i1 > 0: x1 = int(self.block * i1) x2 = int(x1 -", "i1 in range(cnt): if i1 > 0: x1 = int(self.block * i1) x2", "temp.outdoor_high, 
temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui, w, c, th = None,", "= i1 - 1 f1 = forecast[i1].feel f2 = forecast[i2].feel if (th is", "len(forecast) self.block = ui.canvas.dim.x / cnt temp_max = -273.0 for i1 in range(cnt):", "- self.temp_min) # Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low)", "None, tl = None): forecast = ui.forecast.forecast cnt = len(forecast) for i1 in", "forecast = ui.forecast.forecast cnt = len(forecast) self.block = ui.canvas.dim.x / cnt temp_max =", "self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if (th is None)", "const(chart_space // 2) self.chart_max = self.dim.y - chart_space self.k_temp = (self.chart_max - chart_min)", "(th is None) or (f1 > th) or (f2 > th) or (f1", "or (f1 > th) or (f2 > th) or (f1 < tl) or", "<filename>simulator/ui/tempg.py<gh_stars>1-10 from ui import UiFrame, Vect, BLACK, WHITE, YELLOW from micropython import const", "Pre-calculates some range values forecast = ui.forecast.forecast cnt = len(forecast) self.block = ui.canvas.dim.x", "= Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if (th is None) or (f1", "w, c, th = None, tl = None): forecast = ui.forecast.forecast cnt =", "self.dim.y - chart_space self.k_temp = (self.chart_max - chart_min) / (temp_max - self.temp_min) #", "self.k_temp = (self.chart_max - chart_min) / (temp_max - self.temp_min) # Draw charts self.chart_draw(ui,", "= forecast[i2].feel if (th is None): v1 = Vect(x1, self.chart_y(f1)) v2 = Vect(x2,", "= None, tl = None): forecast = ui.forecast.forecast cnt = len(forecast) for i1", "or (f2 < tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1,", "forecast = ui.forecast.forecast cnt = len(forecast) for i1 in range(cnt): if i1 >", "len(forecast) for i1 in range(cnt): if i1 > 0: x1 = int(self.block *", "= ui.forecast.forecast cnt = len(forecast) self.block = 
ui.canvas.dim.x / cnt temp_max = -273.0", "i2 = i1 - 1 f1 = forecast[i1].feel f2 = forecast[i2].feel if (th", "= len(forecast) self.block = ui.canvas.dim.x / cnt temp_max = -273.0 for i1 in", "is None) or (f1 > th) or (f2 > th) or (f1 <", "max(weather.temp, weather.feel, temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space = const(30) chart_min =", "from config import temp class UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min", "in range(cnt): weather = forecast[i1] temp_max = max(weather.temp, weather.feel, temp_max) self.temp_min = min(weather.temp,", "temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui, w, c, th = None, tl", "tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w", "chart_space self.k_temp = (self.chart_max - chart_min) / (temp_max - self.temp_min) # Draw charts", "some range values forecast = ui.forecast.forecast cnt = len(forecast) self.block = ui.canvas.dim.x /", "self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space = const(30) chart_min = const(chart_space // 2)", "None): v1 = Vect(x1, self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w)", "const from config import temp class UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs, dim)", "/ (temp_max - self.temp_min) # Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW,", "def __init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min = 273.0 def draw(self, ui, d):", "1, BLACK) def chart_draw(self, ui, w, c, th = None, tl = None):", "x1 = int(self.block * i1) x2 = int(x1 - self.block) i2 = i1", "< tl) or (f2 < tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2,", "weather.feel, temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space = const(30) chart_min = const(chart_space", "ui, w, c, th = 
None, tl = None): forecast = ui.forecast.forecast cnt", "micropython import const from config import temp class UiTempGr(UiFrame): def __init__(self, ofs, dim):", "ui, d): # Pre-calculates some range values forecast = ui.forecast.forecast cnt = len(forecast)", "(f1 > th) or (f2 > th) or (f1 < tl) or (f2", "range values forecast = ui.forecast.forecast cnt = len(forecast) self.block = ui.canvas.dim.x / cnt", "UiFrame, Vect, BLACK, WHITE, YELLOW from micropython import const from config import temp", "i1 in range(cnt): weather = forecast[i1] temp_max = max(weather.temp, weather.feel, temp_max) self.temp_min =", "self.block) i2 = i1 - 1 f1 = forecast[i1].feel f2 = forecast[i2].feel if", "= self.dim.y - chart_space self.k_temp = (self.chart_max - chart_min) / (temp_max - self.temp_min)", "self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui, w, c, th = None, tl =", "self.temp_min = 273.0 def draw(self, ui, d): # Pre-calculates some range values forecast", "i1 > 0: x1 = int(self.block * i1) x2 = int(x1 - self.block)", "> 0: x1 = int(self.block * i1) x2 = int(x1 - self.block) i2", "= Vect(x1, self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if (th", "UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min = 273.0 def draw(self, ui,", "weather.feel, self.temp_min) chart_space = const(30) chart_min = const(chart_space // 2) self.chart_max = self.dim.y", "is None): v1 = Vect(x1, self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c,", "(f2 < tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2,", "= 273.0 def draw(self, ui, d): # Pre-calculates some range values forecast =", "= Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w * 2)", "273.0 def draw(self, ui, d): # Pre-calculates some range values forecast = ui.forecast.forecast", "values forecast = 
ui.forecast.forecast cnt = len(forecast) self.block = ui.canvas.dim.x / cnt temp_max", "Vect(x1, self.chart_y(forecast[i1].temp)) v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w * 2) def", "if (th is None): v1 = Vect(x1, self.chart_y(f1)) v2 = Vect(x2, self.chart_y(f2)) ui.canvas.line(v1,", "cnt = len(forecast) for i1 in range(cnt): if i1 > 0: x1 =", "th = None, tl = None): forecast = ui.forecast.forecast cnt = len(forecast) for", "c, w * 2) def chart_y(self, temp): return int(self.chart_max - (temp - self.temp_min)", "-273.0 for i1 in range(cnt): weather = forecast[i1] temp_max = max(weather.temp, weather.feel, temp_max)", "cnt temp_max = -273.0 for i1 in range(cnt): weather = forecast[i1] temp_max =", "v2, c, w) if (th is None) or (f1 > th) or (f2", "Vect(x2, self.chart_y(f2)) ui.canvas.line(v1, v2, c, w) if (th is None) or (f1 >", "forecast[i1] temp_max = max(weather.temp, weather.feel, temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space =", "= -273.0 for i1 in range(cnt): weather = forecast[i1] temp_max = max(weather.temp, weather.feel,", "1 f1 = forecast[i1].feel f2 = forecast[i2].feel if (th is None): v1 =", "chart_min = const(chart_space // 2) self.chart_max = self.dim.y - chart_space self.k_temp = (self.chart_max", "2) self.chart_max = self.dim.y - chart_space self.k_temp = (self.chart_max - chart_min) / (temp_max", "chart_draw(self, ui, w, c, th = None, tl = None): forecast = ui.forecast.forecast", "self.temp_min) # Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui,", "3, WHITE) self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low) self.chart_draw(ui, 1, BLACK) def chart_draw(self, ui,", "chart_space = const(30) chart_min = const(chart_space // 2) self.chart_max = self.dim.y - chart_space", "* 2) def chart_y(self, temp): return int(self.chart_max - (temp - self.temp_min) * self.k_temp)", "config import 
temp class UiTempGr(UiFrame): def __init__(self, ofs, dim): super().__init__(ofs, dim) self.temp_min =", "i1) x2 = int(x1 - self.block) i2 = i1 - 1 f1 =", "0: x1 = int(self.block * i1) x2 = int(x1 - self.block) i2 =", "Vect, BLACK, WHITE, YELLOW from micropython import const from config import temp class", "def draw(self, ui, d): # Pre-calculates some range values forecast = ui.forecast.forecast cnt", "= max(weather.temp, weather.feel, temp_max) self.temp_min = min(weather.temp, weather.feel, self.temp_min) chart_space = const(30) chart_min", "f1 = forecast[i1].feel f2 = forecast[i2].feel if (th is None): v1 = Vect(x1,", "ofs, dim): super().__init__(ofs, dim) self.temp_min = 273.0 def draw(self, ui, d): # Pre-calculates", "or (f1 < tl) or (f2 < tl): v1 = Vect(x1, self.chart_y(forecast[i1].temp)) v2", "v2 = Vect(x2, self.chart_y(forecast[i2].temp)) ui.canvas.line(v1, v2, c, w * 2) def chart_y(self, temp):", "chart_min) / (temp_max - self.temp_min) # Draw charts self.chart_draw(ui, 3, WHITE) self.chart_draw(ui, 3,", "def chart_draw(self, ui, w, c, th = None, tl = None): forecast =" ]
[ "attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print \"Error loading page\" print \"---\" print", "soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\")", "= \"besiktas\" url = \"https://www.youtube.com/\" + account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows", "-*- import urllib2 from bs4 import BeautifulSoup account_name = \"besiktas\" url = \"https://www.youtube.com/\"", "\"besiktas\" url = \"https://www.youtube.com/\" + account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT", "+ account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML,", "WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req) if html_page.getcode() ==", "'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print \"Error loading page\" print \"---\" print account_name", "import BeautifulSoup account_name = \"besiktas\" url = \"https://www.youtube.com/\" + account_name req = urllib2.Request(url)", "soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print \"Error loading page\" print \"---\"", "= soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print \"Error loading page\" print", "6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') 
html_page = urllib2.urlopen(req) if html_page.getcode()", "import urllib2 from bs4 import BeautifulSoup account_name = \"besiktas\" url = \"https://www.youtube.com/\" +", "req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page", "subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print \"Error loading page\"", "#print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print \"Error", "Safari/537.11') html_page = urllib2.urlopen(req) if html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup", "#!/usr/bin/python # -*- coding: utf-8 -*- import urllib2 from bs4 import BeautifulSoup account_name", "== 200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']})", "account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like", "urllib2.urlopen(req) if html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span',", "html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed", "= \"https://www.youtube.com/\" + account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; WOW64)", "= urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; 
WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97", "like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req) if html_page.getcode() == 200: soup =", "utf-8 -*- import urllib2 from bs4 import BeautifulSoup account_name = \"besiktas\" url =", "from bs4 import BeautifulSoup account_name = \"besiktas\" url = \"https://www.youtube.com/\" + account_name req", "(KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req) if html_page.getcode() == 200: soup", "'Mozilla/5.0\\ (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page =", "AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req) if html_page.getcode() == 200:", "bs4 import BeautifulSoup account_name = \"besiktas\" url = \"https://www.youtube.com/\" + account_name req =", "= urllib2.urlopen(req) if html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count =", "account_name = \"besiktas\" url = \"https://www.youtube.com/\" + account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\", "-*- coding: utf-8 -*- import urllib2 from bs4 import BeautifulSoup account_name = \"besiktas\"", "= BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else:", "Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req) if html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\")", "NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req) if", "urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ 
Chrome/23.0.1271.97 Safari/537.11')", "req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\", "(Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\\ Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req)", "BeautifulSoup account_name = \"besiktas\" url = \"https://www.youtube.com/\" + account_name req = urllib2.Request(url) req.add_header('User-agent',", "<filename>display-youtube-subscriber-count.py<gh_stars>1-10 #!/usr/bin/python # -*- coding: utf-8 -*- import urllib2 from bs4 import BeautifulSoup", "Chrome/23.0.1271.97 Safari/537.11') html_page = urllib2.urlopen(req) if html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\") #print", "if html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal',", "\"https://www.youtube.com/\" + account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2; WOW64) AppleWebKit/537.11", "coding: utf-8 -*- import urllib2 from bs4 import BeautifulSoup account_name = \"besiktas\" url", "BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print", "urllib2 from bs4 import BeautifulSoup account_name = \"besiktas\" url = \"https://www.youtube.com/\" + account_name", "200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print", "url = \"https://www.youtube.com/\" + account_name req = urllib2.Request(url) req.add_header('User-agent', 'Mozilla/5.0\\ (Windows NT 6.2;", "html_page = 
urllib2.urlopen(req) if html_page.getcode() == 200: soup = BeautifulSoup(html_page,\"html.parser\") #print soup subscriber_count", "soup subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']}) print subscriber_count.string.rstrip(\"\\n\\r\") else: print \"Error loading", "# -*- coding: utf-8 -*- import urllib2 from bs4 import BeautifulSoup account_name =" ]
[]
[ "2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that the overall solution find_largest_basins is correct.\"\"\" assert", "[ [0, 5], [0, 6], [0, 7], [0, 8], [0, 9], [1, 6],", "3], [2, 4], [2, 5], [3, 0], [3, 1], [3, 2], [3, 3],", "are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5] def test_find_risk_level_sum(self, input_1): \"\"\"Test", "the overall solution find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [", "15 class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that the", "[2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [3, 0], [3,", "6], [3, 7], [3, 8], [4, 5], [4, 6], [4, 7], [4, 8],", "TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that the overall solution", "input_1): \"\"\"Test that the heights of low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) ==", "[4, 9], ], ), ], ) def test_find_basin_around_low_point(self, input_1, coords, expected): arr =", "correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self,", "[2, 5], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4], [4,", "[1, 9], [2, 9], ], ), ( [2, 2], [ [1, 2], [1,", "input_1): \"\"\"Test the sum of risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15", "6], [0, 7], [0, 8], [0, 9], [1, 6], [1, 8], [1, 9],", "class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that the heights", "== 15 class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that", "find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0,", "pytest import puzzle_1 
import puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self,", "test_find_largest_basins(self, input_1): \"\"\"Test that the overall solution find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) ==", "puzzle_1 import puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test", "2], [2, 3], [2, 4], [2, 5], [3, 0], [3, 1], [3, 2],", "6], [ [2, 7], [3, 6], [3, 7], [3, 8], [4, 5], [4,", "[3, 2], [3, 3], [3, 4], [4, 1], ], ), ( [4, 6],", "4], [2, 5], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4],", "== 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0, 0], [0, 1], [1, 0]]),", "[3, 6], [3, 7], [3, 8], [4, 5], [4, 6], [4, 7], [4,", "heights of low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5]", "low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5] def test_find_risk_level_sum(self,", "1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that the heights of low points are correct.\"\"\"", "5], [0, 6], [0, 7], [0, 8], [0, 9], [1, 6], [1, 8],", "8], [4, 9], ], ), ], ) def test_find_basin_around_low_point(self, input_1, coords, expected): arr", "[0, 1], [1, 0]]), ( [0, 9], [ [0, 5], [0, 6], [0,", "levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\"", "([0, 1], [[0, 0], [0, 1], [1, 0]]), ( [0, 9], [ [0,", "8], [4, 5], [4, 6], [4, 7], [4, 8], [4, 9], ], ),", "points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5] def test_find_risk_level_sum(self, input_1):", "1], [1, 0]]), ( [0, 9], [ [0, 5], [0, 6], [0, 7],", "[0, 8], [0, 9], [1, 6], [1, 8], [1, 9], [2, 9], ],", "( [2, 2], [ [1, 2], [1, 3], [1, 4], [2, 1], [2,", "[3, 0], [3, 1], [3, 2], [3, 3], [3, 4], [4, 1], ],", "( [4, 6], [ [2, 7], [3, 6], [3, 7], [3, 8], [4,", 
"test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) ==", "def test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1)", "9], ], ), ( [2, 2], [ [1, 2], [1, 3], [1, 4],", "as np import pytest import puzzle_1 import puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle", "import numpy as np import pytest import puzzle_1 import puzzle_2 class TestPuzzle1: \"\"\"Tests", "assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0, 0], [0, 1],", "[2, 4], [2, 5], [3, 0], [3, 1], [3, 2], [3, 3], [3,", "9], [1, 6], [1, 8], [1, 9], [2, 9], ], ), ( [2,", "numpy as np import pytest import puzzle_1 import puzzle_2 class TestPuzzle1: \"\"\"Tests for", "[1, 0, 5, 5] def test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of risk levels", "7], [0, 8], [0, 9], [1, 6], [1, 8], [1, 9], [2, 9],", "import pytest import puzzle_1 import puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def", "@pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0, 0], [0, 1], [1, 0]]), ( [0,", "of risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests for", "], ), ( [4, 6], [ [2, 7], [3, 6], [3, 7], [3,", "sum of risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests", "import puzzle_1 import puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self, input_1):", "0]]), ( [0, 9], [ [0, 5], [0, 6], [0, 7], [0, 8],", "puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that the heights of low points are", "puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5] def test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of", "[4, 6], [ [2, 7], [3, 6], [3, 7], [3, 8], [4, 5],", "[4, 7], [4, 8], 
[4, 9], ], ), ], ) def test_find_basin_around_low_point(self, input_1,", "[ ([0, 1], [[0, 0], [0, 1], [1, 0]]), ( [0, 9], [", "2], [1, 3], [1, 4], [2, 1], [2, 2], [2, 3], [2, 4],", "of low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5] def", "[3, 7], [3, 8], [4, 5], [4, 6], [4, 7], [4, 8], [4,", "puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that the", "[0, 6], [0, 7], [0, 8], [0, 9], [1, 6], [1, 8], [1,", "], ), ( [2, 2], [ [1, 2], [1, 3], [1, 4], [2,", "5], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4], [4, 1],", "), ( [4, 6], [ [2, 7], [3, 6], [3, 7], [3, 8],", "puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that the overall solution find_largest_basins is correct.\"\"\"", "correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5] def test_find_risk_level_sum(self, input_1): \"\"\"Test the", "assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5] def test_find_risk_level_sum(self, input_1): \"\"\"Test the sum", "[0, 7], [0, 8], [0, 9], [1, 6], [1, 8], [1, 9], [2,", "[[0, 0], [0, 1], [1, 0]]), ( [0, 9], [ [0, 5], [0,", "coords, expected): arr = np.array(input_1) result = puzzle_2.find_basin_around_low_point(coords, arr) assert sorted(expected) == sorted(result)", "\"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that the overall solution find_largest_basins", "the heights of low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5,", "<filename>Day-09_Smoke-Basin/tests/test_day_09.py<gh_stars>0 import numpy as np import pytest import puzzle_1 import puzzle_2 class TestPuzzle1:", "that the overall solution find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\",", "[4, 1], ], ), ( [4, 6], [ [2, 7], [3, 6], [3,", "overall solution find_largest_basins is 
correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0,", ") def test_find_basin_around_low_point(self, input_1, coords, expected): arr = np.array(input_1) result = puzzle_2.find_basin_around_low_point(coords, arr)", "[1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [2, 3], [2,", "np import pytest import puzzle_1 import puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\"", "2], [3, 3], [3, 4], [4, 1], ], ), ( [4, 6], [", "[1, 8], [1, 9], [2, 9], ], ), ( [2, 2], [ [1,", "7], [3, 6], [3, 7], [3, 8], [4, 5], [4, 6], [4, 7],", "the sum of risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2:", "[1, 4], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [3,", "[1, 6], [1, 8], [1, 9], [2, 9], ], ), ( [2, 2],", "input_1, coords, expected): arr = np.array(input_1) result = puzzle_2.find_basin_around_low_point(coords, arr) assert sorted(expected) ==", "), ], ) def test_find_basin_around_low_point(self, input_1, coords, expected): arr = np.array(input_1) result =", "5, 5] def test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of risk levels are correct.\"\"\"", "8], [0, 9], [1, 6], [1, 8], [1, 9], [2, 9], ], ),", "import puzzle_2 class TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that", "for puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that the heights of low points", "0], [0, 1], [1, 0]]), ( [0, 9], [ [0, 5], [0, 6],", "5] def test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of risk levels are correct.\"\"\" assert", "puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0, 0], [0, 1], [1,", "[2, 7], [3, 6], [3, 7], [3, 8], [4, 5], [4, 6], [4,", "1], [[0, 0], [0, 1], [1, 0]]), ( [0, 9], [ [0, 5],", "1], [2, 2], [2, 3], [2, 4], [2, 5], [3, 0], [3, 1],", "3], [1, 4], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5],", "0, 5, 5] def 
test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of risk levels are", "[4, 5], [4, 6], [4, 7], [4, 8], [4, 9], ], ), ],", "TestPuzzle1: \"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that the heights of", "[0, 9], [ [0, 5], [0, 6], [0, 7], [0, 8], [0, 9],", "[0, 9], [1, 6], [1, 8], [1, 9], [2, 9], ], ), (", "9], [ [0, 5], [0, 6], [0, 7], [0, 8], [0, 9], [1,", "[ [2, 7], [3, 6], [3, 7], [3, 8], [4, 5], [4, 6],", "[3, 1], [3, 2], [3, 3], [3, 4], [4, 1], ], ), (", "are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def", "def test_low_point_heights(self, input_1): \"\"\"Test that the heights of low points are correct.\"\"\" assert", "input_1): \"\"\"Test that the overall solution find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134", "9], [2, 9], ], ), ( [2, 2], [ [1, 2], [1, 3],", "[4, 6], [4, 7], [4, 8], [4, 9], ], ), ], ) def", "8], [1, 9], [2, 9], ], ), ( [2, 2], [ [1, 2],", "[2, 9], ], ), ( [2, 2], [ [1, 2], [1, 3], [1,", "[1, 3], [1, 4], [2, 1], [2, 2], [2, 3], [2, 4], [2,", "for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that the overall solution find_largest_basins is", "[3, 3], [3, 4], [4, 1], ], ), ( [4, 6], [ [2,", "def test_find_largest_basins(self, input_1): \"\"\"Test that the overall solution find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1)", "[1, 0]]), ( [0, 9], [ [0, 5], [0, 6], [0, 7], [0,", "], ), ], ) def test_find_basin_around_low_point(self, input_1, coords, expected): arr = np.array(input_1) result", "puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test", "( [0, 9], [ [0, 5], [0, 6], [0, 7], [0, 8], [0,", "2], [ [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [2,", "7], [4, 8], [4, 9], ], ), ], ) def test_find_basin_around_low_point(self, 
input_1, coords,", "test_find_basin_around_low_point(self, input_1, coords, expected): arr = np.array(input_1) result = puzzle_2.find_basin_around_low_point(coords, arr) assert sorted(expected)", "== [1, 0, 5, 5] def test_find_risk_level_sum(self, input_1): \"\"\"Test the sum of risk", "6], [1, 8], [1, 9], [2, 9], ], ), ( [2, 2], [", "[ [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [2, 3],", "[3, 8], [4, 5], [4, 6], [4, 7], [4, 8], [4, 9], ],", "def test_find_basin_around_low_point(self, input_1, coords, expected): arr = np.array(input_1) result = puzzle_2.find_basin_around_low_point(coords, arr) assert", "\"coords,expected\", [ ([0, 1], [[0, 0], [0, 1], [1, 0]]), ( [0, 9],", "0], [3, 1], [3, 2], [3, 3], [3, 4], [4, 1], ], ),", "solution find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1],", "that the heights of low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1, 0,", "[2, 2], [2, 3], [2, 4], [2, 5], [3, 0], [3, 1], [3,", "[2, 2], [ [1, 2], [1, 3], [1, 4], [2, 1], [2, 2],", "\"\"\"Test the sum of risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class", "test_low_point_heights(self, input_1): \"\"\"Test that the heights of low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1)", "is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0, 0],", "3], [3, 4], [4, 1], ], ), ( [4, 6], [ [2, 7],", "\"\"\"Tests for puzzle 1.\"\"\" def test_low_point_heights(self, input_1): \"\"\"Test that the heights of low", "assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1):", "5], [4, 6], [4, 7], [4, 8], [4, 9], ], ), ], )", "\"\"\"Test that the heights of low points are correct.\"\"\" assert puzzle_1.find_low_point_heights(input_1) == [1,", 
"[3, 4], [4, 1], ], ), ( [4, 6], [ [2, 7], [3,", "1], ], ), ( [4, 6], [ [2, 7], [3, 6], [3, 7],", "risk levels are correct.\"\"\" assert puzzle_1.find_risk_level_sum(input_1) == 15 class TestPuzzle2: \"\"\"Tests for puzzle", "9], ], ), ], ) def test_find_basin_around_low_point(self, input_1, coords, expected): arr = np.array(input_1)", "correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0, 0], [0,", "7], [3, 8], [4, 5], [4, 6], [4, 7], [4, 8], [4, 9],", "6], [4, 7], [4, 8], [4, 9], ], ), ], ) def test_find_basin_around_low_point(self,", "\"\"\"Test that the overall solution find_largest_basins is correct.\"\"\" assert puzzle_2.find_largest_basins(input_1) == 1134 @pytest.mark.parametrize(", "), ( [2, 2], [ [1, 2], [1, 3], [1, 4], [2, 1],", "[2, 3], [2, 4], [2, 5], [3, 0], [3, 1], [3, 2], [3,", "4], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [3, 0],", "4], [4, 1], ], ), ( [4, 6], [ [2, 7], [3, 6],", "[4, 8], [4, 9], ], ), ], ) def test_find_basin_around_low_point(self, input_1, coords, expected):", "], ) def test_find_basin_around_low_point(self, input_1, coords, expected): arr = np.array(input_1) result = puzzle_2.find_basin_around_low_point(coords,", "1], [3, 2], [3, 3], [3, 4], [4, 1], ], ), ( [4,", "1134 @pytest.mark.parametrize( \"coords,expected\", [ ([0, 1], [[0, 0], [0, 1], [1, 0]]), (", "[0, 5], [0, 6], [0, 7], [0, 8], [0, 9], [1, 6], [1,", "class TestPuzzle2: \"\"\"Tests for puzzle 2.\"\"\" def test_find_largest_basins(self, input_1): \"\"\"Test that the overall" ]
[ "'current_dir': '<(DEPTH)', }, 'targets': [ { 'target_name': 'All', 'type': 'none', 'dependencies': [ '<(current_dir)/app/app.gyp:app',", "{ 'variables': { 'project_name': 'GLmacia', 'version': '1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [ {", "'targets': [ { 'target_name': 'All', 'type': 'none', 'dependencies': [ '<(current_dir)/app/app.gyp:app', ], }, ],", "'<(DEPTH)', }, 'targets': [ { 'target_name': 'All', 'type': 'none', 'dependencies': [ '<(current_dir)/app/app.gyp:app', ],", "<reponame>legendlee1314/GLmacia<filename>OpenGL/all.gyp { 'variables': { 'project_name': 'GLmacia', 'version': '1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [", "}, 'targets': [ { 'target_name': 'All', 'type': 'none', 'dependencies': [ '<(current_dir)/app/app.gyp:app', ], },", "[ { 'target_name': 'All', 'type': 'none', 'dependencies': [ '<(current_dir)/app/app.gyp:app', ], }, ], }", "'variables': { 'project_name': 'GLmacia', 'version': '1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [ { 'target_name':", "'version': '1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [ { 'target_name': 'All', 'type': 'none', 'dependencies':", "'project_name': 'GLmacia', 'version': '1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [ { 'target_name': 'All', 'type':", "{ 'project_name': 'GLmacia', 'version': '1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [ { 'target_name': 'All',", "'1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [ { 'target_name': 'All', 'type': 'none', 'dependencies': [", "'GLmacia', 'version': '1.0.0', 'current_dir': '<(DEPTH)', }, 'targets': [ { 'target_name': 'All', 'type': 'none'," ]
[ "as pd import matplotlib.pyplot as plt import seaborn as sns labels = [0,", "600, 700, 800, 900, 1000] runs = 10 for n in range(runs): data", "<filename>Assignment1/plot.py import numpy as np import pandas as pd import matplotlib.pyplot as plt", "import numpy as np import pandas as pd import matplotlib.pyplot as plt import", "matplotlib.pyplot as plt import seaborn as sns labels = [0, 100, 200, 300,", "range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax = sns.heatmap(data, robust=True) plt.savefig('./plots/Valueheatmap%d.png' % n)", "pd import matplotlib.pyplot as plt import seaborn as sns labels = [0, 100,", "200, 300, 400, 500, 600, 700, 800, 900, 1000] runs = 10 for", "700, 800, 900, 1000] runs = 10 for n in range(runs): data =", "numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn", "500, 600, 700, 800, 900, 1000] runs = 10 for n in range(runs):", "900, 1000] runs = 10 for n in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' %", "= [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000] runs", "800, 900, 1000] runs = 10 for n in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv'", "data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax = sns.heatmap(data, robust=True) plt.savefig('./plots/Valueheatmap%d.png' % n) plt.close()", "import matplotlib.pyplot as plt import seaborn as sns labels = [0, 100, 200,", "np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns", "10 for n in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax = sns.heatmap(data,", "plt import seaborn as sns labels = [0, 100, 200, 300, 400, 500,", "import pandas as pd import matplotlib.pyplot as plt import seaborn as sns labels", "as plt import seaborn as sns labels = [0, 100, 200, 300, 400,", "sns labels = [0, 100, 200, 300, 400, 500, 600, 700, 800, 900,", "[0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000] runs =", "100, 200, 300, 400, 500, 600, 700, 800, 900, 1000] runs = 10", "1000] runs = 10 for n in range(runs): 
data = pd.read_csv('./data/Vgrid_%d.csv' % n)", "seaborn as sns labels = [0, 100, 200, 300, 400, 500, 600, 700,", "import seaborn as sns labels = [0, 100, 200, 300, 400, 500, 600,", "runs = 10 for n in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax", "pandas as pd import matplotlib.pyplot as plt import seaborn as sns labels =", "n in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax = sns.heatmap(data, robust=True) plt.savefig('./plots/Valueheatmap%d.png'", "as sns labels = [0, 100, 200, 300, 400, 500, 600, 700, 800,", "labels = [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]", "400, 500, 600, 700, 800, 900, 1000] runs = 10 for n in", "300, 400, 500, 600, 700, 800, 900, 1000] runs = 10 for n", "= 10 for n in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax =", "as np import pandas as pd import matplotlib.pyplot as plt import seaborn as", "for n in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax = sns.heatmap(data, robust=True)", "in range(runs): data = pd.read_csv('./data/Vgrid_%d.csv' % n) ax = sns.heatmap(data, robust=True) plt.savefig('./plots/Valueheatmap%d.png' %" ]
[ "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('demo', '0002_auto_20190718_0937'), ] operations", "class Migration(migrations.Migration): dependencies = [ ('demo', '0002_auto_20190718_0937'), ] operations = [ migrations.RenameField( model_name='article',", "Generated by Django 2.2.4 on 2019-08-10 03:48 from django.db import migrations class Migration(migrations.Migration):", "<gh_stars>0 # Generated by Django 2.2.4 on 2019-08-10 03:48 from django.db import migrations", "= [ ('demo', '0002_auto_20190718_0937'), ] operations = [ migrations.RenameField( model_name='article', old_name='content', new_name='contents', ),", "2.2.4 on 2019-08-10 03:48 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "# Generated by Django 2.2.4 on 2019-08-10 03:48 from django.db import migrations class", "by Django 2.2.4 on 2019-08-10 03:48 from django.db import migrations class Migration(migrations.Migration): dependencies", "on 2019-08-10 03:48 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('demo',", "dependencies = [ ('demo', '0002_auto_20190718_0937'), ] operations = [ migrations.RenameField( model_name='article', old_name='content', new_name='contents',", "import migrations class Migration(migrations.Migration): dependencies = [ ('demo', '0002_auto_20190718_0937'), ] operations = [", "[ ('demo', '0002_auto_20190718_0937'), ] operations = [ migrations.RenameField( model_name='article', old_name='content', new_name='contents', ), ]", "migrations class Migration(migrations.Migration): dependencies = [ ('demo', '0002_auto_20190718_0937'), ] operations = [ migrations.RenameField(", "Migration(migrations.Migration): dependencies = [ ('demo', '0002_auto_20190718_0937'), ] operations = [ migrations.RenameField( model_name='article', old_name='content',", "03:48 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('demo', 
'0002_auto_20190718_0937'), ]", "Django 2.2.4 on 2019-08-10 03:48 from django.db import migrations class Migration(migrations.Migration): dependencies =", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('demo', '0002_auto_20190718_0937'), ] operations =", "2019-08-10 03:48 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('demo', '0002_auto_20190718_0937')," ]
[ "names(series_list) <- uids if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <-", "DataFrame with columns ['unique_id', 'ds', 'y']. Long panel of time series. freq: int", "function(serie) serie[, ts(y, frequency = freq)]) if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\")", "seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys),", "import pandas as pd import rpy2.robjects as robjects from rpy2.robjects import pandas2ri def", "...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...))", "= ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate()", "ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'}", "paste0(\"hw_\", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features)", "https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr))", "<- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if(\"hw_parameters\" %in% features){ features <-", "setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids =", "\"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", 
\"stability\", \"stl_features\", \"unitroot_kpss\",", "DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel of time series. features: List[str]", "as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str]", "pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame,", "['unique_id', 'seasonality', 'y']. Wide panel of time series. features: List[str] String list of", "ts(y, frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\")", "...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by =", "panel of time series. freq: int Frequency of the time series. features: List[str]", "\"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\",", "features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by", "= \"unique_id\", keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency =", "names(series_features) <- paste0(\"hw_\", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...))", "\"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM':", "**kwargs: Arguments used by the original tsfeatures function. 
References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring", "seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return", "\"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures", "series. freq: int Frequency of the time series. features: List[str] String list of", "freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats", "ts(y, frequency = freq)]) if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features", "import List import pandas as pd import rpy2.robjects as robjects from rpy2.robjects import", "paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else", "<- uids if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list,", "List[str] String list of features to calculate. **kwargs: Arguments used by the original", "...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring)", "Pandas DataFrame with columns ['unique_id', 'ds', 'y']. Long panel of time series. 
freq:", "<- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features))", "features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\",", ":= names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts, freq, features,", "\"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <-", "hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features,", "series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\"", "pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\",", "= \"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id)", "robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids,", "ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features,", "calculate. **kwargs: Arguments used by the original tsfeatures function. 
References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\"", "<- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <-", "# coding: utf-8 from typing import List import pandas as pd import rpy2.robjects", "the time series. features: List[str] String list of features to calculate. **kwargs: Arguments", "'seasonality', 'y']. Wide panel of time series. features: List[str] String list of features", "<- split(dt, by = \"unique_id\", keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[,", "series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list)", "'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str]", "typing import List import pandas as pd import rpy2.robjects as robjects from rpy2.robjects", "...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features,", "of the time series. features: List[str] String list of features to calculate. 
**kwargs:", "<- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate()", "keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if(\"hw_parameters\"", "feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\", \"acf_features\",", "} setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats", "<- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) }", "features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\",", "python # coding: utf-8 from typing import List import pandas as pd import", "time series. freq: int Frequency of the time series. features: List[str] String list", "seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\" %in% features){ features <-", "unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities", "\"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys =", "Frequency of the time series. features: List[str] String list of features to calculate.", "of time series. features: List[str] String list of features to calculate. 
**kwargs: Arguments", "\"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"],", "rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = [\"length\", \"acf_features\",", "Arguments used by the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring =", "wrapper using r. Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id',", "} else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id :=", "rstring = \"\"\" function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <-", "rpy2.robjects as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features:", "list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\"", "} setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids", "suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list,", "using r. 
Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'ds',", "ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\" %in% features){", "import rpy2.robjects as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int,", "pd import rpy2.robjects as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq:", "freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt,", "'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] =", "features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def", "} else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features)) }", "\"\"\" rstring = \"\"\" function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list", "\"\"\" rstring = \"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df)", "= ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length':", "<- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features)) } } else { series_features", "\"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using", "series_features <- 
suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features)) } } else {", "original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids, seasonalities, ys,", "int Frequency of the time series. features: List[str] String list of features to", "renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features:", "= [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\",", "**kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using r. Parameters ---------- ts: pandas df Pandas", "else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)]", "pandas as pd import rpy2.robjects as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts:", "freq: int, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\",", "rstring = \"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt,", "series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features)", "seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\" %in%", "\"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], 
**kwargs) ->", "feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\",", "suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) )", "as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by = \"unique_id\", keep.by = FALSE) series_list", "function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\" %in% features){ features", "lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if(\"hw_parameters\" %in% features){ features <- setdiff(features,", "uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities,", "pd.DataFrame, freq: int, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\",", "names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs)", "feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\",", "panel of time series. features: List[str] String list of features to calculate. **kwargs:", "list of features to calculate. 
**kwargs: Arguments used by the original tsfeatures function.", "from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = [\"length\",", "names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features)", "ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats", "} } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id", "columns ['unique_id', 'seasonality', 'y']. Wide panel of time series. features: List[str] String list", "List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\",", "function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids, seasonalities, ys, features, ...){", "= robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats =", "= FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if(\"hw_parameters\" %in%", "**kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts:", "function. 
References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table))", "rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return", "List import pandas as pd import rpy2.robjects as robjects from rpy2.robjects import pandas2ri", "Wide panel of time series. features: List[str] String list of features to calculate.", "\"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using r. Parameters", "rfunc = robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length':", "String list of features to calculate. **kwargs: Arguments used by the original tsfeatures", "freq: int Frequency of the time series. features: List[str] String list of features", "suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc", "freq)]) if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\",", "import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\",", "'ds', 'y']. Long panel of time series. freq: int Frequency of the time", "suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features", "ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm',", "of time series. 
freq: int Frequency of the time series. features: List[str] String", "by = \"unique_id\", keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency", "\"unique_id\", keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)])", "cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\",", "ts: pandas df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel of", "y) ts(y, frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\" %in% features){ features <- setdiff(features,", "= robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'}", "frequency = freq)]) if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <-", "tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids, seasonalities, ys, features,", "\"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using r. Parameters ---------- ts: pandas df", "if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list,", "setkey(dt, unique_id) series_list <- split(dt, by = \"unique_id\", keep.by = FALSE) series_list <-", "ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid,", "df Pandas DataFrame with columns ['unique_id', 'ds', 'y']. 
Long panel of time series.", "df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel of time series.", "int, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\",", "...)) names(series_features) <- paste0(\"hw_\", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features,", "robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] =", "series. features: List[str] String list of features to calculate. **kwargs: Arguments used by", "suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features)) } } else { series_features <-", "series_list <- split(dt, by = \"unique_id\", keep.by = FALSE) series_list <- lapply(series_list, function(serie)", "of features to calculate. **kwargs: Arguments used by the original tsfeatures function. References", "\"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list", "'y']. Long panel of time series. freq: int Frequency of the time series.", "---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt", "if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...))", "the original tsfeatures function. 
References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df, freq,", ") names(series_list) <- uids if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features", "<- paste0(\"hw_\", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) }", "\"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using r. Parameters ----------", "utf-8 from typing import List import pandas as pd import rpy2.robjects as robjects", "= rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer)", "the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids, seasonalities,", "time series. features: List[str] String list of features to calculate. **kwargs: Arguments used", "\"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list,", "-> pd.DataFrame: \"\"\"tsfeatures wrapper using r. 
Parameters ---------- ts: pandas df Pandas DataFrame", "= ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys,", "def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\",", "ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM':", "uids if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\",", "Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide", "'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\",", "suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by = \"unique_id\", keep.by", "{ series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] }", "References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table))", "...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y)", "Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel of time series. 
features:", "pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list()", "\"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper", "---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel", ":= names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities =", "References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures))", "\"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame:", "['unique_id', 'ds', 'y']. Long panel of time series. freq: int Frequency of the", "hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features))", "names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[,", "} \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate()", "def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\",", "Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'ds', 'y']. 
Long", "features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality,", "rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer)", "frequency=seasonality) ) names(series_list) <- uids if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){", "coding: utf-8 from typing import List import pandas as pd import rpy2.robjects as", "---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures))", "suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y,", "pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids", "} \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys", "features: List[str] String list of features to calculate. **kwargs: Arguments used by the", "\"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs)", "<- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...))", "\"\"\"tsfeatures wrapper using r. Parameters ---------- ts: pandas df Pandas DataFrame with columns", "to calculate. 
**kwargs: Arguments used by the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html", "names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else {", "r. Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y'].", "function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities,", "features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc =", "series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts,", "= feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\",", "<- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by = \"unique_id\", keep.by = FALSE)", "dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by = \"unique_id\", keep.by =", "<- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <-", "original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df, freq, features,", "return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\",", "using r. 
Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'seasonality',", "feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats =", "---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'ds', 'y']. Long panel", "unique_id) series_list <- split(dt, by = \"unique_id\", keep.by = FALSE) series_list <- lapply(series_list,", "else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features)) } }", "\"\"\" function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids,", "'y']. Wide panel of time series. features: List[str] String list of features to", "= rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats =", "serie[, ts(y, frequency = freq)]) if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){", "r. Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'ds', 'y'].", "setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features", "\"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using r. 
Parameters ---------- ts: pandas", "= freq)]) if(\"hw_parameters\" %in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list,", "pd.DataFrame, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\",", "with columns ['unique_id', 'ds', 'y']. Long panel of time series. freq: int Frequency", "names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list()", "pandas df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel of time", "ts: pandas df Pandas DataFrame with columns ['unique_id', 'ds', 'y']. Long panel of", "features to calculate. **kwargs: Arguments used by the original tsfeatures function. References ----------", "{ series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <- paste0(\"hw_\", names(series_features)) } } else", "%in% features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features)", "with columns ['unique_id', 'seasonality', 'y']. Wide panel of time series. 
features: List[str] String", "series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features", "https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <-", "split(dt, by = \"unique_id\", keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y,", "unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts, freq,", "= \"\"\" function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap(", "function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <-", "robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats", "suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by = \"unique_id\",", "\"nonlinearity\", \"pacf_features\", \"stability\", \"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using r.", "Long panel of time series. freq: int Frequency of the time series. features:", "tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\",", "#!/usr/bin/env python # coding: utf-8 from typing import List import pandas as pd", "tsfeatures function. 
References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df, freq, features, ...){", "suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality)", "as pd import rpy2.robjects as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame,", "tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\",", "\"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\", \"stability\",", "by the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(df,", "<- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(series_features) <-", "features: List[str] = [\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\",", "pd.DataFrame: \"\"\"tsfeatures wrapper using r. Parameters ---------- ts: pandas df Pandas DataFrame with", "columns ['unique_id', 'ds', 'y']. Long panel of time series. 
freq: int Frequency of", "series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list()", "[\"length\", \"acf_features\", \"arch_stat\", \"crossing_points\", \"entropy\", \"flat_spots\", \"heterogeneity\", \"holt_parameters\", \"hurst\", \"hw_parameters\", \"lumpiness\", \"nonlinearity\", \"pacf_features\",", "FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if(\"hw_parameters\" %in% features){", "series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if(\"hw_parameters\" %in% features){ features", "features){ features <- setdiff(features, \"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <-", "pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm',", "\"stl_features\", \"unitroot_kpss\", \"unitroot_pp\"], **kwargs) -> pd.DataFrame: \"\"\"tsfeatures wrapper using r. Parameters ---------- ts:", "by the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\" function(uids,", "setDT(series_features) series_features[, unique_id := names(series_list)] } \"\"\" pandas2ri.activate() rfunc = robjects.r(rstring) feats =", "feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats", "from typing import List import pandas as pd import rpy2.robjects as robjects from", "used by the original tsfeatures function. 
References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html \"\"\" rstring = \"\"\"", "\"hw_parameters\") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, \"hw_parameters\", ...)) names(hw_series_features) <- paste0(\"hw_\", names(hw_series_features)) series_features <-", "rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats", "seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs)", "pandas df Pandas DataFrame with columns ['unique_id', 'ds', 'y']. Long panel of time" ]
[ "a clean adb server. adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices", "adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the first device in the list as", "the subsequent commands. adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message from Android device: {0}\".format(adb.shell(['echo", "be used. adb = ADB() # Start with a clean adb server. adb.kill_server()", "the list as the target of the subsequent commands. adb.target_device = connected_devices[0] adb.wait_for_device()", "the target of the subsequent commands. adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message from", "adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices:", "how the adb wrapper can be used. adb = ADB() # Start with", "logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This", "# Start with a clean adb server. adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB", "version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the first device", "first device in the list as the target of the subsequent commands. adb.target_device", "showing how the adb wrapper can be used. adb = ADB() # Start", "as the target of the subsequent commands. 
adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message", "python3 import logging from adb.adb import ADB if __name__ == \"__main__\": # Logging", "logging from adb.adb import ADB if __name__ == \"__main__\": # Logging configuration. logger", "logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This is an example", "example file showing how the adb wrapper can be used. adb = ADB()", "target of the subsequent commands. adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message from Android", "server. adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected", "commands. adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message from Android device: {0}\".format(adb.shell(['echo \"Hello World!\"']))", ") # This is an example file showing how the adb wrapper can", "adb.connect() adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices))", "ADB if __name__ == \"__main__\": # Logging configuration. logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s>", "Logging configuration. logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, )", "import ADB if __name__ == \"__main__\": # Logging configuration. logger = logging.getLogger(__name__) logging.basicConfig(", "configuration. 
logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) #", "format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This is an example file", "#!/usr/bin/env python3 import logging from adb.adb import ADB if __name__ == \"__main__\": #", "= adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set", "\"__main__\": # Logging configuration. logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\",", "file showing how the adb wrapper can be used. adb = ADB() #", "# This is an example file showing how the adb wrapper can be", "adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message from Android device: {0}\".format(adb.shell(['echo \"Hello World!\"'])) )", "ADB() # Start with a clean adb server. adb.kill_server() adb.connect() adb_version = adb.get_version()", "= adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the first device in the list", "Set the first device in the list as the target of the subsequent", "logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the first", "adb.adb import ADB if __name__ == \"__main__\": # Logging configuration. logger = logging.getLogger(__name__)", "{0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the first device in", "__name__ == \"__main__\": # Logging configuration. 
logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\",", "can be used. adb = ADB() # Start with a clean adb server.", "level=logging.INFO, ) # This is an example file showing how the adb wrapper", "subsequent commands. adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message from Android device: {0}\".format(adb.shell(['echo \"Hello", "wrapper can be used. adb = ADB() # Start with a clean adb", "is an example file showing how the adb wrapper can be used. adb", "Start with a clean adb server. adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB version:", "logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This is an", "adb = ADB() # Start with a clean adb server. adb.kill_server() adb.connect() adb_version", "the adb wrapper can be used. adb = ADB() # Start with a", "%(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This is an example file showing how", "used. adb = ADB() # Start with a clean adb server. adb.kill_server() adb.connect()", "from adb.adb import ADB if __name__ == \"__main__\": # Logging configuration. logger =", "This is an example file showing how the adb wrapper can be used.", "# Logging configuration. logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO,", "[%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This is an example file showing", "# Set the first device in the list as the target of the", "the first device in the list as the target of the subsequent commands.", "clean adb server. 
adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices =", "import logging from adb.adb import ADB if __name__ == \"__main__\": # Logging configuration.", "logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the first device in the list as the", "an example file showing how the adb wrapper can be used. adb =", "if __name__ == \"__main__\": # Logging configuration. logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()]", "of the subsequent commands. adb.target_device = connected_devices[0] adb.wait_for_device() logger.info( \"Message from Android device:", "%H:%M:%S\", level=logging.INFO, ) # This is an example file showing how the adb", "list as the target of the subsequent commands. adb.target_device = connected_devices[0] adb.wait_for_device() logger.info(", "device in the list as the target of the subsequent commands. adb.target_device =", "adb wrapper can be used. adb = ADB() # Start with a clean", "= ADB() # Start with a clean adb server. adb.kill_server() adb.connect() adb_version =", "{0}\".format(connected_devices)) # Set the first device in the list as the target of", "with a clean adb server. adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version))", "connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the first device in the", "adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) # Set the", "datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This is an example file showing how the", "in the list as the target of the subsequent commands. 
adb.target_device = connected_devices[0]", "= logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\", level=logging.INFO, ) # This is", "== \"__main__\": # Logging configuration. logger = logging.getLogger(__name__) logging.basicConfig( format=\"%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s\", datefmt=\"%d/%m/%Y", "devices: {0}\".format(connected_devices)) # Set the first device in the list as the target", "adb server. adb.kill_server() adb.connect() adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices()", "adb_version = adb.get_version() logger.info(\"ADB version: {0}\".format(adb_version)) connected_devices = adb.get_available_devices() logger.info(\"Connected devices: {0}\".format(connected_devices)) #" ]
[ "for species in the control vectors of emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config", "flattened 2D dummy square, {dummy2dwhere_flat} is sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount", "obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs)", "self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.')", "{np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for lat/lon inds {(latind,lonind)}.\") if self.full4D:", "analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d)", "has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for", "ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for", "firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble", "purposes. 
def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return", "constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and", "{self.num} got 3D conc for species {species} which are of dimension {np.shape(da)}.\") return", "factor netCDFs. #However, only do so for species in the control vectors of", "self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False):", "mixing ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self):", "dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 3D", "statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean bigX =", "self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D", "range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There", "from a uniform distribution. #E.g. 
0.1 would range from 90% to 110% of", "def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec", "tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES']", "self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing", "self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True) analysisSubset = self.applyAnalysisCorrections(analysisSubset,backgroundSubset) self.saveColumn(latval,lonval,analysisSubset)", "shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon", "{len(self.statevec)}.\") return statevec_toreturn #Randomize the restart for purposes of testing. 
Perturbation is 1/2", "in Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis =", "called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts = []", "conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return", "levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d", "= datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else:", "GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch", "else: errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if", "analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i]", "self.testing: print(f\"GC_Translator is getting localized statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes", "if self.testing: print(f\"Assimilator has been called for ens {self.ensnum} core {self.corenum}; construction beginning\")", "len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount", "maximum relative change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): 
maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange):", "percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name,", "self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\":", "dataset = xr.merge(dataset) return dataset #4D ensemble interface with satellite operators. class HIST_Ens(object):", "dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def", "self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing:", "self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background", "and left a restart at assimilation time in each run directory. #That restart", "GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created. 
Ensemble number list:", "backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds]", "self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been called for", "{np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species", "print(f\"GC_Translator number {self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def", "for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate", "scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis emissions scaling", "le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list]", "if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative", "= GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if", "self.testing: print(f\"Assimilator construction complete\") def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member,", "dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened 2D", "\"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", 
\"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\",", "lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i", "cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount):", "spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s", "\"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added new scaling factors on {str(date.today())}\",", "procedure. class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename =", "ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created. Ensemble number list: {self.ensemble_numbers}\") if self.nature", "return self.gt[1].getLat() #Latitude of first ensemble member, who should always exist def getLon(self):", "and compares to the original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec =", "= self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if", "print(f'WAnalysis initialized in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self):", "START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from", "= 'time') #Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting", "total statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset", "class you would like to use) for each species to assimilate. #Class contains", "the xr restart dataset. #Also construct new scaling factors and add them as", "for n in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod:", "if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon", "updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart()", "spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to ignore existing", "print(f\"Making R for lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats", "3D conc for species {species} which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = 
([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry", "{np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds", "START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta =", "\"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file)", "self.interval == 0)] else: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0])", "combine columns, update restarts, and diff columns. class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing =", "dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.\")", "= len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1", "combined in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs", "if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0]", "GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp", "else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = 
obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if", "in each of the scaling factor netCDFs. #However, only do so for species", "np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in Assimilator. It has dimension {np.shape(self.C)}", "print(f\"GC Translators created. Ensemble number list: {self.ensemble_numbers}\") if self.nature is None: self.full4D =", "attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added new", "for species {species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D", "nature') print(f'{species} in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This", "le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def", "changed) so next run starts from the assimilation state vector. 
#Emissions scaling factors", "to the X percent of the background standard deviation, per Miyazaki et al", "in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator", "total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount =", "satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs", "\"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\",", "{len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing:", "path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been called for directory {path_to_rundir} and restart {self.filename};", "dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis", "in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a", "ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)}", "has loaded scaling factors for {name}\") if computeStateVec: self.buildStateVector() else: self.statevec = None", "analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) 
#Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME'])", "Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1)", "tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE", "= self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert =", "i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains", "cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if", "classes in order of the species to assimilate. 
obs_operator_classes = [getattr(obs, s) for", "HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies", "statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}')", "conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num} set 3D conc for species {species} which", "name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num} has loaded", "if self.testing: print(f\"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.\") species_config", "= tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge", "for le in le_list] le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0])", "statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s because this is a nature", "err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices from numpy files", "= ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat'])", "self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc')", "return 
np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the end of the emissions", "statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for spec in statevec_species:", "') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name =", "number 0) #store the nature run in GC_Translator object nature. #Also contains an", "otherwise just increment. index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d", "range of percent change selected from a uniform distribution. #E.g. 0.1 would range", "history files and connects them with the main state vector and observation matrices", "ensemble interface with satellite operators. class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge", "def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i]", "the LETKF procedure. class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum)", "update restarts, and diff columns. 
class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config", "totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)}", "geos-chem stopped and left a restart at assimilation time in each run directory.", "getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the end of", "(t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge:", "getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self):", "self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts =", "{100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind):", "range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max", "is initialized this variable is None if self.testing: print(f\"GC_Translator number {self.num} construction complete.\")", "in i] saved_col = self.columns[search[0]] backgroundEnsemble = 
self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff]", "else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE =", "= [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D =", "self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator for lat/lon inds", "= np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num} set 3D conc", "difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols", "emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount):", "selected from total statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config =", "are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None):", "np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if", "= np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num} got 3D conc for species {species}", "= analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0)", 
"\"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod =", "\"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added new scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\",", "is getting localized statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat", "of total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the restart for purposes of testing.", "me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation", "= xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}']", "in each run directory. 
#That restart will be overwritten in place (name not", "spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate we did not", "00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={", "False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list]", "statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files,", "doesn't terminate we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds =", "self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in", "dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries", "[int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.gt = {} self.nature =", "subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] endtime =", "errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D =", "= f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in npy_column_files] npy_columns", "path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = 
f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for", "flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount =", "detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.nature", "if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return", "has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis =", "has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None):", "for species {species} which are of dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self, species,", "if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i]", "#Now average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1):", "= np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean", "= self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and", "self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k):", "GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) 
self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the original", "is starting build of statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc", "= 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset]))", "= self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME))", "species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for", "emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f'", "the new timestep in each of the scaling factor netCDFs. #However, only do", "from the analysis vector and overwrite relevant terms in the xr restart dataset.", "= None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature", "= len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column", "terms in the xr restart dataset. 
#Also construct new scaling factors and add", "dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers)", "in filenames if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff", "firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble", "vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape)", "ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans =", "getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in", "files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1]", "the special case where there is a nature run present (with number 0)", "print(f\"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)});", "[float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation", "{(latind,lonind)}.\") if self.full4D: self.R 
= self.histens.makeR(latind,lonind) else: errmats = [] for species in", "{\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\":", "self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies:", "{self.filename}; construction beginning\") self.emis_ds_list = {} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1])", "[] cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in", "loop doesn't terminate we did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount =", "construct new scaling factors and add them as a separate array at the", "for purposes of testing. Perturbation is 1/2 of range of percent change selected", "in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. 
It has dimension {np.shape(self.WAnalysis)}", "et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std", "None if self.testing: print(f\"GC_Translator number {self.num} construction complete.\") #Since only one timestamp, returns", "date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this", "for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.\")", "self.statevec = np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num} has built statevector; it is", "= len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return", "{} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing:", "gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind]", "obsmeans = [] obsperts = [] obsdiffs = [] for spec in self.satSpecies:", "matrices from numpy files raise NotImplementedError else: #Assume list of strings errs =", "def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}') if", "= ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds", "self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans()", "so next run starts from the 
assimilation state vector. #Emissions scaling factors are", "in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num}", "spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control vector index_start", "conc3D = [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3]", "inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.')", "dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0]", "Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers)", "if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0]", "timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp tstr", "range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. It has dimension {np.shape(self.WAnalysis)} and", "print(f\"This core will be handling lat and lon values {[(latval,lonval) for latval,lonval in", "yet! 
else: nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR'])", "column statevec indices FOR FULL VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount =", "of range of percent change selected from a uniform distribution. #E.g. 0.1 would", "as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config =", "all the 3D concentrations from the analysis vector and overwrite relevant terms in", "= True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D =", "= np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in", "at the new timestep in each of the scaling factor netCDFs. #However, only", "if '.npy' in err_config[0]: #Load error matrices from numpy files raise NotImplementedError else:", "= HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing)", "= np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is", "ignore existing nature directory. 
Only for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES']", "self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and has", "(t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)] else: specconc_list = [spc", "2d emissions scaling factors to the end of the emissions scaling factor def", "assimilate. #Class contains function to calculate relvant assimilation variables. #SPECIAL NOTE ON FILES:", "= tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs =", "np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF called! Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds):", "with the most recent timestamp. Rest are just for archival purposes. 
def getEmisSF(self,", "scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1]", "= xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for", "and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config =", "surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting localized statevec indices surrounding", "np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] =", "beginning\") print(f\"This core will be handling lat and lon values {[(latval,lonval) for latval,lonval", "analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]):", "directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten())", "if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in", "self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\") def getLat(self): return self.gt[1].getLat() #Latitude of first", "selected from total statevec.\") return localizedstatevecinds def 
getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector()", "float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance", "the necessary data #and can output it in useful ways to other functions", "end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors ds = xr.Dataset(", "starts from the assimilation state vector. #Emissions scaling factors are most recent available", "= [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]]", "k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble", "self.testing: print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def", "of the emissions scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime()", "restart dataset. #Also construct new scaling factors and add them as a separate", "levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']):", "doesn't terminate we did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev())", "subsetted column; values are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column", "work with the most recent timestamp. 
Rest are just for archival purposes. def", "\"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}),", "one value here. localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of", "number {self.num} got statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of", "= xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number", "Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] =", "np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge)", "None #If loop doesn't terminate we did not find the species def getSpeciesEmisIndicesInColumn(self,species):", "END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors", "for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def", "if self.testing: print(f\"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble", "a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) 
statevec_components.append(np.ones(lenones)) else: for spec_emis in", "in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite", "dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened 3D dummy", "i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds =", "you would like to use) for each species to assimilate. #Class contains function", "{self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset =", "has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX]", "analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation period: for i in", "np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp. 
Rest", "for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return", "emissions scaling factors to the end of the emissions scaling factor def addEmisSF(self,", "lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for", "number {self.num} construction complete.\") #Since only one timestamp, returns in format lev,lat,lon def", "= np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine columns, update", "surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting column statevec indices surrounding {(latind,lonind)}", "simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class. if data['SIMULATE_NATURE'] ==", "self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in", "= xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list:", "data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for", "da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing:", "if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0. 
if ratio < inflator:", "err_config[0]: #Load error matrices from numpy files raise NotImplementedError else: #Assume list of", "and lon inds are {surr_loninds}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount =", "scaling factors are most recent available (one assimilation timestep ago). New values will", "mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing:", "self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean =", "getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error", "print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat} is sole valid entry.\") species_config =", "[] obsperts = [] obsdiffs = [] for spec in self.satSpecies: ind =", "levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match =", "number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = []", "xr.merge(dataset) return dataset #4D ensemble interface with satellite operators. class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False):", "inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for", "factors are most recent available (one assimilation timestep ago). 
New values will be", "data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper", "self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior", "len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. It has dimension", "vectors of emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0]))", "self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for", "= tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for", "baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num} set 3D", "did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing:", "#self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00'", "column.\") print(f\"Matched value in the overall flattened and subsetted square 
is {dummy2dwhere_match}\") species_config", "self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be a vector of length NumEnsemble", "dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat_column}", "a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds,", "self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True) analysisSubset", "has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i", "factors for {name}\") if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths = None", "in npy_column_files] npy_columns = [np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs", "h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful", "= self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i", "Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs =", "if self.testing: print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}')", "background standard deviation is approximately 0. 
if ratio < inflator: new_std = inflator*background_std", "{np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.\") levcount = len(self.getLev())", "in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler. def makeRforSpecies(self,species,latind,lonind): inds", "self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for", "= list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames if", "#Create dataset with this timestep's scaling factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis", "dummy square, {dummy2dwhere_flat} is sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC'])", "for i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ =", "if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol", "saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol", "[d.split('/')[-2] for d in subdirs] if self.testing: print(f\"The following ensemble directories were detected:", "{\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames:", "da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return 
np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species):", "np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only", "self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval)", "self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key}", "saved column and compares to the original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0]", "return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return", "if self.statevec is None: self.buildStateVector() if not (latind is None): #User supplied ind", "lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in", "in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector)", "ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn", "#Get scalefactors off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:]", "[int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.nature = None self.emcount =", "print(f'R for 
{(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def makeC(self): self.C =", "if self.testing: print(f\"GC_Translator is getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.\")", "[] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec]", "an observation operator (pass in the class you would like to use) for", "0 and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset):", "self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has", "dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val =", "in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total", "= self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav]", "column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are", "= [] self.gt = {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens,", "for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if 
self.testing: print(f\"Making R", "= tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec:", "= [] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean)", "for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)}", "for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val,", "timestep's scaling factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\",", "if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval)", "= spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating GC Translators with state vectors.\") for ens,", "self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if i!=firstens:", "first ensemble member, who should always exist def getLon(self): return self.gt[1].getLon() def getLev(self):", "self.testing: print(f\"GC_Translator number {self.num} got 3D conc for species {species} which are of", "for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for every", "def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = 
f'{path_to_rundir}OutputDir' self.timeperiod", "self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {} for", "selected from total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting", "self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR =", "files, append 1s because this is a nature directory if len(self.emis_sf_filenames)==0: lenones =", "return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}')", "print(f\"The following ensemble directories were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n in", "col3D = [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav =", "filler. def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = []", "emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for", "self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if", "in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ =", "self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers)", "most recent timestamp. Rest are just for archival purposes. def getEmisSF(self, species): da", "scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] =", "~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard", "cur_offset+=len(dummy2dwhere_flat) #Only one value here. 
localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a", "class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum = corenum", "ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens]", "nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs,", "= levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a", "if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background", "value {self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden", "return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind)", "if self.testing: print(f\"GC_Translator number {self.num} set 3D conc for species {species} which are", "for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. 
It has", "species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self,", "directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens]", "obsdiffs = [] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_", "as obs import tropomi_tools as tt import scipy.linalg as la import toolbox as", "just be a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:]", "tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return", "in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts", "print(f\"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background))", "data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices", "= self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the", "def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename)", 
"dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:]", "self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens]", "(latind is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else:", "cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)} selected", "\"History\":f\"The LETKF utility added new scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\"", "def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for", "else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return", "functions in the LETKF procedure. 
class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds", "the main state vector and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing", "i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one", "adjusted in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble", "specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour", "'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling", "col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)}", "if self.testing: print(f\"The following ensemble directories were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for", "made in Assimilator. 
It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k", "NotImplementedError else: #Assume list of strings errs = np.array([float(e) for e in err_config])", "= spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp,", "inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return statevec_toreturn", "{(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds", "errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing:", "It has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb = self.C @", "percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind =", "= np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. 
counter+=1 for spec_emis", "new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation period:", "#Add 2d emissions scaling factors to the end of the emissions scaling factor", "print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0,", "statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent", "[] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s)", "state vectors.\") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature", "obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert)", "3D dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat =", "getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp =", "f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in", "every run directory. 
#In the special case where there is a nature run", "None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in", "inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for spec in", "= [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list] if self.interval: specconc_list = [spc for", "{species} in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def", "def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. It has", "self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been called for directory {path_to_rundir}", "self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds]", "for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind =", "None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the", "relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in", "we will be assuming that geos-chem stopped and left a restart at assimilation", "entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within", 
"firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int)", "obsmeans = [] obsperts = [] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())):", "def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators", "been called for directory {path_to_rundir} and restart {self.filename}; construction beginning\") self.emis_ds_list = {}", "are just for archival purposes. def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze()", "len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing:", "getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav", "spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control", "dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE']", "if self.testing: print(f\"GC Translators created. 
Ensemble number list: {self.ensemble_numbers}\") if self.nature is None:", "species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate we", "conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species}", "== \"false\": raise NotImplementedError #No support for real observations yet! else: nature_h_functions =", "has been called for ens {self.ensnum} core {self.corenum}; construction beginning\") print(f\"This core will", "n in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE", "obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs)", "assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0]", "dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind]", "diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i in", "= 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions", "col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)}", "= self.diffColumns(latind,lonind) saved_col = 
saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec =", "= [] cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i", "#and can output it in useful ways to other functions in the LETKF", "{self.ensemble_numbers}\") if self.nature is None: self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR'])", "the overall flattened and subsetted square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount =", "emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape", "np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}'", "self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the original files def constructColStatevec(self,latind,lonind): firstens", "in the overall flattened and subsetted column; values are {dummywhere_match}\") dummy2d = np.arange(0,", "self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been called for", "self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to", "if self.testing: print(f\"GC_Translator number {self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.\")", "[spc for spc,t in zip(specconc_list,ts) if 
(t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval", "len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number", "selected from a uniform distribution. #E.g. 0.1 would range from 90% to 110%", "#Repeats this procedure for every species in the state vector (excluding emissions). def", "timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES']", "conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if", "= [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.gt = {} self.nature", "= [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.nature = None self.emcount", "initialized in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis", "backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of the background standard", "print(f\"LETKF called! Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop", "has built statevector; it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds", "in GC_Translator object nature. #Also contains an observation operator (pass in the class", "#Only one value here. localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total", "uniform distribution. #E.g. 
0.1 would range from 90% to 110% of initial values.", "= self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] =", "self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num} has", "Assimilator. It has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb = self.C", "[] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else:", "\"%Y%m%d_%H%M\") for le in le_list] le_list = [le for le,t in zip(le_list,le_ts) if", "= levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match", "analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with", "obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff =", "= interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13],", 
"nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class. if data['SIMULATE_NATURE'] == \"false\":", "Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval)", "analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of", "= np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)):", "= backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species}", "{np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting", "background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis concentration", "0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours", "else: #Assume list of strings errs = np.array([float(e) for e in err_config]) #Provide", "self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file 
in npy_column_files]", "def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval =", "else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset)", "indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and", "return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} ->", "be between 0 and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset", "#Contains a dictionary referencing GC_Translators for every run directory. 
#In the special case", "place (name not changed) so next run starts from the assimilation state vector.", "np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds", "self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been called for ens {self.ensnum} core", "np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container", "makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D", "self.MaximumScalingFactorAllowed = [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in", "return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind]", "self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] =", "satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]):", "ways to other functions in the LETKF procedure. 
class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec", "getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent", "dataset #4D ensemble interface with satellite operators. class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing =", "if self.testing: print(f\"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have", "i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames =", "print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has", "restart at assimilation time in each run directory. #That restart will be overwritten", "dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is", "is a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis", "scaling factors are all in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end =", "self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]]", "for specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = 
xr.merge(dataset) return dataset", "False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators", "in le_list] le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])]", "diff columns. class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble", "and (t<timeperiod[1]) and (t.hour % self.interval == 0)] else: specconc_list = [spc for", "np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine columns, update restarts,", "levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']):", "print(f\"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.\") species_config = tx.getSpeciesConfig(self.testing)", "backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION", "len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset", "[getattr(obs, s) for s in data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in", "(starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if", "np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened 2D dummy 
square,", "{[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch =", "core will be handling lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\")", "obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting data from GEOS-Chem restart files", "a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if", "np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python", "addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h')", "spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing)", "state vector (excluding emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale", "ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator", "in ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents", "file in npy_column_files] npy_columns = [np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns))", "{np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k =", "return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp.", "lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1]", "self.testing: print(f\"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return localizedstatevecinds", "print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)}", "self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has", "and value {self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers)", "np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at", "creating GC Translators with state vectors.\") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0)", "#No 
support for real observations yet! else: nature_h_functions = [getattr(obs, h) for h", "datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod", "= [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime =", "{(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background", "\"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility", "in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1 return None #If loop", "') print(f'{species} in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species}", "cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount)", "a list of observation operator classes in order of the species to assimilate.", "if self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])", "return cur_offset cur_offset+=1 return None #If loop doesn't terminate we did not find", "= {} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData()", "3D conc for species {species} which are of dimension {np.shape(da)}.\") return da def", "of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis emissions scaling of", "NumEnsemble backgroundEnsemble = 
backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind]", "self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for every run directory. #In", "hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D", "Bias adds that percent on top of the perturbed fields (0.1 raises everything", "returns in format lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator", "= False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in", "for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] =", "i in filenames if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind)", "restart for purposes of testing. 
Perturbation is 1/2 of range of percent change", "{np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made", "analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:]", "spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)]", "analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self):", "at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.')", "= cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i", "in useful ways to other functions in the LETKF procedure. class GC_Translator(object): def", "{np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval):", "statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds}", "interface with satellite operators. 
class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge =", "= [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed =", "{species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self):", "factors to the end of the emissions scaling factor def addEmisSF(self, species, emis2d,", "enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1 return None #If loop doesn't", "return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0", "= int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for", "f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset", "index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order", "nature') print(f'{species} in ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature')", "= self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************')", "= gccol[ind,:] satcol = satcol[ind] obsmean = 
np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i", "= glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1])", "int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for", "from total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting column", "be assuming that geos-chem stopped and left a restart at assimilation time in", "to combine columns, update restarts, and diff columns. class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing", "surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon", "and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing:", "([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def", "index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens", "strings errs = np.array([float(e) for e in err_config]) #Provide a list of observation", "getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting localized statevec indices", 
"self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol =", "{(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs = [] for obskey,species in", "value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if", "of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def", "spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]]", "for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens", "species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise just increment. index_start =", "= self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def", "% self.interval == 0)] else: specconc_list = [spc for spc,t in zip(specconc_list,ts) if", "subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n", "{self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. 
It", "def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds =", "restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in", "corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been called for ens {self.ensnum}", "python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all", "2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:])", "the 3D concentrations from the analysis vector and overwrite relevant terms in the", "a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount", "in Assimilator for species {species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D =", "self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind))", "len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species", "real observations yet! 
else: nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation", "subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.gt = {}", "will just be a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff =", "#Also construct new scaling factors and add them as a separate array at", "glob import observation_operators as obs import tropomi_tools as tt import scipy.linalg as la", "= np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset", "= dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are", "control vector; otherwise just increment. index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset =", "is sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS'])", "glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list] if self.interval: specconc_list", "i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis()", "(pass in the class you would like to use) for each species to", "= len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else:", "self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) 
backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in", "= f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames if substr in i]", "observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator for", "= np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 2D dummy", "initialization it contains the necessary data #and can output it in useful ways", "{(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff", "= saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol =", "= len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if", "#If loop doesn't terminate we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds,", "from the assimilation state vector. #Emissions scaling factors are most recent available (one", "= np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs:", "in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis =", "tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting localized statevec indices surrounding {(latind,lonind)} (lat/lon inds", "> self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of", "dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.\")", "range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of", "self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler. def makeRforSpecies(self,species,latind,lonind): inds =", "False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]}", "def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions", "self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing:", "timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column", "errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension 
{np.shape(self.R)} and", "a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if", "loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten()", "specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list] if self.interval: specconc_list =", "species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if", "obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return", "= firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\":", "self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing:", "dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in", "= [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i", "[d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers", "square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) 
ind_collector", "else: nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return", "in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension", "def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def", "globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc", "self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self):", "diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX", "for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list", "np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp. 
Rest are just for archival", "to the end of the emissions scaling factor def addEmisSF(self, species, emis2d, assim_time):", "tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices from numpy", "for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames =", "nature run present (with number 0) #store the nature run in GC_Translator object", "#Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices,", "hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1]", "ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list] if self.interval: specconc_list = [spc", "mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions", "= tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp", "do so for species in the control vectors of emissions and concentrations. 
def", "range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if", "{self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k):", "It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset", "(one assimilation timestep ago). New values will be appended to netCDF. class Assimilator(object):", "beginning\") self.emis_ds_list = {} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] =", "lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator", "species {species} which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of", "most recent available (one assimilation timestep ago). 
New values will be appended to", "= np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number", "def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter", "= self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D =", "print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting build of statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components", "= statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX", "in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA", "directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing)", "np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time'])", "if self.testing: print(f\"Within a flattened 2D dummy 
square, {dummy2dwhere_flat} is sole valid entry.\")", "end of the emissions scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist =", "return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge)", "for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing:", "and (t.hour % self.interval == 0)] else: specconc_list = [spc for spc,t in", "fields (0.1 raises everything 10%). #Repeats this procedure for every species in the", "self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and connects", "if self.testing: print(f\"Begin creating GC Translators with state vectors.\") for ens, directory in", "distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) >", "k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in", "contains an observation operator (pass in the class you would like to use)", "np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if", "{self.num} has been called for directory {path_to_rundir} and restart {self.filename}; construction beginning\") self.emis_ds_list", "makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {} for i,obs_spec_key in", "= testing self.useLevelEdge = useLevelEdge self.spc_config = 
tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs =", "species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting column", "self.histens.makeR(latind,lonind) else: errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats)", "in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts", "referencing GC_Translators for every run directory. #In the special case where there is", "{dummy2dwhere_flat_column} is the sole valid index in the column.\") print(f\"Matched value in the", "observation operator classes in order of the species to assimilate. obs_operator_classes = [getattr(obs,", "xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble interface with satellite operators.", "np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the end of the emissions scaling", "= dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a flattened 2D dummy square,", "in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in", "np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species)", "{observation_key} -> {species} in Assimilator for lat/lon inds 
{(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return", "analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF called! Beginning loop.\") for", "#If loop doesn't terminate we did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount", "def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member, who should always exist", "run present (with number 0) #store the nature run in GC_Translator object nature.", "and add them as a separate array at the new timestep in each", "GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved", "GC Translators with state vectors.\") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and", "= f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create", "is None: self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing)", "because this is a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else:", "self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) 
self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval", "ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1 return None #If", "(t<timeperiod[1]) and (t.hour % self.interval == 0)] else: specconc_list = [spc for spc,t", "Translators with state vectors.\") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not", "in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to", "if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] =", "in Assimilator for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs", "= len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if", "gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert", "{(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for lat/lon inds", "with the main state vector and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False):", "{} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None):", "= shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = 
firstcol for i in self.ensemble_numbers: if", "zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind)", "statevec indices FOR FULL VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount = len(self.getLat())", "(backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF called! Beginning", "list of strings errs = np.array([float(e) for e in err_config]) #Provide a list", "statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If", "background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis", "0.1 would range from 90% to 110% of initial values. 
Bias adds that", "self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"])", "\"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\":", "'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\":", "1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the analysis vector and overwrite", "for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the", "for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds =", "inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i", "totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)}", "indices FOR FULL VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount", "1-perturbation scale = perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *=", "useful methods for getting data from GEOS-Chem restart files and #emissions scaling factor", "= 
f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames", "[saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col", "find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is", "= np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened 2D dummy", "percent on top of the perturbed fields (0.1 raises everything 10%). #Repeats this", "= endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens,", "return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i", "def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble =", "def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis", "self.testing: print(f'WAnalysis adjusted in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def", "in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)] else:", "numpy as np import xarray as xr from glob import glob import observation_operators", "assuming that geos-chem stopped and left a restart at assimilation time in each", "print(f\"GC_Translator number {self.num} got 3D conc for species {species} which are of dimension", "= np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts", "if self.testing: print(f\"GC Translator number {self.num} got statevector for inds {(latind,lonind)}; this vec", "[full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D", "np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num} set 3D conc for", "print(f'{species} in ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This", "dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened 3D", "dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries", "flattened and subsetted square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount", "procedure for every species in the state vector (excluding emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species", "cube, {len(dummywhere_flat_column)} entries are valid in the column.\") print(f\"Matched {len(dummywhere_match)} entries in the", "with satellite operators. class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge", "subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] if self.testing:", "self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec", "def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}') firstens", "self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self):", "self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean", "= 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i]", "np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has", "self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num} has loaded scaling factors for", "diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] 
print(f'*********************************** {species} EMISSIONS SCALING", "to true to ignore existing nature directory. Only for testing self.gt = {}", "spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self):", "= xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num}", "[full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine columns, update restarts, and diff", "dataset with this timestep's scaling factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis =", "satellite operators. class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config", "off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings", "self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search =", "used to combine columns, update restarts, and diff columns. class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True):", "shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if", "getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp. 
Rest are just", "latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within", "firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_", "= name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] =", "def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col =", "colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1])", "= dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are", "are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within a", "= [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i", "terminate we did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config", "= [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R", "factors on {str(date.today())}\", 
\"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim =", "= False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since", "percent change selected from a uniform distribution. #E.g. 0.1 would range from 90%", "obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}')", "else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares", "FOR FULL VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount =", "{dummy2dwhere_flat} is sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount =", "{(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full", "self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator')", "contains useful methods for getting data from GEOS-Chem restart files and #emissions scaling", "for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1 return None", "would like to use) for each species to assimilate. 
#Class contains function to", "i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def", "makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. It has dimension", "i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in", "is a nature run present (with number 0) #store the nature run in", "= int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self):", "loop doesn't terminate we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds", "= self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio):", "np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num}", "if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for lat/lon", "dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d =", "tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors ds =", "conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the analysis vector", "= float(data['INFLATION_FACTOR']) return [errs, 
obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting data", "built statevector; it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds =", "= [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames]", "self.getSatData() self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is", "statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if", "= self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)}", "for i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1} had background", "in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val)", "saved_col[colind,:] #Now will just be a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:]", "import observation_operators as obs import tropomi_tools as tt import scipy.linalg as la import", "self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations", "spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating GC Translators with state vectors.\") for ens, directory", "\"%Y%m%d_%H%M\") for spc in specconc_list] if self.interval: specconc_list = [spc for spc,t in", "= inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change 
per assimilation period: for", "values are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind]", "= xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num} has loaded scaling factors for {name}\")", "of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol", "0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine", "full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs]", "[] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = []", "def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def", "shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon", "set 3D conc for species {species} which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] =", "[] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in", "def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the end", "= [] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ =", 
"{observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing:", "= self.getColsforSpecies(spec) #This is just a filler. def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return", "self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds", "backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS", "value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made", "len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in", "= dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are", "GC_Translators; used to combine columns, update restarts, and diff columns. 
class GT_Container(object): def", "int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s", "in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs", "is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing:", "saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff =", "print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions", "scaling factors to the end of the emissions scaling factor def addEmisSF(self, species,", "helper class. if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No support for real observations", "f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames =", "netCDFs. 
#However, only do so for species in the control vectors of emissions", "always exist def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if", "loaded scaling factors for {name}\") if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths", "in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num} has built statevector;", "obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if", "in specconc_list] if self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0])", "= levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a", "np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time'])", "constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the original files def", "valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector =", "#emissions scaling factor netCDFs. 
After initialization it contains the necessary data #and can", "with state vectors.\") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature):", "= self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr", "if self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions", "if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths = None #Until state vector", "\"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"],", "spc in specconc_list] if self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts) if", "conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for", "#Gets saved column and compares to the original files def constructColStatevec(self,latind,lonind): firstens =", "self.buildStateVector() else: self.statevec = None self.statevec_lengths = None #Until state vector is initialized", "true to ignore existing nature directory. Only for testing self.gt = {} self.observed_species", "= 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset))", "has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator", "each of the scaling factor netCDFs. 
#However, only do so for species in", "= np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if", "statevector; it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing)", "overwrite relevant terms in the xr restart dataset. #Also construct new scaling factors", "saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF called! Beginning loop.\") for latval,lonval in", "LETKF utility added new scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" }", "scaling factor netCDFs. After initialization it contains the necessary data #and can output", "since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num", "self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def", "run in GC_Translator object nature. 
#Also contains an observation operator (pass in the", "self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind =", "in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total", "np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the", "subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.gt =", "for every species in the state vector (excluding emissions). def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species =", "of dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d", "print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.\")", "analysis vector and overwrite relevant terms in the xr restart dataset. #Also construct", "shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon", "of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is", "= GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created. Ensemble number", "distribution. #E.g. 0.1 would range from 90% to 110% of initial values. 
Bias", "0)] else: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])]", "print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts =", "of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs", "for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s because", "at assimilation time in each run directory. #That restart will be overwritten in", "def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num} got 3D", "statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting column statevec indices", "in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get", "for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers)", "print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting localized", "in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble", "length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = 
self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol =", "range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts =", "la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if", "def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds", "= [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list", "#Emissions scaling factors are all in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end", "= self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i", "entries in the overall flattened and subsetted column; values are {dummywhere_match}\") dummy2d =", "range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1} had background emissions scaling of", "class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\"", "mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return", "self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: 
print(f\"GC_Translator", "{len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None:", "inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.\")", "float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting data from", "None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature =", "conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for", "np import xarray as xr from glob import glob import observation_operators as obs", "{self.ensnum} core {self.corenum}; construction beginning\") print(f\"This core will be handling lat and lon", "inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert", "for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4')", "statevec_toreturn #Randomize the restart for purposes of testing. 
Perturbation is 1/2 of range", "= tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names =", "dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries", "construction beginning\") self.emis_ds_list = {} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name]", "of the perturbed fields (0.1 raises everything 10%). #Repeats this procedure for every", "self.forceOverrideNature=True #Set to true to ignore existing nature directory. Only for testing self.gt", "useful ways to other functions in the LETKF procedure. class GC_Translator(object): def __init__(self,", "ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp,", "in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for", "else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR", "return dataset #4D ensemble interface with satellite operators. 
class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing", "loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k =", "i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]):", "supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector", "firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now", "the control vector; otherwise just increment. 
index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset", "getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting column statevec indices FOR FULL VECTOR at", "def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We", "if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate", "a separate array at the new timestep in each of the scaling factor", "self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the", "makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species):", "self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon", "print(f'{species} in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents", "construction complete.\") #Since only one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species):", "return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs = []", "a nature run present (with number 0) #store the nature run in GC_Translator", "full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full", "self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.')", "= {} 
self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI):", "{dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.nature =", "self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This", "date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]:", "i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has", "HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir'", "= tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices from", "= [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list] le_list = [le for le,t in", "npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d", "file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator number", "at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.')", "tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds =", 
"zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS:", "self.testing: print(f\"GC_Translator is getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.\") levcount", "subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers =", "self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn = self.statevec if self.testing: print(f\"GC Translator", "def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {} for i,obs_spec_key", "in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1} had background concentration of", "ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens)", "glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] if self.testing: print(f\"The following", "setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number", "= len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset)", "None #If loop doesn't terminate we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind):", "valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened", "raise NotImplementedError else: #Assume list of strings errs = np.array([float(e) for e in", 
"self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in", "python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\",", "{np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self):", "self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]):", "in format lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number", "a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind", "levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened", "self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation =", "and #emissions scaling factor netCDFs. 
After initialization it contains the necessary data #and", "__init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\"", "and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in", "for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds = np.concatenate(ind_collector)", "def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation)", "tt import scipy.linalg as la import toolbox as tx from datetime import date,datetime,timedelta", "def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load", "loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten()", "self.statevec is None: self.buildStateVector() if not (latind is None): #User supplied ind statevecinds", "self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self):", "self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol))", "{i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1}", "saved_col = self.columns[search[0]] backgroundEnsemble = 
self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind):", "It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis", "format lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num}", "a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.\") print(f\"Matched", "had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis", "naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i", "observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs =", "deviation, per Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i]", "{self.num} is starting build of statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for", "= diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT", "zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}'", "if self.testing: print(f\"LETKF called! 
Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning", "print(f\"Begin creating GC Translators with state vectors.\") for ens, directory in zip(subdir_numbers,subdirs): if", "le in le_list] le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and", "\"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self):", "overall flattened and subsetted column; values are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat", "in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions", "1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] =", "lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape", "self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon", "self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind =", "it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if", "{len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the restart for purposes of", "= self.Xpert_background[colinds,i]+self.xbar_background[colinds] return 
[analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the", "return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add", "as tt import scipy.linalg as la import toolbox as tx from datetime import", "#This class contains useful methods for getting data from GEOS-Chem restart files and", "the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d", "range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if", "timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts", "SCALING AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ') print(f'{species} in", "the most recent timestamp. Rest are just for archival purposes. def getEmisSF(self, species):", "the class you would like to use) for each species to assimilate. #Class", "90% to 110% of initial values. 
Bias adds that percent on top of", "obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis =", "float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b)", "return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in", "concentrations from the analysis vector and overwrite relevant terms in the xr restart", "that geos-chem stopped and left a restart at assimilation time in each run", "we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if", "tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for spec in statevec_species: conc3d =", "([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file", "getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called", "dummy square, {len(dummy2dwhere_flat)} entries are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount", "= [file.split('/')[-1] for file in npy_column_files] npy_columns = [np.load(file) for file in npy_column_files]", "number {self.num} set 3D conc for species {species} which are of dimension {np.shape(conc4d)}.\")", "self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) 
self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to", "for i in filenames if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble =", "toolbox as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config", "ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)};", "xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is", "{(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1}", "np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of observations return inds def", "self.getColsforSpecies(spec) #This is just a filler. 
def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds)))", "[float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s)", "firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for", "dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat} is", "= len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in", "in Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key]", "= np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff)", "self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return", "spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names", "in self.satSpecies: 
errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0]", "diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN", "tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds", "timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created. Ensemble number list: {self.ensemble_numbers}\")", "if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval)", "netCDF. class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum =", "self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to ignore existing nature directory. 
Only", "return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with", "{np.shape(self.R)} and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing:", "specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge:", "for i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return", "self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background,", "following ensemble directories were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames]", "ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}')", "totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within", "if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing)", "else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D =", "from total statevec.\") return statevecinds def 
getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing)", "path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod", "self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background", "self.statevec if self.testing: print(f\"GC Translator number {self.num} got statevector for inds {(latind,lonind)}; this", "the column.\") print(f\"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted column; values", "member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1}", "xr from glob import glob import observation_operators as obs import tropomi_tools as tt", "{timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num =", "for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if", "(1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite =", "had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had", "i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for every run", "valid.\") species_config = 
tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = []", "analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def", "in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble", "spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens =", "the nature run in GC_Translator object nature. #Also contains an observation operator (pass", "self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs", "full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at", "bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble", "from numpy files raise NotImplementedError else: #Assume list of strings errs = np.array([float(e)", "cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount):", "10%). #Repeats this procedure for every species in the state vector (excluding emissions).", "made in Assimilator. 
It has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb", "= len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if", "{} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if", "= [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]]", "Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called", "we did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config =", "def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF called! 
Beginning loop.\") for latval,lonval", "np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\")", "datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME", "backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off", "import glob import observation_operators as obs import tropomi_tools as tt import scipy.linalg as", "statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing:", "self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background =", "only do so for species in the control vectors of emissions and concentrations.", "f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames =", "if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must be between 0 and", "= self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind", "{(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions 
{np.shape(bigX)}.') return", "= float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec =", "timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC", "down to appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = []", "[] self.gt = {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory", "dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0]", "tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d", "tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\")", "statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not (latind", "scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ')", "restart will be overwritten in place (name not changed) so next run starts", "in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= 
(scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct", "are {surr_loninds}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount =", "from GEOS-Chem restart files and #emissions scaling factor netCDFs. After initialization it contains", "stopped and left a restart at assimilation time in each run directory. #That", "time in each run directory. #That restart will be overwritten in place (name", "= satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0)", "firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime", "if self.testing: print(f\"GC_Translator is getting localized statevec indices surrounding {(latind,lonind)} (lat/lon inds have", "self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs", "class. 
if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No support for real observations yet!", "getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self):", "has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing:", "in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory,", "tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0", "loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec", "for n in dirnames] ensemble_numbers = [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS'])", "dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a", "range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}%", "specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D", "species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the end of the", "def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) 
backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] =", "self.testing: print(f\"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.\") species_config =", "s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s)", "self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds", "HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble", "self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number", "and subsetted column; values are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten()", "for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else:", "as a separate array at the new timestep in each of the scaling", "for lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats = []", "new timestep in each of the scaling factor netCDFs. 
#However, only do so", "cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There", "getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting column statevec indices", "{np.shape(da)}.\") return da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape]))", "np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole", "np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C'", "= self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime =", "{} self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec]", "are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class. 
if data['SIMULATE_NATURE']", "dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)}", "= col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad =", "= data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices from numpy files raise", "approximately 0. if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply", "return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {}", "import scipy.linalg as la import toolbox as tx from datetime import date,datetime,timedelta def", "print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True):", "*= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the analysis vector and", "Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop for lat/lon", "matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir", "subset down to appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans =", "in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector)", "= 
obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\") def getLat(self): return self.gt[1].getLat() #Latitude", "\"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name", "[] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec)", "add them as a separate array at the new timestep in each of", "= np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"]", "self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis", "{len(dummy2dwhere_flat)} entries are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS'])", "called in Assimilator for species {species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D", "useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames =", "order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. 
counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors", "print(f\"GC_Translator number {self.num} is starting build of statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components =", "shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers:", "= np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the", "with this timestep's scaling factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\":", "perturbed fields (0.1 raises everything 10%). #Repeats this procedure for every species in", "self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D = [] firstens =", "self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)}", "= self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs))", "state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i", "the whole vector statevec_toreturn = self.statevec if self.testing: print(f\"GC Translator number {self.num} got", "nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens", "lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing:", "a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount))", "restarts, and diff columns. 
class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config =", "= self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior", "np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing:", "totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten()", "ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent", "ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds", "= xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"],", "#In the special case where there is a nature run present (with number", "output it in useful ways to other functions in the LETKF procedure. 
class", "1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def", "np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python", "i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval):", "self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr =", "species to assimilate. 
obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']] #If you", "else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime)", "self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind)", "obs import tropomi_tools as tt import scipy.linalg as la import toolbox as tx", "scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim", "= len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i", "{np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon", "= 1-perturbation scale = perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d", "nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys():", "= glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] if self.testing: print(f\"The", "def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:]))", "= np.concatenate(ind_collector) if self.testing: print(f\"There are a total of 
{len(localizedstatevecinds)}/{len(self.statevec)} selected from total", "localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from", "\"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing:", "#Load error matrices from numpy files raise NotImplementedError else: #Assume list of strings", "~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for", "in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts =", "#Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"],", "inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if", "of the background standard deviation, per Miyazaki et al 2015 for i in", "[] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s", "has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for lat/lon inds 
{(latind,lonind)}.\")", "self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D =", "has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the restart for", "self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value", "spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec =", "{i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had", "Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval):", "had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%')", "firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers:", "for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers)))", "entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = []", "= f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list", "self.gt[i].saveEmissions() #Contains a 
dictionary referencing GC_Translators for every run directory. #In the special", "assimilation variables. #SPECIAL NOTE ON FILES: we will be assuming that geos-chem stopped", "([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\":", "will be overwritten in place (name not changed) so next run starts from", "if self.testing: print(f\"Making R for lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R = self.histens.makeR(latind,lonind)", "cur_offset cur_offset+=1 return None #If loop doesn't terminate we did not find the", "n in dirnames] ensemble_numbers = [] self.gt = {} self.nature = None self.observed_species", "self.testing: print(f'WAnalysis initialized in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def", "as np import xarray as xr from glob import glob import observation_operators as", "in err_config[0]: #Load error matrices from numpy files raise NotImplementedError else: #Assume list", "#4D ensemble interface with satellite operators. 
class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing", "= [] obsperts = [] obsdiffs = [] for spec in self.satSpecies: ind", "order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\",", "firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol)", "= tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON", "[np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames", "= 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount", "getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in", "{self.num} got statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total", "#E.g. 0.1 would range from 90% to 110% of initial values. 
Bias adds", "ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\":", "lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape", "It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k", "ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens)", "state vector is initialized this variable is None if self.testing: print(f\"GC_Translator number {self.num}", "next run starts from the assimilation state vector. #Emissions scaling factors are most", "if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats = [] for species in self.observed_species:", "if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise just", "if self.testing: print(f'WAnalysis adjusted in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}')", "species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num} got 3D conc for", "__init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod =", "has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator", "@ la.inv(self.R) if self.testing: print(f'C made in Assimilator. 
It has dimension {np.shape(self.C)} and", "compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:]", "conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num} set 3D conc for species", "value here. localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)}", "self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D =", "< inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per", "getting localized statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds", "= self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i in", "called for keys {observation_key} -> {species} in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D", "cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset", "for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history", "factor netCDFs. 
After initialization it contains the necessary data #and can output it", "makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis", "else: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if", "= glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list] le_list =", "for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict =", "firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for", "#calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean", "def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in", "in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and", "list of observation operator classes in order of the species to assimilate. 
obs_operator_classes", "= {} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if", "self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the", "latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename", "and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if", "i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds = np.concatenate(ind_collector) if", ") self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator", "len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths =", "err_config]) #Provide a list of observation operator classes in order of the species", "methods for getting data from GEOS-Chem restart files and #emissions scaling factor netCDFs.", "and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground:", 
"print(f'PtildeAnalysis made in Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self):", "'_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and connects them with the", "of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff", "= useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames", "i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble", "filenames if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff =", "return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to", "self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht = {}", "tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date", "else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components])", "if self.testing: 
self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been called for directory", "min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite]", "i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds =", "ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for", "enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't", "in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1} had background emissions scaling", "{(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean bigX", "overwrite if in the control vector; otherwise just increment. 
index_start = np.sum(self.statevec_lengths[0:counter]) index_end", "saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def", "if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {}", "the sole valid index in the column.\") print(f\"Matched value in the overall flattened", "= self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def", "spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage =", "for s in data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide", "{} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler.", "#However, only do so for species in the control vectors of emissions and", "inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of observations return", "column; values are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column =", "col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = 
self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers:", "~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] =", "self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens:", "= self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return", "self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] =", "= np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D =", "self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2)", "appended to netCDF. class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum", "numpy files raise NotImplementedError else: #Assume list of strings errs = np.array([float(e) for", "self.testing: print(f'WbarAnalysis made in Assimilator. 
It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def", "class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing)", "print(f'makeObsOps called in Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance =", "and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble =", "self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be a vector", "errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D", "la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value", "e in err_config]) #Provide a list of observation operator classes in order of", "= f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs]", "def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_')", "self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for", "def makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def", "in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value", "= self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D", "self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must", "list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames if substr", "GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing:", "return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: 
print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds", "latval = origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])])", "self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)}", "a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for", "print(f\"GC_Translator is getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.\") levcount =", "else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval =", "sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector", "in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble", "in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval)", "for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only", "in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def 
combineHist(self,species,useLevelEdge=False):", "npy_column_files] npy_columns = [np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs =", "def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col =", "initial values. Bias adds that percent on top of the perturbed fields (0.1", "boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec in", "added new scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species]", "specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return", "inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for lat/lon", "has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in Assimilator", "self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval)", "self.testing: print(f\"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes", "to 110% of initial values. Bias adds that percent on top of the", "called in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff =", "in Assimilator. 
It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k =", "member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference", "shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers:", "split_name = name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:]", "data #and can output it in useful ways to other functions in the", "contains the necessary data #and can output it in useful ways to other", "i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std =", "= len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if", "= np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators;", "= [d.split('/')[-2] for d in subdirs] if self.testing: print(f\"The following ensemble directories were", "in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\":", "statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)}", "#Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. 
counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys():", "print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff", "the perturbed fields (0.1 raises everything 10%). #Repeats this procedure for every species", "for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num} has", "just a filler. def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats", "= glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been", "#Randomly subset down to appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans", "species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no", "for species {species} which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio", "overall flattened and subsetted square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC'])", "subsetted square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS'])", "[] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) 
obsperts.append(obspert)", "{len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config", "index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order", "name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols", "levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened", "lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset", "has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs", "self.buildStateVector() if not (latind is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn", "entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within", "a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount", "[float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = 
spc_config[\"AveragePriorAndPosterior\"]", "directories were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers =", "{np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing:", "__init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\"", "= self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be a", "HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def", "{surr_loninds}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount", "total statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not", "if self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def makeC(self):", "for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1} had background", "self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting build of statevector!\") species_config = tx.getSpeciesConfig(self.testing)", "print(f'Xpert_background for lat/lon inds 
{(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making", "species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise", "self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}')", "= np.mean(statevecs,axis = 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in", "{name}\") if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths = None #Until state", "gccol = gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for", "number {self.num} has loaded scaling factors for {name}\") if computeStateVec: self.buildStateVector() else: self.statevec", "in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage", "def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R", "npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in npy_column_files] npy_columns = [np.load(file)", "self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing", "= self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def 
makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies:", "latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat}", "Ensemble number list: {self.ensemble_numbers}\") if self.nature is None: self.full4D = True #Implement me", "inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting", "total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the restart for purposes of testing. Perturbation", "return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of", "= np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight", "counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control", "timestep ago). New values will be appended to netCDF. class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False):", "else: self.statevec = None self.statevec_lengths = None #Until state vector is initialized this", "scaling factor files, append 1s because this is a nature directory if len(self.emis_sf_filenames)==0:", "species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species ==", "with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. 
counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions", "#Add assim time hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time =", "{100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}%", "self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and", "dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a flattened 2D", "self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0:", "print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f'", "'_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num} has loaded scaling factors", "are of dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d)", "lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background,", "ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return", "np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = 
dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube,", "np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate we did not find the", "value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in", "= f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames", "have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.\") levcount", "if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers)", "= self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0)", "specconc_list] if self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and", "makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec)", "square, {dummy2dwhere_flat} is sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount", "#new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp tstr =", "in python 
self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are", "ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = []", "def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D =", "= self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys", "range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std", "{(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def", "order of the species to assimilate. 
obs_operator_classes = [getattr(obs, s) for s in", "def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for", "h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class", "np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] =", "[] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans =", "Only for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating", "= tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset =", "if self.testing: print(f'C made in Assimilator. 
It has dimension {np.shape(self.C)} and value {self.C}')", "= np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if", "np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset =", "statevec_toreturn = self.statevec if self.testing: print(f\"GC Translator number {self.num} got statevector for inds", "range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of", "= self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn = self.statevec", "[] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in", "the column.\") print(f\"Matched value in the overall flattened and subsetted square is {dummy2dwhere_match}\")", "in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for every run directory.", "for keys {observation_key} -> {species} in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D =", "in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control vector index_start =", "return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector analysisScalefactor =", "0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return", "self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) 
self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies =", "tropomi_tools as tt import scipy.linalg as la import toolbox as tx from datetime", "= tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors ds", "for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval ==", "self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec]", "#Latitude of first ensemble member, who should always exist def getLon(self): return self.gt[1].getLon()", "= tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec:", "concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis concentration of", "= [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior =", "emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc", "for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator", "range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: 
print(f'analysisEnsemble made in Assimilator. It has dimension", "in the LETKF procedure. class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds =", "in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.gt", "np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at", "print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return", "adds that percent on top of the perturbed fields (0.1 raises everything 10%).", "if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if", "i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator. It", "of observation operator classes in order of the species to assimilate. obs_operator_classes =", "in the column.\") print(f\"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted column;", "gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in", "= self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and", "Rest are just for archival purposes. def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return", "not changed) so next run starts from the assimilation state vector. 
#Emissions scaling", "{dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = []", "Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has", "range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here.", "for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self,", "= np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']:", "= saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff", "self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the analysis vector and overwrite relevant", "obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs =", "print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def", "origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds)", "#A class that takes history files and connects them with the main state", "print(f\"There 
are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return localizedstatevecinds def", "and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset)", "shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in", "= dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened 3D dummy cube,", "= 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if", "concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0]))", "Perturbation is 1/2 of range of percent change selected from a uniform distribution.", "run starts from the assimilation state vector. 
#Emissions scaling factors are most recent", "= {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs):", "of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator", "def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for spec", "member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent", "~np.isnan(ratio): #Sometimes background standard deviation is approximately 0. if ratio < inflator: new_std", "maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor", "spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort()", "complete\") def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member, who should always", "for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None", "print(f\"Assimilator construction complete\") def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member, who", "self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_", 
"obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\") def getLat(self): return self.gt[1].getLat() #Latitude of", "print(f'WbarAnalysis made in Assimilator. It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self):", "self.testing: print(f\"LETKF called! Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF", "f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list =", "number {self.num} got 3D conc for species {species} which are of dimension {np.shape(da)}.\")", "in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds =", "the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE =", "special case where there is a nature run present (with number 0) #store", "a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec =", "latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def", "ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in", "backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def 
diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy'", "dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis)", "{np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon", "for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator.", "= np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in Assimilator. It has dimension", "return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine columns, update restarts, and", "xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile", "np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean)", "for each species to assimilate. 
#Class contains function to calculate relvant assimilation variables.", "if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at", "combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}') firstens =", "#Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0]", "left a restart at assimilation time in each run directory. #That restart will", "of testing. Perturbation is 1/2 of range of percent change selected from a", "columns. class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble =", "for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for", "ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. 
localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are", "= testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval =", "conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad", "self.testing: print(f\"The following ensemble directories were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n", "print(f\"GC_translator number {self.num} has loaded scaling factors for {name}\") if computeStateVec: self.buildStateVector() else:", "for n in dirnames] ensemble_numbers = [] self.gt = {} self.nature = None", "= self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn = self.statevec if self.testing: print(f\"GC", "species to assimilate. #Class contains function to calculate relvant assimilation variables. #SPECIAL NOTE", "self.nature is None: self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens =", "len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval)", "f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d", "print(f'WAnalysis adjusted in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self):", "= np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp =", "= '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num} has loaded scaling", "obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis", "object nature. #Also contains an observation operator (pass in the class you would", "self.statevec = None self.statevec_lengths = None #Until state vector is initialized this variable", "nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble", "print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)}", "in the control vector; otherwise just increment. index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)])", "colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff", "vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind)", "support for real observations yet! 
else: nature_h_functions = [getattr(obs, h) for h in", "= self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] =", "Assimilator. It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers)", "in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s because this is", "lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num} got", "#Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting build of", "weight; must be between 0 and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight)", "if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate", "and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec", "last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last", "ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing:", "= ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been", "{} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: 
print(f\"Begin creating GC Translators with state vectors.\")", "dirnames = [d.split('/')[-2] for d in subdirs] if self.testing: print(f\"The following ensemble directories", "self.R = self.histens.makeR(latind,lonind) else: errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R", "in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if", "everything 10%). #Repeats this procedure for every species in the state vector (excluding", "#Emissions scaling factors are most recent available (one assimilation timestep ago). New values", "between 0 and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def", "= glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list] if self.interval:", "if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator for lat/lon", "got 3D conc for species {species} which are of dimension {np.shape(da)}.\") return da", "self.testing: print(f\"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble =", "self.maxobs,replace=False) #Randomly subset down to appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind):", "= np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)}", "f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in npy_column_files] npy_columns =", "range(k): 
self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis()", "= [i for i in filenames if substr in i] saved_col = self.columns[search[0]]", "if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if", "for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if", "and connects them with the main state vector and observation matrices class HIST_Translator(object):", "inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff", "{self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in", "************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1} had", "zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else:", "testing. Perturbation is 1/2 of range of percent change selected from a uniform", "vector. #Emissions scaling factors are most recent available (one assimilation timestep ago). 
New", "= self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers)", "def LETKF(self): if self.testing: print(f\"LETKF called! Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds): if", "a uniform distribution. #E.g. 0.1 would range from 90% to 110% of initial", "in Assimilator. It has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb =", "scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage", "[] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for", "has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset =", "species in the state vector (excluding emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset", "spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key}", "{(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background", "lon inds are {surr_loninds}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon())", "the analysis vector and overwrite relevant terms in the xr restart dataset. #Also", "[int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if", "if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in", "= {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if", "for GC_Translators; used to combine columns, update restarts, and diff columns. class GT_Container(object):", "data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices from numpy files raise NotImplementedError", "def saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that", "self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary", "to calculate relvant assimilation variables. 
#SPECIAL NOTE ON FILES: we will be assuming", "if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies", "self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in", "tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1]", "return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds", "ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht", "#Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior:", "is just a filler. 
def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind):", "= np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1]", "them as a separate array at the new timestep in each of the", "'.npy' in err_config[0]: #Load error matrices from numpy files raise NotImplementedError else: #Assume", "has been called for directory {path_to_rundir} and restart {self.filename}; construction beginning\") self.emis_ds_list =", "0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in", "= np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down", "obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']] #If you are simulating nature", "(t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\")", "to netCDF. 
class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum", "{str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate", "X percent of the background standard deviation, per Miyazaki et al 2015 for", "represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0]", "lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts", "backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for", "FULL VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon())", "len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing:", "dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 2D", "zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp,", "#SPECIAL NOTE ON FILES: we will be assuming that geos-chem stopped and left", "self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds 
{(latval,lonval)} has shape", "if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i]", "{len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing:", "np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i", "value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis", "saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind]", "np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is approximately", "= timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp", "len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species", "for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if", "= corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been called for ens", "COLUMN AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in", "emis_shape = 
np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in", "self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis()", "self.testing: print(f'PtildeAnalysis made in Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def", "{len(dummywhere_match)} entries in the overall flattened and subsetted column; values are {dummywhere_match}\") dummy2d", "self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background", "= self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i in", "self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0:", "assimilation state vector. #Emissions scaling factors are most recent available (one assimilation timestep", "has dimension {np.shape(self.R)} and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R)", "in the state vector (excluding emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset =", "lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape", "whole vector statevec_toreturn = self.statevec if self.testing: print(f\"GC Translator number {self.num} got statevector", "doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] =", "and concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape =", "posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self):", "self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True) analysisSubset = self.applyAnalysisCorrections(analysisSubset,backgroundSubset)", "= self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble =", "{surr_latinds} and lon inds are {surr_loninds}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount", "dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if 
self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is", "2D dummy square, {dummy2dwhere_flat_column} is the sole valid index in the column.\") print(f\"Matched", "= np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total", "dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def", "False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction", "else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background", "at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff", "keys {observation_key} -> {species} in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species)", "factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i]", "= tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling", "appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts =", "= first3D for i in 
self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D", "directory. Only for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin", "ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member", "np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C'", "= {} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a", "conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D", "{len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is", "INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member", "len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. 
It", "called in Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i])", "np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done", "zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[]", "minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] =", "localized statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are", "xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]),", "called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval):", "ensemble_numbers = [] self.gt = {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for", "full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing:", "species_config['STATE_VECTOR_CONC']: 
statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s because this is a", "= np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now", "were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = []", "in the xr restart dataset. #Also construct new scaling factors and add them", "print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name", "provide the nature helper class. if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No support", "xr.load_dataset(file) if self.testing: print(f\"GC_translator number {self.num} has loaded scaling factors for {name}\") if", "specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list] if", "= [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\")", "(scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the analysis", "in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else:", "[] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000',", "= HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) 
ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY()", "self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory,", "\"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to ignore existing nature directory.", "a flattened 2D dummy square, {dummy2dwhere_flat} is sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing)", "obsperts = [] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval)", "self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:]", "the assimilation state vector. #Emissions scaling factors are most recent available (one assimilation", "shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D)", "getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self):", "datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy'", "(SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class. 
if data['SIMULATE_NATURE'] == \"false\": raise", "= tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b", "and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i", "#Reconstruct all the 3D concentrations from the analysis vector and overwrite relevant terms", "total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting column statevec", "self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s)", "vectors.\") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature =", "i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a", "JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling", "= float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing)", "necessary data #and can output it in useful ways to other functions in", "observations yet! else: nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation =", "return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp. 
Rest are just for", "{self.num} has loaded scaling factors for {name}\") if computeStateVec: self.buildStateVector() else: self.statevec =", "\"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added new scaling", "self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:]", "{np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if", "from glob import glob import observation_operators as obs import tropomi_tools as tt import", "(priorweight>1): raise ValueError('Invalid prior weight; must be between 0 and 1.') posteriorweight =", "self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of observations", "would range from 90% to 110% of initial values. Bias adds that percent", "and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge)", "return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting data from GEOS-Chem", "the nature helper class. if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No support for", "************************************') for i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1} had", "def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in", "directory. 
#In the special case where there is a nature run present (with", "if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d", "backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0]", "construction complete\") def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member, who should", "return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting column statevec indices FOR", "npy_col_names = [file.split('/')[-1] for file in npy_column_files] npy_columns = [np.load(file) for file in", "of statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten())", "value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator.", "ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble", "number {self.num} has been called for directory {path_to_rundir} and restart {self.filename}; construction beginning\")", "== spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate we did", "the control vectors of emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape", "calculate relvant assimilation variables. 
#SPECIAL NOTE ON FILES: we will be assuming that", "self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers)", "name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0])", "if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start", "of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"})", "species, conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num}", "np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans", "cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if", "for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs = []", "raise NotImplementedError #No support for real observations yet! 
else: nature_h_functions = [getattr(obs, h)", "testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True)", "self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes", "for file in npy_column_files] npy_columns = [np.load(file) for file in npy_column_files] self.columns =", "col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km'])", "= np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']:", "self.gt = {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in", "#Sometimes background standard deviation is approximately 0. 
if ratio < inflator: new_std =", "le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list] le_list = [le for le,t", "{(latind,lonind)}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount", "bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing:", "number list: {self.ensemble_numbers}\") if self.nature is None: self.full4D = True #Implement me self.inflation", "not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset", "np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing:", "difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species)", "every species in the state vector (excluding emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC']", "in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a", "len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator.", "enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called", "analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i", "{self.num} set 3D conc for species {species} which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}']", "= np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if", "index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens", "\"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1])", "only one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species): da = 
np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze()", "0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)};", "number {self.num} is starting build of statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components = []", "self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in", "spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true", "average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise", "xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val", "return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile", "each species to assimilate. #Class contains function to calculate relvant assimilation variables. #SPECIAL", "getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\") def getLat(self): return", "so for species in the control vectors of emissions and concentrations. 
def reconstructArrays(self,analysis_vector):", "self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\")", "colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys())", "build of statevector!\") species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']:", "randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for spec in", "for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds", "#Randomize the restart for purposes of testing. 
Perturbation is 1/2 of range of", "species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control vector index_start = np.sum(self.statevec_lengths[0:counter])", "self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:]", "= self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator", "complete.\") #Since only one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species): da", "subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] if self.testing: print(f\"The following ensemble", "= perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d", "None: self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else:", "= self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in", "np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to", "i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a", "inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = 
np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] =", "0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount)", "3D concentrations from the analysis vector and overwrite relevant terms in the xr", "self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon", "inds are {surr_loninds}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount", "timestamp. Rest are just for archival purposes. def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar']", "factor def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time", "factors are all in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)])", "orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}'", "self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval)", "print(f\"GC_translator number {self.num} has been called for directory {path_to_rundir} and restart {self.filename}; construction", "i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. 
It has dimension", "xarray as xr from glob import glob import observation_operators as obs import tropomi_tools", "[datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for spc in specconc_list] if self.interval: specconc_list = [spc for spc,t", "self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must be between 0", "{(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @", "it contains the necessary data #and can output it in useful ways to", "in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else:", "self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for", "print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean =", "i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)", "value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset", "species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def", 
"len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat =", "= np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return", "#Class contains function to calculate relvant assimilation variables. #SPECIAL NOTE ON FILES: we", "inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called", "dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in", "dataset. #Also construct new scaling factors and add them as a separate array", "{np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds", "flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid index in the column.\")", "useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list]", "*= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the", "statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num} has built statevector; it", "analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f'", "in the class you would like to use) for each species to assimilate.", 
"dummy square, {dummy2dwhere_flat_column} is the sole valid index in the column.\") print(f\"Matched value", "f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers", "col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime]", "def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in Assimilator.", "print(f\"Matched value in the overall flattened and subsetted square is {dummy2dwhere_match}\") species_config =", "= getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\") def getLat(self):", "vector is initialized this variable is None if self.testing: print(f\"GC_Translator number {self.num} construction", "from total statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if", "spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler. 
def makeRforSpecies(self,species,latind,lonind):", "else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created.", "total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing:", "relative change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:]", "bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and", "HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps()", "= self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True)", "= None self.statevec_lengths = None #Until state vector is initialized this variable is", "npy_columns = [np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\")", "from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' 
#Create dataset with this timestep's", "self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value", "combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D = []", "k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else:", "list: {self.ensemble_numbers}\") if self.nature is None: self.full4D = True #Implement me self.inflation =", "#That restart will be overwritten in place (name not changed) so next run", "self.testing: print(f\"GC_Translator number {self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\")", "self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and", "print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid index in", "{species} which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species", "spec: return cur_offset cur_offset+=1 return None #If loop doesn't terminate we did not", "statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in", "return None #If loop doesn't terminate we did not find the species def", "here. 
localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected", "#Since only one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species): da =", "has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for", "per Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if", "= f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file", "data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val =", "purposes of testing. Perturbation is 1/2 of range of percent change selected from", "= f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors ds = xr.Dataset( {\"Scalar\":", "np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num} got 3D conc for species {species} which", "= tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds", "available (one assimilation timestep ago). New values will be appended to netCDF. 
class", "for e in err_config]) #Provide a list of observation operator classes in order", "saved_col = saved_col[colind,:] #Now will just be a vector of length NumEnsemble backgroundEnsemble", "dirnames] ensemble_numbers = [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS'])", "statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn =", "') print(f'{species} in ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature')", "for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background,", "as la import toolbox as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data", "self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D", "valid index in the column.\") print(f\"Matched value in the overall flattened and subsetted", "assimilation time in each run directory. #That restart will be overwritten in place", "dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset =", "search = [i for i in filenames if substr in i] saved_col =", "self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens)", "recent available (one assimilation timestep ago). 
New values will be appended to netCDF.", "self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval", "print(f\"GC_Translator number {self.num} set 3D conc for species {species} which are of dimension", "= np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened 3D dummy", "= (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. It has", "self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator", "None #Until state vector is initialized this variable is None if self.testing: print(f\"GC_Translator", "getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work", "timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else:", "in dirnames] ensemble_numbers = [] self.gt = {} self.nature = None self.observed_species =", "import tropomi_tools as tt import scipy.linalg as la import toolbox as tx from", "self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator. 
It has dimension {np.shape(self.analysisEnsemble)}", "species {species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D =", "} ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\")", "the species to assimilate. obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']] #If", "da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num} got 3D conc for species", "of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def", "species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0", "Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds", "file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2]", "np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\",", "i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing:", "compares to the original files def 
constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind)", "analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1 for", "f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames if substr in i] saved_col", "#Now will just be a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff", "= self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search", "self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance", "is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector =", "= np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with", "in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for", "= np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with", "vector; otherwise just increment. 
index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end]", "if self.testing: print(f\"GC_translator number {self.num} has loaded scaling factors for {name}\") if computeStateVec:", "def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp", "i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds", "{(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in", "if self.testing: print(f'WAnalysis initialized in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}')", "column.\") print(f\"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted column; values are", "print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec =", "[state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}')", "spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {}", "len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i", "which are of dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self, species, conc3d): baseshape =", "self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX 
{(latind,lonind)} ************************************') for", "of first ensemble member, who should always exist def getLon(self): return self.gt[1].getLon() def", "if self.nature is None: self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens", "return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds", "= [] obsdiffs = [] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if", "and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis", "-> {species} in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval)", "observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing)", "self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI in", "self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind))", "in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components)", "FILES: we will be assuming that geos-chem stopped and left a restart at", "factor files, append 1s because this is a nature directory if len(self.emis_sf_filenames)==0: lenones", "tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec = 
np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in", "\"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"],", "s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set", "got statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec", "statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting column statevec indices FOR FULL", "def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec] =", "len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount", "starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime =", "in place (name not changed) so next run starts from the assimilation state", "{path_to_rundir} and restart {self.filename}; construction beginning\") self.emis_ds_list = {} for file in self.emis_sf_filenames:", "{self.corenum}; construction beginning\") print(f\"This core will be handling lat and lon values {[(latval,lonval)", "latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)}", "print(f'{species} in ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species}", "state vector. 
#Emissions scaling factors are most recent available (one assimilation timestep ago).", "f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing:", "buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting build of statevector!\") species_config", "for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing)", "s) for s in data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh),", "= np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1]", "#Return the whole vector statevec_toreturn = self.statevec if self.testing: print(f\"GC Translator number {self.num}", "getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds", "in the overall flattened and subsetted square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount", "self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind):", "= path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been called for directory {path_to_rundir} and restart", "dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a flattened 2D dummy", "def 
diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i", "satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff", "zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis", "member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member", "in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] endtime", "dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.\")", "scaling factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})},", "= testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing:", "this timestep's scaling factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling", "value in the overall flattened and subsetted square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing)", "(t.hour % self.interval == 0)] else: specconc_list = [spc for spc,t in zip(specconc_list,ts)", "[datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list] le_list = [le for le,t in zip(le_list,le_ts)", "dirnames] ensemble_numbers = [] self.gt = {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES']", "= 
HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {}", "len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed =", "and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if", "a filler. def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats =", "[analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector", "for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has", "== spec: return cur_offset cur_offset+=1 return None #If loop doesn't terminate we did", "self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon'])", "f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE']", "dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: 
print(f'ensObsMeanPertDiff called in Assimilator for", "self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']):", "change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite", "path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes", "else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list):", "be a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec", "ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies", "if self.testing: print(f\"GC_Translator number {self.num} got 3D conc for species {species} which are", "analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%')", "Lat inds are {surr_latinds} and lon inds are {surr_loninds}.\") levcount = len(self.getLev()) latcount", "will be assuming that geos-chem stopped and left a restart at assimilation time", "self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in Assimilator. 
It has", "statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\")", "self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating GC Translators with", "return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF called! Beginning loop.\")", "just for archival purposes. def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def", "{\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}),", "to the original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble", "= self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will", "= float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to ignore existing nature directory. Only for", "and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in", "in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise just increment. 
index_start", "shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol", "substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames if substr in", "flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat", "reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind", "did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing)", "statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from", "for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC()", "zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens]", "return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not (latind is", "= {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def", "if in the control vector; otherwise just increment. 
index_start = np.sum(self.statevec_lengths[0:counter]) index_end =", "le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list] le_list = [le for", "ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else:", "saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for", "self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI", "len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0,", "the background standard deviation, per Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)):", "(t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for", "to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE", "statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset =", "be appended to netCDF. 
class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum =", "lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats = [] for", "scipy.linalg as la import toolbox as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False):", "ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a", "who should always exist def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def", "deviation is approximately 0. if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] =", "in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator. It has", "= len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset))", "= self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in", "@ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing:", "totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within", "nature directory. 
Only for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing:", "return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val", "Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator):", "one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if", "np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] =", "= self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background", "self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else:", "self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2]", "glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list] le_list = [le", "lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k 
= len(self.ensemble_numbers) for", "= conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num} set 3D conc for species {species}", "species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species ==", "\"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\",", "called for ens {self.ensnum} core {self.corenum}; construction beginning\") print(f\"This core will be handling", "path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\")", "fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta", "\"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since", "if self.testing: print(f'WbarAnalysis made in Assimilator. 
It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}')", "= analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite.", "the X percent of the background standard deviation, per Miyazaki et al 2015", "0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat)", "__init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing)", "print(f\"Assimilator has been called for ens {self.ensnum} core {self.corenum}; construction beginning\") print(f\"This core", "0) #store the nature run in GC_Translator object nature. #Also contains an observation", "handling lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing)", "and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp,", "is the sole valid index in the column.\") print(f\"Matched value in the overall", "}, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added", "return da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if", "the restart for purposes of testing. Perturbation is 1/2 of range of percent", "present (with number 0) #store the nature run in GC_Translator object nature. 
#Also", "testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\")", "in setup_ensemble.sh), provide the nature helper class. if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError", "ensemble_numbers = [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed", "them with the main state vector and observation matrices class HIST_Translator(object): def __init__(self,", "self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator", "in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds = np.concatenate(ind_collector) if self.testing:", "and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C", "factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp}", "= len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. 
It has", "find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset =", "np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 2D dummy square,", "np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0. if ratio", "on top of the perturbed fields (0.1 raises everything 10%). #Repeats this procedure", "ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change", "print(f' ') print(f'{species} in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature')", "self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory,", "in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods", "ago). New values will be appended to netCDF. class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing", "lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble", "data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No support for real observations yet! 
else: nature_h_functions", "self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d =", "AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble", "for {name}\") if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths = None #Until", "assim time hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr)", "== 0)] else: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and", "len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat)", "1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing:", "zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory,", "origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds =", "[le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return", "self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance", "with the scalings analysisSubset[(-1*self.emcount)::,:] = 
analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight", "in subdirs] if self.testing: print(f\"The following ensemble directories were detected: {dirnames}\") subdir_numbers =", "([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling", "created. Ensemble number list: {self.ensemble_numbers}\") if self.nature is None: self.full4D = True #Implement", "that percent on top of the perturbed fields (0.1 raises everything 10%). #Repeats", "= np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3])", "{(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called", "priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must be", "print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind)", "relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i", "def getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def", "starting build of statevector!\") species_config = tx.getSpeciesConfig(self.testing) 
statevec_components = [] for spec_conc in", "= self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht =", "last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time", "analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF", "if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0]", "def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat'])", "be overwritten in place (name not changed) so next run starts from the", "= xr.merge(dataset) return dataset #4D ensemble interface with satellite operators. 
class HIST_Ens(object): def", "= np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of observations return inds", "scalings to the X percent of the background standard deviation, per Miyazaki et", "i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called", "ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions", "for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation =", "dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers", "self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid index", "self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon =", "print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble =", "standard deviation is approximately 0. 
if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:]", "getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k", "nature run in GC_Translator object nature. #Also contains an observation operator (pass in", "= self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D", "and overwrite relevant terms in the xr restart dataset. #Also construct new scaling", "NOTE ON FILES: we will be assuming that geos-chem stopped and left a", "cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis =", "= len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed", "if self.testing: print(f\"Assimilator construction complete\") def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble", "1/2 of range of percent change selected from a uniform distribution. #E.g. 0.1", "counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring})", "glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in npy_column_files] npy_columns = [np.load(file) for file", "made in Assimilator. 
It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds", "in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False)", "for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating GC", "constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec]", "for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has", "percent of the background standard deviation, per Miyazaki et al 2015 for i", "for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined", "member, who should always exist def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev()", "nature helper class. if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No support for real", "(k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. 
It has dimension", "firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers)", "adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted", "specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val])", "ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol", "conc for species {species} which are of dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self,", "if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)] else: specconc_list =", "{dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match =", "lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0]", "new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp", "AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble", "= (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs):", "species): return 
np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors", "else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True) analysisSubset =", "for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions()", "statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the restart for purposes of testing. Perturbation is", "cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds =", "class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4'", "if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No support for real observations yet! 
else:", "computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths = None #Until state vector is", "zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA =", "{(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff =", "concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ')", "xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val)", "cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in", "emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim", "dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for", "netCDFs. 
After initialization it contains the necessary data #and can output it in", "column and compares to the original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec", "= np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened 3D dummy", "= (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite", "= tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting localized statevec indices surrounding {(latind,lonind)} (lat/lon", "xr restart dataset. #Also construct new scaling factors and add them as a", "file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files", "zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts =", "timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the original files", "on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time')", "dimension {np.shape(self.R)} and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if", "for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are", 
"\"NLayers\":\"1\", \"History\":f\"The LETKF utility added new scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\",", "#Overwrite. counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the", "use) for each species to assimilate. #Class contains function to calculate relvant assimilation", "loop.\") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop for lat/lon inds", "{100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature')", "in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector;", "takes history files and connects them with the main state vector and observation", "for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec", "= self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************')", "return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return", "= 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset))", "endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', 
\"%Y%m%d_%H%M\")", "return statevec_toreturn #Randomize the restart for purposes of testing. Perturbation is 1/2 of", "if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing)", "Assimilator for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if", "shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D", "Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff", "inds {(latind,lonind)}.\") if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats = [] for species", "def getRestartTime(self): return np.array(self.restart_ds['time']) def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most", "makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R =", "= self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens", "of strings errs = np.array([float(e) for e in err_config]) #Provide a list of", "s in spc_config[\"MaximumScalingFactorAllowed\"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s", "are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] 
= ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol mol-1", "for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else:", "lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\"", "square, {len(dummy2dwhere_flat)} entries are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount =", "def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time =", "the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount", "iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. It", "= np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for", "function to calculate relvant assimilation variables. 
#SPECIAL NOTE ON FILES: we will be", "dummy cube, {len(dummywhere_flat)} entries are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten()", "self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for", "\"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\":", "of percent change selected from a uniform distribution. #E.g. 0.1 would range from", "for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans)", "if not (latind is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn =", "((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\",", "getting data from GEOS-Chem restart files and #emissions scaling factor netCDFs. After initialization", "self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for", "prior weight; must be between 0 and 1.') posteriorweight = 1-priorweight analysisSubset =", "initialized this variable is None if self.testing: print(f\"GC_Translator number {self.num} construction complete.\") #Since", "la.inv(self.R) if self.testing: print(f'C made in Assimilator. 
It has dimension {np.shape(self.C)} and value", "subdirs] if self.testing: print(f\"The following ensemble directories were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1])", "all in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset =", "[specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in", "factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"), np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={", "filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames", "for d in subdirs] if self.testing: print(f\"The following ensemble directories were detected: {dirnames}\")", "= la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and", "is getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev())", "#Lightweight container for GC_Translators; used to combine columns, update restarts, and diff columns.", "= [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour %", "analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset =", "in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end]", "recent timestamp. Rest are just for archival purposes. 
def getEmisSF(self, species): da =", "not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator", "tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return", "species in the control vectors of emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config =", "i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset):", "range from 90% to 110% of initial values. Bias adds that percent on", "3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.\") print(f\"Matched {len(dummywhere_match)} entries", "container for GC_Translators; used to combine columns, update restarts, and diff columns. 
class", "of species {species}\",\"units\":\"mol mol-1 dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon'])", "{orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) },", "directory {path_to_rundir} and restart {self.filename}; construction beginning\") self.emis_ds_list = {} for file in", "sole valid index in the column.\") print(f\"Matched value in the overall flattened and", "dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for", "\"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A", "(priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must be between 0 and 1.')", "ensemble directories were detected: {dirnames}\") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers", "inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation period: for i", "archival purposes. 
def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species):", "= None #Until state vector is initialized this variable is None if self.testing:", "= f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE =", "for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)}", "saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec", "self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds", "float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to ignore existing nature directory. 
Only for testing", "[spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list =", "i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite]", "= analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of the", "inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs = [] for", "New values will be appended to netCDF. class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing =", "{np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if", "tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been called for ens {self.ensnum} core {self.corenum}; construction", "origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for", "name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and connects them", "scaling factors for {name}\") if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths =", "this procedure for every species in the state vector (excluding emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0):", "self.testing: print(f\"GC Translator number {self.num} got statevector for inds {(latind,lonind)}; this vec has", "diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX", "= {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating GC Translators with state", "import toolbox as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing)", "testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating GC Translators", "= backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of the background standard deviation,", "(not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing)", "{100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()):", "al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std =", "= False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator", "[] obsdiffs = [] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if 
self.spc_config['AV_TO_GC_GRID']==\"True\":", "len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in", "a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid index in the", "of the species to assimilate. obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']]", "= [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append", "= [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation]", "in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:])", "ensemble member, who should always exist def getLon(self): return self.gt[1].getLon() def getLev(self): return", "ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs =", "{np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)}", "if self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = []", "perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *=", "diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species)", "= [getattr(obs, s) for s in data['OBS_OPERATORS']] #If you are simulating nature 
(SIMULATE_NATURE=true", "for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers =", "at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and", "relvant assimilation variables. #SPECIAL NOTE ON FILES: we will be assuming that geos-chem", "self.testing: print(f'C made in Assimilator. It has dimension {np.shape(self.C)} and value {self.C}') def", "makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] =", "k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False)", "np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. 
counter+=1 for spec_emis in", "= analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)):", "np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\")", "self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset", "core {self.corenum}; construction beginning\") print(f\"This core will be handling lat and lon values", "in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] =", "= analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1", "self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The", "{100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff =", "for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background)", "if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()):", 
"first3D for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def", "in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis()", "scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added new scaling factors", "to assimilate. #Class contains function to calculate relvant assimilation variables. #SPECIAL NOTE ON", "\"false\": raise NotImplementedError #No support for real observations yet! else: nature_h_functions = [getattr(obs,", "scale = perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset", "print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator for lat/lon inds {(latval,lonval)}')", "np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened 3D dummy cube,", "makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in Assimilator. 
It", "getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not (latind is None): #User supplied", "at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called", "def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers:", "self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)", "vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num} has built", "in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if", "contains function to calculate relvant assimilation variables. #SPECIAL NOTE ON FILES: we will", "operators. 
class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config =", "np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used", "self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\") def getLat(self): return self.gt[1].getLat()", "= diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT", "[i for i in filenames if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble", "dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries", "in order of the species to assimilate. obs_operator_classes = [getattr(obs, s) for s", "makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for spec", "dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k", "getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount", "(0.1 raises everything 10%). 
#Repeats this procedure for every species in the state", "#Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with", "and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for", "= np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] =", "#If no scaling factor files, append 1s because this is a nature directory", "START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE']", "substr in i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return", "to ignore existing nature directory. Only for testing self.gt = {} self.observed_species =", "coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\", \"units\":f\"hours since {orig_timestamp} 00:00:00\"}), \"lat\":", "0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount)", "= f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for", "analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1", "for archival purposes. 
def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self,", "np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind)", "= self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] =", "statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all", "True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False", "self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background,", "shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f\"Making R for lat/lon inds {(latind,lonind)}.\") if", "shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if i!=firstens:", "latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval)", "if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13],", "in ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in", "conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) 
#Reconstruct all the", "\"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added new scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\",", "vector and overwrite relevant terms in the xr restart dataset. #Also construct new", "if self.testing: print(f\"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return", "#Set to true to ignore existing nature directory. Only for testing self.gt =", "should always exist def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self):", "{np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k):", "files raise NotImplementedError else: #Assume list of strings errs = np.array([float(e) for e", "can output it in useful ways to other functions in the LETKF procedure.", "vector statevec_toreturn = self.statevec if self.testing: print(f\"GC Translator number {self.num} got statevector for", "dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at", "in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval):", "inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.')", "hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble interface with", "= testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = 
f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files =", "= firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing:", "self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has", "self.testing: print(f\"GC_Translator number {self.num} construction complete.\") #Since only one timestamp, returns in format", "inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation", "zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly", "values will be appended to netCDF. class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing", "self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens =", "valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a flattened", "if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon", "glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for", "tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = 
f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE", "= datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta", "= self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if", "{} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict", "= xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble interface with satellite", "= la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}')", "len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column =", "= GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the", "assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time", "#Inflate scalings to the X percent of the background standard deviation, per Miyazaki", "def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps", "return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts 
called for {observation_key} in Assimilator for", "self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for every run directory. #In the", "ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing:", "are all in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset", "shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in", "self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior if", "emissions scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time =", "= tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for", "return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return", "self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. 
It has dimension {np.shape(self.WAnalysis)}", "= self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble()", "ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for", "(with number 0) #store the nature run in GC_Translator object nature. #Also contains", "spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s because this", "#Only overwrite if in the control vector; otherwise just increment. index_start = np.sum(self.statevec_lengths[0:counter])", "relevant terms in the xr restart dataset. #Also construct new scaling factors and", "statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for", "control vectors of emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape =", "just increment. 
index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d =", "= int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in", "index in the column.\") print(f\"Matched value in the overall flattened and subsetted square", "self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind):", "CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species}", "background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0.", "timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing:", "delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species", "tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec] =", "dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f\"Within a flattened 3D", "for ens {self.ensnum} core {self.corenum}; construction beginning\") print(f\"This core will be handling lat", "{timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) 
self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4]", "= self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in self.satSpecies:", "print(f'{species} in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in", "is None if self.testing: print(f\"GC_Translator number {self.num} construction complete.\") #Since only one timestamp,", "True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created. Ensemble number list: {self.ensemble_numbers}\") if", "colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k =", "this is a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for", "f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors ds = xr.Dataset( {\"Scalar\": ((\"time\",\"lat\",\"lon\"),", "= origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if", "inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset", "xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num}", 
"self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for", "np.mean(statevecs,axis = 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]):", "lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble", "{} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod:", "(lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are", "species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset", "None self.statevec_lengths = None #Until state vector is initialized this variable is None", "= timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort()", "spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_", "class that takes history files and connects them with the main state vector", "inds are {surr_latinds} and lon inds are {surr_loninds}.\") levcount = len(self.getLev()) latcount =", "spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs", "def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) 
conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator", "useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val", "self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec]", "if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight;", "= self.statevec if self.testing: print(f\"GC Translator number {self.num} got statevector for inds {(latind,lonind)};", "species == spec: return cur_offset cur_offset+=1 return None #If loop doesn't terminate we", "= self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean", "d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = []", "emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis emissions", "#Provide a list of observation operator classes in order of the species to", "spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d)", "[conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval", "= self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) 
backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if", "if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D = [] firstens", "#Also contains an observation operator (pass in the class you would like to", "specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val =", "spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise just increment.", "entries are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector", "been called for ens {self.ensnum} core {self.corenum}; construction beginning\") print(f\"This core will be", "LETKF procedure. 
class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename", "self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\") def", "= np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num} has built statevector; it is of", "for directory {path_to_rundir} and restart {self.filename}; construction beginning\") self.emis_ds_list = {} for file", "2D dummy square, {dummy2dwhere_flat} is sole valid entry.\") species_config = tx.getSpeciesConfig(self.testing) conccount =", "= f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if", "np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind", "overwritten in place (name not changed) so next run starts from the assimilation", "run directory. 
#In the special case where there is a nature run present", "{(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec", "separate array at the new timestep in each of the scaling factor netCDFs.", "saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be a vector of", "self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f\"Begin creating GC Translators with state vectors.\") for", "Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing:", "range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors", "are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector =", "this variable is None if self.testing: print(f\"GC_Translator number {self.num} construction complete.\") #Since only", "in the control vectors of emissions and concentrations. 
def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing)", "if self.testing: print(f\"GC_Translator number {self.num} construction complete.\") #Since only one timestamp, returns in", "of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X", "(bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for", "offset = 1-perturbation scale = perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec)", "self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler. def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind)", "in the column.\") print(f\"Matched value in the overall flattened and subsetted square is", "Translators created. Ensemble number list: {self.ensemble_numbers}\") if self.nature is None: self.full4D = True", "new scaling factors and add them as a separate array at the new", "(ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory,", "0. 
if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum", "in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE =", "scalefactors off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate", "for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has", "self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)):", "np.array([float(e) for e in err_config]) #Provide a list of observation operator classes in", "operator classes in order of the species to assimilate. obs_operator_classes = [getattr(obs, s)", "totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within", "for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def", "self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been called for directory {path_to_rundir} and", "maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale", "new scaling factors on {str(date.today())}\", 
\"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] =", "ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)}", "np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats)", "= last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000'", "and subsetted square is {dummy2dwhere_match}\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount =", "#We work with the most recent timestamp. Rest are just for archival purposes.", "self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created. 
Ensemble number list: {self.ensemble_numbers}\") if self.nature is", "utility added new scaling factors on {str(date.today())}\", \"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } )", "self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4')", "for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias", "has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs]", "def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs = [] for spec", "i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)", "in data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature", "print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens =", "= 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to", "in dirnames] ensemble_numbers = [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS =", "= [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else:", "error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = 
getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f\"Assimilator construction complete\")", "conc for species {species} which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing", "= np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i]", "connects them with the main state vector and observation matrices class HIST_Translator(object): def", "i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1} had background emissions", "with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid", "at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount =", "inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean", "{species} which are of dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self, species, conc3d): baseshape", "which are of dimension {np.shape(conc4d)}.\") self.restart_ds[f'SpeciesRst_{species}'] = ([\"time\",\"lev\",\"lat\",\"lon\"],conc4d,{\"long_name\":f\"Dry mixing ratio of species {species}\",\"units\":\"mol", "data from GEOS-Chem restart files and #emissions scaling factor netCDFs. 
After initialization it", "ens {self.ensnum} core {self.corenum}; construction beginning\") print(f\"This core will be handling lat and", "la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and value", "Assimilator for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs =", "statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn = self.statevec if self.testing:", "for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control vector", "{species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ')", "dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a", "observation_operators as obs import tropomi_tools as tt import scipy.linalg as la import toolbox", "analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with", "= np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean", "s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\"", "= la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and", "{self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized", "= tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon", "has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for", "ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0. if ratio <", "{self.num} construction complete.\") #Since only one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self,", "in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def", "np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans", "= self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D", "= len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d =", "total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.\") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec", "if self.testing: print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return", "self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for", "of 
{100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in", "= self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff =", "self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2]", "self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime", "lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape", "[conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval", "#User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole", "for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains", "def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs", "dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def", "of {100*(backgroundEnsemble[:,i]/naturecol)}% 
nature') print(f'{species} in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}%", "') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col", "since {orig_timestamp} 00:00:00\"}), \"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"})", "n in dirnames] ensemble_numbers = [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS", "exist def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing:", "in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set", "ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if", "timestep in each of the scaling factor netCDFs. #However, only do so for", "restart files and #emissions scaling factor netCDFs. After initialization it contains the necessary", "other functions in the LETKF procedure. class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False):", "self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. 
It has dimension {np.shape(self.PtildeAnalysis)}", "saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes", "timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours", "#If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class.", "self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\",", "called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis", "\"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name =", "self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just", "of initial values. Bias adds that percent on top of the perturbed fields", "ensemble_numbers = [] endtime = datetime.strptime(timestamp, \"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime", "if self.testing: print(f'PtildeAnalysis made in Assimilator. 
It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}')", "if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:]", "no scaling factor files, append 1s because this is a nature directory if", "= self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must be between", "if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting build of statevector!\") species_config =", "standard deviation, per Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator =", "statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at", "inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.')", "obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs =", "dictionary referencing GC_Translators for every run directory. 
#In the special case where there", "EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ') print(f'{species}", "le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le in le_list] le_list", "= timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species =", "the original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble =", "#Apply maximum relative change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if", "obsperts = [] obsdiffs = [] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind)", "backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of the background standard deviation, per", "scaling factors and add them as a separate array at the new timestep", "to assimilate. 
obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']] #If you are", "{len(dummywhere_flat_column)} entries are valid in the column.\") print(f\"Matched {len(dummywhere_match)} entries in the overall", "= self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be a vector of length", "self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has", "= [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] =", "conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from", "= np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in", "in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for", "s in data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the", "self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just", "operator (pass in the class you would like to use) for each species", "{self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden =", "raise ValueError('Invalid prior weight; must be between 0 and 1.') posteriorweight = 1-priorweight", "= np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds =", "to use) for each species to assimilate. 
#Class contains function to calculate relvant", "if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background,", "from 90% to 110% of initial values. Bias adds that percent on top", "len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] =", "append 1s because this is a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS'])", "cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds", "le_list] le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return", "= [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans", "square, {dummy2dwhere_flat_column} is the sole valid index in the column.\") print(f\"Matched value in", "return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate we did not find", "as xr from glob import glob import observation_operators as obs import tropomi_tools as", "are valid in the column.\") print(f\"Matched {len(dummywhere_match)} entries in the overall flattened and", "state vector and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing", "print(f\"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted column; values are {dummywhere_match}\")", "change selected from a uniform 
distribution. #E.g. 0.1 would range from 90% to", "d in subdirs] if self.testing: print(f\"The following ensemble directories were detected: {dirnames}\") subdir_numbers", "def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor", "cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount):", "def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\":", "species {species} which are of dimension {np.shape(da)}.\") return da def setSpecies3Dconc(self, species, conc3d):", "print(f\"GC_Translator is getting localized statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)});", "not (latind is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds]", "for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds", "VECTOR at {(latind,lonind)}.\") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount", "if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble", "to other functions in the LETKF procedure. 
class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec =", "self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value", "Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else:", "print(f'C made in Assimilator. It has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self):", "= dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs]", "\"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\",", "range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite =", "__init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring =", "top of the perturbed fields (0.1 raises everything 10%). #Repeats this procedure for", "Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background))", "where there is a nature run present (with number 0) #store the nature", "f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs] if", "the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting", "scaling factor netCDFs. #However, only do so for species in the control vectors", "[file.split('/')[-1] for file in npy_column_files] npy_columns = [np.load(file) for file in npy_column_files] self.columns", "if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME = self.spc_config['ASSIM_TIME']", "print(f\"GC_Translator number {self.num} construction complete.\") #Since only one timestamp, returns in format lev,lat,lon", "in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" self.parfilename =", "will be appended to netCDF. 
class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum", "= [] obsperts = [] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert", "for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if", "= self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']==\"True\": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol =", "it in useful ways to other functions in the LETKF procedure. class GC_Translator(object):", "print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)}", "self.testing: print(f\"GC Translators created. Ensemble number list: {self.ensemble_numbers}\") if self.nature is None: self.full4D", "you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class. if", "fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS'])", "dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble interface with satellite operators. class", "files and #emissions scaling factor netCDFs. 
After initialization it contains the necessary data", "inds {(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs = [] for obskey,species", "= len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat", "np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in", "self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat} is sole valid entry.\") species_config", "for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has", "= saved_col[colind,:] #Now will just be a vector of length NumEnsemble backgroundEnsemble =", "number {self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind):", "the state vector (excluding emissions). def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation", "vector and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config", "for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std", "in Assimilator. 
It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k =", "k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made", "per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite =", "\"Start_Date\":f\"{orig_timestamp}\", \"Start_Time\":\"0\", \"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def", "= self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D", "restart {self.filename}; construction beginning\") self.emis_ds_list = {} for file in self.emis_sf_filenames: name =", "tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp =", "terminate we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing)", "(t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], \"%Y%m%d_%H%M\") for le", "self.testing: print(f\"GC_Translator number {self.num} set 3D conc for species {species} which are of", "= [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and 
boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing)", "subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.nature = None", "R for lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats =", "backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1]", "{i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of", "variable is None if self.testing: print(f\"GC_Translator number {self.num} construction complete.\") #Since only one", "will be handling lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config", "== \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to ignore existing nature", "flattened and subsetted column; values are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat =", "= tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for spec in statevec_species: conc3d", "print(f\"GC Translator number {self.num} got statevector for inds {(latind,lonind)}; this vec has length", "if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid", "for spc in specconc_list] if self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts)", "cur_offset+=levcount return None #If loop doesn't terminate we did not find the species", "dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat} is sole valid", "ON FILES: we will be assuming that geos-chem stopped and left a restart", 
"glob import glob import observation_operators as obs import tropomi_tools as tt import scipy.linalg", "len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1 return", "self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn = self.statevec if", "analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if", "dry\",\"averaging_method\":\"instantaneous\"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev'])", "self.testing: print(f\"Assimilator has been called for ens {self.ensnum} core {self.corenum}; construction beginning\") print(f\"This", "print(f\"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def", "= (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f\"LETKF called!", "backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'***********************************", "It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if", "(excluding emissions). 
def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2", "self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\" npy_column_files", "self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings", "nature. #Also contains an observation operator (pass in the class you would like", "After initialization it contains the necessary data #and can output it in useful", "self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col", "import numpy as np import xarray as xr from glob import glob import", "assimilation timestep ago). New values will be appended to netCDF. 
class Assimilator(object): def", "called in Assimilator for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind)", "{(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize", "ValueError('Invalid prior weight; must be between 0 and 1.') posteriorweight = 1-priorweight analysisSubset", "{np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds", "= len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made", "else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1)", "raises everything 10%). #Repeats this procedure for every species in the state vector", "naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i", "has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background", "for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in", "for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans =", "{species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4)", "made in Assimilator. 
It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k", "observation operator (pass in the class you would like to use) for each", "= np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full", "la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def", "makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator.", "else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector analysisScalefactor", "print(f' ') print(f'{species} in ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}%", "inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.')", "self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end", "glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f\"GC_translator number {self.num} has been called", "{i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference", "= dummy2d[latind,lonind] if self.testing: print(f\"Within a flattened 2D dummy square, {dummy2dwhere_flat} is sole", "= self.histens.makeR(latind,lonind) else: errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R =", "shapes 
{np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.\") levcount =", "in err_config]) #Provide a list of observation operator classes in order of the", "conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1]", "if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE']", "= f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in subdirs]", "It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for", "def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting localized statevec", "la import toolbox as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data =", "\"End_Date\":f\"{end_timestamp}\", \"End_Time\":\"0\" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if", "lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs = [] for", "in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds =", "obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) 
obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval))", "len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec", "interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], \"%Y%m%d_%H%M\")", "def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not (latind is None): #User", "array at the new timestep in each of the scaling factor netCDFs. #However,", "values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch", "for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There are", "NotImplementedError #No support for real observations yet! else: nature_h_functions = [getattr(obs, h) for", "of emissions and concentrations. 
def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys())", "dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.\") print(f\"Matched {len(dummywhere_match)} entries in", "= '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and connects them with", "self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename)", "of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev())", "entries are valid in the column.\") print(f\"Matched {len(dummywhere_match)} entries in the overall flattened", "analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation", "= shape4D.astype(int) conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if", "k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. It", "{np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species):", "is approximately 0. 
if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std)", "np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the", "= np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] =", "subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp,", "self.MinimumScalingFactorAllowed = [float(s) for s in spc_config[\"MinimumScalingFactorAllowed\"]] self.MaximumScalingFactorAllowed = [float(s) for s in", "print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f'", "files and connects them with the main state vector and observation matrices class", "hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true':", "def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting column statevec", "{(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in", "is 1/2 of range of percent change selected from a uniform distribution. 
#E.g.", "range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)}", "def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring", "= self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb)", "print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D = [] firstens = self.ensemble_numbers[0]", "def getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp. Rest are", "def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec", "cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f\"There", "def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}']", "end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the", "background standard deviation, per Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator", "if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": col,_,_,_,_,_ = 
self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ =", "valid in the column.\") print(f\"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted", "case where there is a nature run present (with number 0) #store the", "= dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened", "factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF utility added new scaling factors on", "self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance =", "tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor", "= spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True #Set to true to ignore", "increment. 
index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape)", "is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return", "([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\",", "= np.array([float(e) for e in err_config]) #Provide a list of observation operator classes", "vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the restart", "def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if", "shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers: if i!=firstens:", "for real observations yet! else: nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']]", "(name not changed) so next run starts from the assimilation state vector. 
#Emissions", "length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the restart for purposes", "= len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for", "np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average", "specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val =", "that takes history files and connects them with the main state vector and", "compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:]", "def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting build of statevector!\")", "class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir =", "to appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts", "self.gt[1].getLat() #Latitude of first ensemble member, who should always exist def getLon(self): return", "[errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting data from GEOS-Chem restart", "= len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) 
statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for", "for s in spc_config[\"InflateScalingsToXOfPreviousStandardDeviation\"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] ==", "be handling lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config =", "tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge =", "are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match", "obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts", "GC_Translators for every run directory. 
#In the special case where there is a", "for s in spc_config[\"MaximumScaleFactorRelativeChangePerAssimilationPeriod\"]] self.AveragePriorAndPosterior = spc_config[\"AveragePriorAndPosterior\"] == \"True\" self.PriorWeightinPriorPosteriorAverage = float(spc_config[\"PriorWeightinPriorPosteriorAverage\"]) self.forceOverrideNature=True", "starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for", "must be between 0 and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return", "represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers)))", "first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int)", "getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling", "self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f\"GC Translators created. Ensemble", "combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val", "cur_offset+=1 return None #If loop doesn't terminate we did not find the species", "the scaling factor netCDFs. 
#However, only do so for species in the control", "if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames", "counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite", "backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds", "called for directory {path_to_rundir} and restart {self.filename}; construction beginning\") self.emis_ds_list = {} for", "{100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self):", "zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)] else: specconc_list", "self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for", "else: #Return the whole vector statevec_toreturn = self.statevec if self.testing: print(f\"GC Translator number", "if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)}", "ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If", "are valid.\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f\"Within a", "if species == spec: return cur_offset cur_offset+=1 return None #If loop doesn't terminate", "\"%Y%m%d_%H%M\") if fullperiod: START_DATE = self.spc_config['START_DATE'] 
starttime = datetime.strptime(f'{START_DATE}_0000', \"%Y%m%d_%H%M\") else: ASSIM_TIME =", "path_to_ensemble = f\"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs\" subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in", "\"lat\": ([\"lat\"], self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO", "or (priorweight>1): raise ValueError('Invalid prior weight; must be between 0 and 1.') posteriorweight", "from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if", "print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)}", "period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite]", "analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of the background", "ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been called", "self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and connects them with the main", "self.testing = testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if", "columns, update restarts, and diff columns. 
class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing", "applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor =", "import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in", "def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch =", "and diff columns. class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing)", "LETKF(self): if self.testing: print(f\"LETKF called! Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds): if self.testing:", "{(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in", "= [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']==\"True\": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)", "[] obsperts = [] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert =", "= tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec]", "self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def 
makeC(self): self.C", "1s because this is a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones))", "and restart {self.filename}; construction beginning\") self.emis_ds_list = {} for file in self.emis_sf_filenames: name", "of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator", "control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d =", "= GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets", "self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. It has dimension {np.shape(self.WbarAnalysis)} and value", "{np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for", "self.WbarAnalysis = self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. 
It has dimension {np.shape(self.WbarAnalysis)}", "species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has", "for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory,", "self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\") dirnames = [d.split('/')[-2] for d in", "had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of", "np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind)", "if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths", "latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount))", "2D dummy square, {len(dummy2dwhere_flat)} entries are valid.\") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC'])", "directory. 
#That restart will be overwritten in place (name not changed) so next", "= [np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f\"{path_to_ensemble}/*/\") subdirs.remove(f\"{path_to_ensemble}/logs/\")", "in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def", "if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor:", "lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec)", "np.concatenate(statevec_components) if self.testing: print(f\"GC_Translator number {self.num} has built statevector; it is of dimension", "surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting column statevec indices surrounding", "setup_ensemble.sh), provide the nature helper class. 
if data['SIMULATE_NATURE'] == \"false\": raise NotImplementedError #No", "= 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if", "np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing:", "{(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return", "reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter =", "col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']==\"True\": return", "prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior", "prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D:", "of the scaling factor netCDFs. #However, only do so for species in the", "= self.PtildeAnalysis@self.C@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. 
It has dimension {np.shape(self.WbarAnalysis)} and", "= origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds", "np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d", "directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] =", "= len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset))", "are most recent available (one assimilation timestep ago). New values will be appended", "is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat", "tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc", "{self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.\") print(\"*****************************************************************\") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds,", "None: self.buildStateVector() if not (latind is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind)", "~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] 
analysisScalefactor[i,maxOverwrite]", "GEOS-Chem restart files and #emissions scaling factor netCDFs. After initialization it contains the", "existing nature directory. Only for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if", "assimilate. obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']] #If you are simulating", "= satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean", "def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f\"GC_Translator is getting column statevec indices FOR FULL VECTOR", "i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans)", "#Assume list of strings errs = np.array([float(e) for e in err_config]) #Provide a", "for every run directory. #In the special case where there is a nature", "np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid", "testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator", "110% of initial values. 
Bias adds that percent on top of the perturbed", "main state vector and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing =", "= dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f\"Within a flattened", "flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.\") print(f\"Matched {len(dummywhere_match)}", "ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the original files def constructColStatevec(self,latind,lonind):", "def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans", "= 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\": \"standard\",", "i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1} had background concentration", "construction beginning\") print(f\"This core will be handling lat and lon values {[(latval,lonval) for", "= ([\"time\"], np.array([0]), {\"long_name\": \"Time\", \"calendar\": \"gregorian\", \"axis\":\"T\", \"units\":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for", "surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f\"GC_Translator is getting localized statevec indices surrounding {(latind,lonind)}", "[] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D)", "the emissions scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time", "testing 
self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval", "{(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1}", "the overall flattened and subsetted column; values are {dummywhere_match}\") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount))", "specconc_list: hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble interface", "getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f\"GC_Translator number {self.num} got 3D conc", "self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) conc4D =", "= len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator.", "getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def", "each run directory. 
#That restart will be overwritten in place (name not changed)", "= None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s", "are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind):", "{species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ')", "at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called", "\"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\", \"Conventions\":\"COARDS\", \"Format\":\"NetCDF-4\", \"Model\":\"GENERIC\", \"NLayers\":\"1\", \"History\":f\"The LETKF", "this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.\") return statevec_toreturn #Randomize the", "= np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f\"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are", "getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs = [] for spec in", "#Until state vector is initialized this variable is None if self.testing: print(f\"GC_Translator number", "{self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing:", "self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts", "the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight =", "= glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in 
npy_column_files] npy_columns = [np.load(file) for", "vector (excluding emissions). def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale =", "statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1]", "time hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if", "Assimilator for species {species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species)", "full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for", "variables. #SPECIAL NOTE ON FILES: we will be assuming that geos-chem stopped and", "= np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0. if", "{(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon", "a restart at assimilation time in each run directory. 
#That restart will be", "= self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to", "for latval,lonval in zip(self.latinds,self.loninds)]}\") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs\" self.path_to_scratch = f\"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch\"", "full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine columns,", "there is a nature run present (with number 0) #store the nature run", "species, emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add", "np.expand_dims(emis2d,axis = 0),{\"long_name\": \"Scaling factor\", \"units\":\"1\"})}, coords={ \"time\": ([\"time\"], np.array([new_last_time]), {\"long_name\": \"time\", \"calendar\":", "GC_Translator object nature. #Also contains an observation operator (pass in the class you", "for getting data from GEOS-Chem restart files and #emissions scaling factor netCDFs. 
After", "return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing)", "self.getEmisLat(species),{\"long_name\": \"Latitude\", \"units\":\"degrees_north\"}), \"lon\": ([\"lon\"], self.getEmisLon(species),{\"long_name\": \"Longitude\", \"units\":\"degrees_east\"}) }, attrs={ \"Title\":\"CHEEREIO scaling factors\",", "in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts", "error matrices from numpy files raise NotImplementedError else: #Assume list of strings errs", "factors and add them as a separate array at the new timestep in", "endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory", "INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member", "obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis =", "#This is just a filler. 
def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def", "original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers)))", "self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for", "#store the nature run in GC_Translator object nature. #Also contains an observation operator", "self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class", "LETKF loop for lat/lon inds {(latval,lonval)}.\") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k", "in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble =", "are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.\") return statevecinds def getSpeciesConcIndicesInColumn(self,species):", "getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind]", "= tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f\"Assimilator has been called for ens {self.ensnum} core {self.corenum};", "self.testing: print(f\"Making R for lat/lon inds {(latind,lonind)}.\") if self.full4D: self.R = self.histens.makeR(latind,lonind) else:", "called! Beginning loop.\") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f\"Beginning LETKF loop for", "values. Bias adds that percent on top of the perturbed fields (0.1 raises", "[getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This", "localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not (latind is None):", "a dictionary referencing GC_Translators for every run directory. 
#In the special case where", "last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START']", "self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D", "int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers:", "like to use) for each species to assimilate. #Class contains function to calculate", "with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds[\"time\"] = ([\"time\"], np.array([0]),", "'time') #Concatenate def buildStateVector(self): if self.testing: print(\"*****************************************************************\") print(f\"GC_Translator number {self.num} is starting build", "are {surr_latinds} and lon inds are {surr_loninds}.\") levcount = len(self.getLev()) latcount = len(self.getLat())", "conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f\"GC_Translator number {self.num} set", "Translator number {self.num} got statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)}", "getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member, who should always exist def", "self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds 
{(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec", "shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D)", "self.statevec_lengths = None #Until state vector is initialized this variable is None if", "colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be", "{(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for", "run directory. #That restart will be overwritten in place (name not changed) so", "the end of the emissions scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist", "is None: self.buildStateVector() if not (latind is None): #User supplied ind statevecinds =", "np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset]", "import xarray as xr from glob import glob import observation_operators as obs import", "has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k =", "= self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers)", "class contains useful methods for getting data from GEOS-Chem restart files and #emissions", "self.testing: print(f\"GC_translator number {self.num} has loaded scaling factors for {name}\") if computeStateVec: 
self.buildStateVector()", "in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop", "for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler. def", "self.testing: print(f\"Begin creating GC Translators with state vectors.\") for ens, directory in zip(subdir_numbers,subdirs):", "the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to", "analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or", "hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge)", "self.emis_ds_list = {} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file)", "self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k):", "errs = np.array([float(e) for e in err_config]) #Provide a list of observation operator" ]
[ "be returned. assert cfg.has_section('figure') == True def test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure')", "assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert", "5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert", "if cfg_file is None: # Depending on the machine where this runs, a", "is set. If not, fall back to default. TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def", "are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5,", "'option_b', 5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001)", "ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert", "default values ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int',", "'there' assert len(dict1) == 2 assert dict2['number1'] == 2 assert dict2['number2'] == 2", "False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) #", "ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] == 'there' assert merged['number1'] == 2 assert merged['number2'] ==", "environment variable BRAINVIEW_TEST_DATA_DIR if it is set. 
If not, fall back to default.", "== False # also test without a config, this will load the default", "original dictionaries were not changed assert dict1['number1'] == 1 assert dict1['hi'] == 'there'", "test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width': 'int',", "or one from an existing config file may be returned. assert cfg.has_section('figure') ==", "import brainload as bl import brainview as bv import brainview.util as ut import", "# retreive some non-existant values are check that the supplied default values are", "config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b',", "cfg = ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg) assert len(option_dict)", "os import pytest import numpy as np import mayavi.mlab as mlab import brainload", "returned (and the supplied default values ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) ==", "'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53)", "cfg.has_section('figure') == True assert cfg.getint('figure', 'width') == 900 assert cfg.getint('figure', 'height') == 400", "'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c',", "cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve some values which exist", "== 1 assert dict1['hi'] == 'there' assert len(dict1) == 2 assert dict2['number1'] ==", "len(dict2) == 2 def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = 
ut.get_config_from_file(cfg_file) option_dict", "default. TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in", "import os import pytest import numpy as np import mayavi.mlab as mlab import", "may be returned. assert cfg.has_section('figure') == True def test_get_default_config(): cfg = ut.get_default_config() assert", "config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float',", "config=cfg) == False # also test without a config, this will load the", "assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False", "True assert cfg.getint('figure', 'width') == 900 assert cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file():", "assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file)", "# Ensure that the original dictionaries were not changed assert dict1['number1'] == 1", "= ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert cfg.getint('figure', 'width') == 900 assert cfg.getint('figure',", "'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info:", "dict2['number1'] == 2 assert dict2['number2'] == 2 assert len(dict2) == 2 def test_cfg_get_optional_values():", "test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve some values which", "as bl import brainview as bv import brainview.util as ut import mayavi try:", "= ut.merge_two_dictionaries(dict1, 
dict2) assert merged['hi'] == 'there' assert merged['number1'] == 2 assert merged['number2']", "assert merged['number1'] == 2 assert merged['number2'] == 2 # Ensure that the original", "'test_data') # Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it is set. If not,", "from an existing config file may be returned. assert cfg.has_section('figure') == True def", "TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def test_get_config(): cfg,", "pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type must", "as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type must be", "== 2 def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict =", "= {'hi': 'there', 'number1': 1} dict2 = {'number1': 2, 'number2': 2} merged =", "assert cfg.has_section('figure') == True assert cfg.getint('figure', 'width') == 900 assert cfg.getint('figure', 'height') ==", "config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c',", "it is set. If not, fall back to default. 
TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR)", "default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) ==", "supplied default values ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello' assert ut.cfg_getint('test',", "cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there':", "without a config, this will load the default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default')", "cfg_file def test_get_config(): cfg, cfg_file = bv.get_config() if cfg_file is None: # Depending", "changed assert dict1['number1'] == 1 assert dict1['hi'] == 'there' assert len(dict1) == 2", "0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False # also test without a", "test without a config, this will load the default config assert ut.cfg_get('no_such_section', 'option_a',", "== 'there' assert merged['number1'] == 2 assert merged['number2'] == 2 # Ensure that", "non-existant values are check that the supplied default values are returned assert ut.cfg_get('no_such_section',", "assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1': 1} dict2", "if it is set. If not, fall back to default. 
TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR',", "pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False # also test without", "which exist in the file and check that the values from the config", "ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert cfg.getint('figure', 'width') == 900 assert cfg.getint('figure', 'height')", "= True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the", "True def test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure') == True assert cfg.has_section('mesh') ==", "True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the environment", "'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive some non-existant values are check that the", "== False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5,", "merged['number2'] == 2 # Ensure that the original dictionaries were not changed assert", "cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def test_get_config(): cfg, cfg_file = bv.get_config()", "assert cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file)", "= ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type must be one of' in", "= os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the environment variable BRAINVIEW_TEST_DATA_DIR", "ut import mayavi try: import configparser # Python 3 except: import ConfigParser as", "on the machine where this runs, a default config or one from an", "configparser # Python 2 
mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR,", "returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg)", "the machine where this runs, a default config or one from an existing", "merged['hi'] == 'there' assert merged['number1'] == 2 assert merged['number2'] == 2 # Ensure", "exist in the file and check that the values from the config are", "with pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type", "returned. assert cfg.has_section('figure') == True def test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure') ==", "check that the values from the config are returned (and the supplied default", "len(dict1) == 2 assert dict2['number1'] == 2 assert dict2['number2'] == 2 assert len(dict2)", "'width') == 900 assert cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR,", "'bye', config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5 assert ut.cfg_getfloat('test',", "== 5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean',", "back to default. 
TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert", "Ensure that the original dictionaries were not changed assert dict1['number1'] == 1 assert", "a config, this will load the default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') ==", "mayavi try: import configparser # Python 3 except: import ConfigParser as configparser #", "ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg =", "dict2) assert merged['hi'] == 'there' assert merged['number1'] == 2 assert merged['number2'] == 2", "as exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file =", "the config are returned (and the supplied default values ignored) assert ut.cfg_get('test', 'option_string',", "some values which exist in the file and check that the values from", "Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it is set. 
If not, fall back", "assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False # also test without a config,", "def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert", "True assert cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg =", "the supplied default values ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello' assert", "assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg) ==", "3, config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == pytest.approx(0.53, 0.0001) assert", "= ut.get_default_config() assert cfg.has_section('figure') == True assert cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file", "cfg.getint('figure', 'width') == 900 assert cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file =", "If not, fall back to default. 
TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file", "= bv.get_config() if cfg_file is None: # Depending on the machine where this", "== True assert cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg", "= bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def test_get_config(): cfg, cfg_file = bv.get_config() if", "assert dict2['number1'] == 2 assert dict2['number2'] == 2 assert len(dict2) == 2 def", "'number1': 1} dict2 = {'number1': 2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2) assert", "cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')", "try: import configparser # Python 3 except: import ConfigParser as configparser # Python", "option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg) assert len(option_dict) == 1 assert", "== 2 assert merged['number2'] == 2 # Ensure that the original dictionaries were", "False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve some", "as mlab import brainload as bl import brainview as bv import brainview.util as", "cfg.has_section('figure') == True def test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure') == True assert", "dict2 = {'number1': 2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] ==", "check that the supplied default values are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg)", "ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg) assert len(option_dict) == 1", 
"os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve some values which exist in the", "config file may be returned. assert cfg.has_section('figure') == True def test_get_default_config(): cfg =", "2 assert len(dict2) == 2 def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg =", "bl import brainview as bv import brainview.util as ut import mayavi try: import", "be one of' in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1 =", "== 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53,", "in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1':", "ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg)", "= ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg) assert len(option_dict) ==", "'ERROR: return_type must be one of' in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def", "'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg) assert", "whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type must be one of'", "2} merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] == 'there' assert merged['number1'] == 2", "test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert", "2 # Ensure that the original dictionaries were not changed assert dict1['number1'] ==", 
"ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) ==", "= os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve some values which exist in", "existing config file may be returned. assert cfg.has_section('figure') == True def test_get_default_config(): cfg", "= os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'},", "in the file and check that the values from the config are returned", "that the original dictionaries were not changed assert dict1['number1'] == 1 assert dict1['hi']", "merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] == 'there' assert merged['number1'] == 2 assert", "except: import ConfigParser as configparser # Python 2 mlab.options.offscreen = True THIS_DIR =", "config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5", "= os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive some non-existant values are check", "import numpy as np import mayavi.mlab as mlab import brainload as bl import", "'.brainviewrc' in cfg_file def test_get_config(): cfg, cfg_file = bv.get_config() if cfg_file is None:", "this runs, a default config or one from an existing config file may", "file may be returned. 
assert cfg.has_section('figure') == True def test_get_default_config(): cfg = ut.get_default_config()", "'option_d', False, config=cfg) == False # also test without a config, this will", "cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert", "values ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int', 3,", "brainload as bl import brainview as bv import brainview.util as ut import mayavi", "True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') ==", "'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1': 1} dict2 =", "some non-existant values are check that the supplied default values are returned assert", "False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type')", "the default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5)", "# Python 2 mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir,", "must be one of' in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1", "config or one from an existing config file may be returned. 
assert cfg.has_section('figure')", "= ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg) assert len(option_dict) == 1 assert option_dict['width']", "ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) ==", "2 def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure',", "= {'number1': 2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] == 'there'", "variable BRAINVIEW_TEST_DATA_DIR if it is set. If not, fall back to default. TEST_DATA_DIR", "cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert cfg.getint('figure', 'width') == 900 assert", "numpy as np import mayavi.mlab as mlab import brainload as bl import brainview", "default values are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section',", "0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file", "== 900 assert cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there')", "'there' assert merged['number1'] == 2 assert merged['number2'] == 2 # Ensure that the", "missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there'", "os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value)", "assert len(dict2) == 2 def 
test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file)", "True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a',", "assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info:", "test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert", "'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section',", "assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg", "pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR,", "'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg)", "== 2 # Ensure that the original dictionaries were not changed assert dict1['number1']", "merged['number1'] == 2 assert merged['number2'] == 2 # Ensure that the original dictionaries", "ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False def", "are check that the supplied default values are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default',", "cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert 
cfg.getint('figure',", "TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it", "ut.get_config_from_file(cfg_file) # retreive some non-existant values are check that the supplied default values", "# also test without a config, this will load the default config assert", "{'number1': 2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] == 'there' assert", "as ut import mayavi try: import configparser # Python 3 except: import ConfigParser", "'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] == 'there' assert merged['number1'] ==", "pytest import numpy as np import mayavi.mlab as mlab import brainload as bl", "def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve some values", "exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR,", "in str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1': 1} dict2 = {'number1':", "'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def", "5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False,", "dict1['hi'] == 'there' assert len(dict1) == 2 assert dict2['number1'] == 2 assert dict2['number2']", "were not changed assert dict1['number1'] == 1 assert dict1['hi'] == 'there' assert len(dict1)", "config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test',", "assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == 
pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg)", "Depending on the machine where this runs, a default config or one from", "os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert cfg.getint('figure', 'width') ==", "2 assert merged['number2'] == 2 # Ensure that the original dictionaries were not", "'option_boolean', True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever =", "cfg = ut.get_config_from_file(cfg_file) # retreive some non-existant values are check that the supplied", "cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as", "ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5", "1 assert dict1['hi'] == 'there' assert len(dict1) == 2 assert dict2['number1'] == 2", "an existing config file may be returned. 
assert cfg.has_section('figure') == True def test_get_default_config():", "ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg) assert len(option_dict) == 1 assert option_dict['width'] ==", "that the values from the config are returned (and the supplied default values", "def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive some non-existant", "cfg = ut.get_default_config() assert cfg.has_section('figure') == True assert cfg.has_section('mesh') == True def test_get_config_from_file():", "mlab import brainload as bl import brainview as bv import brainview.util as ut", "def test_get_config(): cfg, cfg_file = bv.get_config() if cfg_file is None: # Depending on", "the values from the config are returned (and the supplied default values ignored)", "assert len(dict1) == 2 assert dict2['number1'] == 2 assert dict2['number2'] == 2 assert", "'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type must be one of' in str(exc_info.value) assert", "def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg = ut.get_config_from_file(missing_cfg_file)", "3 except: import ConfigParser as configparser # Python 2 mlab.options.offscreen = True THIS_DIR", "BRAINVIEW_TEST_DATA_DIR if it is set. If not, fall back to default. 
TEST_DATA_DIR =", "the original dictionaries were not changed assert dict1['number1'] == 1 assert dict1['hi'] ==", "def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def test_get_config(): cfg, cfg_file", "5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d',", "ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5", "'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53,", "assert merged['hi'] == 'there' assert merged['number1'] == 2 assert merged['number2'] == 2 #", "0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')", "2 assert dict2['number1'] == 2 assert dict2['number2'] == 2 assert len(dict2) == 2", "Python 2 mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data')", "bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def test_get_config(): cfg, cfg_file = bv.get_config() if cfg_file", "and check that the values from the config are returned (and the supplied", "0.22, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False def", "config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False # also", "bv import brainview.util as ut import mayavi try: import configparser # Python 3", "where this runs, a default config or one from an existing config file", "values from the config are returned (and the supplied default values ignored) assert", 
"str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1': 1}", "900 assert cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with", "test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure') == True assert cfg.has_section('mesh') == True def", "ut.get_default_config() assert cfg.has_section('figure') == True assert cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file =", "= ut.get_config_from_file(cfg_file) # retreive some non-existant values are check that the supplied default", "= ut.get_config_from_file(cfg_file) # retrieve some values which exist in the file and check", "assert merged['number2'] == 2 # Ensure that the original dictionaries were not changed", "retrieve some values which exist in the file and check that the values", "in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive", "from the config are returned (and the supplied default values ignored) assert ut.cfg_get('test',", "to default. 
TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc'", "values are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b',", "'option_a', 'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5 assert", "ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever", "default config or one from an existing config file may be returned. assert", "supplied default values are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default' assert", "dictionaries were not changed assert dict1['number1'] == 1 assert dict1['hi'] == 'there' assert", "def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True", "== pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False # also test", "== 2 assert len(dict2) == 2 def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg", "== 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) ==", "config are returned (and the supplied default values ignored) assert ut.cfg_get('test', 'option_string', 'bye',", "import mayavi.mlab as mlab import brainload as bl import brainview as bv import", "values are check that the supplied default values are returned assert ut.cfg_get('no_such_section', 'option_a',", "False, config=cfg) == False # also test without a config, this will load", "== 'there' assert len(dict1) == 2 assert 
dict2['number1'] == 2 assert dict2['number2'] ==", "assert dict1['hi'] == 'there' assert len(dict1) == 2 assert dict2['number1'] == 2 assert", "cfg_file = bv.get_config() if cfg_file is None: # Depending on the machine where", "mayavi.mlab as mlab import brainload as bl import brainview as bv import brainview.util", "5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) ==", "== False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve", "{'hi': 'there', 'number1': 1} dict2 = {'number1': 2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1,", "cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive some non-existant values are", "test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR:", "ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section',", "as configparser # Python 2 mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR =", "fall back to default. 
TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename()", "test_get_config(): cfg, cfg_file = bv.get_config() if cfg_file is None: # Depending on the", "assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) ==", "ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type must be one of' in str(exc_info.value)", "as np import mayavi.mlab as mlab import brainload as bl import brainview as", "set. If not, fall back to default. TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename():", "= os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert cfg.getint('figure', 'width')", "test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1': 1} dict2 = {'number1': 2, 'number2': 2}", "config, this will load the default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default'", "this will load the default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert", "def test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure') == True assert cfg.has_section('mesh') == True", "of' in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi': 'there',", "== True assert cfg.getint('figure', 'width') == 900 assert cfg.getint('figure', 'height') == 400 def", "test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive some non-existant values", "'option_int', 3, config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == 
pytest.approx(0.53, 0.0001)", "str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1': 1} dict2 = {'number1': 2,", "# retrieve some values which exist in the file and check that the", "cfg.has_section('figure') == True assert cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')", "'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) #", "exc_info: whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type') assert 'ERROR: return_type must be one", "ConfigParser as configparser # Python 2 mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR", "assert cfg.has_section('figure') == True def test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure') == True", "dict2['number2'] == 2 assert len(dict2) == 2 def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')", "the supplied default values are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default'", "in cfg_file def test_get_config(): cfg, cfg_file = bv.get_config() if cfg_file is None: #", "2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi'] == 'there' assert merged['number1']", "== pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file =", "'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retrieve some values which exist in the file", "os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the environment variable BRAINVIEW_TEST_DATA_DIR if", "assert '.brainviewrc' in cfg_file def test_get_config(): cfg, cfg_file = 
bv.get_config() if cfg_file is", "values which exist in the file and check that the values from the", "import brainview.util as ut import mayavi try: import configparser # Python 3 except:", "dict1 = {'hi': 'there', 'number1': 1} dict2 = {'number1': 2, 'number2': 2} merged", "cfg, cfg_file = bv.get_config() if cfg_file is None: # Depending on the machine", "5, 'invalid_return_type') assert 'ERROR: return_type must be one of' in str(exc_info.value) assert 'invalid_return_type'", "def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict = ut.cfg_get_optional_values('figure', {'width':", "os.pardir, 'test_data') # Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it is set. If", "== True def test_get_default_config(): cfg = ut.get_default_config() assert cfg.has_section('figure') == True assert cfg.has_section('mesh')", "ut.cfg_getboolean('no_such_section', 'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg =", "assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) ==", "os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it is set.", "'some_default', config=cfg) == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section',", "the file and check that the values from the config are returned (and", "np import mayavi.mlab as mlab import brainload as bl import brainview as bv", "mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect", "== 2 assert dict2['number2'] == 2 assert len(dict2) == 2 def test_cfg_get_optional_values(): cfg_file", "file and check that the values from the 
config are returned (and the", "not, fall back to default. TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file =", "'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types():", "os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive some non-existant values are check that", "== 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg", "import ConfigParser as configparser # Python 2 mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__))", "ut.get_config_from_file(cfg_file) # retrieve some values which exist in the file and check that", "import brainview as bv import brainview.util as ut import mayavi try: import configparser", "is None: # Depending on the machine where this runs, a default config", "one of' in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries(): dict1 = {'hi':", "ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False # also test without a config, this", "assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) ==", "2 assert dict2['number2'] == 2 assert len(dict2) == 2 def test_cfg_get_optional_values(): cfg_file =", "import configparser # Python 3 except: import ConfigParser as configparser # Python 2", "brainview.util as ut import mayavi try: import configparser # Python 3 except: import", "<reponame>dfsp-spirit/brainview<filename>tests/brainview/test_util.py import os import pytest import numpy as np import mayavi.mlab as mlab", "os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) option_dict = 
ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg)", "also test without a config, this will load the default config assert ut.cfg_get('no_such_section',", "assert dict1['number1'] == 1 assert dict1['hi'] == 'there' assert len(dict1) == 2 assert", "== True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure')", "a default config or one from an existing config file may be returned.", "== 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d',", "ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53,", "{'width': 'int', 'not_there': 'int'}, config=cfg) assert len(option_dict) == 1 assert option_dict['width'] == 900", "pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError)", "assert cfg.getint('figure', 'width') == 900 assert cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file", "runs, a default config or one from an existing config file may be", "'option_float', 0.22, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False", "# Python 3 except: import ConfigParser as configparser # Python 2 mlab.options.offscreen =", "'invalid_return_type') assert 'ERROR: return_type must be one of' in str(exc_info.value) assert 'invalid_return_type' in", "assert cfg.has_section('figure') == True assert cfg.has_section('mesh') == True def test_get_config_from_file(): cfg_file = os.path.join(TEST_DATA_DIR,", "== 2 assert dict2['number1'] == 2 assert dict2['number2'] == 2 assert 
len(dict2) ==", "= ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg", "as bv import brainview.util as ut import mayavi try: import configparser # Python", "= os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it is", "Python 3 except: import ConfigParser as configparser # Python 2 mlab.options.offscreen = True", "0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False #", "import pytest import numpy as np import mayavi.mlab as mlab import brainload as", "== 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False)", "== pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with", "test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def test_get_config(): cfg, cfg_file =", "will load the default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section',", "bv.get_config() if cfg_file is None: # Depending on the machine where this runs,", "= os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def", "brainview as bv import brainview.util as ut import mayavi try: import configparser #", "cfg = ut.get_config_from_file(cfg_file) # retrieve some values which exist in the file and", "assert cfg.getint('figure', 'height') == 400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 
'brainviewrc_not_there') with pytest.raises(ValueError)", "(and the supplied default values ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello'", "one from an existing config file may be returned. assert cfg.has_section('figure') == True", "None: # Depending on the machine where this runs, a default config or", "cfg_file is None: # Depending on the machine where this runs, a default", "'option_string', 'bye', config=cfg) == 'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5 assert", "configparser # Python 3 except: import ConfigParser as configparser # Python 2 mlab.options.offscreen", "0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type(): with pytest.raises(ValueError) as", "# Depending on the machine where this runs, a default config or one", "# Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it is set. If not, fall", "False # also test without a config, this will load the default config", "ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == pytest.approx(0.53,", "'option_d', False) == False def test_cfg_get_cfg_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file)", "assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg)", "load the default config assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default' assert ut.cfg_getint('no_such_section', 'option_b',", "'there', 'number1': 1} dict2 = {'number1': 2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2)", "THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') # Respect the environment variable", "assert 
ut.cfg_getint('no_such_section', 'option_b', 5) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001)", "TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in cfg_file", "1} dict2 = {'number1': 2, 'number2': 2} merged = ut.merge_two_dictionaries(dict1, dict2) assert merged['hi']", "400 def test_get_config_from_file_raises_on_missing_file(): missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg =", "pytest.raises(ValueError) as exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file", "'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False", "that the supplied default values are returned assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) ==", "config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('no_such_section',", "2 mlab.options.offscreen = True THIS_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data') #", "the environment variable BRAINVIEW_TEST_DATA_DIR if it is set. 
If not, fall back to", "machine where this runs, a default config or one from an existing config", "= os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there') with pytest.raises(ValueError) as exc_info: cfg = ut.get_config_from_file(missing_cfg_file) assert 'not_there' in", "dict1['number1'] == 1 assert dict1['hi'] == 'there' assert len(dict1) == 2 assert dict2['number1']", "'some_default' assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5 assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg)", "def test_merge_two_dictionaries(): dict1 = {'hi': 'there', 'number1': 1} dict2 = {'number1': 2, 'number2':", "not changed assert dict1['number1'] == 1 assert dict1['hi'] == 'there' assert len(dict1) ==", "assert dict2['number2'] == 2 assert len(dict2) == 2 def test_cfg_get_optional_values(): cfg_file = os.path.join(TEST_DATA_DIR,", "return_type must be one of' in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value) def test_merge_two_dictionaries():", "config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False def test_cfg_get_any_raises_on_invalid_return_type():", "'brainviewrc') cfg = ut.get_config_from_file(cfg_file) assert cfg.has_section('figure') == True assert cfg.getint('figure', 'width') == 900", "== 'hello' assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5 assert ut.cfg_getfloat('test', 'option_float', 0.22,", "5 assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == pytest.approx(0.53, 0.0001) assert ut.cfg_getboolean('test', 'option_boolean', True,", "are returned (and the supplied default values ignored) assert ut.cfg_get('test', 'option_string', 'bye', config=cfg)", "str(exc_info.value) def test_cfg_get_default_value_works_for_all_types(): cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc') cfg = ut.get_config_from_file(cfg_file) # retreive some", "with pytest.raises(ValueError) as exc_info: cfg = 
ut.get_config_from_file(missing_cfg_file) assert 'not_there' in str(exc_info.value) def test_cfg_get_default_value_works_for_all_types():", "import mayavi try: import configparser # Python 3 except: import ConfigParser as configparser", "os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR) def test_get_default_config_filename(): cfg_file = bv.get_default_config_filename() assert '.brainviewrc' in cfg_file def test_get_config():", "assert 'ERROR: return_type must be one of' in str(exc_info.value) assert 'invalid_return_type' in str(exc_info.value)", "retreive some non-existant values are check that the supplied default values are returned" ]
[ "account_move_line - ' + company.name, 'code': 'FRSECUR', 'implementation': 'no_gap', 'prefix': '', 'suffix': '',", "import fields, models, api class ResCompany(models.Model): _inherit = 'res.company' l10n_fr_secure_sequence_id = fields.Many2one('ir.sequence', 'Sequence", "find the previous move of a journal entry. \"\"\" for company in self:", "from openerp import fields, models, api class ResCompany(models.Model): _inherit = 'res.company' l10n_fr_secure_sequence_id =", "l10n_fr_secure_sequence_id = fields.Many2one('ir.sequence', 'Sequence to use to ensure the securisation of data', readonly=True)", "models, api class ResCompany(models.Model): _inherit = 'res.company' l10n_fr_secure_sequence_id = fields.Many2one('ir.sequence', 'Sequence to use", "vals.get('country_id') == self.env.ref('base.fr').id: self.filtered(lambda c: not c.l10n_fr_secure_sequence_id)._create_secure_sequence() return res def _create_secure_sequence(self): \"\"\"This function", "= super(ResCompany, self).write(vals) #if country changed to fr, create the securisation sequence if", "no_gap sequence on each companies in self that will ensure a unique number", "account.move in such a way that we can always find the previous move", "of account_move_line - ' + company.name, 'code': 'FRSECUR', 'implementation': 'no_gap', 'prefix': '', 'suffix':", "all posted account.move in such a way that we can always find the", "a way that we can always find the previous move of a journal", "use to ensure the securisation of data', readonly=True) @api.model def create(self, vals): company", "company, create the securisation sequence as well if company.country_id == self.env.ref('base.fr'): company._create_secure_sequence() return", "entry. 
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from openerp import fields, models, api


class ResCompany(models.Model):
    """Attach the French certification secure sequence to companies."""
    _inherit = 'res.company'

    # Secure no_gap sequence used to chain posted account.move records.
    l10n_fr_secure_sequence_id = fields.Many2one('ir.sequence',
        'Sequence to use to ensure the securisation of data', readonly=True)

    @api.model
    def create(self, vals):
        company = super(ResCompany, self).create(vals)
        # A company created directly in France needs the securisation
        # sequence right away.
        if company.country_id == self.env.ref('base.fr'):
            company._create_secure_sequence()
        return company

    @api.multi
    def write(self, vals):
        res = super(ResCompany, self).write(vals)
        # When the country is switched to France, equip every company of the
        # recordset that does not already have a secure sequence with one.
        country_id = vals.get('country_id')
        if country_id and country_id == self.env.ref('base.fr').id:
            missing_seq = self.filtered(
                lambda company: not company.l10n_fr_secure_sequence_id)
            missing_seq._create_secure_sequence()
        return res

    def _create_secure_sequence(self):
        """Create a no_gap sequence for every company in self.

        The sequence guarantees a unique, gapless number for all posted
        account.move records, so the previous move of any journal entry can
        always be found.
        """
        for company in self:
            seq_values = {
                'name': 'French Securisation of account_move_line - ' + company.name,
                'code': 'FRSECUR',
                'implementation': 'no_gap',
                'prefix': '',
                'suffix': '',
                'padding': 0,
                'company_id': company.id}
            secure_seq = self.env['ir.sequence'].create(seq_values)
            company.write({'l10n_fr_secure_sequence_id': secure_seq.id})
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "args=(on_time, off_time), daemon=True) self._thread.start() def off(self): if self._thread: self._event.set() self._thread.join() self._thread = None", "_write('/sys/class/gpio/AIY_LED1/direction', 'low') self._event.wait(on_time) _write('/sys/class/gpio/AIY_LED1/direction', 'high') self._event.wait(off_time) finally: _write('/sys/class/gpio/unexport', LED1_GPIO) def __init__(self): self._thread =", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "if GPIO.input(BUTTON_GPIO): done = None break time.sleep(0.01) if done: done() class AiyTrigger(object): \"\"\"Trigger", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the", "= None class Button(object): def __init__(self, delay, callback): GPIO.setup(BUTTON_GPIO, GPIO.IN) self._thread = threading.Thread(target=self._run,", "time.monotonic() time.sleep(0.2) # Debounce done = callback while time.monotonic() - start < delay:", "License. # You may obtain a copy of the License at # #", "interface for AIY kits.\"\"\" def __init__(self, triggered): GPIO.setmode(GPIO.BCM) self._led = LED() self._button =", "= LED() self._button = Button(BUTTON_HOLD_TIME_S, triggered) def Close(self): self._led.off() def SetActive(self, active): if", "delay: if GPIO.input(BUTTON_GPIO): done = None break time.sleep(0.01) if done: done() class AiyTrigger(object):", "under the License. 
import os import threading import time import RPi.GPIO as GPIO", "BASE_GPIO): run = self._onboard_led_loop else: run = self._button_led_loop self._thread = threading.Thread(target=run, args=(on_time, off_time),", "triggered): GPIO.setmode(GPIO.BCM) self._led = LED() self._button = Button(BUTTON_HOLD_TIME_S, triggered) def Close(self): self._led.off() def", "14 def _write(path, data): with open(path, 'w') as file: file.write(str(data)) class LED(object): def", "law or agreed to in writing, software # distributed under the License is", "BUTTON_HOLD_TIME_S = 5 BUTTON_GPIO = 23 BUTTON_LED_GPIO = 25 BASE_GPIO = 497 LED1_GPIO", "finally: _write('/sys/class/gpio/unexport', LED1_GPIO) def __init__(self): self._thread = None def blink(self, on_time, off_time): self._event", "the License for the specific language governing permissions and # limitations under the", "as file: file.write(str(data)) class LED(object): def _button_led_loop(self, on_time, off_time): GPIO.setup(BUTTON_GPIO, GPIO.OUT) while not", "on_time, off_time): _write('/sys/class/gpio/export', LED1_GPIO) try: while not self._event.is_set(): _write('/sys/class/gpio/AIY_LED1/direction', 'low') self._event.wait(on_time) _write('/sys/class/gpio/AIY_LED1/direction', 'high')", "with open(path, 'w') as file: file.write(str(data)) class LED(object): def _button_led_loop(self, on_time, off_time): GPIO.setup(BUTTON_GPIO,", "compliance with the License. 
# You may obtain a copy of the License", "True) self._event.wait(on_time) GPIO.output(BUTTON_LED_GPIO, False) self._event.wait(off_time) def _onboard_led_loop(self, on_time, off_time): _write('/sys/class/gpio/export', LED1_GPIO) try: while", "time.sleep(0.2) # Debounce done = callback while time.monotonic() - start < delay: if", "done: done() class AiyTrigger(object): \"\"\"Trigger interface for AIY kits.\"\"\" def __init__(self, triggered): GPIO.setmode(GPIO.BCM)", "= 0.5 BUTTON_HOLD_TIME_S = 5 BUTTON_GPIO = 23 BUTTON_LED_GPIO = 25 BASE_GPIO =", "= self._button_led_loop self._thread = threading.Thread(target=run, args=(on_time, off_time), daemon=True) self._thread.start() def off(self): if self._thread:", "run = self._onboard_led_loop else: run = self._button_led_loop self._thread = threading.Thread(target=run, args=(on_time, off_time), daemon=True)", "self._thread.join() self._thread = None class Button(object): def __init__(self, delay, callback): GPIO.setup(BUTTON_GPIO, GPIO.IN) self._thread", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "def off(self): if self._thread: self._event.set() self._thread.join() self._thread = None class Button(object): def __init__(self,", "this file except in compliance with the License. 
def _write(path, data):
    """Write str(data) to the file at *path*, truncating existing content.

    Used for the sysfs GPIO interface, whose control files expect short
    string payloads (pin numbers, 'low'/'high').
    """
    # 'f' instead of 'file': avoid shadowing the Python 2 builtin of
    # that name.
    with open(path, 'w') as f:
        f.write(str(data))
class LED(object):
    """Blinks an LED: the on-board LED via sysfs when the board's gpiochip
    is present, otherwise the button LED via RPi.GPIO."""

    def __init__(self):
        # Blink worker thread; None while no blink is in progress.
        self._thread = None

    def _button_led_loop(self, on_time, off_time):
        # Bug fix: configure the pin we actually drive. The original called
        # GPIO.setup(BUTTON_GPIO, GPIO.OUT), i.e. it set the button *input*
        # pin (which Button configures as GPIO.IN) to output while leaving
        # BUTTON_LED_GPIO un-configured.
        GPIO.setup(BUTTON_LED_GPIO, GPIO.OUT)
        while not self._event.is_set():
            GPIO.output(BUTTON_LED_GPIO, True)
            self._event.wait(on_time)
            GPIO.output(BUTTON_LED_GPIO, False)
            self._event.wait(off_time)

    def _onboard_led_loop(self, on_time, off_time):
        # Export the pin for the duration of the blink and always unexport
        # it on exit, even if a write fails.
        _write('/sys/class/gpio/export', LED1_GPIO)
        try:
            while not self._event.is_set():
                # Writing 'low'/'high' to the direction file configures the
                # pin as an output and sets its level in a single write.
                _write('/sys/class/gpio/AIY_LED1/direction', 'low')
                self._event.wait(on_time)
                _write('/sys/class/gpio/AIY_LED1/direction', 'high')
                self._event.wait(off_time)
        finally:
            _write('/sys/class/gpio/unexport', LED1_GPIO)

    def blink(self, on_time, off_time):
        """Start blinking in a daemon thread (durations in seconds)."""
        self._event = threading.Event()
        if os.path.exists('/sys/class/gpio/gpiochip%d' % BASE_GPIO):
            run = self._onboard_led_loop
        else:
            run = self._button_led_loop
        self._thread = threading.Thread(target=run, args=(on_time, off_time),
                                        daemon=True)
        self._thread.start()

    def off(self):
        """Stop a running blink loop and wait for its thread to finish."""
        if self._thread:
            self._event.set()
            self._thread.join()
            self._thread = None
class Button(object):
    """Fires *callback* once the button has been held for *delay* seconds."""

    def __init__(self, delay, callback):
        GPIO.setup(BUTTON_GPIO, GPIO.IN)
        self._thread = threading.Thread(target=self._run,
                                        args=(delay, callback), daemon=True)
        self._thread.start()

    def _run(self, delay, callback):
        while True:
            # Block until the button is pressed (line goes low).
            GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING)
            pressed_at = time.monotonic()
            time.sleep(0.2)  # Debounce
            held_long_enough = True
            # Poll until either the hold time elapses or the button is
            # released early.
            while time.monotonic() - pressed_at < delay:
                if GPIO.input(BUTTON_GPIO):
                    held_long_enough = False
                    break
                time.sleep(0.01)
            if held_long_enough:
                callback()
class AiyTrigger(object):
    """Trigger interface for AIY kits."""

    def __init__(self, triggered):
        GPIO.setmode(GPIO.BCM)
        self._led = LED()
        # Holding the button for BUTTON_HOLD_TIME_S fires the callback.
        self._button = Button(BUTTON_HOLD_TIME_S, triggered)

    def Close(self):
        self._led.off()

    def SetActive(self, active):
        # Blink while active, otherwise make sure the LED is off.
        if not active:
            self._led.off()
            return
        self._led.blink(on_time=BLINK_ON_TIME_S, off_time=BLINK_OFF_TIME_S)
self._onboard_led_loop else: run = self._button_led_loop self._thread =", "the License. # You may obtain a copy of the License at #", "def __init__(self): self._thread = None def blink(self, on_time, off_time): self._event = threading.Event() if", "self._onboard_led_loop else: run = self._button_led_loop self._thread = threading.Thread(target=run, args=(on_time, off_time), daemon=True) self._thread.start() def", "to in writing, software # distributed under the License is distributed on an", "time import RPi.GPIO as GPIO BLINK_ON_TIME_S = 0.5 BLINK_OFF_TIME_S = 0.5 BUTTON_HOLD_TIME_S =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "file.write(str(data)) class LED(object): def _button_led_loop(self, on_time, off_time): GPIO.setup(BUTTON_GPIO, GPIO.OUT) while not self._event.is_set(): GPIO.output(BUTTON_LED_GPIO,", "args=(delay, callback), daemon=True) self._thread.start() def _run(self, delay, callback): while True: GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING) start", "def _onboard_led_loop(self, on_time, off_time): _write('/sys/class/gpio/export', LED1_GPIO) try: while not self._event.is_set(): _write('/sys/class/gpio/AIY_LED1/direction', 'low') self._event.wait(on_time)", "_write('/sys/class/gpio/AIY_LED1/direction', 'high') self._event.wait(off_time) finally: _write('/sys/class/gpio/unexport', LED1_GPIO) def __init__(self): self._thread = None def blink(self,", "class AiyTrigger(object): \"\"\"Trigger interface for AIY kits.\"\"\" def __init__(self, triggered): GPIO.setmode(GPIO.BCM) self._led =", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "'w') as file: file.write(str(data)) class LED(object): def _button_led_loop(self, on_time, off_time): GPIO.setup(BUTTON_GPIO, GPIO.OUT) while", "implied. 
# See the License for the specific language governing permissions and #", "callback): GPIO.setup(BUTTON_GPIO, GPIO.IN) self._thread = threading.Thread(target=self._run, args=(delay, callback), daemon=True) self._thread.start() def _run(self, delay,", "import RPi.GPIO as GPIO BLINK_ON_TIME_S = 0.5 BLINK_OFF_TIME_S = 0.5 BUTTON_HOLD_TIME_S = 5", "_button_led_loop(self, on_time, off_time): GPIO.setup(BUTTON_GPIO, GPIO.OUT) while not self._event.is_set(): GPIO.output(BUTTON_LED_GPIO, True) self._event.wait(on_time) GPIO.output(BUTTON_LED_GPIO, False)", "\"License\"); # you may not use this file except in compliance with the", "LED1_GPIO = BASE_GPIO + 14 def _write(path, data): with open(path, 'w') as file:", "class Button(object): def __init__(self, delay, callback): GPIO.setup(BUTTON_GPIO, GPIO.IN) self._thread = threading.Thread(target=self._run, args=(delay, callback),", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "callback while time.monotonic() - start < delay: if GPIO.input(BUTTON_GPIO): done = None break", "GPIO.setup(BUTTON_GPIO, GPIO.OUT) while not self._event.is_set(): GPIO.output(BUTTON_LED_GPIO, True) self._event.wait(on_time) GPIO.output(BUTTON_LED_GPIO, False) self._event.wait(off_time) def _onboard_led_loop(self,", "+ 14 def _write(path, data): with open(path, 'w') as file: file.write(str(data)) class LED(object):", "on_time, off_time): self._event = threading.Event() if os.path.exists('/sys/class/gpio/gpiochip%d' % BASE_GPIO): run = self._onboard_led_loop else:", "self._thread.start() def off(self): if self._thread: self._event.set() self._thread.join() self._thread = None class Button(object): def", "if done: done() class AiyTrigger(object): \"\"\"Trigger interface for AIY kits.\"\"\" def __init__(self, triggered):", "applicable law or agreed 
to in writing, software # distributed under the License", "not self._event.is_set(): _write('/sys/class/gpio/AIY_LED1/direction', 'low') self._event.wait(on_time) _write('/sys/class/gpio/AIY_LED1/direction', 'high') self._event.wait(off_time) finally: _write('/sys/class/gpio/unexport', LED1_GPIO) def __init__(self):", "0.5 BLINK_OFF_TIME_S = 0.5 BUTTON_HOLD_TIME_S = 5 BUTTON_GPIO = 23 BUTTON_LED_GPIO = 25", "False) self._event.wait(off_time) def _onboard_led_loop(self, on_time, off_time): _write('/sys/class/gpio/export', LED1_GPIO) try: while not self._event.is_set(): _write('/sys/class/gpio/AIY_LED1/direction',", "self._thread = None def blink(self, on_time, off_time): self._event = threading.Event() if os.path.exists('/sys/class/gpio/gpiochip%d' %", "= Button(BUTTON_HOLD_TIME_S, triggered) def Close(self): self._led.off() def SetActive(self, active): if active: self._led.blink(on_time=BLINK_ON_TIME_S, off_time=BLINK_OFF_TIME_S)", "<filename>packages/aiy-bt-prov-server/aiy_trigger_rpi_gpio.py # Copyright 2017 Google Inc. # # Licensed under the Apache License,", "daemon=True) self._thread.start() def _run(self, delay, callback): while True: GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING) start = time.monotonic()", "and # limitations under the License. 
import os import threading import time import", "GPIO.FALLING) start = time.monotonic() time.sleep(0.2) # Debounce done = callback while time.monotonic() -", "= 5 BUTTON_GPIO = 23 BUTTON_LED_GPIO = 25 BASE_GPIO = 497 LED1_GPIO =", "BUTTON_GPIO = 23 BUTTON_LED_GPIO = 25 BASE_GPIO = 497 LED1_GPIO = BASE_GPIO +", "self._event = threading.Event() if os.path.exists('/sys/class/gpio/gpiochip%d' % BASE_GPIO): run = self._onboard_led_loop else: run =", "% BASE_GPIO): run = self._onboard_led_loop else: run = self._button_led_loop self._thread = threading.Thread(target=run, args=(on_time,", "__init__(self, delay, callback): GPIO.setup(BUTTON_GPIO, GPIO.IN) self._thread = threading.Thread(target=self._run, args=(delay, callback), daemon=True) self._thread.start() def", "threading.Thread(target=self._run, args=(delay, callback), daemon=True) self._thread.start() def _run(self, delay, callback): while True: GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING)", "= callback while time.monotonic() - start < delay: if GPIO.input(BUTTON_GPIO): done = None", "or agreed to in writing, software # distributed under the License is distributed", "23 BUTTON_LED_GPIO = 25 BASE_GPIO = 497 LED1_GPIO = BASE_GPIO + 14 def", "kits.\"\"\" def __init__(self, triggered): GPIO.setmode(GPIO.BCM) self._led = LED() self._button = Button(BUTTON_HOLD_TIME_S, triggered) def", "Button(BUTTON_HOLD_TIME_S, triggered) def Close(self): self._led.off() def SetActive(self, active): if active: self._led.blink(on_time=BLINK_ON_TIME_S, off_time=BLINK_OFF_TIME_S) else:", "_run(self, delay, callback): while True: GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING) start = time.monotonic() time.sleep(0.2) # Debounce", "the specific language governing permissions and # limitations under the License. import os", "or implied. 
# See the License for the specific language governing permissions and", "blink(self, on_time, off_time): self._event = threading.Event() if os.path.exists('/sys/class/gpio/gpiochip%d' % BASE_GPIO): run = self._onboard_led_loop", "delay, callback): while True: GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING) start = time.monotonic() time.sleep(0.2) # Debounce done", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING) start = time.monotonic() time.sleep(0.2) # Debounce done = callback while time.monotonic()", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "delay, callback): GPIO.setup(BUTTON_GPIO, GPIO.IN) self._thread = threading.Thread(target=self._run, args=(delay, callback), daemon=True) self._thread.start() def _run(self,", "callback): while True: GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING) start = time.monotonic() time.sleep(0.2) # Debounce done =", "file: file.write(str(data)) class LED(object): def _button_led_loop(self, on_time, off_time): GPIO.setup(BUTTON_GPIO, GPIO.OUT) while not self._event.is_set():", "self._button_led_loop self._thread = threading.Thread(target=run, args=(on_time, off_time), daemon=True) self._thread.start() def off(self): if self._thread: self._event.set()", "with the License. 
# You may obtain a copy of the License at", "__init__(self): self._thread = None def blink(self, on_time, off_time): self._event = threading.Event() if os.path.exists('/sys/class/gpio/gpiochip%d'", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "= None def blink(self, on_time, off_time): self._event = threading.Event() if os.path.exists('/sys/class/gpio/gpiochip%d' % BASE_GPIO):", "in writing, software # distributed under the License is distributed on an \"AS", "permissions and # limitations under the License. import os import threading import time", "497 LED1_GPIO = BASE_GPIO + 14 def _write(path, data): with open(path, 'w') as", "while True: GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING) start = time.monotonic() time.sleep(0.2) # Debounce done = callback", "on_time, off_time): GPIO.setup(BUTTON_GPIO, GPIO.OUT) while not self._event.is_set(): GPIO.output(BUTTON_LED_GPIO, True) self._event.wait(on_time) GPIO.output(BUTTON_LED_GPIO, False) self._event.wait(off_time)", "done = None break time.sleep(0.01) if done: done() class AiyTrigger(object): \"\"\"Trigger interface for", "off_time): _write('/sys/class/gpio/export', LED1_GPIO) try: while not self._event.is_set(): _write('/sys/class/gpio/AIY_LED1/direction', 'low') self._event.wait(on_time) _write('/sys/class/gpio/AIY_LED1/direction', 'high') self._event.wait(off_time)", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "Service class ------------------ Service object for interfacing with the Phenotype Archive API \"\"\"", "object for interfacing with the Phenotype Archive API \"\"\" from .phenotype import Phenotype", "class ------------------ Service object for interfacing with the Phenotype Archive API \"\"\" from", "interfacing with the Phenotype Archive API \"\"\" from .phenotype import Phenotype from .service", "<filename>nextcode/services/phenotype/__init__.py \"\"\" Service class ------------------ Service object for interfacing with the Phenotype Archive", "for interfacing with the Phenotype Archive API \"\"\" from .phenotype import Phenotype from", "\"\"\" Service class ------------------ Service object for interfacing with the Phenotype Archive API", "Service object for interfacing with the Phenotype Archive API \"\"\" from .phenotype import", "the Phenotype Archive API \"\"\" from .phenotype import Phenotype from .service import Service", "------------------ Service object for interfacing with the Phenotype Archive API \"\"\" from .phenotype", "with the Phenotype Archive API \"\"\" from .phenotype import Phenotype from .service import" ]
[ "app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) # you", "if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db =", "we need for flask and SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY", "These are the configurations we need for flask and SQLite app = Flask(__name__)", "alembic.config import Config from flask import Flask from flask_migrate import Migrate from flask_sqlalchemy", "key here if you prefer # Create all database tables engine = create_engine(\"sqlite:///cases.db\",", "import Config from flask import Flask from flask_migrate import Migrate from flask_sqlalchemy import", "SQLAlchemy(app) db.init_app(app) # you can also pass the key here if you prefer", "\"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) # you can also pass the key here", "create_engine from flask_googlemaps import GoogleMaps from os import environ # make key.py with", "import create_engine from flask_googlemaps import GoogleMaps from os import environ # make key.py", "flaskr import config, key alembic_cfg = Config() # These are the configurations we", "import GoogleMaps from os import environ # make key.py with API_KEY='your_api_string' from flaskr", "app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] =", "all database tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate = Migrate(app, db, include_schemas=True) from", "db = SQLAlchemy(app) 
db.init_app(app) # you can also pass the key here if", "= key.API_KEY if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\"", "= Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\"", "= SQLAlchemy(app) db.init_app(app) # you can also pass the key here if you", "Migrate from flask_sqlalchemy import SQLAlchemy from sqlalchemy import create_engine from flask_googlemaps import GoogleMaps", "from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy from sqlalchemy import create_engine from", "the key here if you prefer # Create all database tables engine =", "\"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) # you can also", "from flask_sqlalchemy import SQLAlchemy from sqlalchemy import create_engine from flask_googlemaps import GoogleMaps from", "and SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in environ:", "Flask from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy from sqlalchemy import create_engine", "Create all database tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate = Migrate(app, db, include_schemas=True)", "import environ # make key.py with API_KEY='your_api_string' from flaskr import config, key alembic_cfg", "flask_sqlalchemy import SQLAlchemy from sqlalchemy import create_engine from flask_googlemaps import GoogleMaps from os", "from os import environ # make key.py with API_KEY='your_api_string' from flaskr import 
config,", "import Flask from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy from sqlalchemy import", "import config, key alembic_cfg = Config() # These are the configurations we need", "# This is where our imports go. from alembic.config import Config from flask", "pass the key here if you prefer # Create all database tables engine", "engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate = Migrate(app, db, include_schemas=True) from flaskr import routes", "else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) # you can also pass", "in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app)", "app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] =", "are the configurations we need for flask and SQLite app = Flask(__name__) app.config.from_object(config.Config)", "can also pass the key here if you prefer # Create all database", "flask_googlemaps import GoogleMaps from os import environ # make key.py with API_KEY='your_api_string' from", "app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) # you can also pass the", "flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy from sqlalchemy import create_engine from flask_googlemaps", "= Config() # These are the configurations we need for flask and SQLite", "SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"]", "make key.py with API_KEY='your_api_string' from flaskr import config, key alembic_cfg = Config() #", "key.API_KEY if \"DOCKERENV\" in 
environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db", "where our imports go. from alembic.config import Config from flask import Flask from", "configurations we need for flask and SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] =", "prefer # Create all database tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate = Migrate(app,", "environ # make key.py with API_KEY='your_api_string' from flaskr import config, key alembic_cfg =", "you prefer # Create all database tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate =", "here if you prefer # Create all database tables engine = create_engine(\"sqlite:///cases.db\", echo=True)", "from flask import Flask from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy from", "Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else:", "alembic_cfg = Config() # These are the configurations we need for flask and", "db.init_app(app) # you can also pass the key here if you prefer #", "# Create all database tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate = Migrate(app, db,", "our imports go. 
from alembic.config import Config from flask import Flask from flask_migrate", "import SQLAlchemy from sqlalchemy import create_engine from flask_googlemaps import GoogleMaps from os import", "the configurations we need for flask and SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"]", "SQLAlchemy from sqlalchemy import create_engine from flask_googlemaps import GoogleMaps from os import environ", "need for flask and SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if", "# These are the configurations we need for flask and SQLite app =", "with API_KEY='your_api_string' from flaskr import config, key alembic_cfg = Config() # These are", "go. from alembic.config import Config from flask import Flask from flask_migrate import Migrate", "environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) #", "from sqlalchemy import create_engine from flask_googlemaps import GoogleMaps from os import environ #", "from flask_googlemaps import GoogleMaps from os import environ # make key.py with API_KEY='your_api_string'", "you can also pass the key here if you prefer # Create all", "# you can also pass the key here if you prefer # Create", "# make key.py with API_KEY='your_api_string' from flaskr import config, key alembic_cfg = Config()", "if you prefer # Create all database tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate", "This is where our imports go. 
from alembic.config import Config from flask import", "Config from flask import Flask from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy", "from alembic.config import Config from flask import Flask from flask_migrate import Migrate from", "Config() # These are the configurations we need for flask and SQLite app", "is where our imports go. from alembic.config import Config from flask import Flask", "key alembic_cfg = Config() # These are the configurations we need for flask", "from flaskr import config, key alembic_cfg = Config() # These are the configurations", "flask import Flask from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy from sqlalchemy", "app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"]", "for flask and SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\"", "= \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) # you can", "also pass the key here if you prefer # Create all database tables", "os import environ # make key.py with API_KEY='your_api_string' from flaskr import config, key", "= \"sqlite:///cases.db\" db = SQLAlchemy(app) db.init_app(app) # you can also pass the key", "\"DOCKERENV\" in environ: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql+psycopg2://postgres:webapp@host.docker.internal:5432/asymptomatix\" else: app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///cases.db\" db = SQLAlchemy(app)", "sqlalchemy import create_engine from flask_googlemaps import GoogleMaps from os import environ # make", "config, key alembic_cfg = Config() # These are the 
configurations we need for", "key.py with API_KEY='your_api_string' from flaskr import config, key alembic_cfg = Config() # These", "GoogleMaps from os import environ # make key.py with API_KEY='your_api_string' from flaskr import", "API_KEY='your_api_string' from flaskr import config, key alembic_cfg = Config() # These are the", "imports go. from alembic.config import Config from flask import Flask from flask_migrate import", "tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate = Migrate(app, db, include_schemas=True) from flaskr import", "database tables engine = create_engine(\"sqlite:///cases.db\", echo=True) migrate = Migrate(app, db, include_schemas=True) from flaskr", "flask and SQLite app = Flask(__name__) app.config.from_object(config.Config) app.config[\"GOOGLEMAPS_KEY\"] = key.API_KEY if \"DOCKERENV\" in", "import Migrate from flask_sqlalchemy import SQLAlchemy from sqlalchemy import create_engine from flask_googlemaps import" ]
[]
[ "OvenApi from .fridge import FridgeApi from .dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__) def", "appliance_type == ErdApplianceType.OVEN: return OvenApi if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if appliance_type", "<reponame>joelmoses/ha_components<filename>custom_components/ge_kitchen/devices/__init__.py<gh_stars>0 import logging from typing import Type from gekitchensdk.erd import ErdApplianceType from .base", "def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the appropriate appliance", "typing import Type from gekitchensdk.erd import ErdApplianceType from .base import ApplianceApi from .oven", "appropriate appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN: return OvenApi if appliance_type == ErdApplianceType.FRIDGE:", "from .oven import OvenApi from .fridge import FridgeApi from .dishwasher import DishwasherApi _LOGGER", "ErdApplianceType from .base import ApplianceApi from .oven import OvenApi from .fridge import FridgeApi", ".dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device", "return OvenApi if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if appliance_type == ErdApplianceType.DISH_WASHER: return", "== ErdApplianceType.FRIDGE: return FridgeApi if appliance_type == ErdApplianceType.DISH_WASHER: return DishwasherApi # Fallback return", "import logging from typing import Type from gekitchensdk.erd import ErdApplianceType from .base import", "appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if appliance_type == ErdApplianceType.DISH_WASHER: return DishwasherApi # Fallback", "from .base import ApplianceApi from .oven import OvenApi from .fridge import FridgeApi from", "gekitchensdk.erd import ErdApplianceType from .base import ApplianceApi from .oven import OvenApi from 
.fridge", "from typing import Type from gekitchensdk.erd import ErdApplianceType from .base import ApplianceApi from", "ApplianceApi from .oven import OvenApi from .fridge import FridgeApi from .dishwasher import DishwasherApi", "OvenApi if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if appliance_type == ErdApplianceType.DISH_WASHER: return DishwasherApi", "from gekitchensdk.erd import ErdApplianceType from .base import ApplianceApi from .oven import OvenApi from", "if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if appliance_type == ErdApplianceType.DISH_WASHER: return DishwasherApi #", "_LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the appropriate appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN:", "ErdApplianceType.FRIDGE: return FridgeApi if appliance_type == ErdApplianceType.DISH_WASHER: return DishwasherApi # Fallback return ApplianceApi", "import ErdApplianceType from .base import ApplianceApi from .oven import OvenApi from .fridge import", "import FridgeApi from .dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) ->", ".base import ApplianceApi from .oven import OvenApi from .fridge import FridgeApi from .dishwasher", "FridgeApi from .dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type:", "appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN: return OvenApi if appliance_type == ErdApplianceType.FRIDGE: return", "if appliance_type == ErdApplianceType.OVEN: return OvenApi if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if", "Type from gekitchensdk.erd import ErdApplianceType from .base import ApplianceApi from .oven import OvenApi", "Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the appropriate appliance type\"\"\" if appliance_type ==", "{appliance_type}\") \"\"\"Get the 
appropriate appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN: return OvenApi if", "import DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type:", "type: {appliance_type}\") \"\"\"Get the appropriate appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN: return OvenApi", ".fridge import FridgeApi from .dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType)", "import OvenApi from .fridge import FridgeApi from .dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__)", "import Type from gekitchensdk.erd import ErdApplianceType from .base import ApplianceApi from .oven import", "_LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get", "type\"\"\" if appliance_type == ErdApplianceType.OVEN: return OvenApi if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi", "= logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the", "from .fridge import FridgeApi from .dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type:", "the appropriate appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN: return OvenApi if appliance_type ==", "import ApplianceApi from .oven import OvenApi from .fridge import FridgeApi from .dishwasher import", "-> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the appropriate appliance type\"\"\" if appliance_type", "\"\"\"Get the appropriate appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN: return OvenApi if appliance_type", "logging.getLogger(__name__) def get_appliance_api_type(appliance_type: 
ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the appropriate", "== ErdApplianceType.OVEN: return OvenApi if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if appliance_type ==", "from .dishwasher import DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found", ".oven import OvenApi from .fridge import FridgeApi from .dishwasher import DishwasherApi _LOGGER =", "DishwasherApi _LOGGER = logging.getLogger(__name__) def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\")", "ErdApplianceType.OVEN: return OvenApi if appliance_type == ErdApplianceType.FRIDGE: return FridgeApi if appliance_type == ErdApplianceType.DISH_WASHER:", "get_appliance_api_type(appliance_type: ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the appropriate appliance type\"\"\"", "ErdApplianceType) -> Type: _LOGGER.debug(f\"Found device type: {appliance_type}\") \"\"\"Get the appropriate appliance type\"\"\" if", "device type: {appliance_type}\") \"\"\"Get the appropriate appliance type\"\"\" if appliance_type == ErdApplianceType.OVEN: return", "logging from typing import Type from gekitchensdk.erd import ErdApplianceType from .base import ApplianceApi" ]
[ "u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else: print('Please input a", "form or can be in the UI SAQL form load statements must have", "payload = {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True:", "\"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date)", "= json.loads(r.text) shares = response['shares'] to_update = [] for u in user_dict: to_update.append(u['sharedWithId'])", "if verbose == True: start = time.time() print('Loading Data to Einstein Analytics...') print('Process", "import re from pandas import json_normalize from decimal import Decimal import base64 import", "\"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\"", "MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode()", "of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { \"InsightsExternalDataId\" : json.loads(r1.text)['id'],", "def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label", "True: print('Dataset not found. 
Please check name or API name in Einstein Analytics.')", "r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl']", "name, \"name\": name, \"type\": \"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale,", "'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header,", "self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset", "name or label name if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] ==", "requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove fields in the JSON", "MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part = rows_in_part for chunk in", "'None', 'MetadataJson': xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True", "u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else: print('Please input a list or", "version number goes backwards 0 = current version 20 is max oldest version.", "else: print('Please input a list or tuple of app Ids') sys.exit(1) if save_path", "UI saql query to JSON format #create a dictionary with all datasets used", "if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for app in response['folders']:", "math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): 
self.env_url = env_url try: if browser", "name = name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\", \"label\":", "argument and returns a dataframe or saves to csv The query can be", "payload = { \"Action\" : \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if", "if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser", "math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False,", "history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num is not None:", "include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True: start = time.time()", "wrapper / library for Einstein Analytics API import sys import browser_cookie3 import requests", "for item in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in the JSON", "Upload Config Steps if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64", "{\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is", "requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df =", "if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) #", "requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: 
json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart Upload Failed') print(r2.text)", "can be in JSON form or can be in the UI SAQL form", "Function to make it easier to update access using dashboard names vs finding", "Make sure you are logged into a live Salesforce session (chrome/firefox).') sys.exit(1) #set", "'+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types include:", "np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision = 18 scale = 2 elif", "if verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts:", "== \"datetime64[ns]\": name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\":", "df[c].dtype == \"datetime64[ns]\": name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date = {", "= requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num", "8 characters since browser cookie does not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header", "for label search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe", "time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose", "use exact API name if getting multiple matches for label search. if verbose", "user update operation. 
Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is", "xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR:", "to use exact API name if getting multiple matches for label search. if", "json.loads(r.text) shares = response['shares'] to_update = [] for u in user_dict: to_update.append(u['sharedWithId']) for", "if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then", "+= 1 if verbose == True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed',", "SAQL form load statements must have the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose", "save as csv payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df", "dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return", "except: print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose == True: print('\\nDatapart Upload", "is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST ERROR =", "Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not None: payload", "app_user_df = pd.DataFrame() break except: attempts += 1 if verbose == True: print(\"Unexpected", "charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields = []", "saql.replace('\\\"','\\\\\"') #convert UI saql query to JSON format #create a dictionary with all", "= requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 
'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4)", "to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved", "= [] for u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId']", "\"datetime64[ns]\": name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name,", "name, \"name\": name, \"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif", "import browser_cookie3 import requests import json import time import datetime from dateutil import", "verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json =", "get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time", "exact API name if getting multiple matches for label search. 
if verbose ==", "a saql query as an argument and returns a dataframe or saves to", "fields = [] for c in df.columns: if df[c].dtype == \"datetime64[ns]\": name =", "MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { \"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\" : str(partnum),", "columns == None: columns = df.columns else: columns = columns for c in", "dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how many matches that", "#create a dictionary with all datasets used in the query load_stmt_old = re.findall(r\"(=", "error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts < max_request_attempts: try: for app in response['folders']:", "range_start = 0 max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)):", "verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then return", "or tuple of app Ids') sys.exit(1) if save_path is not None: if verbose", "precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name =", "None and version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header)", "add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True:", "format #create a dictionary with all datasets used in the query load_stmt_old =", "{'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params)", "Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID", "Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) 
print('Completed in '+str(round(end-start,3))+'sec') if __name__ == '__main__': pass", "= browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect \"https://\"", "= time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if", "check name or API name in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0]", "1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") elif app_id is", "\"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields } ] } return str(xmd).replace(\"'\",'\"')", "not found message or return the dataset ID if dataset_df.empty == True: print('Dataset", "if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype", "of app Ids') sys.exit(1) if save_path is not None: if verbose == True:", "or firefox)') sys.exit(1) except: print('ERROR: Could not get session ID. Make sure you", "True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id,", "=_load_\\\"datasetname\\\"; ''' if verbose == True: start = time.time() print('Checking SAQL and Finding", "next_page is not None: if verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%')", "Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in '+str(round(end-start,3))+'sec') if __name__ ==", "dataset_df[dataset_df['name'] == dataset_name] #show user how many matches that they got. Might want", "to make it easier to update access using dashboard names vs finding all", "= current version 20 is max oldest version. 
Typically best practice to run", "history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None", "search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name']", "== True: end = time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self,", "update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make it easier to update access", "datasets.') #if dataframe is empty then return not found message or return the", "== True: self.remove_non_ascii(df) # Upload Config Steps if xmd is not None: xmd64", "else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config =", "np.number): if useNumericDefaults == True: precision = 18 scale = 2 elif useNumericDefaults", "print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end = time.time()", "'+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { \"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\"", "the user wants to seach by API name or label name if search_type", "cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect", "in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass else:", "#remove fields in the JSON that we don't want for s in shares:", "json.loads(r.text) shares = response['shares'] #remove fields in the JSON that we don't want", 
"df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\")", "if type(app_id) is list or type(app_id) is tuple: for app in app_id: app_user_df", "user wants to seach by API name or label name if search_type ==", "= requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to", "Solution is to add a try/except block to handle the error ''' attempts", "for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk ==", "x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\":", "= requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df", "== dataset_name] #show user how many matches that they got. 
Might want to", "if the user wants to seach by API name or label name if", "else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True:", "= {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True: end", "= { \"Action\" : \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose", "query can be in JSON form or can be in the UI SAQL", "{ 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' : 'None',", "in the UI SAQL form load statements must have the appropreate spaces: =_load_\\\"datasetname\\\";", "Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make", "r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False,", "end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None):", "\"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\":", "verbose=False): ''' This function takes a saql query as an argument and returns", "%p\") elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self,", "= user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text)", "return app_user_df else: if verbose == 
True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec')", "from unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url", "c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name, \"name\": name, \"type\":", "again...\") elif app_id is not None: if type(app_id) is list or type(app_id) is", "scale = 2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max()", "'Action' : 'None', 'MetadataJson': xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success']", "multiple matches for label search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.')", "update dataframe should have the following columns: Dashboard Id, Access Type, and User", "} ] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True,", "since browser cookie does not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization':", "True except: print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose == True: print('\\nDatapart", "response['shares'] #remove fields in the JSON that we don't want for s in", "xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None,", "to update access using dashboard names vs finding all apps needed. 
update dataframe", "max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page", "try: json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose", "Access Type, and User Id ''' pass def remove_non_ascii(self, df, columns=None): if columns", "end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type,", "get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true',", "None print('Please choose a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers')", "print('\\nDatapart Upload Complete...') payload = { \"Action\" : \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'],", "name if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df", "if update_type == 'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers': r =", "saql = saql.replace('\\\\\"','\\\"') if verbose == True: print('Running SAQL Query...') #run query and", "verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True:", "requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( {", "not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}", "response['shares'] to_update = [] for u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)):", ": 'CSV', 
'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64", "Complete...') payload = { \"Action\" : \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload))", "r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update = []", "get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start = time.time() progress_counter", "= { \"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" : data_part64 } r2", "saql = saql.replace('\\\"','\\\\\"') #convert UI saql query to JSON format #create a dictionary", "\",\"_\") name = name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\",", "__init__(self, env_url, browser): self.env_url = env_url try: if browser == 'chrome': cj =", "xmd = { \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\":", "pull data from next page attempts = 0 # reset attempts for additional", "as pd import numpy as np import re from pandas import json_normalize from", "API name or label name if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label']", "if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name,", "\"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [ { \"connector\":", "if save_json_path is not None and version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num]", "dataframe is empty then return not found message or return 
the dataset ID", "timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S", "math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part = rows_in_part for", "browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie", "json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload))", "== True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for app in response['folders']: attempts =", "in users: app_user_df = app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\":", "None print(sys.exc_info()[0]) break except: attempts += 1 if verbose == True: print(\"Unexpected error:\",", "start = time.time() print('Loading Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name", "APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to add", "to_update: shares[s] = next(item for item in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove", "headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('User Access Updated') print('Completed", "print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict elif update_type", "base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() 
upload_config = { 'Format' :", "try: del s['imageUrl'] except: pass else: shares = None print('Please choose a user", "shares[s] = next(item for item in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields", "except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration", "''' if verbose == True: start = time.time() print('Updating App Access...') print('Process started", "Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna == True:", "= app_user_df.append( { \"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'],", "matches for label search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if", "dataframe or save as csv payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload)", "ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls]", "verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose ==", "= requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove fields in the", "unicodecsv from unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser):", "for additional pages while next_page is not None: if verbose == True: progress_counter", "import unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url = env_url", "to add a try/except block to handle the error ''' 
attempts = 0", "verbose == True: start = time.time() progress_counter = 0 print('Getting app user list", "== True: print('Running SAQL Query...') #run query and return dataframe or save as", "datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile", "print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then return not found message", "dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): '''", "fillna == True: for c in df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True)", "xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults,", "shares = shares + user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header)", "49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum =", "< max_request_attempts: try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users =", "str(partnum), \"DataFile\" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] ==", "= json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This", "== True: print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10", "completed', end='', flush=True) payload = { \"InsightsExternalDataId\" : 
json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\"", "data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose ==", "'+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna == True: for c in df.columns:", "x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name", "saql.replace('\\\\\"','\\\"') if verbose == True: print('Running SAQL Query...') #run query and return dataframe", "CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose == True: end =", "Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000 *", "from dateutil import tz import pandas as pd import numpy as np import", "json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose ==", "== True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np", "= env_url try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8", "unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label =", "} r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload", "deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label,", "flush=True) payload = { \"InsightsExternalDataId\" : 
json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" : data_part64", "= {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid browser (chrome", "r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove fields in", "return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes", "attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u", "try: del s['imageUrl'] except: pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header)", "\"name\": name, \"type\": \"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\":", "= df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\",", "at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers':", "Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna ==", "not get session ID. 
Make sure you are logged into a live Salesforce", "KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts += 1 if verbose ==", "precision = 18 scale = 2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda", "== \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\":", "base64 import csv import unicodecsv from unidecode import unidecode import math class salesforceEinsteinAnalytics(object):", ": \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end", "get session ID. Make sure you are logged into a live Salesforce session", "\"PartNumber\" : str(partnum), \"DataFile\" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try:", "CSV...') df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to", "in response['folders']: attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"],", "ensure_ascii=False, indent=4) elif version_num is not None: payload = { \"historyId\": history_df['id'].tolist()[version_num] }", "import datetime from dateutil import tz import pandas as pd import numpy as", "MAX_FILE_SIZE = 10 * 1000 * 1000 - 49 df_memory = sys.getsizeof(df) rows_in_part", "not None: payload = { \"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload))", "def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers,", "returns a dataframe or saves to csv The query 
can be in JSON", "app user list and access details...') print('Process started at: '+str(self.get_local_time())) if app_id is", "session ID. Make sure you are logged into a live Salesforce session (chrome/firefox).')", "shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass else: shares", "= requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql,", "as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not None: payload =", "app_id: app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u", "an argument and returns a dataframe or saves to csv The query can", "not None and version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link,", "verbose == True: print('\\nDatapart Upload Complete...') payload = { \"Action\" : \"Process\" }", "#convert UI saql query to JSON format #create a dictionary with all datasets", "= 2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale", "pass try: del s['imageUrl'] except: pass shares = shares + user_dict elif update_type", "saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose == True:", "requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page =", "charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will show", "csv import unicodecsv from unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def 
__init__(self,", "= response['shares'] to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId']) for s in", "1 if verbose == True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='',", "df.columns else: columns = columns for c in columns: if df[c].dtype == \"O\":", "== True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload =", "None: columns = df.columns else: columns = columns for c in columns: if", "OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to add a try/except block to", "{ \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\", \"label\": c } fields.append(dimension) xmd =", "update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers", "return the dataset ID if dataset_df.empty == True: print('Dataset not found. Please check", "headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose", "r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('Data", "25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response", "== \"O\": df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\",", "if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def", "elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = 
json.loads(r.text) shares =", "the history first before supplying a version number. ''' #get broken dashboard version", "fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision = 18 scale =", "app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True)", "pass try: del s['imageUrl'] except: pass else: shares = None print('Please choose a", "elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares =", "} r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time()", "time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares", "\"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" }", "requests import json import time import datetime from dateutil import tz import pandas", "params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach by API", "= columns for c in columns: if df[c].dtype == \"O\": df[c] = df[c].apply(lambda", "dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is", "want to use exact API name if getting multiple matches for label search.", "dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", 
deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True,", "json_normalize from decimal import Decimal import base64 import csv import unicodecsv from unidecode", "= df.columns else: columns = columns for c in columns: if df[c].dtype ==", "= json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except:", "print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose ==", "the JSON that we don't want for s in shares: try: del s['sharedWithLabel']", "type(app_id) is tuple: for app in app_id: app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app,", "at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI saql query to JSON format #create", "u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else: print('Please input a list or tuple of", "== True: start = time.time() progress_counter = 0 print('Getting app user list and", "{ \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [ { \"connector\": \"CSV\",", "True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False):", "not None: payload = {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose", "if verbose == True: print('\\nDatapart Upload Complete...') payload = { \"Action\" : \"Process\"", "sure you are logged into a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone", "math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part =", "\"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": 
u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break except: attempts", "import Decimal import base64 import csv import unicodecsv from unidecode import unidecode import", "= df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\"", "return not found message or return the dataset ID if dataset_df.empty == True:", "{ \"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" : data_part64 } r2 =", "dataset_label, \"name\": dataset_api_name, \"fields\": fields } ] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df,", "if useNumericDefaults == True: precision = 18 scale = 2 elif useNumericDefaults ==", "show up exactly as the column names in the supplied dataframe ''' if", "'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1", "\"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser", "= name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\", \"label\": c", "with dataset ID and version ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i],", "== True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict,", "is list or type(app_id) is tuple: for app in app_id: app_user_df = pd.DataFrame()", "firefox)') sys.exit(1) except: print('ERROR: Could not get session ID. 
Make sure you are", "add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time +", "True: start = time.time() progress_counter = 0 print('Getting app user list and access", "started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict elif update_type ==", "saql with dataset ID and version ID for i in range(0,len(load_stmt_new)): saql =", "the supplied dataframe ''' if verbose == True: start = time.time() print('Loading Data", "must have the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose == True: start =", "+ datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\")", "\"name\": name, \"type\": \"Text\", \"label\": c } fields.append(dimension) xmd = { \"fileFormat\": {", "'MetadataJson': xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except:", "error:\", sys.exc_info()[0]) print(\"Trying again...\") for app in response['folders']: attempts = 0 while attempts", "{ \"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType']", "next(item for item in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in the", "if verbose == True: end = time.time() print('Dataframe saved to CSV...') print('Completed in", "= None print('Please choose a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers,", "'+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid browser (chrome or firefox)') sys.exit(1)", "supplying a version number. 
''' #get broken dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories',", "label search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is", "appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose == True: start = time.time() print('Checking SAQL", "for u in users: app_user_df = app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\":", "if shares is not None: payload = {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header,", "verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for app in response['folders']: attempts", "or API name in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid =", "saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose == True: print('Running SAQL", "json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num is not None: preview_link =", "(chrome or firefox)') sys.exit(1) except: print('ERROR: Could not get session ID. 
Make sure", "+= 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue to", "verbose == True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload", "progress_counter = 0 print('Getting app user list and access details...') print('Process started at:", "CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True: end =", "is empty then return not found message or return the dataset ID if", "dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes a saql query", "app_user_df = app_user_df.append( { \"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\":", "except: print('ERROR: Could not get session ID. Make sure you are logged into", "== True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id,", "finding all apps needed. update dataframe should have the following columns: Dashboard Id,", "JSON format #create a dictionary with all datasets used in the query load_stmt_old", "shares = response['shares'] to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId']) for s", "= time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql", "does not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type':", "API name if getting multiple matches for label search. if verbose == True:", "print('Dataset not found. 
Please check name or API name in Einstein Analytics.') sys.exit(1)", "'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove =", "# Upload Config Steps if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else:", "headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach by", "spaces: =_load_\\\"datasetname\\\"; ''' if verbose == True: start = time.time() print('Checking SAQL and", "== True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return", "= time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if", "return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 50,", "print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose == True: end = time.time()", "Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000", "\"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision", "} fields.append(measure) else: name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension = {", "in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in '+str(round(end-start,3))+'sec') if __name__ == '__main__':", "+= rows_in_part max_data_part += rows_in_part partnum += 1 if verbose == True: print('\\rChunk", "\"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults ==", "practice to run the function and view the history first before supplying a", "verbose=verbose) 
load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID", "for c in df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number):", "default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will", "df_part = df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else:", "c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name, \"name\": name, \"type\":", "= 18 scale = 2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x:", "search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty", "= time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess':", "= base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part", "datasets used in the query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new =", "np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is", "Config Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration Complete...') print('Chunking and", "else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): 
params = {'pageSize':", "with all datasets used in the query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql)", "Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if", "= { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' :", "ID and version ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql", "dataset_api_name, \"fields\": fields } ] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None,", "if fillna == True: for c in df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE',", "attempts = 0 # reset attempts for additional pages while next_page is not", "verbose == True: end = time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def", "Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose == True: print('\\nDatapart Upload Complete...') payload", "don't want for s in shares: try: del s['sharedWithLabel'] except: pass try: del", "indent=4) elif version_num is not None: payload = { \"historyId\": history_df['id'].tolist()[version_num] } fix", "data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if", "def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start = time.time()", "app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST ERROR", "headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update = [] for u in", "\",\"_\") fields = [] for c in df.columns: if df[c].dtype == \"datetime64[ns]\": name", "= saql.replace('\\\\\"','\\\"') if verbose == True: print('Running SAQL Query...') #run query and return", "Upload Process Started. 
Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in", "to_remove: shares.remove(s) #remove fields in the JSON that we don't want for s", "print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose == True: end = time.time()", "in app_id: app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for", "print(\"Trying again...\") for app in response['folders']: attempts = 0 while attempts < max_request_attempts:", "''' if verbose == True: start = time.time() print('Loading Data to Einstein Analytics...')", "== 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode()", "def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly':", "'+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers': r", "if save_path is not None: if verbose == True: print('Saving result to CSV...')", "operation. 
Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not None:", "in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove", "True: precision = 18 scale = 2 elif useNumericDefaults == False: precision =", "range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item in user_dict if", "and version ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql =", "scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure", "are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not None: payload =", "{'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid browser (chrome or", "headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1)", "reset attempts for additional pages while next_page is not None: if verbose ==", "= json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append( { \"AppId\": app, \"AppName\":", "time import datetime from dateutil import tz import pandas as pd import numpy", "for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item", "None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as", "logged into a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation", "#set timezone for displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time =", "dataset_df[dataset_df['label'] == 
dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how many", "True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") elif app_id is not None: if type(app_id)", "''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm,", "if verbose == True: start = time.time() progress_counter = 0 print('Getting app user", "(curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S", "print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna == True: for", "version_num is not None: payload = { \"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num],", "def remove_non_ascii(self, df, columns=None): if columns == None: columns = df.columns else: columns", "users: app_user_df = app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'],", "< max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize']", "\",\"_\") name = name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\",", "\"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else: print('Please input", "update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = 
response['shares']", "Ids') sys.exit(1) if save_path is not None: if verbose == True: print('Saving result", "response = json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break", "json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function", "except: pass else: shares = None print('Please choose a user update operation. Options", "pass def remove_non_ascii(self, df, columns=None): if columns == None: columns = df.columns else:", "my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select", "name or API name in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid", "browser_cookie3 import requests import json import time import datetime from dateutil import tz", "= 0 range_start = 0 max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory", "= json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach by API name or", "elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df,", "as csv payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df =", "print(\"Trying again...\") #continue to pull data from next page attempts = 0 #", "print('Loading Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\")", "if verbose == True: end = time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec')", "Might want to use exact API name if getting multiple matches for label", "= pd.DataFrame() break except: attempts += 1 if verbose 
== True: print(\"Unexpected error:\",", "if verbose == True: end = time.time() print('Data Upload Process Started. Check Progress", "at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna == True: for c in", "try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page =", "json.loads(r.text) shares = response['shares'] to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId']) for", "+ user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text)", "response['folders']: attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header)", "from next page attempts = 0 # reset attempts for additional pages while", "if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in the JSON that we don't want", "app_id, update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if", "before supplying a version number. 
''' #get broken dashboard version history r =", "= -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure =", "Data Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000 - 49 df_memory =", "#remove first 8 characters since browser cookie does not expect \"https://\" my_cookies =", "label name if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else:", "for u in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in to_remove:", "name in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get", "users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { \"AppId\": app['id'],", "version 20 is max oldest version. Typically best practice to run the function", "encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not None: payload", "if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False,", "requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update = [] for u", "default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields =", "max oldest version. 
Typically best practice to run the function and view the", "useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x:", "json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach by API name or label", "print('Process started at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR", "except: pass try: del s['imageUrl'] except: pass else: shares = None print('Please choose", "columns = columns for c in columns: if df[c].dtype == \"O\": df[c] =", "json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0]) break except:", "= time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False):", "response = json.loads(r.text) shares = response['shares'] to_update = [] for u in user_dict:", "== True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts < max_request_attempts: try: for", "select a valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could not get", "f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not None: payload = {", "1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue to pull", "app_user_df = app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\":", "df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not", "return dataframe or save as csv payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header,", "print(\"Unexpected error:\", sys.exc_info()[0]) 
print(\"Trying again...\") elif app_id is not None: if type(app_id) is", "then return not found message or return the dataset ID if dataset_df.empty ==", "headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num", "- 49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum", "lineterminator }, \"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name,", "details...') print('Process started at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING AN", "except: pass try: del s['imageUrl'] except: pass shares = shares + user_dict elif", "= 0 print('Getting app user list and access details...') print('Process started at: '+str(self.get_local_time()))", "columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps if xmd is", "dataset_api_name.replace(\" \",\"_\") if fillna == True: for c in df.columns: if df[c].dtype ==", "removeUsers, updateUsers ''' if verbose == True: start = time.time() print('Updating App Access...')", "np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns", "df.columns: if df[c].dtype == \"datetime64[ns]\": name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date", "/ library for Einstein Analytics API import sys import browser_cookie3 import requests import", "field names will show up exactly as the column names in the supplied", "0 # reset attempts for additional pages while next_page is not None: if", "lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will show up exactly", "for c in columns: if df[c].dtype == \"O\": 
df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\"))", "be in the UI SAQL form load statements must have the appropreate spaces:", "pandas import json_normalize from decimal import Decimal import base64 import csv import unicodecsv", "break except: attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying", "is not None: payload = {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if", "elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision = 18 scale = 2", "} return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\",", "dsid = dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid =", "Id ''' pass def remove_non_ascii(self, df, columns=None): if columns == None: columns =", "try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose", "displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec", ": 'None', 'MetadataJson': xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] ==", "in df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True)", "tuple: for app in app_id: app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response", "df, dataset_api_name, xmd=None, 
encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None,", "default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias' :", "for displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if", "useNumericDefaults == True: precision = 18 scale = 2 elif useNumericDefaults == False:", "== True: start = time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time())) if", "getting multiple matches for label search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching", "dataset_label=None, verbose=False): ''' field names will show up exactly as the column names", "== True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API Name',", "API import sys import browser_cookie3 import requests import json import time import datetime", "'+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP", "dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants", "print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for app in response['folders']: attempts = 0 while", "not None: if verbose == True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if", "Query...') #run query and return dataframe or save as csv payload = {\"query\":saql}", "= 
OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to add a try/except block", "shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item in user_dict if item[\"sharedWithId\"] ==", "''' attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header)", "True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload", "\"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\", \"label\": c } fields.append(dimension) xmd = {", "dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how many matches that they got.", "ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def", "else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return df", "import tz import pandas as pd import numpy as np import re from", "if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the JSON that we don't", "True: self.remove_non_ascii(df) # Upload Config Steps if xmd is not None: xmd64 =", "\",\"_\") if fillna == True: for c in df.columns: if df[c].dtype == \"O\":", "JSON form or can be in the UI SAQL form load statements must", "version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path,", "\"type\": \"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\":", "sys.exc_info()[0]) print(\"Trying again...\") elif app_id is not None: if type(app_id) is list or", "= sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start", "/ MAX_FILE_SIZE)): df_part 
= df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"',", "rows_in_part max_data_part += rows_in_part partnum += 1 if verbose == True: print('\\rChunk '+str(chunk+1)+'", "None: if type(app_id) is list or type(app_id) is tuple: for app in app_id:", "response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts += 1", "#check if the user wants to seach by API name or label name", "current version 20 is max oldest version. Typically best practice to run the", "rows_in_part partnum += 1 if verbose == True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory /", "\"linesTerminatedBy\": lineterminator }, \"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\":", "in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True: end = time.time() print('Completed", "from decimal import Decimal import base64 import csv import unicodecsv from unidecode import", "= requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('Data Upload", "unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url = env_url try:", "while next_page is not None: if verbose == True: progress_counter += 25 print('Progress:", "history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None,", "Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to", "u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose == 
True: print(\"Unexpected", "== True: print('Dataset not found. Please check name or API name in Einstein", "'+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make it easier to", "sys.exit(1) #set timezone for displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time", "dataset ID and version ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i])", "in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item in user_dict", "{'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies", "dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid,", "+= rows_in_part partnum += 1 if verbose == True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory", "r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('User", "1000 - 49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE))", "user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares", "ID if dataset_df.empty == True: print('Dataset not found. Please check name or API", "view the history first before supplying a version number. 
''' #get broken dashboard", "print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose == True: print('\\nDatapart Upload Complete...')", "headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num is not", "if dataset_df.empty == True: print('Dataset not found. Please check name or API name", "load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls])", "True: print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10 *", "else: name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name,", "json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose ==", "== None: columns = df.columns else: columns = columns for c in columns:", "Failed') print(r2.text) sys.exit(1) if verbose == True: print('\\nDatapart Upload Complete...') payload = {", "as an argument and returns a dataframe or saves to csv The query", "handle the error ''' attempts = 0 while attempts < max_request_attempts: try: r", "if df[c].dtype == \"O\": df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label,", "column names in the supplied dataframe ''' if verbose == True: start =", "requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj", "del s['imageUrl'] except: pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response", "columns: Dashboard Id, Access Type, and User Id ''' pass def remove_non_ascii(self, df,", "del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass elif update_type == 'updateUsers':", 
")(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm,", "verbose == True: start = time.time() print('Loading Data to Einstein Analytics...') print('Process started", "update_type, verbose=True): ''' Function to make it easier to update access using dashboard", "\"type\": \"Text\", \"label\": c } fields.append(dimension) xmd = { \"fileFormat\": { \"charsetName\": charset,", "default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name,", "\"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break", "be in JSON form or can be in the UI SAQL form load", "ID. Make sure you are logged into a live Salesforce session (chrome/firefox).') sys.exit(1)", "'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response", "= json.loads(r.text) shares = response['shares'] #remove fields in the JSON that we don't", "\"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults", "pd.DataFrame() break except: attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0])", "response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts += 1 if verbose == True:", "def __init__(self, env_url, browser): self.env_url = env_url try: if browser == 'chrome': cj", "datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def", "error:\", sys.exc_info()[0]) print(\"Trying 
again...\") #continue to pull data from next page attempts =", "data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\\nERROR:", "s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass shares = shares + user_dict", "pandas as pd import numpy as np import re from pandas import json_normalize", "AN ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed", "c in df.columns: if df[c].dtype == \"datetime64[ns]\": name = c.replace(\" \",\"_\") name =", "timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API", "True: end = time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df,", "df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 =", "str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\",", "columns=None): if columns == None: columns = df.columns else: columns = columns for", "'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name]", "item in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in the JSON that", "'+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards", "print('Updating App Access...') 
print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares =", "item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in the JSON that we don't want for", "to run the function and view the history first before supplying a version", "sys.exc_info()[0]) print(\"Trying again...\") while attempts < max_request_attempts: try: for app in response['folders']: r", "shares = user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response =", "lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation,", "version ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"')", "sys.exit(1) if save_path is not None: if verbose == True: print('Saving result to", "load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose == True: print('Running SAQL Query...') #run query", "shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass elif update_type", "= name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\", \"label\": c,", "= json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num is not None: preview_link", "10 * 1000 * 1000 - 49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0]", "else: print('Please select a valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could", "#run query and return dataframe or save as csv payload = {\"query\":saql} r", "== True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end", "dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1 = 
requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData',", "by API name or label name if search_type == 'UI Label': dataset_df =", "verbose == True: start = time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time()))", "\"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii", "for Einstein Analytics API import sys import browser_cookie3 import requests import json import", "requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif", "Dashboard Id, Access Type, and User Id ''' pass def remove_non_ascii(self, df, columns=None):", "wants to seach by API name or label name if search_type == 'UI", "json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts", "= requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append(", "index=False) if verbose == True: end = time.time() print('Dataframe saved to CSV...') print('Completed", "= dataset_df[dataset_df['name'] == dataset_name] #show user how many matches that they got. 
Might", "result to CSV...') df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe", "CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to", "print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI saql query to JSON", "browser cookie does not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer", "'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets'])", "if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts < max_request_attempts:", "default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names", "np import re from pandas import json_normalize from decimal import Decimal import base64", "using dashboard names vs finding all apps needed. 
update dataframe should have the", "started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna == True: for c", "chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk == 0:", "Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in '+str(round(end-start,3))+'sec') if __name__", "charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation'", "shares is not None: payload = {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload))", "0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares']", "= name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\", \"label\": c,", "for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose)", "headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove fields in the JSON that", "name, \"type\": \"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt,", "in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] =", "return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start", "import time import datetime from dateutil import tz import pandas as pd import", "Upload Failed') print(r2.text) 
sys.exit(1) if verbose == True: print('\\nDatapart Upload Complete...') payload =", "if df[c].dtype == \"datetime64[ns]\": name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date =", "to handle the error ''' attempts = 0 while attempts < max_request_attempts: try:", "{ \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" }", "2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale =", "verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts < max_request_attempts: try:", "dictionary with all datasets used in the query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\",", "to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in", "app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\":", "in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes", "next page attempts = 0 # reset attempts for additional pages while next_page", "Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show", "print('Running SAQL Query...') #run query and return dataframe or save as csv payload", "True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df", "df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"):", "import sys import browser_cookie3 import 
requests import json import time import datetime from", "if verbose == True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose ==", "= dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how", "df, columns=None): if columns == None: columns = df.columns else: columns = columns", "'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid browser (chrome or firefox)')", "try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for", "and return dataframe or save as csv payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query',", "print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update", "== True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then return not", "print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number", "= base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum += 1", "elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps if xmd is not", "dataset_api_name = dataset_label.replace(\" \",\"_\") fields = [] for c in df.columns: if df[c].dtype", "update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose", "update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True: start", "is not None: return (curr_time + 
datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True: return", "update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not", "Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation start time def get_local_time(self,", "print('ERROR: Could not get session ID. Make sure you are logged into a", "'+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then return not found message or", "for u in response['shares']: app_user_df = app_user_df.append( { \"AppId\": app, \"AppName\": response['name'], \"UserId\":", "is not None: if type(app_id) is list or type(app_id) is tuple: for app", "save_path is not None: if verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path,", "and view the history first before supplying a version number. ''' #get broken", "elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization':", "or label name if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name]", "dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(\"", "or saves to csv The query can be in JSON form or can", "addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True: start = time.time() print('Updating", "matching datasets.') #if dataframe is empty then return not found message or return", "== True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") elif app_id is not None: if", "'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 
'Content-Type':", "try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break except", "'+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text)", "removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps if xmd is not None:", "error ''' attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders',", "else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df", "return df else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec')", "= self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with", "response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else: print('Please", "r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove = []", "and returns a dataframe or saves to csv The query can be in", "partnum = 0 range_start = 0 max_data_part = rows_in_part for chunk in range(0,", "names in the supplied dataframe ''' if verbose == True: start = time.time()", "'''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1,", "import unicodecsv from unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url,", "default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" } 
fields.append(measure) else: name = c.replace(\"", "requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None,", "\"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart',", "= requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox':", "'+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True: end = time.time() print('Completed in", "dataset_df.empty == True: print('Dataset not found. Please check name or API name in", "'application/json'} else: print('Please select a valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR:", "Decimal import base64 import csv import unicodecsv from unidecode import unidecode import math", "sys.exit(1) if verbose == True: print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...')", "= requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart Upload Failed')", "Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI saql", "make it easier to update access using dashboard names vs finding all apps", "as np import re from pandas import json_normalize from decimal import Decimal import", "the following columns: Dashboard Id, Access Type, and User Id ''' pass def", "updateUsers ''' if verbose == True: start = time.time() print('Updating App Access...') print('Process", "requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, 
data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if", "fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True: start = time.time() print('Updating App", "shares[s]['sharedWithId']) #remove fields in the JSON that we don't want for s in", "is not None: if verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False)", "None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps if", "elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda", "not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps", "quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum += 1 if verbose ==", "print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000 -", "attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for", "= 10 * 1000 * 1000 - 49 df_memory = sys.getsizeof(df) rows_in_part =", "pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares", "the UI SAQL form load statements must have the appropreate spaces: =_load_\\\"datasetname\\\"; '''", "print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000", "we don't want for s in shares: try: del s['sharedWithLabel'] except: pass try:", "end = time.time() print('Data Upload Process Started. 
Check Progress in Data Monitor.') print('Job", "next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts += 1 if verbose", "0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text)", "= time.time() progress_counter = 0 print('Getting app user list and access details...') print('Process", "a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation start time", "0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start", "verbose == True: print('Running SAQL Query...') #run query and return dataframe or save", "ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to", "total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts +=", "= time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True):", "Config Steps if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 =", "x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label", "\"UserType\": u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose == True:", "addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not None: payload = {\"shares\":", "Id, Access Type, and User Id ''' pass def remove_non_ascii(self, df, columns=None): 
if", "= 0 # reset attempts for additional pages while next_page is not None:", "to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna", "True: end = time.time() print('Data Upload Process Started. Check Progress in Data Monitor.')", "self.remove_non_ascii(df) # Upload Config Steps if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode()", "= requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update = [] for", "in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose == True:", "attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users", "end = time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type,", "} r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart", "= load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid =", "\"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose", "max_request_attempts: try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares']", "measure = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\", \"label\": c, \"precision\": precision,", "page attempts = 0 # reset attempts for additional pages while 
next_page is", "to pull data from next page attempts = 0 # reset attempts for", "that they got. Might want to use exact API name if getting multiple", "dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields } ] } return str(xmd).replace(\"'\",'\"') def", "app_user_df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to CSV...')", "to_update = [] for u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if", "update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares']", "in to_remove: shares.remove(s) #remove fields in the JSON that we don't want for", "user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields", "#get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm,", "fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not None: payload = {\"shares\": shares}", "== dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how many matches", "} fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision = 18 scale", "operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try:", "history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start =", "app Ids') sys.exit(1) if save_path is not None: if verbose == True: print('Saving", "= { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\", \"label\": c } fields.append(dimension) xmd", 
"decimal import Decimal import base64 import csv import unicodecsv from unidecode import unidecode", "= c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name, \"name\": name,", "== \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif", "''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True:", "params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets',", "df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose == True: print('Saving", "ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload", "print('Please input a list or tuple of app Ids') sys.exit(1) if save_path is", "sys.exc_info()[0]) print(\"Trying again...\") #continue to pull data from next page attempts = 0", "= response['shares'] to_update = [] for u in user_dict: to_update.append(u['sharedWithId']) for s in", "= saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose == True: print('Running SAQL Query...')", "attempts for additional pages while next_page is not None: if verbose == True:", "ignore_index=True) else: print('Please input a list or tuple of app Ids') sys.exit(1) if", "verbose=False): ''' field names will show up exactly as the column names in", "\"name\": name, \"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype,", "for u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update:", "dateutil import tz import pandas as pd import numpy as np import re", "\"name\": dataset_api_name, 
\"fields\": fields } ] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name,", "\"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] },", "= response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts += 1", "data=json.dumps(payload)) if verbose == True: end = time.time() print('User Access Updated') print('Completed in", "FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is", "''' if verbose == True: start = time.time() print('Checking SAQL and Finding Dataset", "= re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls]", "data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose ==", "while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size", "query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for ls in", "u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break except: attempts += 1 if", "print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration Complete...') print('Chunking and Uploading Data", "is not None: if verbose == True: print('Saving result to CSV...') df.to_csv(save_path, index=False)", "xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = {", "#if dataframe is empty then return 
not found message or return the dataset", "cookie does not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'],", "to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if", "if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df =", "load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''),", "not None: if type(app_id) is list or type(app_id) is tuple: for app in", "save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start = time.time() progress_counter = 0", "self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps if xmd", "''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\")", "print('Data Upload Process Started. 
Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed", "save_path is not None: if verbose == True: print('Saving result to CSV...') df.to_csv(save_path,", "None: if verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose", "dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid", "r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\\nERROR: Datapart Upload", "browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please", "following columns: Dashboard Id, Access Type, and User Id ''' pass def remove_non_ascii(self,", "max_request_attempts=3): if verbose == True: start = time.time() progress_counter = 0 print('Getting app", "verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue to pull data from", "\".\" } fields.append(measure) else: name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension =", "all apps needed. 
update dataframe should have the following columns: Dashboard Id, Access", "attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while", "None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset,", "app in app_id: app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text)", "a list or tuple of app Ids') sys.exit(1) if save_path is not None:", "vs finding all apps needed. update dataframe should have the following columns: Dashboard", "goes backwards 0 = current version 20 is max oldest version. Typically best", "library for Einstein Analytics API import sys import browser_cookie3 import requests import json", "s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass", "df else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return", "pass try: del s['imageUrl'] except: pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id,", "best practice to run the function and view the history first before supplying", "if app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST", "s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the JSON that we don't want", "= browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else:", "elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares 
=", "0 print('Getting app user list and access details...') print('Process started at: '+str(self.get_local_time())) if", "except: pass try: del s['imageUrl'] except: pass elif update_type == 'updateUsers': r =", "in the supplied dataframe ''' if verbose == True: start = time.time() print('Loading", "start = time.time() progress_counter = 0 print('Getting app user list and access details...')", "dimension = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\", \"label\": c } fields.append(dimension)", "app_id is not None: if type(app_id) is list or type(app_id) is tuple: for", "as the column names in the supplied dataframe ''' if verbose == True:", "print(r2.text) sys.exit(1) if verbose == True: print('\\nDatapart Upload Complete...') payload = { \"Action\"", "name, \"type\": \"Text\", \"label\": c } fields.append(dimension) xmd = { \"fileFormat\": { \"charsetName\":", "numpy as np import re from pandas import json_normalize from decimal import Decimal", "1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts <", "''' Function to make it easier to update access using dashboard names vs", "characters since browser cookie does not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header =", "import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url = env_url try: if", "verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") elif app_id is not None:", "you are logged into a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for", "{ \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType']", "shares = response['shares'] to_update = [] for u in user_dict: to_update.append(u['sharedWithId']) for s", "df, dataset_label, useNumericDefaults=True, 
default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name =", "version_num=None, save_json_path=None): ''' version number goes backwards 0 = current version 20 is", "if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue to pull data", "import base64 import csv import unicodecsv from unidecode import unidecode import math class", "curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 50, 'sort':", "in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make it easier", "True: start = time.time() print('Loading Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time()))", "def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return", "add a try/except block to handle the error ''' attempts = 0 while", "error:\", sys.exc_info()[0]) print(\"Trying again...\") elif app_id is not None: if type(app_id) is list", "True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end =", "dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q':", "\"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end =", "\"UserType\": u['shareType'] }, ignore_index=True) else: print('Please input a list or tuple of app", "The query can be in JSON form or can be in the UI", "name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name, 
\"name\":", "''' version number goes backwards 0 = current version 20 is max oldest", "in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in users:", "headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { \"AppId\":", "json import time import datetime from dateutil import tz import pandas as pd", "sys import browser_cookie3 import requests import json import time import datetime from dateutil", "Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version", "load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and", "0 range_start = 0 max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory /", "a try/except block to handle the error ''' attempts = 0 while attempts", "= json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0]) break", "if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item in user_dict if item[\"sharedWithId\"]", "verbose == True: end = time.time() print('Data Upload Process Started. 
Check Progress in", "u in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in to_remove: shares.remove(s)", "range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose == True: print('Running", "message or return the dataset ID if dataset_df.empty == True: print('Dataset not found.", "= requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append(", "time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version", "None: payload = { \"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else:", ": str(partnum), \"DataFile\" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success']", "not found. 
Please check name or API name in Einstein Analytics.') sys.exit(1) else:", "in to_update: shares[s] = next(item for item in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId'])", "del s['imageUrl'] except: pass shares = shares + user_dict elif update_type == 'removeUsers':", "\"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields } ] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self,", "} fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None,", "time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): '''", "attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response", "print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function", "tz import pandas as pd import numpy as np import re from pandas", "useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias'", "c } fields.append(dimension) xmd = { \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\":", "df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif", "sys.exit(1) except: print('ERROR: Could not get session ID. 
Make sure you are logged", "True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for app in response['folders']: attempts = 0", "data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start +=", "in JSON form or can be in the UI SAQL form load statements", "backwards 0 = current version 20 is max oldest version. Typically best practice", "to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose == True: end", "and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI", "fields in the JSON that we don't want for s in shares: try:", "verbose == True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec')", "saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose == True: print('Running SAQL Query...') #run", "browser): self.env_url = env_url try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove", "partnum += 1 if verbose == True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+'", "from pandas import json_normalize from decimal import Decimal import base64 import csv import", "elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name,", "for c in df.columns: if df[c].dtype == \"datetime64[ns]\": name = c.replace(\" \",\"_\") name", "attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") elif", "%p\") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 
50, 'sort': 'Mru',", "CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected", "deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields = [] for", "query as an argument and returns a dataframe or saves to csv The", "'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df =", "True: start = time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time())) if update_type", "def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards 0 = current", "matches that they got. Might want to use exact API name if getting", "datetime from dateutil import tz import pandas as pd import numpy as np", "xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator,", "is to add a try/except block to handle the error ''' attempts =", "\"Text\", \"label\": c } fields.append(dimension) xmd = { \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\":", "/ MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part = rows_in_part for chunk", "attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page =", "(-1, 'Unexpected EOF') Proposed Solution is to add a try/except block to handle", "== 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does", "def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes 
a saql query as", "or save as csv payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) )", "ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution", "user_dict, app_id, update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers '''", "up exactly as the column names in the supplied dataframe ''' if verbose", "{\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True: end =", "requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed') print(r1.text)", "update_type == 'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id,", "'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header =", "= {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path", "r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not", "a valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could not get session", "additional pages while next_page is not None: if verbose == True: progress_counter +=", "df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\": 
df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'),", "again...\") while attempts < max_request_attempts: try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"],", "if verbose == True: start = time.time() print('Checking SAQL and Finding Dataset IDs...')", "del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass else: shares = None", "load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid", "next_page = response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts", "shares = response['shares'] #remove fields in the JSON that we don't want for", "'+str(round(end-start,3))+'sec') return df else: if verbose == True: end = time.time() print('Completed in", "empty then return not found message or return the dataset ID if dataset_df.empty", "env_url, browser): self.env_url = env_url try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:])", "that we don't want for s in shares: try: del s['sharedWithLabel'] except: pass", "\"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields } ] } return", "unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url =", "in df.columns: if df[c].dtype == \"datetime64[ns]\": name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\")", "or can be in the UI SAQL form load statements must have the", "while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page", "'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) 
#check if", "takes a saql query as an argument and returns a dataframe or saves", ": operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config))", "start = time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time()))", "Could not get session ID. Make sure you are logged into a live", "elif version_num is not None: payload = { \"historyId\": history_df['id'].tolist()[version_num] } fix =", "load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and version ID for i in", "sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start =", "(chrome/firefox).') sys.exit(1) #set timezone for displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False):", "response = json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append( { \"AppId\": app,", "or return the dataset ID if dataset_df.empty == True: print('Dataset not found. Please", "def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make it easier to update", "supplied dataframe ''' if verbose == True: start = time.time() print('Loading Data to", "saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True:", "= { \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [", "pass else: shares = None print('Please choose a user update operation. Options are:", "name if getting multiple matches for label search. 
if verbose == True: print('Found", "have the following columns: Dashboard Id, Access Type, and User Id ''' pass", "deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' :", "my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser ==", "update_df, update_type, verbose=True): ''' Function to make it easier to update access using", "headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose", "True: print('Running SAQL Query...') #run query and return dataframe or save as csv", "== shares[s]['sharedWithId']) #remove fields in the JSON that we don't want for s", "try: del s['imageUrl'] except: pass shares = shares + user_dict elif update_type ==", "max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break", "or type(app_id) is tuple: for app in app_id: app_user_df = pd.DataFrame() r =", "requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid", "\"fields\": fields } ] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8',", "= 0 max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part", "= requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove = [] for", "dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach by API name", "s['imageUrl'] except: pass else: shares = None 
print('Please choose a user update operation.", "'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not", "to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True: end", "SAQL Query...') #run query and return dataframe or save as csv payload =", "f, ensure_ascii=False, indent=4) elif version_num is not None: payload = { \"historyId\": history_df['id'].tolist()[version_num]", "many matches that they got. Might want to use exact API name if", "-df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure = {", "Upload Config Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration Complete...') print('Chunking", "+= 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header)", "= next(item for item in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in", "API name in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0]", "\"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields } ] }", "Name', verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json", "{ \"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def", "requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append( {", "u in response['shares']: app_user_df = 
app_user_df.append( { \"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'],", "dashboard names vs finding all apps needed. update dataframe should have the following", "the column names in the supplied dataframe ''' if verbose == True: start", "app in response['folders']: attempts = 0 while attempts < max_request_attempts: try: r =", "base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config", "[] for u in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in", "dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes a saql", "== True: start = time.time() print('Loading Data to Einstein Analytics...') print('Process started at:", "if verbose == True: print('Running SAQL Query...') #run query and return dataframe or", "== 'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header)", "\"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision =", ": dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1 =", "= json.loads(r.text) shares = response['shares'] to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId'])", "app_user_df else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return", "= requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not 
None:", "rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start = 0", "# reset attempts for additional pages while next_page is not None: if verbose", "and access details...') print('Process started at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY", "= base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format'", "csv payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records'])", "\"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" } fields.append(measure) else: name =", "= requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed')", "time.time() print('Loading Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(\"", "print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts < max_request_attempts: try: for app in", "Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000 - 49 df_memory = sys.getsizeof(df)", "print('Checking SAQL and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"')", "response = json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0])", "''' #get broken dashboard version history r = 
requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories'])", "df[c].dtype == \"O\": df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True,", "time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None:", "in user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in the JSON that we", "a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares", "c in df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0,", "name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\", \"label\": c, \"format\":", "\"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else: print('Please input a list", "result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe", "encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False):", "= dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header)", ": data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except:", "Please check name or API 
name in Einstein Analytics.') sys.exit(1) else: dsnm =", "lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields = [] for c", "not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt,", "else: columns = columns for c in columns: if df[c].dtype == \"O\": df[c]", "salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url = env_url try: if browser == 'chrome':", "'+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { \"InsightsExternalDataId\" :", "None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError:", "to seach by API name or label name if search_type == 'UI Label':", "app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess,", "= requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('User Access", "elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if", "df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0", "Proposed Solution is to add a try/except block to handle the error '''", "for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u", "payload = { \"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" : 
data_part64 }", "return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params", "== 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update", "Einstein Analytics API import sys import browser_cookie3 import requests import json import time", "self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj =", "import json import time import datetime from dateutil import tz import pandas as", "'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies =", "] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\",", "dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user", "Analytics API import sys import browser_cookie3 import requests import json import time import", "load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)):", "None: if verbose == True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose", "dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards 0 = current version 20", "attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue", "= dataset_label.replace(\" \",\"_\") fields = [] for c in df.columns: if df[c].dtype ==", "dataset_label.replace(\" \",\"_\") fields = [] for 
c in df.columns: if df[c].dtype == \"datetime64[ns]\":", "into a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation start", "pass shares = shares + user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id,", "} fields.append(dimension) xmd = { \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator", "[] for c in df.columns: if df[c].dtype == \"datetime64[ns]\": name = c.replace(\" \",\"_\")", "want for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl']", "shares + user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response =", "None: if verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts <", "c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True:", "deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will show up", "r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and", "s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass elif update_type == 'updateUsers': r", "json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'],", "user how many matches that they got. 
Might want to use exact API", "response = json.loads(r.text) shares = response['shares'] #remove fields in the JSON that we", "in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass elif", "= time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): '''", "= base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode()", "Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI saql query", "\"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break except: attempts += 1", "\"format\": default_measure_fmt, \"decimalSeparator\": \".\" } fields.append(measure) else: name = c.replace(\" \",\"_\") name =", "App Access...') print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict", "range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 =", "requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove = [] for u", "name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name, \"name\":", "app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break except:", "verbose == True: end = time.time() print('Completed in 
'+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self,", "useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field", "dsid+'/'+dsvid) #update saql with dataset ID and version ID for i in range(0,len(load_stmt_new)):", "re from pandas import json_normalize from decimal import Decimal import base64 import csv", "progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page,", "\"label\": c } fields.append(dimension) xmd = { \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator,", "verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try:", "== True: start = time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process started", "shares = None print('Please choose a user update operation. 
Options are: addNewUsers, fullReplaceAccess,", "dataframe or saves to csv The query can be in JSON form or", "= requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page", "saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid,", "version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not", "True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df", ": json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header,", "df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns)", "}, ignore_index=True) else: print('Please input a list or tuple of app Ids') sys.exit(1)", "expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif", "True: start = time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process started at:", "else: shares = None print('Please choose a user update operation. 
Options are: addNewUsers,", "ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to add a try/except", "''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and version ID", "for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose", "requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if", "print(sys.exc_info()[0]) break except: attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0])", "= app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'],", "start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not", "Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration Complete...') print('Chunking and Uploading", "c in columns: if df[c].dtype == \"O\": df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def", "re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] =", "True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then return not found", "list or type(app_id) is tuple: for app in app_id: app_user_df = pd.DataFrame() r", "user_dict if item[\"sharedWithId\"] == shares[s]['sharedWithId']) #remove fields in the JSON that we don't", "curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return curr_time.strftime(\"%I:%M:%S %p\") def get_dataset_id(self, dataset_name, search_type='API Name', 
verbose=False): params =", "s['imageUrl'] except: pass shares = shares + user_dict elif update_type == 'removeUsers': r", "= { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\":", "User Id ''' pass def remove_non_ascii(self, df, columns=None): if columns == None: columns", "try/except block to handle the error ''' attempts = 0 while attempts <", "dataframe ''' if verbose == True: start = time.time() print('Loading Data to Einstein", "headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page = None", "except: pass shares = shares + user_dict elif update_type == 'removeUsers': r =", "50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df", "data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if", "\"DataFile\" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True", "= shares + user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response", "list and access details...') print('Process started at: '+str(self.get_local_time())) if app_id is None: '''ALERT:", "browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect \"https://\" my_cookies", "\"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else:", "fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, 
data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False,", "shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the JSON that we", "requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num is", "= time.time() print('Data Upload Process Started. Check Progress in Data Monitor.') print('Job ID:", "time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose", "\"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if", "'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64 }", "True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None,", "headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('Data Upload Process Started.", "Typically best practice to run the function and view the history first before", "'+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI saql query to JSON format #create a", "removeUsers, updateUsers') sys.exit(1) if shares is not None: payload = {\"shares\": shares} r", "response['shares']: app_user_df = app_user_df.append( { \"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'],", "chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False,", "broken dashboard version history r = 
requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path", "df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards 0 =", "return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards 0", "again...\") #continue to pull data from next page attempts = 0 # reset", "next_page = None print(sys.exc_info()[0]) break except: attempts += 1 if verbose == True:", "= { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\"", "number goes backwards 0 = current version 20 is max oldest version. Typically", "u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose ==", "print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose ==", "is not None: payload = { \"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header,", "the error ''' attempts = 0 while attempts < max_request_attempts: try: r =", "\",\"_\") name = name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\",", "elif app_id is not None: if type(app_id) is list or type(app_id) is tuple:", "+= 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts", "app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in", "attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) 
total_size =", "in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset", "+= 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for app", "= pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in response['shares']:", "max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in users:", "== True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose == True:", "browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could not get session ID. Make", "access details...') print('Process started at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING", "= json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { \"AppId\": app['id'], \"AppName\":", "shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass shares =", "= dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId']", "data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum +=", "= c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name, \"name\": name,", "0 max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part =", "at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL", "columns for c in columns: if df[c].dtype == \"O\": df[c] = df[c].apply(lambda x:", 
"sys.exit(1) if verbose == True: print('\\nDatapart Upload Complete...') payload = { \"Action\" :", "verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self,", "''' pass def remove_non_ascii(self, df, columns=None): if columns == None: columns = df.columns", "to JSON format #create a dictionary with all datasets used in the query", "cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}", "type(app_id) is list or type(app_id) is tuple: for app in app_id: app_user_df =", "are logged into a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed", "to csv The query can be in JSON form or can be in", "True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") while attempts < max_request_attempts: try: for app", "print('Please select a valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could not", "save_path=None, verbose=False): ''' This function takes a saql query as an argument and", "df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype ==", "'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove fields", "will show up exactly as the column names in the supplied dataframe '''", "curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\")", "got. 
Might want to use exact API name if getting multiple matches for", "== True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose == True: end", "name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\", \"label\": c, \"precision\":", "#show user how many matches that they got. Might want to use exact", "np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError:", "requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach", "verbose=False, max_request_attempts=3): if verbose == True: start = time.time() progress_counter = 0 print('Getting", "GETTING AN ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF')", "None: payload = {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose ==", "except: attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\")", "'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson':", "in the query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for", "try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since", "oldest version. 
Typically best practice to run the function and view the history", "break except KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts += 1 if", "#continue to pull data from next page attempts = 0 # reset attempts", "is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config", "if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def", "dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields = [] for c in df.columns: if", "start = time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time())) if update_type ==", "'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not None:", "True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload = {", "\"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\":", "quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part", "SAQL and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert", "list or tuple of app Ids') sys.exit(1) if save_path is not None: if", "tuple of app Ids') sys.exit(1) if save_path is not None: if verbose ==", "dataset_api_name = dataset_api_name.replace(\" \",\"_\") if fillna == True: for c in df.columns: if", "found message or return the dataset ID if dataset_df.empty == True: print('Dataset not", "data=json.dumps(payload)) if verbose == True: end = time.time() print('Data Upload Process Started. 
Check", "r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config", "= [] for u in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId']", "JSON that we don't want for s in shares: try: del s['sharedWithLabel'] except:", "'Content-Type': 'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header", "= ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and version", "if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") elif app_id is not", "run the function and view the history first before supplying a version number.", "self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid browser", "dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid,", "= df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64", "they got. 
Might want to use exact API name if getting multiple matches", "columns = df.columns else: columns = columns for c in columns: if df[c].dtype", "removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will show up exactly as", "sys.exc_info()[0]) print(\"Trying again...\") for app in response['folders']: attempts = 0 while attempts <", "csv The query can be in JSON form or can be in the", "operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): '''", "i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if verbose ==", "= requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3):", "\"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields", "= response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts += 1 if verbose ==", "in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64", "if verbose == True: print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True)", "return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types include: addNewUsers,", "True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue to pull data from next page", "s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item in", "import 
json_normalize from decimal import Decimal import base64 import csv import unicodecsv from", "\"decimalSeparator\": \".\" } fields.append(measure) else: name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension", "== 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove", "print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue to pull data from next page attempts", "json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append( { \"AppId\": app, \"AppName\": response['name'],", "started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI saql query to JSON format", "app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start = time.time() progress_counter =", "== True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") #continue to pull data from next", "'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update =", "shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time()", "saql query as an argument and returns a dataframe or saves to csv", "== False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min()", "app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True)", "else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID r =", "* 1000 - 49 df_memory = sys.getsizeof(df) 
rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory /", "saql query to JSON format #create a dictionary with all datasets used in", "headers=self.header) response = json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append( { \"AppId\":", "s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass else: shares = None print('Please", "c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name, \"name\": name, \"type\":", "{ \"Action\" : \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose ==", "first 8 characters since browser cookie does not expect \"https://\" my_cookies = requests.utils.dict_from_cookiejar(cj)", "input a list or tuple of app Ids') sys.exit(1) if save_path is not", "'Content-Type': 'application/json'} else: print('Please select a valid browser (chrome or firefox)') sys.exit(1) except:", "query to JSON format #create a dictionary with all datasets used in the", "try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass else: shares =", "\"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" } fields.append(measure) else: name = c.replace(\" \",\"_\")", "return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else: return", "dataset_label = dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields = [] for c in", "\"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self,", "session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation start time def get_local_time(self, add_sec=None,", "in 
'+str(round(end-start,3))+'sec') return df else: if verbose == True: end = time.time() print('Completed", "df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to CSV...')", "pd import numpy as np import re from pandas import json_normalize from decimal", "== True: for c in df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif", "headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1)", "''' field names will show up exactly as the column names in the", "1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") for app in", "else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how many matches that they", "create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name", "#Python wrapper / library for Einstein Analytics API import sys import browser_cookie3 import", "EOF') Proposed Solution is to add a try/except block to handle the error", "/ math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part = rows_in_part", "ignore_index=True) break except: attempts += 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0])", "{ \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields } ]", "in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass shares", "the dataset ID if dataset_df.empty == True: print('Dataset not found. 
Please check name", "quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part +=", "to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for", "fields.append(dimension) xmd = { \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator },", "try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df", "s in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the JSON", "the query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy() for ls", "saql, save_path=None, verbose=False): ''' This function takes a saql query as an argument", "if getting multiple matches for label search. 
if verbose == True: print('Found '+str(dataset_df.shape[0])+'", "REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to add a", "env_url try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters", "import numpy as np import re from pandas import json_normalize from decimal import", "saves to csv The query can be in JSON form or can be", "payload = {\"query\":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if", "= json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose == True: print('Saving result", "sys.exit(1) if shares is not None: payload = {\"shares\": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id,", "Type, and User Id ''' pass def remove_non_ascii(self, df, columns=None): if columns ==", "needed. update dataframe should have the following columns: Dashboard Id, Access Type, and", "print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration Complete...')", "update access using dashboard names vs finding all apps needed. update dataframe should", "def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\",", "apps needed. 
update dataframe should have the following columns: Dashboard Id, Access Type,", "= datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif", "is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w',", "print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response =", "valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could not get session ID.", "form load statements must have the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose ==", "print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make it", "response = json.loads(r.text) shares = response['shares'] to_remove = [] for u in user_dict:", "operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is", "= load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and version ID for i", "updateUsers') sys.exit(1) if shares is not None: payload = {\"shares\": shares} r =", "default_measure_fmt, \"decimalSeparator\": \".\" } fields.append(measure) else: name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\")", "= response['shares'] #remove fields in the JSON that we don't want for s", "False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name", "print('Getting app user list and access details...') print('Process started at: '+str(self.get_local_time())) if 
app_id", "\"O\": df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\",", "20 is max oldest version. Typically best practice to run the function and", "is max oldest version. Typically best practice to run the function and view", "True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose == True: end =", "= 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response =", "name = name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\", \"label\":", "query and return dataframe or save as csv payload = {\"query\":saql} r =", "names will show up exactly as the column names in the supplied dataframe", "== 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove", "{ \"fileFormat\": { \"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [ {", "time.time() print('Data Upload Process Started. 
Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id']))", "+= 1 if verbose == True: print(\"Unexpected error:\", sys.exc_info()[0]) print(\"Trying again...\") elif app_id", "useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\")", "== 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'],", "open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not", "date = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd", "load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\", removeNONascii=True,", "in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the JSON that", "r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self,", "\"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" } fields.append(measure) else:", "can be in the UI SAQL form load statements must have the appropreate", "load statements must have the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose == True:", "= requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a", "dataframe should have the following 
columns: Dashboard Id, Access Type, and User Id", "exactly as the column names in the supplied dataframe ''' if verbose ==", "}, ignore_index=True) break except: attempts += 1 if verbose == True: print(\"Unexpected error:\",", "inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True)", "version. Typically best practice to run the function and view the history first", "Process Started. Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in '+str(round(end-start,3))+'sec')", ") df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose == True:", "dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid)", "response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts += 1 if", "name, \"type\": \"Date\", \"label\": c, \"format\": \"yyyy-MM-dd HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number):", "not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8')", "in columns: if df[c].dtype == \"O\": df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self,", "dataset_name] #show user how many matches that they got. 
Might want to use", "in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types", "'Unexpected EOF') Proposed Solution is to add a try/except block to handle the", "= saql.replace('\\\"','\\\\\"') #convert UI saql query to JSON format #create a dictionary with", "UI SAQL form load statements must have the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if", "{ \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Numeric\", \"label\": c, \"precision\": precision, \"defaultValue\": default_measure_val,", "the function and view the history first before supplying a version number. '''", "and version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with", "Started. Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in '+str(round(end-start,3))+'sec') if", "max_data_part += rows_in_part partnum += 1 if verbose == True: print('\\rChunk '+str(chunk+1)+' of", "have the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose == True: start = time.time()", "/ MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { \"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\" :", "= { \"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df", "to CSV...') df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved", "u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) break except: attempts +=", "if add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile ==", "is not None and 
version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore =", "else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum", "print(\"Trying again...\") while attempts < max_request_attempts: try: for app in response['folders']: r =", "[] for u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in", "dataset ID if dataset_df.empty == True: print('Dataset not found. Please check name or", "time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): '''", "first before supplying a version number. ''' #get broken dashboard version history r", "for s in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the", "'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check", "block to handle the error ''' attempts = 0 while attempts < max_request_attempts:", "Upload Complete...') payload = { \"Action\" : \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header,", "= math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part", "print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True: end = time.time()", "found. Please check name or API name in Einstein Analytics.') sys.exit(1) else: dsnm", "a version number. 
''' #get broken dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header)", "\"AccessType\": u['accessType'], \"UserType\": u['shareType'] }, ignore_index=True) else: print('Please input a list or tuple", "r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df =", "''' This function takes a saql query as an argument and returns a", "df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\",", "headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove = [] for u in", "None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\") else:", "if verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose ==", "name = name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\", \"label\":", "run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes a saql query as an", "range_start += rows_in_part max_data_part += rows_in_part partnum += 1 if verbose == True:", "Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name,", "HH:mm:ss\" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision = 18", "if columns == None: columns = df.columns else: columns = columns for c", "= {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header,", "attempts < 
max_request_attempts: try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users", "columns: if df[c].dtype == \"O\": df[c] = df[c].apply(lambda x: unidecode(x).replace(\"?\",\"\")) def create_xmd(self, df,", "* 1000 * 1000 - 49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] /", "function and view the history first before supplying a version number. ''' #get", "fields.append(measure) else: name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\":", "18 scale = 2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.',", "\"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01", "u in users: app_user_df = app_user_df.append( { \"AppId\": app['id'], \"AppName\": app['name'], \"UserId\": u['sharedWithId'],", "Steps if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df,", "except KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts += 1 if verbose", "preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f:", "name = c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") date = { \"fullyQualifiedName\": name, \"name\":", "verbose == True: print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE =", "import requests import json import time import datetime from dateutil import tz import", "in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = 
self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] =", "not None: if verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if", "data from next page attempts = 0 # reset attempts for additional pages", "user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares", "verbose=True): ''' Function to make it easier to update access using dashboard names", "it easier to update access using dashboard names vs finding all apps needed.", "ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\\\\"','\\\"') if", "inplace=True) elif df[c].dtype == \"datetime64[ns]\": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None:", "1000 * 1000 - 49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory", "save_json_path is not None and version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore", "load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls]", "dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update", "\"charsetName\": charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\":", "= 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users =", "pd.DataFrame() r = 
requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in response['shares']: app_user_df", "timezone for displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())", "history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f,", "number. ''' #get broken dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df =", "response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df", "browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer", "u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s]", "pages while next_page is not None: if verbose == True: progress_counter += 25", "shares.remove(s) #remove fields in the JSON that we don't want for s in", "00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True:", "end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else:", "rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk", "with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is", 
"json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose == True: print('Saving result to", "= history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(),", "load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and version ID for", "history first before supplying a version number. ''' #get broken dashboard version history", "user list and access details...') print('Process started at: '+str(self.get_local_time())) if app_id is None:", "upload_config = { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action'", "name, \"name\": name, \"type\": \"Text\", \"label\": c } fields.append(dimension) xmd = { \"fileFormat\":", "print('\\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { \"InsightsExternalDataId\"", "= ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] =", "seach by API name or label name if search_type == 'UI Label': dataset_df", "except: pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text)", "IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\\\"','\\\\\"') #convert UI saql query to", "verbose == True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose == True:", "default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\", deliminator=\",\", lineterminator=\"\\r\\n\"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields", "types include: addNewUsers, 
fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True: start =", "del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass shares = shares +", "try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass shares = shares", "search_type='API Name', verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name}", "time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql =", "import pandas as pd import numpy as np import re from pandas import", "user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item", "s['imageUrl'] except: pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response =", "remove_non_ascii(self, df, columns=None): if columns == None: columns = df.columns else: columns =", "dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql", "used in the query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new = load_stmt_old.copy()", "is not None: if verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while", "payload = { \"historyId\": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return", "choose a user update operation. 
Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if", "live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation start time def", "version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid", "fields } ] } return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite',", "0 = current version 20 is max oldest version. Typically best practice to", "range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\\\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls])", "import csv import unicodecsv from unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def", "try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass elif update_type ==", "r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in response['shares']: app_user_df =", "headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False):", "update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares']", "dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user", "== 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name'] ==", "True: print('\\nDatapart Upload Complete...') payload = { \"Action\" : \"Process\" } r3 =", "class salesforceEinsteinAnalytics(object): def 
__init__(self, env_url, browser): self.env_url = env_url try: if browser ==", "== True: print('\\nDatapart Upload Complete...') payload = { \"Action\" : \"Process\" } r3", "requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('Data Upload Process", "dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV',", "df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii ==", "= dataset_api_name.replace(\" \",\"_\") if fillna == True: for c in df.columns: if df[c].dtype", "sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID r", "all datasets used in the query load_stmt_old = re.findall(r\"(= load )(.*?)(;)\", saql) load_stmt_new", "Access...') print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict elif", "ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will show up exactly as the", "end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else:", "== True except: print('\\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose == True:", "= time.time() print('Loading Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name =", "statements must have the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose == True: start", "requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, 
data=json.dumps(payload)) if verbose == True: end = time.time() print('User Access Updated')", "charset, \"fieldsDelimitedBy\": deliminator, \"linesTerminatedBy\": lineterminator }, \"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name,", "True: for c in df.columns: if df[c].dtype == \"O\": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype,", "max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:]", "names vs finding all apps needed. update dataframe should have the following columns:", "'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the", "the appropreate spaces: =_load_\\\"datasetname\\\"; ''' if verbose == True: start = time.time() print('Checking", "= rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if", "is tuple: for app in app_id: app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header)", "while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for", "for app in app_id: app_user_df = pd.DataFrame() r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response =", "how many matches that they got. 
Might want to use exact API name", "end='', flush=True) payload = { \"InsightsExternalDataId\" : json.loads(r1.text)['id'], \"PartNumber\" : str(partnum), \"DataFile\" :", "json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not None: payload = { \"historyId\":", "= response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts +=", "< max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl']", "\"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\": u['shareType'] },", "fillna=True, dataset_label=None, verbose=False): ''' field names will show up exactly as the column", "is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val,", "return str(xmd).replace(\"'\",'\"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val=\"0.0\", default_measure_fmt=\"0.0#\", charset=\"UTF-8\",", "= {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:])", "= c.replace(\" \",\"_\") name = name.replace(\"__\",\"_\") measure = { \"fullyQualifiedName\": name, \"name\": name,", "should have the following columns: Dashboard Id, Access Type, and User Id '''", "headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame()", "== True: precision = 18 scale = 2 elif useNumericDefaults == False: precision", "len(x.replace('.', ''))).max() scale = 
-df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(\" \",\"_\") name =", "easier to update access using dashboard names vs finding all apps needed. update", "= dataset_label dataset_api_name = dataset_label.replace(\" \",\"_\") fields = [] for c in df.columns:", "== True: end = time.time() print('Data Upload Process Started. Check Progress in Data", "base64.b64encode(df_part.to_csv(index=False, quotechar='\"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part", "\"Action\" : \"Process\" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True:", "Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000 - 49 df_memory", "u['shareType'] }, ignore_index=True) else: print('Please input a list or tuple of app Ids')", "self.env_url = env_url try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first", "True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np =", "#update saql with dataset ID and version ID for i in range(0,len(load_stmt_new)): saql", "del s['imageUrl'] except: pass else: shares = None print('Please choose a user update", "for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except:", "#get broken dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if", "c, \"precision\": precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" 
} fields.append(measure)", "if verbose == True: start = time.time() print('Updating App Access...') print('Process started at:", "'+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj)", "again...\") for app in response['folders']: attempts = 0 while attempts < max_request_attempts: try:", "not None: if verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts", "and Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000 - 49", "started at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR", "print(\"Trying again...\") elif app_id is not None: if type(app_id) is list or type(app_id)", "scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" } fields.append(measure) else: name = c.replace(\" \",\"_\") name", "print('Please choose a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1)", "app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in", "not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime(\"%I:%M:%S %p\") elif timeFORfile == True: return curr_time.strftime(\"%m_%d_%Y__%I%p\")", "access using dashboard names vs finding all apps needed. 
update dataframe should have", "}, \"objects\": [ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\":", "in the JSON that we don't want for s in shares: try: del", "base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum += 1 if", "verbose == True: start = time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process", "header=False, quotechar='\"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum += 1 if verbose", "a dictionary with all datasets used in the query load_stmt_old = re.findall(r\"(= load", "version number. ''' #get broken dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df", "name.replace(\"__\",\"_\") dimension = { \"fullyQualifiedName\": name, \"name\": name, \"type\": \"Text\", \"label\": c }", "= [] for c in df.columns: if df[c].dtype == \"datetime64[ns]\": name = c.replace(\"", "save_json_path=None): ''' version number goes backwards 0 = current version 20 is max", "< max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header) users = json.loads(r.text)['shares'] for u in", "time.time() progress_counter = 0 print('Getting app user list and access details...') print('Process started", "app_user_df.append( { \"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\": u['sharedWithLabel'], \"AccessType\": u['accessType'], \"UserType\":", "in response['shares']: app_user_df = app_user_df.append( { \"AppId\": app, \"AppName\": response['name'], \"UserId\": u['sharedWithId'], \"UserName\":", "for app in response['folders']: attempts = 0 while attempts < 
max_request_attempts: try: r", "[ { \"connector\": \"CSV\", \"fullyQualifiedName\": dataset_api_name, \"label\": dataset_label, \"name\": dataset_api_name, \"fields\": fields }", "response['shares'] to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId']) for s in shares:", "if verbose == True: print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE", "a dataframe or saves to csv The query can be in JSON form", "and User Id ''' pass def remove_non_ascii(self, df, columns=None): if columns == None:", "precision, \"defaultValue\": default_measure_val, \"scale\": scale, \"format\": default_measure_fmt, \"decimalSeparator\": \".\" } fields.append(measure) else: name", "restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards 0 = current version", "dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes a", "while attempts < max_request_attempts: try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app[\"id\"], headers=self.header)", "This function takes a saql query as an argument and returns a dataframe", "function takes a saql query as an argument and returns a dataframe or", "= None print(sys.exc_info()[0]) break except: attempts += 1 if verbose == True: print(\"Unexpected", "inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df)" ]
[ "import ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0 def test_get_internal_data_ebs_optimized_support_unsupported_get(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get) >", "pytest import ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0 def test_get_internal_data_ebs_optimized_support_unsupported_get(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get)", "import pytest import ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0 def test_get_internal_data_ebs_optimized_support_unsupported_get(): assert", "<filename>tests/internal/ebs_optimized_support/test_ebs_optimized_support_unsupported_auto.py # Testing module ebs_optimized_support.unsupported import pytest import ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list())", "Testing module ebs_optimized_support.unsupported import pytest import ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0", "# Testing module ebs_optimized_support.unsupported import pytest import ec2_compare.internal.ebs_optimized_support.unsupported def 
test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) >", "module ebs_optimized_support.unsupported import pytest import ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0 def", "ebs_optimized_support.unsupported import pytest import ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0 def test_get_internal_data_ebs_optimized_support_unsupported_get():", "ec2_compare.internal.ebs_optimized_support.unsupported def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0 def test_get_internal_data_ebs_optimized_support_unsupported_get(): assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get) > 0" ]
[ "5 for i in ['true', 'y', 'yes']: assert auto_type(i) for i in ['false',", "i in ['false', 'f', 'no']: assert not auto_type(i) def test_equal_object(): \"\"\" Проверка функции", "in ['false', 'f', 'no']: assert not auto_type(i) def test_equal_object(): \"\"\" Проверка функции идентификации", "'y', 'yes']: assert auto_type(i) for i in ['false', 'f', 'no']: assert not auto_type(i)", "\"\"\" Тестирование \"\"\" import sys sys.path.append('.') from objects import auto_type, equal_object, RegistryStore def", "функции идентификации объекта по атрибутам \"\"\" test_object = RegistryStore() test_object.test1 = True test_object.test2", "\"\"\" import sys sys.path.append('.') from objects import auto_type, equal_object, RegistryStore def test_auto_type(): \"\"\"", "= 'foo' test_object.test3 = 5 assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5']) assert not equal_object(test_object,", "def test_equal_object(): \"\"\" Проверка функции идентификации объекта по атрибутам \"\"\" test_object = RegistryStore()", "equal_object, RegistryStore def test_auto_type(): \"\"\" Проверка преобразования значений \"\"\" assert auto_type('test') == str('test')", "assert auto_type('test') == str('test') assert auto_type('5') == 5 for i in ['true', 'y',", "значений \"\"\" assert auto_type('test') == str('test') assert auto_type('5') == 5 for i in", "str('test') assert auto_type('5') == 5 for i in ['true', 'y', 'yes']: assert auto_type(i)", "i in ['true', 'y', 'yes']: assert auto_type(i) for i in ['false', 'f', 'no']:", "'f', 'no']: assert not auto_type(i) def test_equal_object(): \"\"\" Проверка функции идентификации объекта по", "test_object.test1 = True test_object.test2 = 'foo' test_object.test3 = 5 assert equal_object(test_object, ['test1=true', 'test2=foo',", "= True test_object.test2 = 'foo' test_object.test3 = 5 assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5'])", "True test_object.test2 = 'foo' test_object.test3 = 5 assert 
equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5']) assert", "\"\"\" test_object = RegistryStore() test_object.test1 = True test_object.test2 = 'foo' test_object.test3 = 5", "import auto_type, equal_object, RegistryStore def test_auto_type(): \"\"\" Проверка преобразования значений \"\"\" assert auto_type('test')", "assert not auto_type(i) def test_equal_object(): \"\"\" Проверка функции идентификации объекта по атрибутам \"\"\"", "RegistryStore def test_auto_type(): \"\"\" Проверка преобразования значений \"\"\" assert auto_type('test') == str('test') assert", "преобразования значений \"\"\" assert auto_type('test') == str('test') assert auto_type('5') == 5 for i", "== str('test') assert auto_type('5') == 5 for i in ['true', 'y', 'yes']: assert", "test_equal_object(): \"\"\" Проверка функции идентификации объекта по атрибутам \"\"\" test_object = RegistryStore() test_object.test1", "test_auto_type(): \"\"\" Проверка преобразования значений \"\"\" assert auto_type('test') == str('test') assert auto_type('5') ==", "auto_type('test') == str('test') assert auto_type('5') == 5 for i in ['true', 'y', 'yes']:", "\"\"\" Проверка преобразования значений \"\"\" assert auto_type('test') == str('test') assert auto_type('5') == 5", "['true', 'y', 'yes']: assert auto_type(i) for i in ['false', 'f', 'no']: assert not", "test_object.test2 = 'foo' test_object.test3 = 5 assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5']) assert not", "auto_type(i) def test_equal_object(): \"\"\" Проверка функции идентификации объекта по атрибутам \"\"\" test_object =", "sys.path.append('.') from objects import auto_type, equal_object, RegistryStore def test_auto_type(): \"\"\" Проверка преобразования значений", "for i in ['true', 'y', 'yes']: assert auto_type(i) for i in ['false', 'f',", "in ['true', 'y', 'yes']: assert auto_type(i) for i in ['false', 'f', 'no']: assert", "objects import auto_type, equal_object, RegistryStore def test_auto_type(): \"\"\" Проверка 
преобразования значений \"\"\" assert", "assert auto_type(i) for i in ['false', 'f', 'no']: assert not auto_type(i) def test_equal_object():", "test_object = RegistryStore() test_object.test1 = True test_object.test2 = 'foo' test_object.test3 = 5 assert", "атрибутам \"\"\" test_object = RegistryStore() test_object.test1 = True test_object.test2 = 'foo' test_object.test3 =", "from objects import auto_type, equal_object, RegistryStore def test_auto_type(): \"\"\" Проверка преобразования значений \"\"\"", "'yes']: assert auto_type(i) for i in ['false', 'f', 'no']: assert not auto_type(i) def", "идентификации объекта по атрибутам \"\"\" test_object = RegistryStore() test_object.test1 = True test_object.test2 =", "test_object.test3 = 5 assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5']) assert not equal_object(test_object, ['test1=false', 'test2=foo',", "RegistryStore() test_object.test1 = True test_object.test2 = 'foo' test_object.test3 = 5 assert equal_object(test_object, ['test1=true',", "Проверка функции идентификации объекта по атрибутам \"\"\" test_object = RegistryStore() test_object.test1 = True", "'no']: assert not auto_type(i) def test_equal_object(): \"\"\" Проверка функции идентификации объекта по атрибутам", "sys sys.path.append('.') from objects import auto_type, equal_object, RegistryStore def test_auto_type(): \"\"\" Проверка преобразования", "for i in ['false', 'f', 'no']: assert not auto_type(i) def test_equal_object(): \"\"\" Проверка", "assert auto_type('5') == 5 for i in ['true', 'y', 'yes']: assert auto_type(i) for", "Проверка преобразования значений \"\"\" assert auto_type('test') == str('test') assert auto_type('5') == 5 for", "= RegistryStore() test_object.test1 = True test_object.test2 = 'foo' test_object.test3 = 5 assert equal_object(test_object,", "Тестирование \"\"\" import sys sys.path.append('.') from objects import auto_type, equal_object, RegistryStore def test_auto_type():", "auto_type, equal_object, RegistryStore 
def test_auto_type(): \"\"\" Проверка преобразования значений \"\"\" assert auto_type('test') ==", "== 5 for i in ['true', 'y', 'yes']: assert auto_type(i) for i in", "по атрибутам \"\"\" test_object = RegistryStore() test_object.test1 = True test_object.test2 = 'foo' test_object.test3", "auto_type('5') == 5 for i in ['true', 'y', 'yes']: assert auto_type(i) for i", "\"\"\" Проверка функции идентификации объекта по атрибутам \"\"\" test_object = RegistryStore() test_object.test1 =", "'foo' test_object.test3 = 5 assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5']) assert not equal_object(test_object, ['test1=false',", "= 5 assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5']) assert not equal_object(test_object, ['test1=false', 'test2=foo', 'test3=5'])", "def test_auto_type(): \"\"\" Проверка преобразования значений \"\"\" assert auto_type('test') == str('test') assert auto_type('5')", "\"\"\" assert auto_type('test') == str('test') assert auto_type('5') == 5 for i in ['true',", "auto_type(i) for i in ['false', 'f', 'no']: assert not auto_type(i) def test_equal_object(): \"\"\"", "['false', 'f', 'no']: assert not auto_type(i) def test_equal_object(): \"\"\" Проверка функции идентификации объекта", "import sys sys.path.append('.') from objects import auto_type, equal_object, RegistryStore def test_auto_type(): \"\"\" Проверка", "объекта по атрибутам \"\"\" test_object = RegistryStore() test_object.test1 = True test_object.test2 = 'foo'", "not auto_type(i) def test_equal_object(): \"\"\" Проверка функции идентификации объекта по атрибутам \"\"\" test_object" ]
[]
[ "JSONFormField(forms.Field): \"\"\"A form field which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def", "import simplejson as json from philo.validators import json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field):", "== '' and not self.required: return None try: return json.loads(value) except Exception, e:", "if value == '' and not self.required: return None try: return json.loads(value) except", "which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self, value): if value", "try: return json.loads(value) except Exception, e: raise ValidationError(u'JSON decode error: %s' % e)", "json from philo.validators import json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field", "not self.required: return None try: return json.loads(value) except Exception, e: raise ValidationError(u'JSON decode", "clean(self, value): if value == '' and not self.required: return None try: return", "is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self, value): if value ==", "as json from philo.validators import json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form", "class JSONFormField(forms.Field): \"\"\"A form field which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator]", "value == '' and not self.required: return None try: return json.loads(value) except Exception,", "'' and not self.required: return None try: return json.loads(value) except Exception, e: raise", "import forms from django.core.exceptions import ValidationError from django.utils import simplejson as json from", "philo.validators import json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field which is", "from django import forms from 
django.core.exceptions import ValidationError from django.utils import simplejson as", "__all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field which is validated by :func:`philo.validators.json_validator`.\"\"\"", "json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field which is validated by", "= ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators", "by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self, value): if value == '' and", ":func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self, value): if value == '' and not", "self.required: return None try: return json.loads(value) except Exception, e: raise ValidationError(u'JSON decode error:", "validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self, value): if value == ''", "django.utils import simplejson as json from philo.validators import json_validator __all__ = ('JSONFormField',) class", "[json_validator] def clean(self, value): if value == '' and not self.required: return None", "and not self.required: return None try: return json.loads(value) except Exception, e: raise ValidationError(u'JSON", "form field which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self, value):", "None try: return json.loads(value) except Exception, e: raise ValidationError(u'JSON decode error: %s' %", "forms from django.core.exceptions import ValidationError from django.utils import simplejson as json from philo.validators", "field which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self, value): if", "ValidationError from django.utils import simplejson as json from philo.validators 
import json_validator __all__ =", "value): if value == '' and not self.required: return None try: return json.loads(value)", "('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators =", "return None try: return json.loads(value) except Exception, e: raise ValidationError(u'JSON decode error: %s'", "from django.core.exceptions import ValidationError from django.utils import simplejson as json from philo.validators import", "django.core.exceptions import ValidationError from django.utils import simplejson as json from philo.validators import json_validator", "django import forms from django.core.exceptions import ValidationError from django.utils import simplejson as json", "default_validators = [json_validator] def clean(self, value): if value == '' and not self.required:", "import json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field which is validated", "from philo.validators import json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A form field which", "\"\"\"A form field which is validated by :func:`philo.validators.json_validator`.\"\"\" default_validators = [json_validator] def clean(self,", "from django.utils import simplejson as json from philo.validators import json_validator __all__ = ('JSONFormField',)", "import ValidationError from django.utils import simplejson as json from philo.validators import json_validator __all__", "def clean(self, value): if value == '' and not self.required: return None try:", "= [json_validator] def clean(self, value): if value == '' and not self.required: return", "simplejson as json from philo.validators import json_validator __all__ = ('JSONFormField',) class JSONFormField(forms.Field): \"\"\"A" ]
[ "for profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1", "self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i", "self).__init__(parent) self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows)", "def __init__(self, parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number", "-1: print(\"Is empty\") return profile = self.item(current_row, 0).text() return profile def delete(self, profiles):", "current_row == -1: print(\"Is empty\") return profile = self.item(current_row, 0).text() return profile def", "return profile = self.item(current_row, 0).text() return profile def delete(self, profiles): current_row = self.currentRow()", "def update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0 for profile in profiles: self.setItem(i, 0,", "self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def", "+= 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow() if current_row == -1: print(\"Is", "of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) 
self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0 for", "in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents() def", "self.currentRow() if current_row == -1: print(\"Is empty\") return profile = self.item(current_row, 0).text() return", "= self.item(current_row, 0).text() return profile def delete(self, profiles): current_row = self.currentRow() if current_row", "0).text() return profile def delete(self, profiles): current_row = self.currentRow() if current_row == -1:", "def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles):", "parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"])", "empty\") return profile = self.item(current_row, 0).text() return profile def delete(self, profiles): current_row =", "if current_row == -1: print(\"Is empty\") return profile = self.item(current_row, 0).text() return profile", "W class ProfilesTable(W.QTableWidget): def __init__(self, parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def init(self):", "self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0 for profile in profiles: self.setItem(i,", "== -1: print(\"Is empty\") return profile = self.takeItem(current_row, 0).text() if profile in profiles:", "super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) 
def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide()", "self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row", "self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0 for profile in", "as W class ProfilesTable(W.QTableWidget): def __init__(self, parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def", "update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0 for profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile))", "i = 0 for profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile]))))", "= 0 for profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i", "get_current_profile_name(self): current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return profile =", "profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents()", "= self.currentRow() if current_row == -1: print(\"Is empty\") return profile = self.item(current_row, 0).text()", "current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return profile = self.takeItem(current_row,", "profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of 
summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers)", "init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles))", "self.currentRow() if current_row == -1: print(\"Is empty\") return profile = self.takeItem(current_row, 0).text() if", "self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return", "profile = self.item(current_row, 0).text() return profile def delete(self, profiles): current_row = self.currentRow() if", "1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow() if current_row == -1: print(\"Is empty\")", "self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self,", "0 for profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i +=", "0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row =", "self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow() if", "current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return profile = self.item(current_row,", "PyQt5.QtWidgets as W class ProfilesTable(W.QTableWidget): def __init__(self, parent, profiles=[]): 
super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles)", "class ProfilesTable(W.QTableWidget): def __init__(self, parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2)", "profile def delete(self, profiles): current_row = self.currentRow() if current_row == -1: print(\"Is empty\")", "def delete(self, profiles): current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return", "i += 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow() if current_row == -1:", "= self.currentRow() if current_row == -1: print(\"Is empty\") return profile = self.takeItem(current_row, 0).text()", "if current_row == -1: print(\"Is empty\") return profile = self.takeItem(current_row, 0).text() if profile", "-1: print(\"Is empty\") return profile = self.takeItem(current_row, 0).text() if profile in profiles: return", "ProfilesTable(W.QTableWidget): def __init__(self, parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\",", "import PyQt5.QtWidgets as W class ProfilesTable(W.QTableWidget): def __init__(self, parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init()", "print(\"Is empty\") return profile = self.takeItem(current_row, 0).text() if profile in profiles: return profile", "self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0 for profile in profiles:", "self.setRowCount(len(profiles)) i = 0 for profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1,", "profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 
self.resizeColumnsToContents() def get_current_profile_name(self):", "summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0 for profile", "print(\"Is empty\") return profile = self.item(current_row, 0).text() return profile def delete(self, profiles): current_row", "== -1: print(\"Is empty\") return profile = self.item(current_row, 0).text() return profile def delete(self,", "1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow() if current_row", "profiles): current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return profile =", "W.QTableWidgetItem(profile)) self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow()", "W.QTableWidgetItem(str(len(profiles[profile])))) i += 1 self.resizeColumnsToContents() def get_current_profile_name(self): current_row = self.currentRow() if current_row ==", "\"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i = 0", "delete(self, profiles): current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return profile", "self.item(current_row, 0).text() return profile def delete(self, profiles): current_row = self.currentRow() if current_row ==", "__init__(self, parent, profiles=[]): super(ProfilesTable, self).__init__(parent) self.init() self.update_profiles(profiles) def init(self): self.setColumnCount(2) self.setHorizontalHeaderLabels([\"Name\", \"Number of", "return profile def delete(self, profiles): current_row = 
self.currentRow() if current_row == -1: print(\"Is", "profiles): self.setRowCount(len(profiles)) i = 0 for profile in profiles: self.setItem(i, 0, W.QTableWidgetItem(profile)) self.setItem(i,", "self.setHorizontalHeaderLabels([\"Name\", \"Number of summoners\"]) self.setEditTriggers(W.QAbstractItemView.NoEditTriggers) self.verticalHeader().hide() self.setSelectionBehavior(W.QAbstractItemView.SelectRows) def update_profiles(self, profiles): self.setRowCount(len(profiles)) i =", "current_row == -1: print(\"Is empty\") return profile = self.takeItem(current_row, 0).text() if profile in", "def get_current_profile_name(self): current_row = self.currentRow() if current_row == -1: print(\"Is empty\") return profile" ]
[ "also include details on individual steps.\", is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx,", "optional arguments for filtering (e.g. a workflow ID). Output: A list of workflow", "workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow invocations, or select", "from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\",", "'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\"", "filter on\", type=str ) @click.option( \"--history_id\", help=\"Encoded history ID to filter on\", type=str", ") @click.option( \"--user_id\", help=\"Encoded user ID to filter on. This must be your", "workflow invocations, or select a subset by specifying optional arguments for filtering (e.g.", "of workflow invocations. 
For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new',", "either 'element' or 'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\", help=\"If 'view' is", "invocation, either 'element' or 'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\", help=\"If 'view'", "- if specified, the most recent invocations will be returned.\", type=int ) @click.option(", "show_default=True, type=str ) @click.option( \"--step_details\", help=\"If 'view' is 'element', also include details on", "type=str ) @click.option( \"--step_details\", help=\"If 'view' is 'element', also include details on individual", ") @pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False):", "the most recent invocations will be returned.\", type=int ) @click.option( \"--view\", help=\"Level of", "individual steps.\", is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True,", "are not an admin user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether to include terminal", ") @click.option( \"--limit\", help=\"Maximum number of invocations to return - if specified, the", "details on individual steps.\", is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\",", "must be your own user ID if your are not an admin user.\",", "steps.\", is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\",", "of invocations to return - if specified, the most recent invocations will be", "a workflow ID). Output: A list of workflow invocations. 
For example:: [{'history_id': '2f94e8ae9edff68a',", "invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22',", "'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return ctx.gi.invocations.get_invocations(workflow_id=workflow_id,", "'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id, user_id=user_id, include_terminal=include_terminal,", "list of workflow invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state':", "@click.option( \"--workflow_id\", help=\"Encoded workflow ID to filter on\", type=str ) @click.option( \"--history_id\", help=\"Encoded", "if specified, the most recent invocations will be returned.\", type=int ) @click.option( \"--view\",", "\"--history_id\", help=\"Encoded history ID to filter on\", type=str ) @click.option( \"--user_id\", help=\"Encoded user", "\"--view\", help=\"Level of detail to return per invocation, either 'element' or 'collection'.\", default=\"collection\",", "[{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id':", "help=\"Encoded user ID to filter on. 
This must be your own user ID", ") @click.option( \"--include_terminal\", help=\"Whether to include terminal states.\", default=\"True\", show_default=True, is_flag=True ) @click.option(", "'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id, user_id=user_id, include_terminal=include_terminal, limit=limit, view=view, step_details=step_details)", "@click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID to filter on\", type=str ) @click.option( \"--history_id\",", "workflow ID to filter on\", type=str ) @click.option( \"--history_id\", help=\"Encoded history ID to", "user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether to include terminal states.\", default=\"True\", show_default=True, is_flag=True", ") @click.option( \"--view\", help=\"Level of detail to return per invocation, either 'element' or", "or select a subset by specifying optional arguments for filtering (e.g. a workflow", "@click.option( \"--step_details\", help=\"If 'view' is 'element', also include details on individual steps.\", is_flag=True", "help=\"Encoded history ID to filter on\", type=str ) @click.option( \"--user_id\", help=\"Encoded user ID", "default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum number of invocations to return -", "states.\", default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum number of invocations to return", "'2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}]", "detail to return per invocation, either 'element' or 'collection'.\", default=\"collection\", show_default=True, type=str )", "user ID to filter on. 
This must be your own user ID if", "to filter on. This must be your own user ID if your are", "json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID to", "custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID to filter on\", type=str )", "most recent invocations will be returned.\", type=int ) @click.option( \"--view\", help=\"Level of detail", "type=int ) @click.option( \"--view\", help=\"Level of detail to return per invocation, either 'element'", "\"--step_details\", help=\"If 'view' is 'element', also include details on individual steps.\", is_flag=True )", "is 'element', also include details on individual steps.\", is_flag=True ) @pass_context @custom_exception @json_output", "help=\"Maximum number of invocations to return - if specified, the most recent invocations", "admin user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether to include terminal states.\", default=\"True\", show_default=True,", "Output: A list of workflow invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class':", "pass_context, json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID", "select a subset by specifying optional arguments for filtering (e.g. a workflow ID).", "to return per invocation, either 'element' or 'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option(", "all workflow invocations, or select a subset by specifying optional arguments for filtering", "filtering (e.g. a workflow ID). Output: A list of workflow invocations. For example::", "specifying optional arguments for filtering (e.g. a workflow ID). 
Output: A list of", "This must be your own user ID if your are not an admin", "specified, the most recent invocations will be returned.\", type=int ) @click.option( \"--view\", help=\"Level", "returned.\", type=int ) @click.option( \"--view\", help=\"Level of detail to return per invocation, either", "A list of workflow invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation',", "For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid':", "on. This must be your own user ID if your are not an", "help=\"Whether to include terminal states.\", default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum number", "parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded", "invocations, or select a subset by specifying optional arguments for filtering (e.g. a", "per invocation, either 'element' or 'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\", help=\"If", "filter on\", type=str ) @click.option( \"--user_id\", help=\"Encoded user ID to filter on. This", "history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow invocations, or select a", "invocations to return - if specified, the most recent invocations will be returned.\",", "workflow ID). Output: A list of workflow invocations. 
For example:: [{'history_id': '2f94e8ae9edff68a', 'id':", "from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID to filter", "step_details=False): \"\"\"Get all workflow invocations, or select a subset by specifying optional arguments", "a subset by specifying optional arguments for filtering (e.g. a workflow ID). Output:", "ID to filter on. This must be your own user ID if your", "by specifying optional arguments for filtering (e.g. a workflow ID). Output: A list", "'2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id, user_id=user_id, include_terminal=include_terminal, limit=limit, view=view,", "\"--user_id\", help=\"Encoded user ID to filter on. This must be your own user", "number of invocations to return - if specified, the most recent invocations will", "@pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get", "show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum number of invocations to return - if", "\"\"\"Get all workflow invocations, or select a subset by specifying optional arguments for", "terminal states.\", default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum number of invocations to", "return - if specified, the most recent invocations will be returned.\", type=int )", "for filtering (e.g. a workflow ID). Output: A list of workflow invocations. 
For", "@click.option( \"--limit\", help=\"Maximum number of invocations to return - if specified, the most", "is_flag=True ) @click.option( \"--limit\", help=\"Maximum number of invocations to return - if specified,", "help=\"Level of detail to return per invocation, either 'element' or 'collection'.\", default=\"collection\", show_default=True,", "user ID if your are not an admin user.\", type=str ) @click.option( \"--include_terminal\",", "on\", type=str ) @click.option( \"--history_id\", help=\"Encoded history ID to filter on\", type=str )", "@click.option( \"--user_id\", help=\"Encoded user ID to filter on. This must be your own", "filter on. This must be your own user ID if your are not", "include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow invocations, or select a subset by", "arguments for filtering (e.g. a workflow ID). Output: A list of workflow invocations.", "import pass_context, json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow", "workflow invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time':", "include terminal states.\", default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum number of invocations", "limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow invocations, or select a subset by specifying", "on\", type=str ) @click.option( \"--user_id\", help=\"Encoded user ID to filter on. 
This must", "@json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow", "return per invocation, either 'element' or 'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\",", "history ID to filter on\", type=str ) @click.option( \"--user_id\", help=\"Encoded user ID to", "be your own user ID if your are not an admin user.\", type=str", "\"--include_terminal\", help=\"Whether to include terminal states.\", default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum", "'element', also include details on individual steps.\", is_flag=True ) @pass_context @custom_exception @json_output def", "ID if your are not an admin user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether", "ID). Output: A list of workflow invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e',", "view=\"collection\", step_details=False): \"\"\"Get all workflow invocations, or select a subset by specifying optional", "'element' or 'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\", help=\"If 'view' is 'element',", "(e.g. a workflow ID). Output: A list of workflow invocations. 
For example:: [{'history_id':", "parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID to filter on\",", "not an admin user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether to include terminal states.\",", "to filter on\", type=str ) @click.option( \"--history_id\", help=\"Encoded history ID to filter on\",", "example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c',", "ID to filter on\", type=str ) @click.option( \"--user_id\", help=\"Encoded user ID to filter", "'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id, user_id=user_id, include_terminal=include_terminal, limit=limit,", "@click.option( \"--view\", help=\"Level of detail to return per invocation, either 'element' or 'collection'.\",", "'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\", help=\"If 'view' is 'element', also include", "to filter on\", type=str ) @click.option( \"--user_id\", help=\"Encoded user ID to filter on.", "invocations will be returned.\", type=int ) @click.option( \"--view\", help=\"Level of detail to return", "import custom_exception, json_output @click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID to filter on\", type=str", ") @click.option( \"--history_id\", help=\"Encoded history ID to filter on\", type=str ) @click.option( \"--user_id\",", "type=str ) @click.option( \"--user_id\", help=\"Encoded user ID to filter on. 
This must be", "or 'collection'.\", default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\", help=\"If 'view' is 'element', also", "to return - if specified, the most recent invocations will be returned.\", type=int", "import click from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations')", "ID to filter on\", type=str ) @click.option( \"--history_id\", help=\"Encoded history ID to filter", "help=\"Encoded workflow ID to filter on\", type=str ) @click.option( \"--history_id\", help=\"Encoded history ID", "to include terminal states.\", default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\", help=\"Maximum number of", "click from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option(", "default=\"collection\", show_default=True, type=str ) @click.option( \"--step_details\", help=\"If 'view' is 'element', also include details", "\"--limit\", help=\"Maximum number of invocations to return - if specified, the most recent", "is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\",", "of detail to return per invocation, either 'element' or 'collection'.\", default=\"collection\", show_default=True, type=str", "your own user ID if your are not an admin user.\", type=str )", "def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow invocations,", "@custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all", "type=str ) @click.option( \"--include_terminal\", help=\"Whether to include terminal 
states.\", default=\"True\", show_default=True, is_flag=True )", "on individual steps.\", is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\",", ") @click.option( \"--step_details\", help=\"If 'view' is 'element', also include details on individual steps.\",", "'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return", "@click.option( \"--include_terminal\", help=\"Whether to include terminal states.\", default=\"True\", show_default=True, is_flag=True ) @click.option( \"--limit\",", "user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow invocations, or select a subset", "subset by specifying optional arguments for filtering (e.g. a workflow ID). Output: A", "your are not an admin user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether to include", "own user ID if your are not an admin user.\", type=str ) @click.option(", "will be returned.\", type=int ) @click.option( \"--view\", help=\"Level of detail to return per", "recent invocations will be returned.\", type=int ) @click.option( \"--view\", help=\"Level of detail to", "'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id,", "'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] \"\"\" return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id, user_id=user_id,", "help=\"If 'view' is 'element', also include details on individual steps.\", is_flag=True ) @pass_context", "json_output 
@click.command('get_invocations') @click.option( \"--workflow_id\", help=\"Encoded workflow ID to filter on\", type=str ) @click.option(", "@click.option( \"--history_id\", help=\"Encoded history ID to filter on\", type=str ) @click.option( \"--user_id\", help=\"Encoded", "include details on individual steps.\", is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx, workflow_id=\"\",", "\"--workflow_id\", help=\"Encoded workflow ID to filter on\", type=str ) @click.option( \"--history_id\", help=\"Encoded history", "type=str ) @click.option( \"--history_id\", help=\"Encoded history ID to filter on\", type=str ) @click.option(", "cli(ctx, workflow_id=\"\", history_id=\"\", user_id=\"\", include_terminal=True, limit=\"\", view=\"collection\", step_details=False): \"\"\"Get all workflow invocations, or", "'view' is 'element', also include details on individual steps.\", is_flag=True ) @pass_context @custom_exception", "be returned.\", type=int ) @click.option( \"--view\", help=\"Level of detail to return per invocation,", "an admin user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether to include terminal states.\", default=\"True\",", "if your are not an admin user.\", type=str ) @click.option( \"--include_terminal\", help=\"Whether to" ]
[ "n)] result = \"\".join([next(iter([item[i] for item in layers if int(item[i]) is not 2]),", "width * height layers = [input[i:i + n] for i in range(0, len(input),", "in layers if int(item[i]) is not 2]), 2) for i in range(0, n)])", "for i in range(0, n)]) result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i", "i in range(0, len(input), n)] counts = [item.count(\"0\") for item in layers] layer", "+ n] for i in range(0, len(input), n)] counts = [item.count(\"0\") for item", "in range(0, n)]) result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i + width]", "counts = [item.count(\"0\") for item in layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\") *", "for item in layers if int(item[i]) is not 2]), 2) for i in", "= layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width, height): n = width", "item in layers if int(item[i]) is not 2]), 2) for i in range(0,", "if int(item[i]) is not 2]), 2) for i in range(0, n)]) result_string =", "= [input[i:i + n] for i in range(0, len(input), n)] result = \"\".join([next(iter([item[i]", "def calculate_part_1(input, width, height): n = width * height layers = [input[i:i +", "= width * height layers = [input[i:i + n] for i in range(0,", "height): n = width * height layers = [input[i:i + n] for i", "n = width * height layers = [input[i:i + n] for i in", "[input[i:i + n] for i in range(0, len(input), n)] result = \"\".join([next(iter([item[i] for", "layer.count(\"2\") def calculate_part_2(input, width, height): n = width * height layers = [input[i:i", "= \"\".join([next(iter([item[i] for item in layers if int(item[i]) is not 2]), 2) for", "n] for i in range(0, len(input), n)] counts = [item.count(\"0\") for item in", "n)] counts = [item.count(\"0\") for item in layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\")", 
"calculate_part_1(input, width, height): n = width * height layers = [input[i:i + n]", "int(item[i]) is not 2]), 2) for i in range(0, n)]) result_string = result.replace(\"0\",", "item in layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width,", "* layer.count(\"2\") def calculate_part_2(input, width, height): n = width * height layers =", "i in range(0, n)]) result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i +", "result = \"\".join([next(iter([item[i] for item in layers if int(item[i]) is not 2]), 2)", "range(0, n)]) result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i + width] for", "result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i + width] for i in range(0, len(result_string),", "in range(0, len(input), n)] counts = [item.count(\"0\") for item in layers] layer =", "len(input), n)] counts = [item.count(\"0\") for item in layers] layer = layers[counts.index(min(counts))] return", "in layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width, height):", "not 2]), 2) for i in range(0, n)]) result_string = result.replace(\"0\", \" \").replace(\"1\",", "def calculate_part_2(input, width, height): n = width * height layers = [input[i:i +", "= result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i + width] for i in range(0,", "for i in range(0, len(input), n)] counts = [item.count(\"0\") for item in layers]", "= [input[i:i + n] for i in range(0, len(input), n)] counts = [item.count(\"0\")", "for i in range(0, len(input), n)] result = \"\".join([next(iter([item[i] for item in layers", "layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width, height): n = width * height layers", "height layers = [input[i:i + n] for i in 
range(0, len(input), n)] counts", "2]), 2) for i in range(0, n)]) result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\")", "len(input), n)] result = \"\".join([next(iter([item[i] for item in layers if int(item[i]) is not", "layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width, height): n", "result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i + width] for i in", "layer = layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width, height): n =", "layers = [input[i:i + n] for i in range(0, len(input), n)] result =", "i in range(0, len(input), n)] result = \"\".join([next(iter([item[i] for item in layers if", "layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width, height): n = width *", "\"\".join([next(iter([item[i] for item in layers if int(item[i]) is not 2]), 2) for i", "return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input, width, height): n = width * height", "n)]) result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i + width] for i", "layers if int(item[i]) is not 2]), 2) for i in range(0, n)]) result_string", "width, height): n = width * height layers = [input[i:i + n] for", "\" \").replace(\"1\", \"█\") return \"\\n\".join([result_string[i:i + width] for i in range(0, len(result_string), width)])", "for item in layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def calculate_part_2(input,", "range(0, len(input), n)] result = \"\".join([next(iter([item[i] for item in layers if int(item[i]) is", "+ n] for i in range(0, len(input), n)] result = \"\".join([next(iter([item[i] for item", "calculate_part_2(input, width, height): n = width * height layers = [input[i:i + n]", "layers = [input[i:i 
+ n] for i in range(0, len(input), n)] counts =", "in range(0, len(input), n)] result = \"\".join([next(iter([item[i] for item in layers if int(item[i])", "* height layers = [input[i:i + n] for i in range(0, len(input), n)]", "= [item.count(\"0\") for item in layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\")", "[item.count(\"0\") for item in layers] layer = layers[counts.index(min(counts))] return layer.count(\"1\") * layer.count(\"2\") def", "is not 2]), 2) for i in range(0, n)]) result_string = result.replace(\"0\", \"", "2) for i in range(0, n)]) result_string = result.replace(\"0\", \" \").replace(\"1\", \"█\") return", "range(0, len(input), n)] counts = [item.count(\"0\") for item in layers] layer = layers[counts.index(min(counts))]", "[input[i:i + n] for i in range(0, len(input), n)] counts = [item.count(\"0\") for", "n] for i in range(0, len(input), n)] result = \"\".join([next(iter([item[i] for item in", "height layers = [input[i:i + n] for i in range(0, len(input), n)] result" ]
[ "= {} with open(sys.argv[1]) as reader: for line in reader: if line.startswith(\"<\"): match", "topic in topic_counters: topic_counters[topic] = Counter() for rank, word in enumerate(words): topic_counters[topic][word] +=", "rank def jaccard(a, b): set_a = set(a.keys()) set_b = set(b.keys()) return len(set_a &", "topic_trigrams[t2]), t1, t2)) for score, t1, t2 in sorted(topic_pair_scores, reverse=True): print(score, \" \".join(topic_words[t1]),", "match.group(3).split(\" \") if not topic in topic_counters: topic_counters[topic] = Counter() for rank, word", "& set_b) / len(set_a | set_b) def character_trigrams(strings): output = Counter() for s", "padded_string = \" \" + s.replace(\"v\", \"u\") + \" \" for position in", "2): trigram = padded_string[position:(position+3)] output[trigram] += 1 return output print(\"getting top words\") topic_ids", "t1, t2)) for score, t1, t2 in sorted(topic_pair_scores, reverse=True): print(score, \" \".join(topic_words[t1]), \"", "\"u\") + \" \" for position in range(len(padded_string) - 2): trigram = padded_string[position:(position+3)]", "!= None: topic = int(match.group(1)) alpha = float(match.group(2)) words = match.group(3).split(\" \") if", "topic_counters = {} with open(sys.argv[1]) as reader: for line in reader: if line.startswith(\"<\"):", "= padded_string[position:(position+3)] output[trigram] += 1 return output print(\"getting top words\") topic_ids = list(topic_counters.keys())", "float(match.group(2)) words = match.group(3).split(\" \") if not topic in topic_counters: topic_counters[topic] = Counter()", "= 0 topic_counters = {} with open(sys.argv[1]) as reader: for line in reader:", "range(len(padded_string) - 2): trigram = padded_string[position:(position+3)] output[trigram] += 1 return output print(\"getting top", "topic_ids = list(topic_counters.keys()) topic_words = {} topic_trigrams = {} for topic in topic_ids:", "print(\"getting top words\") topic_ids = list(topic_counters.keys()) topic_words = {} topic_trigrams 
= {} for", "= match.group(3).split(\" \") if not topic in topic_counters: topic_counters[topic] = Counter() for rank,", "c in topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1, t2 in", "def character_trigrams(strings): output = Counter() for s in strings: padded_string = \" \"", "= character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]),", "= regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters = {} with open(sys.argv[1])", "b): set_a = set(a.keys()) set_b = set(b.keys()) return len(set_a & set_b) / len(set_a", "topic_ids: topic_words[topic] = [w for w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores", "reader: if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration > 500:", "set_b = set(b.keys()) return len(set_a & set_b) / len(set_a | set_b) def character_trigrams(strings):", "topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score, t1, t2 in sorted(topic_pair_scores, reverse=True): print(score, \"", "\") if not topic in topic_counters: topic_counters[topic] = Counter() for rank, word in", "2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score, t1, t2 in sorted(topic_pair_scores, reverse=True): print(score,", "= set(a.keys()) set_b = set(b.keys()) return len(set_a & set_b) / len(set_a | set_b)", "= list(topic_counters.keys()) topic_words = {} topic_trigrams = {} for topic in topic_ids: topic_words[topic]", "in range(len(padded_string) - 2): trigram = padded_string[position:(position+3)] output[trigram] += 1 return output print(\"getting", "for w, c in 
topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1,", "= [] for t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for", "open(sys.argv[1]) as reader: for line in reader: if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration", "alpha = float(match.group(2)) words = match.group(3).split(\" \") if not topic in topic_counters: topic_counters[topic]", "match != None: topic = int(match.group(1)) alpha = float(match.group(2)) words = match.group(3).split(\" \")", "as reader: for line in reader: if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration =", "from collections import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0", "| set_b) def character_trigrams(strings): output = Counter() for s in strings: padded_string =", "score, t1, t2 in sorted(topic_pair_scores, reverse=True): print(score, \" \".join(topic_words[t1]), \" | \", \"", "t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score, t1, t2 in", "iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters = {} with", "set_b) def character_trigrams(strings): output = Counter() for s in strings: padded_string = \"", "t2)) for score, t1, t2 in sorted(topic_pair_scores, reverse=True): print(score, \" \".join(topic_words[t1]), \" |", "character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1,", "output print(\"getting top words\") topic_ids = list(topic_counters.keys()) topic_words = {} topic_trigrams = {}", "1 return 
output print(\"getting top words\") topic_ids = list(topic_counters.keys()) topic_words = {} topic_trigrams", "t1, t2 in sorted(topic_pair_scores, reverse=True): print(score, \" \".join(topic_words[t1]), \" | \", \" \".join(topic_words[t2]))", "top words\") topic_ids = list(topic_counters.keys()) topic_words = {} topic_trigrams = {} for topic", "position in range(len(padded_string) - 2): trigram = padded_string[position:(position+3)] output[trigram] += 1 return output", "= {} topic_trigrams = {} for topic in topic_ids: topic_words[topic] = [w for", "regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters = {} with open(sys.argv[1]) as", "for position in range(len(padded_string) - 2): trigram = padded_string[position:(position+3)] output[trigram] += 1 return", "word in enumerate(words): topic_counters[topic][word] += len(words) - rank def jaccard(a, b): set_a =", "output[trigram] += 1 return output print(\"getting top words\") topic_ids = list(topic_counters.keys()) topic_words =", "topic = int(match.group(1)) alpha = float(match.group(2)) words = match.group(3).split(\" \") if not topic", "character_trigrams(strings): output = Counter() for s in strings: padded_string = \" \" +", "strings: padded_string = \" \" + s.replace(\"v\", \"u\") + \" \" for position", "s.replace(\"v\", \"u\") + \" \" for position in range(len(padded_string) - 2): trigram =", "len(set_a | set_b) def character_trigrams(strings): output = Counter() for s in strings: padded_string", "topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters = {} with open(sys.argv[1]) as reader:", "> 500: match = topic_pattern.search(line) if match != None: topic = int(match.group(1)) alpha", "\" \" + s.replace(\"v\", \"u\") + \" \" for position in range(len(padded_string) -", "words\") topic_ids = list(topic_counters.keys()) topic_words = {} topic_trigrams = {} for topic in", "\" \" for 
position in range(len(padded_string) - 2): trigram = padded_string[position:(position+3)] output[trigram] +=", "- 2): trigram = padded_string[position:(position+3)] output[trigram] += 1 return output print(\"getting top words\")", "int(match.group(1)) alpha = float(match.group(2)) words = match.group(3).split(\" \") if not topic in topic_counters:", "= int(match.group(1)) elif current_iteration > 500: match = topic_pattern.search(line) if match != None:", "current_iteration > 500: match = topic_pattern.search(line) if match != None: topic = int(match.group(1))", "= topic_pattern.search(line) if match != None: topic = int(match.group(1)) alpha = float(match.group(2)) words", "topic in topic_ids: topic_words[topic] = [w for w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic] =", "= Counter() for rank, word in enumerate(words): topic_counters[topic][word] += len(words) - rank def", "len(set_a & set_b) / len(set_a | set_b) def character_trigrams(strings): output = Counter() for", "padded_string[position:(position+3)] output[trigram] += 1 return output print(\"getting top words\") topic_ids = list(topic_counters.keys()) topic_words", "= Counter() for s in strings: padded_string = \" \" + s.replace(\"v\", \"u\")", "topic_pair_scores = [] for t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2))", "in strings: padded_string = \" \" + s.replace(\"v\", \"u\") + \" \" for", "= [w for w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = []", "= int(match.group(1)) alpha = float(match.group(2)) words = match.group(3).split(\" \") if not topic in", "not topic in topic_counters: topic_counters[topic] = Counter() for rank, word in enumerate(words): topic_counters[topic][word]", "= {} for topic in topic_ids: topic_words[topic] = [w for w, c in", "+= 1 return output print(\"getting top words\") topic_ids = 
list(topic_counters.keys()) topic_words = {}", "if match != None: topic = int(match.group(1)) alpha = float(match.group(2)) words = match.group(3).split(\"", "= set(b.keys()) return len(set_a & set_b) / len(set_a | set_b) def character_trigrams(strings): output", "\" for position in range(len(padded_string) - 2): trigram = padded_string[position:(position+3)] output[trigram] += 1", "itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score, t1, t2 in sorted(topic_pair_scores, reverse=True):", "import sys, regex, itertools from collections import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern =", "rank, word in enumerate(words): topic_counters[topic][word] += len(words) - rank def jaccard(a, b): set_a", "{} for topic in topic_ids: topic_words[topic] = [w for w, c in topic_counters[topic].most_common(15)]", "elif current_iteration > 500: match = topic_pattern.search(line) if match != None: topic =", "w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1, t2", "reader: for line in reader: if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration = int(match.group(1))", "<gh_stars>0 import sys, regex, itertools from collections import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern", "None: topic = int(match.group(1)) alpha = float(match.group(2)) words = match.group(3).split(\" \") if not", "set(a.keys()) set_b = set(b.keys()) return len(set_a & set_b) / len(set_a | set_b) def", "in topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1, t2 in itertools.combinations(topic_ids,", "if not topic in topic_counters: topic_counters[topic] = Counter() for rank, word in enumerate(words):", "itertools from collections import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") 
topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration =", "trigram = padded_string[position:(position+3)] output[trigram] += 1 return output print(\"getting top words\") topic_ids =", "{} with open(sys.argv[1]) as reader: for line in reader: if line.startswith(\"<\"): match =", "iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration > 500: match = topic_pattern.search(line) if match", "topic_pattern.search(line) if match != None: topic = int(match.group(1)) alpha = float(match.group(2)) words =", "= iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration > 500: match = topic_pattern.search(line) if", "current_iteration = int(match.group(1)) elif current_iteration > 500: match = topic_pattern.search(line) if match !=", "Counter() for rank, word in enumerate(words): topic_counters[topic][word] += len(words) - rank def jaccard(a,", "in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score, t1, t2 in sorted(topic_pair_scores,", "collections import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters", "with open(sys.argv[1]) as reader: for line in reader: if line.startswith(\"<\"): match = iter_pattern.search(line)", "in topic_ids: topic_words[topic] = [w for w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic])", "match = iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration > 500: match = topic_pattern.search(line)", "t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score, t1, t2", "Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 
topic_counters = {}", "for t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score, t1,", "- rank def jaccard(a, b): set_a = set(a.keys()) set_b = set(b.keys()) return len(set_a", "topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1],", "def jaccard(a, b): set_a = set(a.keys()) set_b = set(b.keys()) return len(set_a & set_b)", "words = match.group(3).split(\" \") if not topic in topic_counters: topic_counters[topic] = Counter() for", "500: match = topic_pattern.search(line) if match != None: topic = int(match.group(1)) alpha =", "len(words) - rank def jaccard(a, b): set_a = set(a.keys()) set_b = set(b.keys()) return", "topic_trigrams = {} for topic in topic_ids: topic_words[topic] = [w for w, c", "int(match.group(1)) elif current_iteration > 500: match = topic_pattern.search(line) if match != None: topic", "for topic in topic_ids: topic_words[topic] = [w for w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic]", "topic_counters[topic] = Counter() for rank, word in enumerate(words): topic_counters[topic][word] += len(words) - rank", "current_iteration = 0 topic_counters = {} with open(sys.argv[1]) as reader: for line in", "+= len(words) - rank def jaccard(a, b): set_a = set(a.keys()) set_b = set(b.keys())", "\" + s.replace(\"v\", \"u\") + \" \" for position in range(len(padded_string) - 2):", "sys, regex, itertools from collections import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\")", "= \" \" + s.replace(\"v\", \"u\") + \" \" for position in range(len(padded_string)", "in topic_counters: topic_counters[topic] = Counter() for rank, word in enumerate(words): topic_counters[topic][word] += len(words)", "topic_counters: topic_counters[topic] = Counter() for 
rank, word in enumerate(words): topic_counters[topic][word] += len(words) -", "Counter() for s in strings: padded_string = \" \" + s.replace(\"v\", \"u\") +", "in reader: if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration >", "[] for t1, t2 in itertools.combinations(topic_ids, 2): topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2)) for score,", "match = topic_pattern.search(line) if match != None: topic = int(match.group(1)) alpha = float(match.group(2))", "regex, itertools from collections import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration", "+ \" \" for position in range(len(padded_string) - 2): trigram = padded_string[position:(position+3)] output[trigram]", "0 topic_counters = {} with open(sys.argv[1]) as reader: for line in reader: if", "/ len(set_a | set_b) def character_trigrams(strings): output = Counter() for s in strings:", "topic_words = {} topic_trigrams = {} for topic in topic_ids: topic_words[topic] = [w", "[w for w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = [] for", "set_a = set(a.keys()) set_b = set(b.keys()) return len(set_a & set_b) / len(set_a |", "return len(set_a & set_b) / len(set_a | set_b) def character_trigrams(strings): output = Counter()", "for score, t1, t2 in sorted(topic_pair_scores, reverse=True): print(score, \" \".join(topic_words[t1]), \" | \",", "regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters = {} with open(sys.argv[1]) as reader: for line", "line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration > 500: match =", "+ s.replace(\"v\", \"u\") + \" \" for position in range(len(padded_string) - 2): trigram", "set_b) / len(set_a | set_b) def 
character_trigrams(strings): output = Counter() for s in", "{} topic_trigrams = {} for topic in topic_ids: topic_words[topic] = [w for w,", "= regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters = {} with open(sys.argv[1]) as reader: for", "topic_counters[topic].most_common(15)] topic_trigrams[topic] = character_trigrams(topic_words[topic]) topic_pair_scores = [] for t1, t2 in itertools.combinations(topic_ids, 2):", "= float(match.group(2)) words = match.group(3).split(\" \") if not topic in topic_counters: topic_counters[topic] =", "return output print(\"getting top words\") topic_ids = list(topic_counters.keys()) topic_words = {} topic_trigrams =", "jaccard(a, b): set_a = set(a.keys()) set_b = set(b.keys()) return len(set_a & set_b) /", "set(b.keys()) return len(set_a & set_b) / len(set_a | set_b) def character_trigrams(strings): output =", "if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration > 500: match", "s in strings: padded_string = \" \" + s.replace(\"v\", \"u\") + \" \"", "output = Counter() for s in strings: padded_string = \" \" + s.replace(\"v\",", "in enumerate(words): topic_counters[topic][word] += len(words) - rank def jaccard(a, b): set_a = set(a.keys())", "for s in strings: padded_string = \" \" + s.replace(\"v\", \"u\") + \"", "line in reader: if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration = int(match.group(1)) elif current_iteration", "for line in reader: if line.startswith(\"<\"): match = iter_pattern.search(line) current_iteration = int(match.group(1)) elif", "list(topic_counters.keys()) topic_words = {} topic_trigrams = {} for topic in topic_ids: topic_words[topic] =", "for rank, word in enumerate(words): topic_counters[topic][word] += len(words) - rank def jaccard(a, b):", "topic_words[topic] = [w for w, c in topic_counters[topic].most_common(15)] topic_trigrams[topic] = 
character_trigrams(topic_words[topic]) topic_pair_scores =", "topic_counters[topic][word] += len(words) - rank def jaccard(a, b): set_a = set(a.keys()) set_b =", "enumerate(words): topic_counters[topic][word] += len(words) - rank def jaccard(a, b): set_a = set(a.keys()) set_b", "import Counter iter_pattern = regex.compile(\"^<(\\d+)>\") topic_pattern = regex.compile(\"^(\\d+)\\t(\\d+\\.\\d+)\\t(.*)\") current_iteration = 0 topic_counters =" ]
[ "in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions and", "s in segments]) # BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids =", "in segments]) segment_len = np.array([len(s) for s in segments]) # BERT input IDs/mask,", "IDs input_ids, input_mask, speaker_ids = [], [], [] for idx, (sent_tokens, sent_speakers) in", "True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx for idx,", "np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] #", "genre, \"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs)", "} data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\")", "as f: for line in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key =", "util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx for idx, genre in enumerate(config[\"genres\"])}", "gold_ends = [], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens", "# speakers if speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if", "for speaker in speakers: if len(speaker_dict) > max_num_speakers: pass # \"break\" to limit", "= np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster in enumerate(clusters): for mention", "XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\":", "json_data = json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions and clusters clusters = 
json_data[\"clusters\"]", "\"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\":", "= json_data[\"doc_key\"] # Mentions and clusters clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for", "= np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre", "max_seg_len = args.seg_len genre_dict = {genre: idx for idx, genre in enumerate(config[\"genres\"])} dataset", "{ \"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\": sentence_map,", "speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__ == '__main__':", "= get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words =", "else: gold_starts, gold_ends = [], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) #", "%s\" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers: list[str] Returns", "no cluster for cluster_id, cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] =", "required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len', type=int, required=True) args = parser.parse_args()", "{genre: idx for idx, genre in enumerate(config[\"genres\"])} dataset = [] with open(input_file, \"r\")", "\"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids,", 
"sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\":", "is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset", "in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster", "[], [], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens)", "sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while", "\"genre\": genre, \"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data =", "= json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs = { \"doc_key\": doc_key,", "as np import utils import util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file", "speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"] sentence_map", "output_file) np.save(output_file, dataset) print(\"Cached %s to %s\" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers):", "def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers: list[str] Returns ------- dict[str, int] \"\"\"", "clusters clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map =", "genre = genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends", "\"subtoken_map\": subtoken_map, # \"input_ids\": 
input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": genre,", "idx, genre in enumerate(config[\"genres\"])} dataset = [] with open(input_file, \"r\") as f: for", "gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts = np.array(gold_starts)", "# 0: no cluster for cluster_id, cluster in enumerate(clusters): for mention in cluster:", "= json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換", "np.save(output_file, dataset) print(\"Cached %s to %s\" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\"", "enumerate(config[\"genres\"])} dataset = [] with open(input_file, \"r\") as f: for line in f.readlines():", "speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids)", "\"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids, \"input_mask\": input_mask,", "np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0)", "utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if args.is_training == 0: is_training = False else:", "num_words = sum([len(s) for s in segments]) segment_len = np.array([len(s) for s in", "json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions and clusters clusters = json_data[\"clusters\"] gold_mentions =", "for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) 
input_mask.append(sent_input_mask)", "segment_len = np.array([len(s) for s in segments]) # BERT input IDs/mask, speaker IDs", "input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask),", "return speaker_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\",", "speaker_ids, \"segment_len\": segment_len, \"genre\": genre, \"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map,", "------- dict[str, int] \"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\": 1} for speaker in", "segments]) segment_len = np.array([len(s) for s in segments]) # BERT input IDs/mask, speaker", "0: no cluster for cluster_id, cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]]", "%s to %s\" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers:", "is_training = False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len", "IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [], [], [] for idx, (sent_tokens,", "DataInstanceに変換 kargs = { \"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\":", "(num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions)", "'__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True)", "\".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s to %s\" % 
(input_file,", "if len(speaker_dict) > max_num_speakers: pass # \"break\" to limit # speakers if speaker", "= json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs = { \"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\":", "is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx", "1} for speaker in speakers: if len(speaker_dict) > max_num_speakers: pass # \"break\" to", "= { \"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\":", "num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) # Gold", "---------- speakers: list[str] Returns ------- dict[str, int] \"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\":", "output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s to %s\" % (input_file, output_file))", "parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len',", "speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for", "input_ids, input_mask, speaker_ids = [], [], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments,", "for s in segments]) # BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids", "util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if args.is_training ==", "json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention: idx for", "= utils.DataInstance(**kargs) 
dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file =", "cluster_id, cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1", "kargs = { \"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments,", "Genre genre = genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) > 0: gold_starts,", "in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids =", "= json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"] sentence_map =", "in util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)} # span", "mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no", "parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len', type=int, required=True) args", "= [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids)", "sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids)", "gold_ends = np.array(gold_ends) # Others tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX", "input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": 
genre, \"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends,", "s in segments]) segment_len = np.array([len(s) for s in segments]) # BERT input", "in speakers: if len(speaker_dict) > max_num_speakers: pass # \"break\" to limit # speakers", "sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0)", "= genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends =", "\"break\" to limit # speakers if speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict)", "mention in util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)} #", "index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster in enumerate(clusters):", "= [] with open(input_file, \"r\") as f: for line in f.readlines(): # 1データの読み込み", "len(speaker_dict) return speaker_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True)", "cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 #", "cluster_id + 1 # Speakers speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) #", "\"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len,", "gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\":", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) 
parser.add_argument('--tokenizer_name',", "Parameters ---------- speakers: list[str] Returns ------- dict[str, int] \"\"\" speaker_dict = {\"UNK\": 0,", "f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions and clusters", "= True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx for", "# Genre genre = genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) > 0:", "np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster in enumerate(clusters): for mention in", "[1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) <", "1 # Speakers speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments", "XXX gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs = {", "len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask", "sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids =", "if speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__ ==", "not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__ == '__main__': parser", "< max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask =", 
"os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s to %s\" %", "line in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions", "np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask))", "np.array(gold_ends) # Others tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters =", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True)", "+ 1 # Speakers speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments", "in enumerate(config[\"genres\"])} dataset = [] with open(input_file, \"r\") as f: for line in", "idx for idx, genre in enumerate(config[\"genres\"])} dataset = [] with open(input_file, \"r\") as", "get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s)", "np.array([len(s) for s in segments]) # BERT input IDs/mask, speaker IDs input_ids, input_mask,", "= [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0)", "\"[SPL]\": 1} for speaker in speakers: if len(speaker_dict) > max_num_speakers: pass # \"break\"", "speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file',", "\"segment_len\": segment_len, \"genre\": genre, \"is_training\": is_training, \"gold_starts\": gold_starts, 
\"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, }", "Segments segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s) for s in", "subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs = { \"doc_key\": doc_key, \"tokens\": tokens,", "# XXX gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs =", "= {mention: idx for idx, mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map", "Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends", "= sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention", "[] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask =", "json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s) for s in segments]) segment_len =", "# Speakers speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments =", "\"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": genre, \"is_training\": is_training, \"gold_starts\":", "tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters,", "> max_num_speakers: pass # \"break\" to limit # speakers if speaker not in", "parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len', type=int, required=True) args = parser.parse_args() main(args)", "input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) 
speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert", "original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) #", "= args.seg_len genre_dict = {genre: idx for idx, genre in enumerate(config[\"genres\"])} dataset =", "len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0)", "f: for line in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key = json_data[\"doc_key\"]", "for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1]", "speaker_dict = {\"UNK\": 0, \"[SPL]\": 1} for speaker in speakers: if len(speaker_dict) >", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str,", "= np.array(gold_ends) # Others tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters", "= json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s) for s in segments]) segment_len", "input_mask, speaker_ids = [], [], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)):", "args.seg_len genre_dict = {genre: idx for idx, genre in enumerate(config[\"genres\"])} dataset = []", "= os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s to %s\" % (input_file, output_file)) def", "\"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": 
gold_clusters, \"subtoken_map\":", "speaker_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int,", "argparse import json import os import numpy as np import utils import util", "len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [], []", "\"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\": 1} for speaker in speakers: if len(speaker_dict)", "dict[str, int] \"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\": 1} for speaker in speakers:", "pass # \"break\" to limit # speakers if speaker not in speaker_dict: speaker_dict[speaker]", "Returns ------- dict[str, int] \"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\": 1} for speaker", "0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts =", "json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None)", "* len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len:", "import util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if args.is_training", "# 1データの読み込み json_data = json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions and clusters clusters", "# BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [], [], []", "= np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"]", "subtoken_map, # \"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": genre, \"is_training\":", "dataset = np.asarray(dataset, dtype=\"O\") output_file = 
os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file,", "\"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file =", "# Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts,", "= [], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens =", "max_num_speakers): \"\"\" Parameters ---------- speakers: list[str] Returns ------- dict[str, int] \"\"\" speaker_dict =", "segments]) # BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [], [],", "if args.is_training == 0: is_training = False else: is_training = True tokenizer =", "= False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict", "doc_key = json_data[\"doc_key\"] # Mentions and clusters clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention)", "doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers,", "= np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2],", "import argparse import json import os import numpy as np import utils import", "utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"],", "\"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\":", "to limit # speakers if speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) 
return", "for line in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key = json_data[\"doc_key\"] #", "in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers", "sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids", "-> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster in", "# Mentions and clusters clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention in", "numpy as np import utils import util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\")", "max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask)", "tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers]", "json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs", "\"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids,", "speakers: if len(speaker_dict) > max_num_speakers: pass # \"break\" to limit # speakers if", "list[str] Returns ------- dict[str, int] \"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\": 1} for", "config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if args.is_training 
== 0: is_training =", "args.is_training == 0: is_training = False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name)", "\"r\") as f: for line in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key", "# Segments segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s) for s", "gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs = { \"doc_key\":", "== np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) # Gold spans", "dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file)", "np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) >", "gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention: idx for idx,", "dataset) print(\"Cached %s to %s\" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters", "in segments]) # BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [],", "segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids, \"input_mask\":", "Mentions and clusters clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters))", "zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends)", "np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached", "\"\"\" Parameters ---------- speakers: list[str] Returns 
------- dict[str, int] \"\"\" speaker_dict = {\"UNK\":", "def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if args.is_training == 0:", "import utils import util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file", "[] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens = json_data[\"tokens\"] original_sentence_boundaries", "for s in segments]) segment_len = np.array([len(s) for s in segments]) # BERT", "clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention:", "if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [],", "speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, # \"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\":", "open(input_file, \"r\") as f: for line in f.readlines(): # 1データの読み込み json_data = json.loads(line)", "= json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions and clusters clusters = json_data[\"clusters\"] gold_mentions", "for cluster_id, cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id +", "# \"input_ids\": input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": genre, \"is_training\": is_training,", "os import numpy as np import utils import util def main(args): config =", "import json import os import numpy as np import utils import util def", "gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens = json_data[\"tokens\"] original_sentence_boundaries =", "# \"break\" to limit # speakers if speaker not in speaker_dict: speaker_dict[speaker] =", "gold_mention_cluster_map, } 
data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\",", "util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)} # span ->", "args.input_file if args.is_training == 0: is_training = False else: is_training = True tokenizer", "0) # Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else:", "[speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids)", "json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"]", "config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s) for", "in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids", "import numpy as np import utils import util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\",", "{\"UNK\": 0, \"[SPL]\": 1} for speaker in speakers: if len(speaker_dict) > max_num_speakers: pass", "original_sentence_boundaries, # XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map,", "input_ids, \"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": genre, \"is_training\": is_training, \"gold_starts\": gold_starts,", "np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) # Gold 
spans if", "sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s) for s in segments]) segment_len = np.array([len(s)", "[], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens = json_data[\"tokens\"]", "segment_len, \"genre\": genre, \"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data", "sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids =", "gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset,", "sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention in", "= {genre: idx for idx, genre in enumerate(config[\"genres\"])} dataset = [] with open(input_file,", "speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words", "dataset = [] with open(input_file, \"r\") as f: for line in f.readlines(): #", "tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"] subtoken_map =", "> 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts", "# span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id,", "json import os import numpy as np import utils import util def main(args):", "# XXX \"segments\": segments, \"sentence_map\": sentence_map, \"speakers\": speakers, \"gold_clusters\": gold_clusters, \"subtoken_map\": subtoken_map, #", "while len(sent_input_ids) < 
max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids)", "speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words ==", "segments = json_data[\"segments\"] sentence_map = json_data[\"sentence_map\"] num_words = sum([len(s) for s in segments])", "for idx, mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) #", "False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict =", "int] \"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\": 1} for speaker in speakers: if", "idx, mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0:", "genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions)", "= json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\",", "= os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s to %s\"", "os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s to %s\" % (input_file, output_file)) def get_speaker_dict(speakers,", "= len(speaker_dict) return speaker_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str,", "= sum([len(s) for s in segments]) segment_len = np.array([len(s) for s in segments])", "json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs = { \"doc_key\": doc_key, \"tokens\": tokens, 
\"original_sentence_boundaries\": original_sentence_boundaries,", "== 0: is_training = False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len", "\"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset =", "gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)} # span -> index", "sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids", "None) # DataInstanceに変換 kargs = { \"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, #", "np import utils import util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file =", "for mention in util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)}", "and clusters clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map", "\"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": genre, \"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\":", "np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre", "input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words", "% (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers: list[str] Returns -------", "len(speaker_dict) > max_num_speakers: pass # \"break\" to limit # speakers if speaker not", "cluster: 
gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers = json_data[\"speakers\"] speaker_dict =", "speaker_ids = [], [], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids", "speakers if speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__", "gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file", "speakers: list[str] Returns ------- dict[str, int] \"\"\" speaker_dict = {\"UNK\": 0, \"[SPL]\": 1}", "data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file", "import os import numpy as np import utils import util def main(args): config", "enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for", "input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [], [], [] for idx,", "{mention: idx for idx, mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map =", "idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] *", "\"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data)", "output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s to", "output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers: list[str] Returns ------- dict[str, int]", "= json_data[\"sentence_map\"] num_words = 
sum([len(s) for s in segments]) segment_len = np.array([len(s) for", "assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) #", "to %s\" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers: list[str]", "1データの読み込み json_data = json.loads(line) doc_key = json_data[\"doc_key\"] # Mentions and clusters clusters =", "config_name=\"base\") input_file = args.input_file if args.is_training == 0: is_training = False else: is_training", "else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre:", "= [], [], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids =", "= np.array([len(s) for s in segments]) # BERT input IDs/mask, speaker IDs input_ids,", "json_data[\"clusters\"] subtoken_map = json_data.get(\"subtoken_map\", None) # DataInstanceに変換 kargs = { \"doc_key\": doc_key, \"tokens\":", "utils import util def main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if", "mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers = json_data[\"speakers\"]", "in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers = json_data[\"speakers\"] speaker_dict", "max_num_speakers: pass # \"break\" to limit # speakers if speaker not in speaker_dict:", "[] with open(input_file, \"r\") as f: for line in f.readlines(): # 1データの読み込み json_data", "(sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids)", "genre in enumerate(config[\"genres\"])} dataset = [] with open(input_file, \"r\") as f: for line", "= 
utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if args.is_training == 0: is_training = False", "main(args): config = utils.get_hocon_config(config_path=\"./config/main.conf\", config_name=\"base\") input_file = args.input_file if args.is_training == 0: is_training", "speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre =", "gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster in enumerate(clusters): for", "json_data[\"doc_key\"] # Mentions and clusters clusters = json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention", "Speakers speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"]) # Segments segments = json_data[\"segments\"]", "0: is_training = False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len =", "gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts = np.array(gold_starts) gold_ends", "enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker]", "for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers =", "= np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words,", "gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers),", "= tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in", "= zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts = 
np.array(gold_starts) gold_ends =", "= cluster_id + 1 # Speakers speakers = json_data[\"speakers\"] speaker_dict = get_speaker_dict(util.flatten(speakers), config[\"max_num_speakers\"])", "print(\"Cached %s to %s\" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ----------", "in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__ == '__main__': parser =", "= json_data[\"clusters\"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention: idx", "for idx, genre in enumerate(config[\"genres\"])} dataset = [] with open(input_file, \"r\") as f:", "input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) #", "json_data[\"sentence_map\"] num_words = sum([len(s) for s in segments]) segment_len = np.array([len(s) for s", "0, \"[SPL]\": 1} for speaker in speakers: if len(speaker_dict) > max_num_speakers: pass #", "limit # speakers if speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict", "# Others tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"]", "type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len', type=int, required=True) args =", "(input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers: list[str] Returns ------- dict[str,", "enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers", "spans if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends =", "dtype=\"O\") output_file = 
os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset) print(\"Cached %s", "\"gold_ends\": gold_ends, \"gold_mention_cluster_map\": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype=\"O\")", "idx for idx, mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions))", "gold_starts, gold_ends = [], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others", "tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx for idx, genre", "= args.input_file if args.is_training == 0: is_training = False else: is_training = True", "# DataInstanceに変換 kargs = { \"doc_key\": doc_key, \"tokens\": tokens, \"original_sentence_boundaries\": original_sentence_boundaries, # XXX", "sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker", "= util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx for idx, genre in", "= np.asarray(dataset, dtype=\"O\") output_file = os.path.basename(input_file).replace(\".jsonlines\", \".npy\") output_file = os.path.join(config[\"caches\"], output_file) np.save(output_file, dataset)", "Others tokens = json_data[\"tokens\"] original_sentence_boundaries = json_data[\"original_sentence_boundaries\"] # XXX gold_clusters = json_data[\"clusters\"] subtoken_map", "with open(input_file, \"r\") as f: for line in f.readlines(): # 1データの読み込み json_data =", "input_file = args.input_file if args.is_training == 0: is_training = False else: is_training =", "sum([len(s) for s in segments]) segment_len = np.array([len(s) for s in segments]) #", "speaker IDs input_ids, input_mask, speaker_ids = [], [], [] for idx, (sent_tokens, sent_speakers)", 
"speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__ == '__main__': parser = argparse.ArgumentParser()", "speaker in speakers: if len(speaker_dict) > max_num_speakers: pass # \"break\" to limit #", "argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len', type=int, required=True)", "genre_dict = {genre: idx for idx, genre in enumerate(config[\"genres\"])} dataset = [] with", "= {\"UNK\": 0, \"[SPL]\": 1} for speaker in speakers: if len(speaker_dict) > max_num_speakers:", "\"input_mask\": input_mask, \"speaker_ids\": speaker_ids, \"segment_len\": segment_len, \"genre\": genre, \"is_training\": is_training, \"gold_starts\": gold_starts, \"gold_ends\":", "cluster for cluster_id, cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id", "get_speaker_dict(speakers, max_num_speakers): \"\"\" Parameters ---------- speakers: list[str] Returns ------- dict[str, int] \"\"\" speaker_dict", "BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [], [], [] for", "= argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument(\"--is_training\", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len', type=int,", "span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster", "[], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask" ]
[ "request.method == \"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form:", "return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user)", "import datetime,timedelta from EMS import EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager =", "login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return", "return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id)", "def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end = now-event.end_date", "@login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user)", "= str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as error: flash(error.message,'alart') return", "except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date,", "else: if event.is_full==True: 
enable=False mode = \"register\" if request.method == \"POST\": if \"Cancel\"", "mode = \"deregister\" else: if event.is_full==True: enable=False mode = \"register\" if request.method ==", "desc = str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try:", "before_open.days<0: enable=False mode=\"Cancel\" else: if event in current_user.registers: if before_deadline.days <0: enable=False mode", ": return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if request.method== 'POST':", "except: flash(\"Cannot Recognise the date\",'alart') return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc = str(form['Description'])", "if event.is_full==True: enable=False mode = \"register\" if request.method == \"POST\": if \"Cancel\" in", "\"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif \"register\"", "render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\"", "return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form():", "datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the 
date\",'alart') return redirect(url_for('post_course')) cap =", "if before_deadline.days <0: enable=False mode = \"deregister\" else: if event.is_full==True: enable=False mode =", "else: if event in current_user.registers: if before_deadline.days <0: enable=False mode = \"deregister\" else:", "redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end)", "else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False else: permit_deregister =", "role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST',", "current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required", "current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST'])", "flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location", "if request.method==\"POST\": speaker = request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session = 
session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try:", "return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\"", "render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now", "date\",'alart') return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date,", "db,create_db create_db() from server import app, valid_time from flask import request, render_template,session,redirect,url_for,flash from", "event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False else: permit_deregister = True", "render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event in current_user.registers: if Event.status == \"Canceled\":", "\"deregister\" else: if event.is_full==True: enable=False mode = \"register\" if request.method == \"POST\": if", "request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as", "as error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html') 
@app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event():", "'GET']) def guest_form(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) real_name =", "@app.route('/', methods=['POST', 'GET']) def login(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password'])", "db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False else: permit_deregister", "def login(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password)", "deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the", "@login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title", "\"register\" if request.method == \"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\"", "before_open = event.start_date-now after_end = now-event.end_date before_deadline = event.deregister_deadline-now enable=True if event.status==\"Closed\" or", "None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated:", "else: flash(\"Invalid username or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else :", "deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: 
flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event')", "return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user in", "= course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error:", "render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user in session.attendees: session.deregister(current_user) else:", "render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET'])", "password=str(form['password']) user = ems.valid_user(username,password) if user is not None: login_user(user) return redirect(url_for('dashboard')) else:", "= guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login'))", "def guest_form(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name'])", "request.method==\"POST\": speaker = 
request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session)", "as error: flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def", "event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id)", "= \"deregister\" else: if event.is_full==True: enable=False mode = \"register\" if request.method == \"POST\":", "True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker']", "@app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event in current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\"", "import db,create_db create_db() from server import app, valid_time from flask import request, render_template,session,redirect,url_for,flash", "if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user =", "has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) 
@app.route('/Canceled_Event',methods=['GET','POST'])", "import EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems =", "username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if user is not None: login_user(user) return redirect(url_for('dashboard'))", "False else: permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if", "enable=False if current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\"", "@app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end", "LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from events import * from datetime import datetime,timedelta", "str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except", "render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST'])", "EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the date\",'alart') 
return redirect(url_for('post_course')) cap = int(form['Capacitor'])", "event.status==\"Closed\" or event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\" else: if", "= True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker =", "login(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if", "return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc,", "ems.valid_user(username,password) if user is not None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username or", "fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError)", "= str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course)", "init_database import db,create_db create_db() from server import app, valid_time from flask import request,", "from all_user import * from events import * 
from datetime import datetime,timedelta from", "\"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user)", "str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except", "as error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else :", "current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard():", "post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title = str(form['Title']) location", "return redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event in current_user.registers: if", "flash(\"Invalid username or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return", "render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from events import *", "MemberError as error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else", "datetime.strptime(form['End_Date'],date_format) deadline = 
datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date", "current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST'])", "= datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc =", "redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST',", "request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError", "mode=\"Cancel\" else: if event in current_user.registers: if before_deadline.days <0: enable=False mode = \"deregister\"", "not None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart') return redirect(url_for('login')) if", "try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format)", "fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, 
fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError)", "redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False else: permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST'])", "str(form['Title']) location = str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline =", "password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"user\")", "EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems = EMS()", "logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course():", "user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as error: flash(error.message,'alart') return redirect('guest_form') return", "= event.start_date-now after_end = now-event.end_date before_deadline = event.deregister_deadline-now enable=True if event.status==\"Closed\" or event.status==\"Canceled\":", "return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if", "enable=False mode = \"deregister\" else: if event.is_full==True: enable=False mode = \"register\" if request.method", "import * from events import 
* from datetime import datetime,timedelta from EMS import", "@app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user()", "def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title)", "now = datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end = now-event.end_date before_deadline = event.deregister_deadline-now", "db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit()", "= datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar", "if before_deadline.days <0: permit_deregister= False else: permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required", "'POST': form = request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try:", "event.convenor: if after_end.days> 0: mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\" else: if event", "LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST', 'GET']) def login(): if request.method== 'POST':", "from init_database 
import db,create_db create_db() from server import app, valid_time from flask import", "datetime import datetime,timedelta from EMS import EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager", "speaker = request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except", "event.deregister_deadline-now enable=True if event.status==\"Closed\" or event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if after_end.days> 0:", "form = request.form title = str(form['Title']) location = str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format)", "fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return", "render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event():", "= request.form['Name'] capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart')", "= datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end = now-event.end_date before_deadline = event.deregister_deadline-now enable=True", "login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST', 
'GET']) def login(): if request.method== 'POST': form", "redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method", "in current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST'])", "def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only", "try: ems.add_guest(user) except MemberError as error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated:", "\"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout():", "return redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard'))", "login_manager = LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST', 'GET']) def login(): if", "== \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event():", "@login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def 
post_course(): date_format=\"%Y-%m-%d\" if request.method", "datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format)", "@login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user in session.attendees: session.deregister(current_user) else: session.register(current_user) return", "@login_required def registered_event(): for Event in current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\" has", "guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if", "@trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST'])", "= request.form username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if user is not None: login_user(user)", ": return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events = ems.valid_events() return", "after_end = now-event.end_date before_deadline = event.deregister_deadline-now enable=True if event.status==\"Closed\" or event.status==\"Canceled\": enable=False if", "return 
render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open =", "events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def", "= location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_seminar'))", "def registered_event(): for Event in current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\" has been", "flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from events import * from datetime", "(SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def", "render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form =", "try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event)", 
"enable=True if event.status==\"Closed\" or event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\"", "<0: enable=False mode = \"deregister\" else: if event.is_full==True: enable=False mode = \"register\" if", "after_end.days> 0: mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\" else: if event in current_user.registers:", "import app, valid_time from flask import request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from", "in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in", "redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event in current_user.registers: if Event.status", "else: if before_open.days<0: enable=False mode=\"Cancel\" else: if event in current_user.registers: if before_deadline.days <0:", "EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the date\",'alart') return redirect(url_for('post_course'))", "except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required", "date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title = str(form['Title']) location =", "@app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form", "if current_user.is_authenticated: return redirect(url_for('dashboard')) else : 
return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def", "location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_course')) return", "redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event in current_user.registers:", "= int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline,", "(PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def", "canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open", "0: mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\" else: if event in current_user.registers: if", "\"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return", "return redirect(url_for('dashboard')) 
else : return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events", "post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title = str(form['Title']) location", "db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if", "= datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the date\",'alart')", "is not None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart') return redirect(url_for('login'))", "return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False else: permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister)", "* from datetime import datetime,timedelta from EMS import EMS from role_required import trainer_only", "datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description'])", "if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif", "= location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_course'))", "app, 
valid_time from flask import request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user", "or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type =", "request.form username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if user is not None: login_user(user) return", "= datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot", "posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def", "post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session =", "seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart')", "current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return 
render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST'])", "def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user in session.attendees: session.deregister(current_user) else: session.register(current_user) return redirect(url_for('event_details',title=seminar_tit))", "date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end = now-event.end_date before_deadline =", "form = request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user)", "username or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type", "error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return", "mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\" else: if event in current_user.registers: if before_deadline.days", "= request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError)", "= now-event.end_date before_deadline = event.deregister_deadline-now enable=True if event.status==\"Closed\" or event.status==\"Canceled\": enable=False if current_user.id==", "location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except 
(PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return", "end_date = datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except:", "if current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\" else:", "fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return", "= request.form title = str(form['Title']) location = str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date", "datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar =", "else : return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if request.method==", "str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as error: flash(error.message,'alart') return redirect('guest_form')", "all_user import * from events import * from datetime import datetime,timedelta from EMS", "user = ems.valid_user(username,password) if user is not None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid", "EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc", "<0: permit_deregister= False else: permit_deregister = True return 
render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title):", "return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form", "return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user in session.attendees: session.deregister(current_user)", "redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else", "or event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\" else: if before_open.days<0:", "app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST', 'GET']) def login():", "enable=False mode = \"register\" if request.method == \"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\"", "real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as error: flash(error.message,'alart')", "return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET'])", "except MemberError as error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return 
redirect(url_for('dashboard'))", "Date Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location =", "username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as", "@trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title =", "if event in current_user.registers: if before_deadline.days <0: enable=False mode = \"deregister\" else: if", "datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end = now-event.end_date before_deadline = event.deregister_deadline-now enable=True if", "@app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user in session.attendees: session.deregister(current_user) else: session.register(current_user)", "ems = EMS() @app.route('/', methods=['POST', 'GET']) def login(): if request.method== 'POST': form =", "def post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title = str(form['Title'])", "redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user", "import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from events import * from datetime import", 
"if event.status==\"Closed\" or event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\" else:", "from EMS import EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app)", "else: permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\":", "request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in request.form:", "@login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor'])", "'POST': form = request.form title = str(form['Title']) location = str(form['Location']) try: start_date =", "= request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except", "from datetime import datetime,timedelta from EMS import EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA'", "datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the date\",'alart') return", "if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET'])", "<gh_stars>0 from init_database import db,create_db create_db() from server import 
app, valid_time from flask", "return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"user\") @app.route('/guest_form',", "enable=False mode=\"Cancel\" else: if event in current_user.registers: if before_deadline.days <0: enable=False mode =", "in request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit()", "logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def", "location = str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format)", "current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def", "event.is_full==True: enable=False mode = \"register\" if request.method == \"POST\": if \"Cancel\" in request.form:", "else : return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events = ems.valid_events()", "@app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker'] name = request.form['Name']", "int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, 
capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end)", "user is not None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart') return", "Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0,", "== 'POST': form = request.form title = str(form['Title']) location = str(form['Location']) try: start_date", "flash(\"Cannot Recognise the date\",'alart') return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee'])", "deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart')", "ems.add_guest(user) except MemberError as error: flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return", "except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required", "from server import app, valid_time from flask import request, render_template,session,redirect,url_for,flash from flask_login import", "return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method ==", "render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) 
@app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker'] name =", "def dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login'))", "@trainer_only def post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title =", "= \"register\" if request.method == \"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif", "from events import * from datetime import datetime,timedelta from EMS import EMS from", "ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return", "methods=['POST', 'GET']) def guest_form(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) real_name", "redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\" if request.method == 'POST':", "import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST', 'GET'])", "datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise", "try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error: 
flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html')", "before_deadline.days <0: permit_deregister= False else: permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def", "import * from datetime import datetime,timedelta from EMS import EMS from role_required import", "permit_deregister= False else: permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title)", "elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days", "if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if user", "session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return", "the date\",'alart') return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course =", "EMS import EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems", "= event.deregister_deadline-now enable=True if event.status==\"Closed\" or 
event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if after_end.days>", "def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\" if request.method ==", "create_db() from server import app, valid_time from flask import request, render_template,session,redirect,url_for,flash from flask_login", "from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/',", "flash(error.message,'alart') return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type", "desc = str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try:", "ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form =", "@app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form", "EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar')) desc = str(form['Description']) fee=float(request.form['Fee'])", "if request.method == 'POST': form = request.form title = str(form['Title']) location = str(form['Location'])", "in current_user.registers: if before_deadline.days <0: enable=False mode = \"deregister\" else: if event.is_full==True: enable=False", "@login_required @trainer_only def 
canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now =", "if after_end.days> 0: mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\" else: if event in", "password=str(form['password']) real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name) try: ems.add_guest(user) except MemberError as error:", "course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart')", "error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit)", "'GET']) def login(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) user =", "@app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password'])", "capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event')", "redirect(url_for('login')) if current_user.is_authenticated: 
return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required", "location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return", "return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event in current_user.registers: if Event.status ==", "return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if request.method== 'POST': form", "guest_form(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user", "def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required", "from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from events import * from", "@trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now()", "for Event in current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return", "= 
seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error:", "@app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return", "if Event.status == \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only", "return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker'] name", "Event in current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers)", "= str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start", "request.method == 'POST': form = request.form title = str(form['Title']) location = str(form['Location']) try:", "return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form", "return 
render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title):", "return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location", "event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user)", "load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST':", "request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from events import", "= \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def", "= str(form['Title']) location = str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline", "EMS() @app.route('/', methods=['POST', 'GET']) def login(): if request.method== 'POST': form = request.form username=str(form['Username'])", "start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline = 
datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end", "if user is not None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username or password\",'alart')", "flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event", "now-event.end_date before_deadline = event.deregister_deadline-now enable=True if event.status==\"Closed\" or event.status==\"Canceled\": enable=False if current_user.id== event.convenor:", "error: flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar():", "methods=['POST', 'GET']) def login(): if request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) user", "Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id, capacitor=cap,description=desc,deregister_deadline=deadline, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Course) except (PeriodError,CapacityError,DupulicationError) as", "if request.method == \"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in", "permit_deregister = True return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister) @app.route('/dashboard/<title>/Sessions',methods=['GET','POST']) @login_required def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker", "\"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if 
request.method== 'POST': form = request.form username=str(form['Username'])", "(PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only", "@login_required def event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end =", "event.status=\"Closed\" db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title))", "flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user)", "flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit)", "return redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type =", "event=ems.get_event_title(title) before_open = event.start_date-now after_end = now-event.end_date before_deadline = event.deregister_deadline-now enable=True if event.status==\"Closed\"", "redirect(url_for('dashboard')) else : 
return render_template('sign.html',user_type = \"guest\") @app.route('/dashboard',methods=['POST','GET']) @login_required def dashboard(): events =", "from flask import request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import *", "current_user.registers: if before_deadline.days <0: enable=False mode = \"deregister\" else: if event.is_full==True: enable=False mode", "@app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def canceled_event(): return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user) @app.route('/dashboard/<title>',methods=['GET','POST']) @login_required def event_details(title): date_format=\"%Y-%m-%d\" now", "request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False", "str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start =", "except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required", "= session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title))", "def 
post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = request.form title = str(form['Title'])", "capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title))", "redirect('guest_form') return redirect(url_for('login')) if current_user.is_authenticated: return redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"guest\")", "dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader", "request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) real_name = str(form['real_name']) user = guest(zid=\"NONE\",id=username,password=password,name=real_name)", "db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False else: permit_deregister = True return", "\"register\" in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0:", "event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister= False else:", "Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: 
current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as", "= ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id):", "redirect(url_for('post_course')) cap = int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location =", "before_deadline = event.deregister_deadline-now enable=True if event.status==\"Closed\" or event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if", "render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if request.method== 'POST': form =", "if before_open.days<0: enable=False mode=\"Cancel\" else: if event in current_user.registers: if before_deadline.days <0: enable=False", "event_details(title): date_format=\"%Y-%m-%d\" now = datetime.now() event=ems.get_event_title(title) before_open = event.start_date-now after_end = now-event.end_date before_deadline", "mode = \"register\" if request.method == \"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit()", "title = str(form['Title']) location = str(form['Location']) try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format)", "@login_required def dashboard(): events = ems.valid_events() return render_template('dashboard.html',Events=events,user=current_user) @app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return", "= datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the date\",'alart') return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc", "request.form['Name'] 
capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return", "event.status==\"Canceled\": enable=False if current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\" else: if before_open.days<0: enable=False", "event in current_user.registers: if before_deadline.days <0: enable=False mode = \"deregister\" else: if event.is_full==True:", "in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return redirect(url_for('event_details',title=title)) if before_deadline.days <0: permit_deregister=", "elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit() else:", "\"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\" db.session.commit()", "def post_session(title): event=ems.get_event_title(title) if request.method==\"POST\": speaker = request.form['Speaker'] name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session", "events import * from datetime import datetime,timedelta from EMS import EMS from role_required", "= LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST', 'GET']) def login(): if request.method==", "return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for Event in", "= ems.valid_user(username,password) if user is not None: login_user(user) return redirect(url_for('dashboard')) else: flash(\"Invalid username", "'POST': form = request.form 
username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if user is not", "Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only", "Recognise the date\",'alart') return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course", "current_user.id== event.convenor: if after_end.days> 0: mode=\"Close\" else: if before_open.days<0: enable=False mode=\"Cancel\" else: if", "redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if current_user in session.attendees:", "= datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return", "trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager() login_manager.init_app(app) ems = EMS() @app.route('/', methods=['POST', 'GET']) def", "@app.route('/sign-up',methods=['POST','GET']) def logout(): logout_user() return redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required", "event.start_date-now after_end = now-event.end_date before_deadline = event.deregister_deadline-now enable=True if 
event.status==\"Closed\" or event.status==\"Canceled\": enable=False", "request.method== 'POST': form = request.form username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if user is", "= str(form['Description']) fee=float(request.form['Fee']) Seminar = seminar(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar)", "as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit):", "redirect(url_for('dashboard')) else : return render_template('sign.html',user_type = \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if", "* from events import * from datetime import datetime,timedelta from EMS import EMS", "= EMS() @app.route('/', methods=['POST', 'GET']) def login(): if request.method== 'POST': form = request.form", "request.form: event.status=\"Closed\" db.session.commit() elif \"register\" in request.form: event.attendees.append(current_user) db.session.commit() else: event.attendees.remove(current_user) db.session.commit() return", "form = request.form username=str(form['Username']) password=str(form['password']) user = ems.valid_user(username,password) if user is not None:", "flash(error.message,'alart') return redirect(url_for('post_course')) return redirect('/Posted_Event') return render_template('Post_Course.html') @app.route('/Post_Seminar',methods=['GET','POST']) @login_required @trainer_only def post_seminar(): date_format=\"%Y-%m-%d\"", "request.form title = str(form['Title']) location = str(form['Location']) 
try: start_date = datetime.strptime(form['Start_Date'],date_format) end_date =", "cap = int(form['Capacitor']) desc = str(form['Description']) fee=float(request.form['Fee']) Course = course(status=\"OPEN\",title=title,start_date=start_date,end_date=end_date, location = location,convenor=current_user.id,", "return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required @trainer_only def", "= datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the date\",'alart') return redirect(url_for('post_course')) cap", "valid_time from flask import request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import", "datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Cannot Recognise the date\",'alart') return redirect(url_for('post_course')) cap = int(form['Capacitor']) desc =", "Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_session',title=event.title)) return", "datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid Date Format\",'alart') return redirect(url_for('post_seminar'))", "been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) 
@app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def posted_event(): return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user) @app.route('/Canceled_Event',methods=['GET','POST']) @login_required", "try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html')", "flask import request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from", "import request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user from all_user import * from events", "registered_event(): for Event in current_user.registers: if Event.status == \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart')", "= datetime.strptime(form['Start_Date'],date_format) end_date = datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end =", "name = request.form['Name'] capacitor=int(request.form['Capacitor']) Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id) try: current_user.post_session(Session) except (SpeakerError,CapacityError,DupulicationError) as error:", "== \"POST\": if \"Cancel\" in request.form: event.status=\"Canceled\" db.session.commit() elif \"Close\" in request.form: event.status=\"Closed\"", "datetime,timedelta from EMS import EMS from role_required import trainer_only app.config['SECRET_KEY']='HRHLALALA' login_manager = LoginManager()", "@login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\" if request.method == 'POST': form = 
request.form title", "= \"user\") @app.route('/guest_form', methods=['POST', 'GET']) def guest_form(): if request.method== 'POST': form = request.form", "return redirect(url_for('post_session',title=event.title)) return redirect(url_for('event_details',title=event.title)) return render_template('Post_Sessions.html',event=event) @app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST']) @login_required def register_session(seminar_tit,session_tit): seminar=ems.get_event_title(seminar_tit) session=seminar.get_session_title(session_tit) if", "location,convenor=current_user.id,capacitor=0, deregister_deadline=deadline,description=desc, fee=fee,EB_start=EB_start,EB_end=EB_end) try: current_user.post_event(Seminar) except (PeriodError,CapacityError,DupulicationError) as error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return", "server import app, valid_time from flask import request, render_template,session,redirect,url_for,flash from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user", "Event.status == \"Canceled\": flash(Event.title+\" has been Cancelled\",'alart') return render_template('dashboard.html',user=current_user,Events=current_user.registers) @app.route('/Posted_Event',methods=['GET','POST']) @login_required @trainer_only def", "error: flash(error.message,'alart') return redirect(url_for('post_seminar')) return redirect('/Posted_Event') return render_template('Post_Seminar.html') @app.route('/Registered_Event',methods=['GET','POST']) @login_required def registered_event(): for", "redirect(url_for('login')) @login_manager.user_loader def load_user(user_id): return ems.get_user_id(user_id) @app.route('/Post_Course',methods=['GET','POST']) @login_required @trainer_only def post_course(): date_format=\"%Y-%m-%d\" if", "before_deadline.days <0: enable=False mode = \"deregister\" else: if event.is_full==True: enable=False mode = \"register\"", "= 
datetime.strptime(form['End_Date'],date_format) deadline = datetime.strptime(form['deadline'],date_format) EB_start = datetime.strptime(form['EB_Start_Date'],date_format) EB_end = datetime.strptime(form['EB_End_Date'],date_format) except: flash(\"Invalid" ]
[ "0]), # (horizontal pos, depth) \"down\": np.array([0, 1]), \"up\": np.array([0, -1]), } @click.command()", "import numpy as np CMD_2_DIRECTION = { \"forward\": np.array([1, 0]), # (horizontal pos,", "commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands] position = np.array([0, 0])", "pathlib import Path import click import numpy as np CMD_2_DIRECTION = { \"forward\":", "{ \"forward\": np.array([1, 0]), # (horizontal pos, depth) \"down\": np.array([0, 1]), \"up\": np.array([0,", "filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1]))", "import Path import click import numpy as np CMD_2_DIRECTION = { \"forward\": np.array([1,", "Path import click import numpy as np CMD_2_DIRECTION = { \"forward\": np.array([1, 0]),", "@click.argument(\"filename\") def main(filename): filename = Path(filename) commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\" \")", "np.array([1, 0]), # (horizontal pos, depth) \"down\": np.array([0, 1]), \"up\": np.array([0, -1]), }", "[(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands] position = np.array([0, 0]) for cmd in", "numpy as np CMD_2_DIRECTION = { \"forward\": np.array([1, 0]), # (horizontal pos, depth)", "-1]), } @click.command() @click.argument(\"filename\") def main(filename): filename = Path(filename) commands = filename.read_text().split(\"\\n\") commands", "for cmd in commands: position += cmd[1] * cmd[0] print(position[0] * position[1]) if", "commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for cmd in commands] commands =", "for cmd in commands] position = np.array([0, 0]) for cmd in commands: position", "np.array([0, -1]), } @click.command() @click.argument(\"filename\") def main(filename): filename = Path(filename) commands = filename.read_text().split(\"\\n\")", "click import numpy as np CMD_2_DIRECTION = { \"forward\": np.array([1, 0]), # (horizontal", "filename 
= Path(filename) commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for cmd in", "import click import numpy as np CMD_2_DIRECTION = { \"forward\": np.array([1, 0]), #", "depth) \"down\": np.array([0, 1]), \"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\") def main(filename): filename", "commands = [cmd.split(\" \") for cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for", "\"forward\": np.array([1, 0]), # (horizontal pos, depth) \"down\": np.array([0, 1]), \"up\": np.array([0, -1]),", "\") for cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands]", "main(filename): filename = Path(filename) commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for cmd", "position = np.array([0, 0]) for cmd in commands: position += cmd[1] * cmd[0]", "np.array([0, 1]), \"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\") def main(filename): filename = Path(filename)", "# (horizontal pos, depth) \"down\": np.array([0, 1]), \"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\")", "def main(filename): filename = Path(filename) commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for", "= Path(filename) commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for cmd in commands]", "int(cmd[1])) for cmd in commands] position = np.array([0, 0]) for cmd in commands:", "commands: position += cmd[1] * cmd[0] print(position[0] * position[1]) if __name__ == \"__main__\":", "commands] position = np.array([0, 0]) for cmd in commands: position += cmd[1] *", "np CMD_2_DIRECTION = { \"forward\": np.array([1, 0]), # (horizontal pos, depth) \"down\": np.array([0,", "1]), \"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\") def main(filename): filename = Path(filename) commands", "cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands] 
position =", "from pathlib import Path import click import numpy as np CMD_2_DIRECTION = {", "= filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]],", "in commands] position = np.array([0, 0]) for cmd in commands: position += cmd[1]", "\"down\": np.array([0, 1]), \"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\") def main(filename): filename =", "cmd in commands] position = np.array([0, 0]) for cmd in commands: position +=", "= np.array([0, 0]) for cmd in commands: position += cmd[1] * cmd[0] print(position[0]", "in commands: position += cmd[1] * cmd[0] print(position[0] * position[1]) if __name__ ==", "= [cmd.split(\" \") for cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd", "CMD_2_DIRECTION = { \"forward\": np.array([1, 0]), # (horizontal pos, depth) \"down\": np.array([0, 1]),", "position += cmd[1] * cmd[0] print(position[0] * position[1]) if __name__ == \"__main__\": main()", "cmd in commands: position += cmd[1] * cmd[0] print(position[0] * position[1]) if __name__", "= [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands] position = np.array([0, 0]) for cmd", "0]) for cmd in commands: position += cmd[1] * cmd[0] print(position[0] * position[1])", "= { \"forward\": np.array([1, 0]), # (horizontal pos, depth) \"down\": np.array([0, 1]), \"up\":", "commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands] position = np.array([0, 0]) for", "Path(filename) commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\" \") for cmd in commands] commands", "\"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\") def main(filename): filename = Path(filename) commands =", "} @click.command() @click.argument(\"filename\") def main(filename): filename = Path(filename) commands = filename.read_text().split(\"\\n\") commands =", "@click.command() @click.argument(\"filename\") def main(filename): 
filename = Path(filename) commands = filename.read_text().split(\"\\n\") commands = [cmd.split(\"", "in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands] position = np.array([0,", "np.array([0, 0]) for cmd in commands: position += cmd[1] * cmd[0] print(position[0] *", "<gh_stars>0 from pathlib import Path import click import numpy as np CMD_2_DIRECTION =", "[cmd.split(\" \") for cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in", "for cmd in commands] commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands] position", "as np CMD_2_DIRECTION = { \"forward\": np.array([1, 0]), # (horizontal pos, depth) \"down\":", "(horizontal pos, depth) \"down\": np.array([0, 1]), \"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\") def", "pos, depth) \"down\": np.array([0, 1]), \"up\": np.array([0, -1]), } @click.command() @click.argument(\"filename\") def main(filename):" ]
[ "num_movies = 10 num_users = 5 ratings = random.randint(11, size = (num_movies, num_users))", "initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta", "did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0]", "= 3 print (kunj_ratings) ratings = append(kunj_ratings, ratings, axis = 1) did_rate =", "= zeros(shape = (num_movies, 1)) ratings_norm = zeros(shape = ratings.shape) for i in", "idx]) ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i] return ratings_norm, ratings_mean ratings, ratings_mean", "= unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions)", "theta ) + reg_param * X theta_grad = difference.T.dot( X ) + reg_param", "full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies,", "print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj = all_predictions[:, 0:1] +", "theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X,", "theta**2 ) + sum(X**2)) return cost + regularization from scipy import optimize reg_param", "* num_movies = 10 num_users = 5 ratings = random.randint(11, size = (num_movies,", "= (num_movies, num_users)) print (ratings) did_rate = (ratings != 0) * 1 
print(did_rate)", "movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T", "num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) # obs for which", "def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users,", "for i in range(num_movies): idx = where(did_rate[i] == 1)[0] ratings_mean[i] = mean(ratings[i, idx])", "num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) # obs for", "difference = X.dot( theta.T ) * did_rate - ratings X_grad = difference.dot( theta", "zeros(shape = (num_movies, 1)) ratings_norm = zeros(shape = ratings.shape) for i in range(num_movies):", "X ) + reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate,", "!= 0) * 1), did_rate, axis = 1) print (ratings) ratings.shape did_rate print", "num_users)) print (ratings) did_rate = (ratings != 0) * 1 print(did_rate) ratings.shape did_rate.shape", "from numpy import * num_movies = 10 num_users = 5 ratings = random.randint(11,", "initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies * num_features] X =", "calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies,", "* num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:] theta =", "kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] =", "= random.randint(11, size = (num_movies, num_users)) print (ratings) did_rate = (ratings != 0)", 
"initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies *", "did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean = zeros(shape", "Created on Wed Jan 3 08:15:43 2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted", "return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta", "= append(((kunj_ratings != 0) * 1), did_rate, axis = 1) print (ratings) ratings.shape", "num_users = ratings.shape[1] num_features = 3 movie_features = random.randn( num_movies, num_features ) user_prefs", "= zeros(shape = ratings.shape) for i in range(num_movies): idx = where(did_rate[i] == 1)[0]", "all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj = all_predictions[:, 0:1] + ratings_mean print", "first_30 = X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies *", "= movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj = all_predictions[:, 0:1] + ratings_mean print (predictions_for_kunj)", "= 7 kunj_ratings[7] = 3 print (kunj_ratings) ratings = append(kunj_ratings, ratings, axis =", "X = first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features, num_users", "= random.randn( num_movies, num_features ) user_prefs = random.randn( num_users, num_features ) initial_X_and_theta =", "return cost + regularization from scipy import optimize reg_param = 30 #scipy fmin", "= zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] = 7", "idx] = ratings[i, idx] - ratings_mean[i] return 
ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings,", "which a rating was given difference = X.dot( theta.T ) * did_rate -", "ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate) print (ratings) num_users = ratings.shape[1] num_features =", "- ratings) ** 2 ) / 2 regularization = (reg_param / 2) *", "9.Dunkirk 10.Interstellar \"\"\" from numpy import * num_movies = 10 num_users = 5", "2 regularization = (reg_param / 2) * (sum( theta**2 ) + sum(X**2)) return", "num_features): first_30 = X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies", "r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta =", "from scipy import optimize reg_param = 30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient,", "ratings X_grad = difference.dot( theta ) + reg_param * X theta_grad = difference.T.dot(", "X_grad = difference.dot( theta ) + reg_param * X theta_grad = difference.T.dot( X", "!= 0) * 1 print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings)", "\"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get rich or die", "reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs,", "(X.dot( theta.T ) * did_rate - ratings) ** 2 ) / 2 regularization", "print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean = zeros(shape =", "idx = where(did_rate[i] == 1)[0] ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i,", "rating was given difference = X.dot( 
theta.T ) * did_rate - ratings X_grad", "theta = unroll_params(X_and_theta, num_users, num_movies, num_features) # obs for which a rating was", "0) * 1), did_rate, axis = 1) print (ratings) ratings.shape did_rate print (did_rate)", "== 1)[0] ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i]", "theta.T ) * did_rate - ratings X_grad = difference.dot( theta ) + reg_param", "did_rate.shape kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4]", "8 kunj_ratings[4] = 7 kunj_ratings[7] = 3 print (kunj_ratings) ratings = append(kunj_ratings, ratings,", "= optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost,", "or die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy import * num_movies", "kunj_ratings[4] = 7 kunj_ratings[7] = 3 print (kunj_ratings) ratings = append(kunj_ratings, ratings, axis", ") initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape", "import optimize reg_param = 30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate,", "user_prefs.T ) print(all_predictions) predictions_for_kunj = all_predictions[:, 0:1] + ratings_mean print (predictions_for_kunj) print (kunj_ratings)", "* 1), did_rate, axis = 1) print (ratings) ratings.shape did_rate print (did_rate) did_rate.shape", "fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs =", "3 print 
(kunj_ratings) ratings = append(kunj_ratings, ratings, axis = 1) did_rate = append(((kunj_ratings", "movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies * num_features]", "user_prefs = random.randn( num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs)", "(user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30", "* X theta_grad = difference.T.dot( X ) + reg_param * theta return r_[X_grad.T.flatten(),", "= ratings.shape) for i in range(num_movies): idx = where(did_rate[i] == 1)[0] ratings_mean[i] =", "= 5 ratings = random.randint(11, size = (num_movies, num_users)) print (ratings) did_rate =", "num_users = 5 ratings = random.randint(11, size = (num_movies, num_users)) print (ratings) did_rate", "7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy import * num_movies = 10 num_users", "5 ratings = random.randint(11, size = (num_movies, num_users)) print (ratings) did_rate = (ratings", "/ 2) * (sum( theta**2 ) + sum(X**2)) return cost + regularization from", "num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj =", "num_users ).transpose() return X, theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param):", "ratings, axis = 1) did_rate = append(((kunj_ratings != 0) * 1), did_rate, axis", "(kunj_ratings) ratings = append(kunj_ratings, ratings, axis = 1) did_rate = append(((kunj_ratings != 0)", "optimize reg_param = 30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, 
fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users,", "die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy import * num_movies =", "unroll_params(X_and_theta, num_users, num_movies, num_features) cost = sum( (X.dot( theta.T ) * did_rate -", "zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] = 7 kunj_ratings[7]", "X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:] theta", ") cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features)", "return ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate) print (ratings) num_users = ratings.shape[1]", "did_rate.shape def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean = zeros(shape = (num_movies, 1))", "ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i] return ratings_norm, ratings_mean ratings, ratings_mean =", "(did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean = zeros(shape = (num_movies,", "= ratings.shape[1] num_features = 3 movie_features = random.randn( num_movies, num_features ) user_prefs =", "theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta,", "ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8", "ratings.shape) for i in range(num_movies): idx = where(did_rate[i] == 1)[0] ratings_mean[i] = mean(ratings[i,", "did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, 
num_users, num_movies, num_features) #", "ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate) print (ratings) num_users = ratings.shape[1] num_features", "= minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions", "7 kunj_ratings[7] = 3 print (kunj_ratings) ratings = append(kunj_ratings, ratings, axis = 1)", "rich or die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy import *", "= append(kunj_ratings, ratings, axis = 1) did_rate = append(((kunj_ratings != 0) * 1),", "8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy import * num_movies = 10 num_users =", "theta.T ) * did_rate - ratings) ** 2 ) / 2 regularization =", "num_features) # obs for which a rating was given difference = X.dot( theta.T", "num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs =", "3 movie_features = random.randn( num_movies, num_features ) user_prefs = random.randn( num_users, num_features )", "X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) # obs for which a rating", "ratings) ** 2 ) / 2 regularization = (reg_param / 2) * (sum(", "did_rate = (ratings != 0) * 1 print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies,", ") + reg_param * X theta_grad = difference.T.dot( X ) + reg_param *", "+ sum(X**2)) return cost + regularization from scipy import optimize reg_param = 30", "disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users,", "num_features:] 
theta = last_18.reshape(num_features, num_users ).transpose() return X, theta def calculate_gradient(X_and_theta, ratings, did_rate,", "print (kunj_ratings) ratings = append(kunj_ratings, ratings, axis = 1) did_rate = append(((kunj_ratings !=", "/ 2 regularization = (reg_param / 2) * (sum( theta**2 ) + sum(X**2))", "normalize_ratings(ratings, did_rate) print (ratings) num_users = ratings.shape[1] num_features = 3 movie_features = random.randn(", "= last_18.reshape(num_features, num_users ).transpose() return X, theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies,", "ratings[i, idx] - ratings_mean[i] return ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate) print", "num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) cost =", "num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features,", "- ratings X_grad = difference.dot( theta ) + reg_param * X theta_grad =", "reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) cost = sum( (X.dot( theta.T", "ratings_mean = zeros(shape = (num_movies, 1)) ratings_norm = zeros(shape = ratings.shape) for i", "num_movies, num_features ) user_prefs = random.randn( num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()]", "print (ratings) num_users = ratings.shape[1] num_features = 3 movie_features = random.randn( num_movies, num_features", "a rating was given difference = X.dot( theta.T ) * did_rate - ratings", "= difference.dot( theta ) + reg_param * X theta_grad = difference.T.dot( X )", "(ratings) ratings.shape did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean", "ratings, ratings_mean = normalize_ratings(ratings, did_rate) print (ratings) 
num_users = ratings.shape[1] num_features = 3", "= X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:]", ") + reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users,", "num_movies, num_features) cost = sum( (X.dot( theta.T ) * did_rate - ratings) **", "calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies,", "print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] = 7 kunj_ratings[7] = 3 print (kunj_ratings)", "num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) cost = sum(", "ratings = append(kunj_ratings, ratings, axis = 1) did_rate = append(((kunj_ratings != 0) *", "(kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] = 7 kunj_ratings[7] = 3 print (kunj_ratings) ratings", "reg_param * X theta_grad = difference.T.dot( X ) + reg_param * theta return", "reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) # obs for which a", "= 1) print (ratings) ratings.shape did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies", "Wed Jan 3 08:15:43 2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta", "last_18.reshape(num_features, num_users ).transpose() return X, theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features,", "<reponame>kunj17/Recommendation-System \"\"\" Created on Wed Jan 3 08:15:43 2018 @author: KUNJ \"\"\" \"\"\"", "1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] = 7 kunj_ratings[7] =", "= 30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, 
num_movies, num_features,", "print (ratings) did_rate = (ratings != 0) * 1 print(did_rate) ratings.shape did_rate.shape kunj_ratings", "@author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get rich", "print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features):", "(kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] = 7 kunj_ratings[7] = 3 print", "num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features,", "2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get rich or die trying 7.Frozen 8.Tangled", "= (num_movies, 1)) ratings_norm = zeros(shape = ratings.shape) for i in range(num_movies): idx", "did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) cost", "i in range(num_movies): idx = where(did_rate[i] == 1)[0] ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i,", ") + sum(X**2)) return cost + regularization from scipy import optimize reg_param =", "num_movies, num_features): first_30 = X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18 =", "* num_features:] theta = last_18.reshape(num_features, num_users ).transpose() return X, theta def calculate_gradient(X_and_theta, ratings,", "trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy import * num_movies = 10", "regularization = (reg_param / 2) * (sum( theta**2 ) + sum(X**2)) return cost", "idx] - ratings_mean[i] return ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate) print (ratings)", "= unroll_params(X_and_theta, 
num_users, num_movies, num_features) # obs for which a rating was given", "sum( (X.dot( theta.T ) * did_rate - ratings) ** 2 ) / 2", "1)) ratings_norm = zeros(shape = ratings.shape) for i in range(num_movies): idx = where(did_rate[i]", "num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape", "ratings_mean[i] return ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate) print (ratings) num_users =", "num_features = 3 movie_features = random.randn( num_movies, num_features ) user_prefs = random.randn( num_users,", "= normalize_ratings(ratings, did_rate) print (ratings) num_users = ratings.shape[1] num_features = 3 movie_features =", "num_users, num_movies, num_features) cost = sum( (X.dot( theta.T ) * did_rate - ratings)", "minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot(", "* did_rate - ratings) ** 2 ) / 2 regularization = (reg_param /", "0) * 1 print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings) print", "* (sum( theta**2 ) + sum(X**2)) return cost + regularization from scipy import", "normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean = zeros(shape = (num_movies, 1)) ratings_norm =", "= where(did_rate[i] == 1)[0] ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i, idx]", "2 ) / 2 regularization = (reg_param / 2) * (sum( theta**2 )", "for which a rating was given difference = X.dot( theta.T ) * did_rate", "minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, 
disp=True, full_output=True )", "= difference.T.dot( X ) + reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta,", "difference.T.dot( X ) + reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings,", "X.dot( theta.T ) * did_rate - ratings X_grad = difference.dot( theta ) +", "sum(X**2)) return cost + regularization from scipy import optimize reg_param = 30 #scipy", "first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features, num_users ).transpose() return", "(initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies", "ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features)", "cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features)", "did_rate): num_movies = ratings.shape[0] ratings_mean = zeros(shape = (num_movies, 1)) ratings_norm = zeros(shape", "1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get rich or die trying 7.Frozen", "x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1],", "\"\"\" Created on Wed Jan 3 08:15:43 2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1", "* did_rate - ratings X_grad = difference.dot( theta ) + reg_param * X", "fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, 
fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True", "ratings = random.randint(11, size = (num_movies, num_users)) print (ratings) did_rate = (ratings !=", "did_rate - ratings X_grad = difference.dot( theta ) + reg_param * X theta_grad", "random.randn( num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta)", "= 10 num_users = 5 ratings = random.randint(11, size = (num_movies, num_users)) print", "Compton 4.Godfather-2 5.Notorious 6.Get rich or die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\"", "1) did_rate = append(((kunj_ratings != 0) * 1), did_rate, axis = 1) print", "1), did_rate, axis = 1) print (ratings) ratings.shape did_rate print (did_rate) did_rate.shape def", "scipy import optimize reg_param = 30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings,", "print(user_prefs) all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj = all_predictions[:, 0:1] + ratings_mean", "cost = sum( (X.dot( theta.T ) * did_rate - ratings) ** 2 )", "kunj_ratings[7] = 3 print (kunj_ratings) ratings = append(kunj_ratings, ratings, axis = 1) did_rate", "user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies * num_features] X", "movie_features = random.randn( num_movies, num_features ) user_prefs = random.randn( num_users, num_features ) initial_X_and_theta", "on Wed Jan 3 08:15:43 2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight", "+ reg_param * X theta_grad = difference.T.dot( X ) + reg_param * theta", "reg_param = 30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, 
fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies,", "= ratings.shape[0] ratings_mean = zeros(shape = (num_movies, 1)) ratings_norm = zeros(shape = ratings.shape)", "10 num_users = 5 ratings = random.randint(11, size = (num_movies, num_users)) print (ratings)", "mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i] return ratings_norm, ratings_mean ratings,", "minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions =", "print (ratings) ratings.shape did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0]", "ratings.shape[0] ratings_mean = zeros(shape = (num_movies, 1)) ratings_norm = zeros(shape = ratings.shape) for", "# obs for which a rating was given difference = X.dot( theta.T )", "user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T )", "append(((kunj_ratings != 0) * 1), did_rate, axis = 1) print (ratings) ratings.shape did_rate", "* theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param):", ").transpose() return X, theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X,", "= sum( (X.dot( theta.T ) * did_rate - ratings) ** 2 ) /", "last_18 = X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features, num_users ).transpose() return X, theta", "\"\"\" from numpy import * num_movies = 10 num_users = 5 ratings =", "= (ratings != 0) * 1 print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1))", "axis = 1) 
print (ratings) ratings.shape did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate):", "= 8 kunj_ratings[4] = 7 kunj_ratings[7] = 3 print (kunj_ratings) ratings = append(kunj_ratings,", "3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get rich or die trying 7.Frozen 8.Tangled 9.Dunkirk", "optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs)", ") * did_rate - ratings X_grad = difference.dot( theta ) + reg_param *", "regularization from scipy import optimize reg_param = 30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost,", "3 08:15:43 2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2", "- ratings_mean[i] return ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate) print (ratings) num_users", "6.Get rich or die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy import", "10.Interstellar \"\"\" from numpy import * num_movies = 10 num_users = 5 ratings", "num_users, num_movies, num_features) # obs for which a rating was given difference =", "(ratings != 0) * 1 print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1)) print", "did_rate - ratings) ** 2 ) / 2 regularization = (reg_param / 2)", "KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get rich or", "size = (num_movies, num_users)) print (ratings) did_rate = (ratings != 0) * 1", "theta = unroll_params(X_and_theta, num_users, num_movies, num_features) cost = sum( (X.dot( theta.T ) *", "unroll_params(X_and_theta, num_users, num_movies, num_features) # obs for which a rating was given difference", "in range(num_movies): idx = where(did_rate[i] == 1)[0] ratings_mean[i] = 
mean(ratings[i, idx]) ratings_norm[i, idx]", "cost + regularization from scipy import optimize reg_param = 30 #scipy fmin minimized_cost_and_optimal_params", "= first_30.reshape((num_features, num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features, num_users ).transpose()", "\"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get rich or die trying", "random.randn( num_movies, num_features ) user_prefs = random.randn( num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(),", "X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) cost = sum( (X.dot( theta.T )", "print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 =", "ratings_norm = zeros(shape = ratings.shape) for i in range(num_movies): idx = where(did_rate[i] ==", "5.Notorious 6.Get rich or die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from numpy", "= X.dot( theta.T ) * did_rate - ratings X_grad = difference.dot( theta )", "2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious 6.Get", "r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta,", "outta Compton 4.Godfather-2 5.Notorious 6.Get rich or die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar", "1) print (ratings) ratings.shape did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies =", "append(kunj_ratings, ratings, axis = 1) did_rate = append(((kunj_ratings != 0) * 1), did_rate,", "(ratings) did_rate = (ratings != 0) * 1 print(did_rate) ratings.shape 
did_rate.shape kunj_ratings =", "unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj", "where(did_rate[i] == 1)[0] ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i, idx] -", "movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj = all_predictions[:, 0:1] + ratings_mean print (predictions_for_kunj) print", "was given difference = X.dot( theta.T ) * did_rate - ratings X_grad =", "#scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True,", "def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean = zeros(shape = (num_movies, 1)) ratings_norm", "theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta,", "def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users,", "num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj = all_predictions[:, 0:1]", "+ regularization from scipy import optimize reg_param = 30 #scipy fmin minimized_cost_and_optimal_params =", "range(num_movies): idx = where(did_rate[i] == 1)[0] ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i, idx] =", "X, theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta =", "30 #scipy fmin minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, 
num_features, reg_param),maxiter=100,", "X theta_grad = difference.T.dot( X ) + reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()]", "theta_grad = difference.T.dot( X ) + reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def", "did_rate, axis = 1) print (ratings) ratings.shape did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings,", "print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] =", "= unroll_params(X_and_theta, num_users, num_movies, num_features) cost = sum( (X.dot( theta.T ) * did_rate", "4.Godfather-2 5.Notorious 6.Get rich or die trying 7.Frozen 8.Tangled 9.Dunkirk 10.Interstellar \"\"\" from", ") * did_rate - ratings) ** 2 ) / 2 regularization = (reg_param", ") / 2 regularization = (reg_param / 2) * (sum( theta**2 ) +", "optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs", "num_movies, num_features) # obs for which a rating was given difference = X.dot(", "print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0] = 8 kunj_ratings[4] = 7 kunj_ratings[7] = 3", "(ratings) num_users = ratings.shape[1] num_features = 3 movie_features = random.randn( num_movies, num_features )", "+ reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies,", "= random.randn( num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print", "X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features, num_users ).transpose() return X, theta def calculate_gradient(X_and_theta,", "zeros(shape = ratings.shape) for i in range(num_movies): idx = 
where(did_rate[i] == 1)[0] ratings_mean[i]", "ratings.shape[1] num_features = 3 movie_features = random.randn( num_movies, num_features ) user_prefs = random.randn(", "kunj_ratings[0] = 8 kunj_ratings[4] = 7 kunj_ratings[7] = 3 print (kunj_ratings) ratings =", "obs for which a rating was given difference = X.dot( theta.T ) *", "numpy import * num_movies = 10 num_users = 5 ratings = random.randint(11, size", "(sum( theta**2 ) + sum(X**2)) return cost + regularization from scipy import optimize", "= 3 movie_features = random.randn( num_movies, num_features ) user_prefs = random.randn( num_users, num_features", "import * num_movies = 10 num_users = 5 ratings = random.randint(11, size =", "= 1) did_rate = append(((kunj_ratings != 0) * 1), did_rate, axis = 1)", "(num_movies, 1)) ratings_norm = zeros(shape = ratings.shape) for i in range(num_movies): idx =", "num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) cost = sum( (X.dot(", "given difference = X.dot( theta.T ) * did_rate - ratings X_grad = difference.dot(", "num_movies = ratings.shape[0] ratings_mean = zeros(shape = (num_movies, 1)) ratings_norm = zeros(shape =", "= X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features, num_users ).transpose() return X, theta def", "num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features, num_movies)).transpose() last_18", "08:15:43 2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton 4.Godfather-2 5.Notorious", "= ratings[i, idx] - ratings_mean[i] return ratings_norm, ratings_mean ratings, ratings_mean = normalize_ratings(ratings, did_rate)", "(num_movies, num_users)) print (ratings) did_rate = (ratings != 0) * 1 print(did_rate) ratings.shape", "num_movies, num_features) print(movie_features) print(user_prefs) all_predictions = movie_features.dot( user_prefs.T ) print(all_predictions) predictions_for_kunj = 
all_predictions[:,", "unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features, num_movies)).transpose()", "1)[0] ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i] return", "num_features ) user_prefs = random.randn( num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features)", "did_rate) print (ratings) num_users = ratings.shape[1] num_features = 3 movie_features = random.randn( num_movies,", "theta = last_18.reshape(num_features, num_users ).transpose() return X, theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users,", "** 2 ) / 2 regularization = (reg_param / 2) * (sum( theta**2", "ratings_mean[i] = mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i] return ratings_norm,", "num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape", "Jan 3 08:15:43 2018 @author: KUNJ \"\"\" \"\"\" 1.Godfather-1 2.Ted 3.Straight outta Compton", "* 1 print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9])", "1 print(did_rate) ratings.shape did_rate.shape kunj_ratings = zeros((num_movies, 1)) print (kunj_ratings) print (kunj_ratings[9]) kunj_ratings[0]", "= r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def", "num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True ) cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0] movie_features, 
user_prefs", "num_movies)).transpose() last_18 = X_and_theta[num_movies * num_features:] theta = last_18.reshape(num_features, num_users ).transpose() return X,", "def unroll_params(X_and_theta, num_users, num_movies, num_features): first_30 = X_and_theta[:num_movies * num_features] X = first_30.reshape((num_features,", "reg_param * theta return r_[X_grad.T.flatten(), theta_grad.T.flatten()] def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features,", ") user_prefs = random.randn( num_users, num_features ) initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()] print(movie_features) print", "difference.dot( theta ) + reg_param * X theta_grad = difference.T.dot( X ) +", "ratings.shape did_rate print (did_rate) did_rate.shape def normalize_ratings(ratings, did_rate): num_movies = ratings.shape[0] ratings_mean =", "user_prefs.T.flatten()] print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users,", "random.randint(11, size = (num_movies, num_users)) print (ratings) did_rate = (ratings != 0) *", "did_rate = append(((kunj_ratings != 0) * 1), did_rate, axis = 1) print (ratings)", "axis = 1) did_rate = append(((kunj_ratings != 0) * 1), did_rate, axis =", "num_users, num_movies, num_features, reg_param): X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features) # obs", "return X, theta def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param): X, theta", "num_features) cost = sum( (X.dot( theta.T ) * did_rate - ratings) ** 2", "print(movie_features) print (user_prefs) print (initial_X_and_theta) initial_X_and_theta.shape movie_features.T.flatten().shape user_prefs.T.flatten().shape initial_X_and_theta def unroll_params(X_and_theta, num_users, num_movies,", "ratings_mean = normalize_ratings(ratings, 
did_rate) print (ratings) num_users = ratings.shape[1] num_features = 3 movie_features", "(reg_param / 2) * (sum( theta**2 ) + sum(X**2)) return cost + regularization", "2) * (sum( theta**2 ) + sum(X**2)) return cost + regularization from scipy", "= (reg_param / 2) * (sum( theta**2 ) + sum(X**2)) return cost +", "= mean(ratings[i, idx]) ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i] return ratings_norm, ratings_mean" ]
[ "\"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium =", "setUp(self): \"\"\" Load some fake data to use in the tests \"\"\" self.datasource", "fake data to use in the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var =", "obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert", "self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\" Was the object created correctly? \"\"\" obj", "use in the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater", "self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium()", "foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\"", "test_observed_property_variable_create(self): \"\"\" create the object and test attributes \"\"\" assert self.observed_property_var.id == \"FOO\"", "DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self):", "created correctly? 
\"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource", "\"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource assert obj.observed_property_variable", "obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self):", "SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\")", "DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that the Data Sources were created\"\"\"", "assert obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium", "full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\" Was the object created", "test attributes \"\"\" assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\" assert", "observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert", "\"\"\" Was the object created correctly? 
\"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium,", "self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters are created \"\"\" def", "plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that the Data Sources", "sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium", "\"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource ==", "= DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource assert obj.observed_property_variable == self.observed_property_var", "the object and test attributes \"\"\" assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name ==", "test_observed_property_create(self): \"\"\" Was the object created correctly? 
\"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var,", "id_prefix=\"B\") def test_get(self): \"\"\"Assert that the Data Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\")", "self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def", "were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class", "self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the object created correctly? \"\"\" obj", "created correctly? \"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description ==", "def test_get(self): \"\"\"Assert that the Data Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar", "Assert that the parameters are created \"\"\" def setUp(self): \"\"\" Load some fake", "obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource assert obj.observed_property_variable ==", "(CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var", "datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium ==", "test_datasource_observed_property_variable_create(self): \"\"\" Was the object created 
correctly? \"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var,", "that the parameters are created \"\"\" def setUp(self): \"\"\" Load some fake data", "setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that", "\"\"\"Assert that the Data Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\")", "SamplingMedium() def test_observed_property_create(self): \"\"\" Was the object created correctly? \"\"\" obj = ObservedProperty(description=\"Acetate", "DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self):", "\"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\"", "and test attributes \"\"\" assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\"", "plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that the Data Sources were created\"\"\" foo =", "id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that the Data Sources were", "the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\")", "import 
TestCase from basin3d.models import DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase):", "ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters are created \"\"\" def setUp(self): \"\"\" Load", "== self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\" create the object and", "assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\"", "test_get(self): \"\"\"Assert that the Data Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar =", "the parameters are created \"\"\" def setUp(self): \"\"\" Load some fake data to", "observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource assert obj.observed_property_variable == self.observed_property_var assert obj.name ==", "DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\")", "def test_observed_property_create(self): \"\"\" Was the object created correctly? 
\"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\",", "self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\" create", "created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase):", "assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the object created correctly? \"\"\"", "in the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\",", "the object created correctly? \"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert", "\"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters are created \"\"\"", "(CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource", "assert obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\" create the", "object created correctly? 
\"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description", "DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that the", "correctly? \"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate", "def test_observed_property_variable_create(self): \"\"\" create the object and test attributes \"\"\" assert self.observed_property_var.id ==", "self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the", "Was the object created correctly? \"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource)", "attributes \"\"\" assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories", "DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that", "the object created correctly? \"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource", "object created correctly? 
\"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource ==", "Was the object created correctly? \"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert", "object and test attributes \"\"\" assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name == \"Groundwater", "to use in the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\",", "== \"Groundwater Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the object", "django.test import TestCase from basin3d.models import DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class", "assert obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\" create the object and test attributes", "\"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the object created correctly? 
\"\"\" obj = DataSourceObservedPropertyVariable(", "datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource assert obj.observed_property_variable == self.observed_property_var assert obj.name", "TestCase from basin3d.models import DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def", "class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters are created \"\"\" def setUp(self): \"\"\"", "created \"\"\" def setUp(self): \"\"\" Load some fake data to use in the", "Data Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name,", "== self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\"", "ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\",", "'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters are created \"\"\" def setUp(self):", "Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\" Was the object created correctly?", "are created \"\"\" def setUp(self): \"\"\" Load some fake data to use in", "== \"Acetate (CH3COO)\" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource", "assert self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories == 
\"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was", "obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\" assert", "class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def", "= DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters", "= ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\" Was", "Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo')", "import DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\",", "from basin3d.models import DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self):", "id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\" Was the object", "DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): 
DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\",", "plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that the Data", "obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\" create the object", "name=\"Alpha\") assert obj.datasource == self.datasource assert obj.observed_property_variable == self.observed_property_var assert obj.name == \"Alpha\"", "parameters are created \"\"\" def setUp(self): \"\"\" Load some fake data to use", "\\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\",", "Load some fake data to use in the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\")", "plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert that the Data Sources were created\"\"\" foo", "that the Data Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name,", "ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable ==", "== \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the object created correctly? 
\"\"\" obj =", "assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource def", "\"Groundwater Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the object created", "== \"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self):", "ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\",", "tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium", "\"\"\" create the object and test attributes \"\"\" assert self.observed_property_var.id == \"FOO\" assert", "create the object and test attributes \"\"\" assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name", "DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource assert obj.observed_property_variable == self.observed_property_var assert", "ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\" Was the", "= SamplingMedium() def test_observed_property_create(self): \"\"\" Was the object created correctly? 
\"\"\" obj =", "\"\"\" Assert that the parameters are created \"\"\" def setUp(self): \"\"\" Load some", "from django.test import TestCase from basin3d.models import DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable", "== self.datasource def test_observed_property_variable_create(self): \"\"\" create the object and test attributes \"\"\" assert", "data to use in the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable(", "def test_datasource_observed_property_variable_create(self): \"\"\" Was the object created correctly? \"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource,", "\"\"\" obj = ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\"", "\"\"\" Load some fake data to use in the tests \"\"\" self.datasource =", "\"\"\" def setUp(self): \"\"\" Load some fake data to use in the tests", "self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\" create the object and test", "Flux\" assert self.observed_property_var.categories == \"Hydrology,Subsurface\" def test_datasource_observed_property_variable_create(self): \"\"\" Was the object created correctly?", "the Data Sources were created\"\"\" foo = DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\")", "some fake data to use in the tests \"\"\" self.datasource = DataSource.objects.get(name=\"Alpha\") self.observed_property_var", "basin3d.models import DataSource, SamplingMedium, \\ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name=\"Foo\",", "= 
ObservedProperty(description=\"Acetate (CH3COO)\", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == \"Acetate (CH3COO)\" assert obj.observed_property_variable", "\"\"\" Was the object created correctly? \"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\")", "self.datasource def test_observed_property_variable_create(self): \"\"\" create the object and test attributes \"\"\" assert self.observed_property_var.id", "categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\" Was the object created correctly? \"\"\"", "\"\"\" assert self.observed_property_var.id == \"FOO\" assert self.observed_property_var.full_name == \"Groundwater Flux\" assert self.observed_property_var.categories ==", "bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the", "self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): \"\"\"", "def setUp(self): \"\"\" Load some fake data to use in the tests \"\"\"", "self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters are created", "correctly? 
\"\"\" obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name=\"Alpha\") assert obj.datasource == self.datasource assert", "obj.datasource == self.datasource def test_observed_property_variable_create(self): \"\"\" create the object and test attributes \"\"\"", "= DataSource.objects.get(name=\"Foo\") bar = DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert", "def setUp(self): DataSource.objects.create(name=\"Foo\", plugin_module=\"foo.bar.plugins\", plugin_class=\"Baz\", id_prefix=\"F\") DataSource.objects.create(name=\"Bar\", plugin_module=\"foo.plugins\", plugin_class=\"Bar\", id_prefix=\"B\") def test_get(self): \"\"\"Assert", "= DataSource.objects.get(name=\"Alpha\") self.observed_property_var = ObservedPropertyVariable( id=\"FOO\", full_name=\"Groundwater Flux\", categories=\"Hydrology,Subsurface\") self.sampling_medium = SamplingMedium() def", "DataSource.objects.get(name=\"Bar\") self.assertEqual(bar.name, \"Bar\") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): \"\"\" Assert that the parameters are" ]
[ "proximo noh eh \"nulo\", se nao, vai para proximo. if self.proximo is not", "sai da funcao. if self.proximo is not None: return True return False utimo", "eh \"nulo\", se nao, vai para proximo. if self.proximo is not None: return", "if self.proximo is not None: return True return False utimo = Noh() no2", "proximo def get_proximo(self): ## verifica se o proximo noh eh \"nulo\", se nao,", "nao, sai da funcao. if self.proximo is not None: return True return False", "se nao, sai da funcao. if self.proximo is not None: return True return", "no2 = Noh(utimo) atual = Noh(no2) while atual.tem_proximo(): print('Tem proximo') atual = atual.get_proximo()", "noh eh \"nulo\", se nao, vai para proximo. if self.proximo is not None:", "__init__(self, proximo=None): self.proximo = proximo def get_proximo(self): ## verifica se o proximo noh", "proximo=None): self.proximo = proximo def get_proximo(self): ## verifica se o proximo noh eh", "o proximo noh eh \"nulo\", se nao, vai para proximo. if self.proximo is", "if self.proximo is not None: return self.proximo def tem_proximo(self): ## verifica se tem", "## verifica se tem o proximo, se nao, sai da funcao. if self.proximo", "self.proximo is not None: return self.proximo def tem_proximo(self): ## verifica se tem o", "= proximo def get_proximo(self): ## verifica se o proximo noh eh \"nulo\", se", "Noh() no2 = Noh(utimo) atual = Noh(no2) while atual.tem_proximo(): print('Tem proximo') atual =", "Noh(): def __init__(self, proximo=None): self.proximo = proximo def get_proximo(self): ## verifica se o", "return self.proximo def tem_proximo(self): ## verifica se tem o proximo, se nao, sai", "vai para proximo. if self.proximo is not None: return self.proximo def tem_proximo(self): ##", "para proximo. if self.proximo is not None: return self.proximo def tem_proximo(self): ## verifica", "tem_proximo(self): ## verifica se tem o proximo, se nao, sai da funcao. if", "verifica se o proximo noh eh \"nulo\", se nao, vai para proximo. 
if", "nao, vai para proximo. if self.proximo is not None: return self.proximo def tem_proximo(self):", "utimo = Noh() no2 = Noh(utimo) atual = Noh(no2) while atual.tem_proximo(): print('Tem proximo')", "class Noh(): def __init__(self, proximo=None): self.proximo = proximo def get_proximo(self): ## verifica se", "not None: return self.proximo def tem_proximo(self): ## verifica se tem o proximo, se", "o proximo, se nao, sai da funcao. if self.proximo is not None: return", "self.proximo def tem_proximo(self): ## verifica se tem o proximo, se nao, sai da", "tem o proximo, se nao, sai da funcao. if self.proximo is not None:", "funcao. if self.proximo is not None: return True return False utimo = Noh()", "## verifica se o proximo noh eh \"nulo\", se nao, vai para proximo.", "da funcao. if self.proximo is not None: return True return False utimo =", "= 'Liu' class Noh(): def __init__(self, proximo=None): self.proximo = proximo def get_proximo(self): ##", "def __init__(self, proximo=None): self.proximo = proximo def get_proximo(self): ## verifica se o proximo", "return True return False utimo = Noh() no2 = Noh(utimo) atual = Noh(no2)", "def tem_proximo(self): ## verifica se tem o proximo, se nao, sai da funcao.", "False utimo = Noh() no2 = Noh(utimo) atual = Noh(no2) while atual.tem_proximo(): print('Tem", "proximo, se nao, sai da funcao. if self.proximo is not None: return True", "se tem o proximo, se nao, sai da funcao. if self.proximo is not", "return False utimo = Noh() no2 = Noh(utimo) atual = Noh(no2) while atual.tem_proximo():", "None: return True return False utimo = Noh() no2 = Noh(utimo) atual =", "True return False utimo = Noh() no2 = Noh(utimo) atual = Noh(no2) while", "se nao, vai para proximo. 
if self.proximo is not None: return self.proximo def", "__author__ = 'Liu' class Noh(): def __init__(self, proximo=None): self.proximo = proximo def get_proximo(self):", "is not None: return True return False utimo = Noh() no2 = Noh(utimo)", "self.proximo = proximo def get_proximo(self): ## verifica se o proximo noh eh \"nulo\",", "se o proximo noh eh \"nulo\", se nao, vai para proximo. if self.proximo", "not None: return True return False utimo = Noh() no2 = Noh(utimo) atual", "= Noh(utimo) atual = Noh(no2) while atual.tem_proximo(): print('Tem proximo') atual = atual.get_proximo() print('Fim", "get_proximo(self): ## verifica se o proximo noh eh \"nulo\", se nao, vai para", "is not None: return self.proximo def tem_proximo(self): ## verifica se tem o proximo,", "= Noh() no2 = Noh(utimo) atual = Noh(no2) while atual.tem_proximo(): print('Tem proximo') atual", "Noh(utimo) atual = Noh(no2) while atual.tem_proximo(): print('Tem proximo') atual = atual.get_proximo() print('Fim do", "def get_proximo(self): ## verifica se o proximo noh eh \"nulo\", se nao, vai", "\"nulo\", se nao, vai para proximo. if self.proximo is not None: return self.proximo", "self.proximo is not None: return True return False utimo = Noh() no2 =", "proximo. if self.proximo is not None: return self.proximo def tem_proximo(self): ## verifica se", "'Liu' class Noh(): def __init__(self, proximo=None): self.proximo = proximo def get_proximo(self): ## verifica", "atual = Noh(no2) while atual.tem_proximo(): print('Tem proximo') atual = atual.get_proximo() print('Fim do noh')", "verifica se tem o proximo, se nao, sai da funcao. if self.proximo is", "None: return self.proximo def tem_proximo(self): ## verifica se tem o proximo, se nao," ]
[ "to be sure nothing went wrong. if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_,", "@addon_view @can_be_purchased @write def purchase_complete(request, addon, status): result = '' if status ==", "jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response @login_required @addon_view @can_be_purchased @has_purchased def", "getattr(self, 'order_%s' % field, None) if order: return order(filter) return filter def _filter(self,", "an error checking purchase state') log.error('Check purchase paypal addon: %s, user: %s, paykey:", "{ 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download", "post_required, write from amo.forms import AbuseForm from amo.helpers import shared_url from amo.utils import", "from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base as caching import", "template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary)", "Remora uses persona.author despite there being a display_username. data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author", "args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def addon_detail(request, addon):", "= self.filter(self.field) def options(self, request, key, default): \"\"\"Get the (option, title) pair we", "key = 'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None):", "as _, ugettext_lazy as _lazy import waffle from mobility.decorators import mobilized, mobile_template import", "pages. 
raise http.Http404 # addon needs to have a version and be valid", "platform) class CollectionPromoBox(object): def __init__(self, request): self.request = request def features(self): return CollectionFeature.objects.all()", "settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({", "% (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json'", "target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.')", "field): return getattr(self, 'filter_%s' % field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return", "src = request.GET['src'] else: page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'),", "else: title = self.extras_dict[opt] return opt, title def all(self): \"\"\"Get a full mapping", "%s' % uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] =", "base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular':", "'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure',", "== 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status':", "ids, 'addons.id') def filter_price(self): return 
self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model == Addon:", "is the addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if", "the list and get 3 items. rand = lambda xs: random.shuffle(xs) or xs[:3]", "waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status = '', '',", "_login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon,", "version = addon.current_version if not (version and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html',", "mobile_template import amo from amo import messages from amo.decorators import login_required, post_required, write", "jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def privacy(request, addon): if not addon.privacy_policy:", "%s by user: %s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source',", "'' if status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for uuid:", "wrong. if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased", "_category_personas(qs, limit=6) else: category_personas = None data = { 'addon': addon, 'persona': persona,", "mobility.decorators import mobilized, mobile_template import amo from amo import messages from amo.decorators import", "def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self,", "amo.models import manual_order from amo import urlresolvers from amo.urlresolvers import reverse from abuse.models", "incompatible with this app, redirect. 
comp_apps = addon.compatible_apps if comp_apps and request.APP not", "= self.extras_dict[opt] return opt, title def all(self): \"\"\"Get a full mapping of {option:", "{'addon': addon}, status=404) if addon.is_webapp(): # Apps don't deserve AMO detail pages. raise", "featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons, then pick 3 at", "'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon, page): if addon.is_persona(): raise http.Http404() if", "%s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') ==", "add-on ID.') return decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\" if", "list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG,", "given field.\"\"\" filter = self._filter(field) & self.base_queryset order = getattr(self, 'order_%s' % field,", "args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" # If current version is", "it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. 
collections = Collection.objects.filter(listed=True, application=request.APP.id,", "homepage_promos(request): from discovery.views import promos version, platform = request.GET.get('version'), request.GET.get('platform') if not (platform", "# In this case PayPal disagreed, we should not be trusting # what", "import waffle from mobility.decorators import mobilized, mobile_template import amo from amo import messages", "hotness, 'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request): # Shuffle the", "base, key, default) def filter(self, field): sorts = {'name': 'name_sort', 'created': '-created', 'updated':", "request.GET[key] in self.extras_dict): opt = request.GET[key] else: opt = default if opt in", "Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for performance. Kill it with ES. frozen =", "return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def", "'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data) class BaseFilter(object): \"\"\" Filters", "and locale, so we can favor locale specific # promos. promo_dict = {}", "back to the details page. if request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return", "Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just returns the top half", "amount, currency = addon.premium.get_price(), 'USD' # If tier is specified, then let's look", "in request.GET. 
\"\"\" def __init__(self, request, base, key, default, model=Addon): self.opts_dict = dict(self.opts)", "('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src and", "= 'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details", "not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''}), content_type='application/json')", "locale specific collections. for feature in features: key = (feature.id, lang) if key", "_lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request): # Add-ons. base", "addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended,", "uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s' % uuid_) # If", "con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result} # For mobile,", "return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon): data = {'addon':", ".models import Addon, Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from", "return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" # If", "from reviews.forms import ReviewForm from reviews.models import Review, GroupedRating 
from session_csrf import anonymous_csrf,", "%s' % uuid) else: log.info('User completed contribution: %s' % uuid) response = jingo.render(request,", "# If there was an error getting the paykey, then JSON will #", "Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections = Collection.objects.filter(listed=True,", "raise http.Http404 # addon needs to have a version and be valid for", "jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased", "opt = request.GET[key] else: opt = default if opt in self.opts_dict: title =", "locale. name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id", "and contribution_src are different. src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon,", "on the initial page load. if request.is_ajax(): # Other add-ons/apps from the same", "addon.current_version if 'src' in request.GET: contribution_src = src = request.GET['src'] else: page_srcs =", "version is incompatible with this app, redirect. comp_apps = addon.compatible_apps if comp_apps and", "return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon})", "are concentrating on logged in users. 
@login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def", "import share as share_redirect from stats.models import Contribution from translations.query import order_by_translation from", "addon, 'status': status}) response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request,", "'abuse_form': AbuseForm(request=request), } # details.html just returns the top half of the page", "*args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request, *args, **kwargs)", "should not be trusting # what get_paykey said. Which is a worry. log.error('Check", "%s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if result == 'ERROR':", "%s for paykey: %s' % (result, con.paykey[:10])) if result == 'COMPLETED' and con.type", "return f(request, *args, **kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except", "want to see public add-ons on the front page. 
c = promo_dict[key].collection c.public_addons", "= translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups", "qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas = None data =", "@functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id: return", "addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies,", "source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s' % uuid_) #", "messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this once we", "TODO(andym): Figure out how to get this in the addon authors # locale,", "order(filter) return filter def _filter(self, field): return getattr(self, 'filter_%s' % field)() def filter_featured(self):", "@login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data =", "add-ons on the front page. c = promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public()", "addon): data = {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt", "model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {} self.request", "Temporary. 
form = ContributionForm({'amount': amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status':", "personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request): # Shuffle the list and", "**context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow'", "help generate querysets for add-on listings. You have to define ``opts`` on the", "flat=True)) # Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18]", "@login_required @addon_view @can_be_purchased def purchase_error(request, addon): data = {'addon': addon, 'is_ajax': request.is_ajax()} return", "from stats.models import Contribution from translations.query import order_by_translation from versions.models import Version from", "request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not addon.current_version: raise", "if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login return _login(request, data=data,", "persona's categories categories = addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs,", "request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True),", "con.paykey[:10])) if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl':", "error getting the paykey') 
log.error('Error getting paykey, contribution for addon: %s' % addon.pk,", "request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL',", "details page. if request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon})", "login_required, post_required, write from amo.forms import AbuseForm from amo.helpers import shared_url from amo.utils", "= Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas = None data = {", "to the details page. if request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url)", "'There was an error getting the paykey') log.error('Error getting paykey, contribution for addon:", "of (key, title) pairs. The key is used in GET parameters and the", "= (lang == 'en_US' and addon.get_satisfaction_company) # Addon recommendations. recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6]", "download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return", "This is lame for performance. Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True))", "pairs. 
The key is used in GET parameters and the title can be", "= base self.key = key self.model = model self.field, self.title = self.options(self.request, key,", "result = paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL',", "= request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '')", "was an error getting the paykey, then JSON will # not have a", "def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(),", "purchase_error(request, addon): data = {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view", "filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating')", "collections this addon is part of. 
collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx =", "= {} for feature_id, v in groups: promo = v.next() key = (feature_id,", "uuid: %s' % uuid_) # If this was a pre-approval, it's completed already,", "(('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created", "= installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon): data", "# We key by feature_id and locale, so we can favor locale specific", "return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self):", "from .forms import ContributionForm from .models import Addon, Persona, FrozenAddon from .decorators import", "'-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently", "in self.opts_dict or request.GET[key] in self.extras_dict): opt = request.GET[key] else: opt = default", "else: version = addon.current_version if 'src' in request.GET: contribution_src = src = request.GET['src']", "the initial page load. if request.is_ajax(): # Other add-ons/apps from the same author(s).", "despite there being a display_username. data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE:", "request.amo_user.get_preapproval() paykey, error, status = '', '', '' try: paykey, status = paypal.get_paykey(", "supports this type. 
try: new_app = [a for a in amo.APP_USAGE if addon.type", "addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html',", "home(request): # Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for performance. Kill", "for # a for pre or post IPN contributions. If both fail, then", "order = getattr(self, 'order_%s' % field, None) if order: return order(filter) return filter", "paypal from reviews.forms import ReviewForm from reviews.models import Review, GroupedRating from session_csrf import", "= 'ERROR' status = 'error' log.debug('Paypal returned: %s for paykey: %s' % (result,", "result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''),", "uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if", "'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon, status):", "field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self):", "user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True),", "to process for # anonymous users. For now we are concentrating on logged", "rather than the contributors locale. name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name),", "the view. 
The chosen filter field is combined with the ``base`` queryset using", "for # anonymous users. For now we are concentrating on logged in users.", "= {'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result} # For mobile, bounce back", "contrib for uuid: %s' % uuid_) # If this was a pre-approval, it's", "# Get 10 popular add-ons, then pick 3 at random. qs = list(Addon.objects.listed(request.APP)", "request.is_ajax(): # If there was an error getting the paykey, then JSON will", "if order: return order(filter) return filter def _filter(self, field): return getattr(self, 'filter_%s' %", "version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version", "request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form':", "Redirect to an app that supports this type. try: new_app = [a for", "lambda: randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def", "# TODO(andym): remove this once we figure out how to process for #", "== amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result} #", "installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon): data =", "import Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real as", "'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src", "addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not addon.current_version: 
raise http.Http404 return", "sure nothing went wrong. if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE',", "Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular =", "does a lot more queries we don't want on the initial page load.", "xs: random.shuffle(xs) or xs[:3] # Get some featured add-ons with randomness. featured =", "amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount:", "type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s' % uuid_) # If this", "addon.compatible_apps if comp_apps and request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app =", "'order_%s' % field, None) if order: return order(filter) return filter def _filter(self, field):", "status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon, status): uuid = request.GET.get('uuid')", "used in the view. The chosen filter field is combined with the ``base``", "paykey and the JS can cope appropriately. return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error':", "addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def", "cope appropriately. 
return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') return", "'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews':", "in request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not addon.current_version:", "else: log.error('No paykey present for uuid: %s' % uuid_) log.debug('Got paykey for addon:", "for performance. Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections", "is a worry. log.error('Check purchase failed on uuid: %s' % uuid_) status =", "'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views import", "return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self):", "'' try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps'", "= Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id')", "jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): # Apps don't deserve AMO detail", "request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n:", "self.base_queryset = base 
self.key = key self.model = model self.field, self.title = self.options(self.request,", "purchase is completed for uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else: #", "= {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def", "not in promo_dict: key = (feature.id, '') if key not in promo_dict: continue", "request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or request.is_ajax():", "from amo.helpers import shared_url from amo.utils import randslice, sorted_groupby, urlparams from amo.models import", "= request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested' source = request.POST.get('source', '') comment", "addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if addon.is_disabled: return", "= Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness,", "request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished'", "{'addon': addon, 'status': status}) response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased @anonymous_csrf def", "for uuid: %s' % uuid_) # If this was a pre-approval, it's completed", "== amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if", "addon.is_webapp(): # Apps don't deserve AMO detail pages. raise http.Http404 # addon needs", "log.debug('Check purchase is completed for uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else:", "This is the non-Ajax fallback. 
if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase", "'') if key not in promo_dict: continue # We only want to see", "key, default) def filter(self, field): sorts = {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated',", "= addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version = addon.current_version if not (version", "add-on listings. You have to define ``opts`` on the subclass as a sequence", "return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src, 'contribution_src': contribution_src, 'version': version})", "import csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base as caching import jingo import", "page): if addon.is_persona(): raise http.Http404() if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version", "comp_apps and request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return", "key not in promo_dict: key = (feature.id, '') if key not in promo_dict:", "can cope appropriately. return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json')", "in amo.APP_USAGE if addon.type in a.types][0] except IndexError: raise http.Http404 else: prefixer =", "& Addon.objects.public() rv[feature] = c return rv def __nonzero__(self): return self.request.APP == amo.FIREFOX", "= self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale)", "Updated'))) filter_new = BaseFilter.filter_created def home(request): # Add-ons. 
base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This", "jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request): from discovery.views import promos version,", "paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or", "and the title can be used in the view. The chosen filter field", "translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups =", "(version and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version):", "from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as share_redirect from stats.models", "email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid))", "addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon):", "from urlparse import urlparse import uuid from operator import attrgetter from django import", "jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(),", "'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src and contribution_src are different. src, contribution_src", "how to process for # anonymous users. 
For now we are concentrating on", "_('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, })", "speed. The bottom # does a lot more queries we don't want on", "log.debug('Starting purchase of addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) amount =", "top half of the page for speed. The bottom # does a lot", ".forms import ContributionForm from .models import Addon, Persona, FrozenAddon from .decorators import (addon_view_factory,", "'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): # Apps don't deserve AMO detail pages.", "= 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for uuid: %s' % uuid_) log.debug('Got", "tier is specified, then let's look it up. form = PriceCurrencyForm(data=request.POST, addon=addon) if", "Personas.\"\"\" if not addon.is_public(): raise http.Http404 persona = addon.persona # this persona's categories", "already, we'll # double check this with PayPal, just to be sure nothing", "details page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon':", "key, default): super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self, field): sorts = {'name':", "collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10]", "jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon, page): if addon.is_persona(): raise http.Http404()", "memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except 
paypal.PaypalError", "addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting", "import PriceCurrencyForm import paypal from reviews.forms import ReviewForm from reviews.models import Review, GroupedRating", "field.\"\"\" filter = self._filter(field) & self.base_queryset order = getattr(self, 'order_%s' % field, None)", "translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers", "\"\"\"Details page for Personas.\"\"\" if not addon.is_public(): raise http.Http404 persona = addon.persona #", "was an error checking purchase state') log.error('Check purchase paypal addon: %s, user: %s,", "request.method == \"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path())", "addon.type in a.types][0] except IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app =", "full mapping of {option: queryset}.\"\"\" return dict((field, self.filter(field)) for field in dict(self.opts)) def", "src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove this once we figure out", "logged out flow. @csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon, status): result", "# Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for performance. 
Kill it", "redirect_id: return f(request, *args, **kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id]))", "addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested' source = request.POST.get('source',", "a in addons if a.id in featured] popular = sorted([a for a in", "we favor locale specific collections. for feature in features: key = (feature.id, lang)", "used in GET parameters and the title can be used in the view.", "= (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv = {} # If we can,", "'persona': persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new(): #", "addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request,", "class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self, request, base, key, default): super(ESBaseFilter,", "amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result} # For", "if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount,", "disagreed, we should not be trusting # what get_paykey said. Which is a", "= [a for a in amo.APP_USAGE if addon.type in a.types][0] except IndexError: raise", "flow. 
@csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon, status): result = ''", "addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60 * 24) def persona_redirect(request, persona_id): persona", "% (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request,", "addon) else: # Redirect to an app that supports this type. try: new_app", "% field, None) if order: return order(filter) return filter def _filter(self, field): return", "raise http.Http404() if status == 'cancel': log.info('User cancelled contribution: %s' % uuid) else:", "categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas = None data", "# This is all going to get shoved into solitude. Temporary. form =", "as caching import jingo import jinja2 import commonware.log import session_csrf from tower import", "%s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id # l10n:", "# This is lame for performance. Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon',", "completed for uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else: # In this", "promo_dict: continue # We only want to see public add-ons on the front", "def filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def", "bounce back to the details page. 
if request.MOBILE: url = urlparams(shared_url('detail', addon), **context)", "status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else", "self.request.APP == amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path())", "urlparams from amo.models import manual_order from amo import urlresolvers from amo.urlresolvers import reverse", "status): result = '' if status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up", "return filter def _filter(self, field): return getattr(self, 'filter_%s' % field)() def filter_featured(self): ids", "limit): f = lambda: randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key() return caching.cached(f,", "Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon:", "is found in request.GET. 
\"\"\" def __init__(self, request, base, key, default, model=Addon): self.opts_dict", "by author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not", "dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug,", "comp_apps = addon.compatible_apps if comp_apps and request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix()", "form = ContributionForm({'amount': amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '',", "= default if opt in self.opts_dict: title = self.opts_dict[opt] else: title = self.extras_dict[opt]", "'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections': collections})", "con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase", "= 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html',", "limit=limit) key = 'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon,", "return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form = AbuseForm(request.POST or None,", "http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this once we figure out logged out", "dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404)", "and 
addon.get_satisfaction_company) # Addon recommendations. recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this", "license_redirect(request, version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request,", "failed on uuid: %s' % uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey", "'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data) class BaseFilter(object): \"\"\" Filters help", "def addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if addon.is_disabled:", "+ qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details page for", "# If tier is specified, then let's look it up. form = PriceCurrencyForm(data=request.POST,", "paypal_id = addon.name, addon.paypal_id # l10n: {0} is the addon name contrib_for =", "if key not in promo_dict: continue # We only want to see public", "@write def purchase_complete(request, addon, status): result = '' if status == 'complete': uuid_", "completed already, we'll # double check this with PayPal, just to be sure", "http.Http404 return extension_detail(request, addon) else: # Redirect to an app that supports this", "in ``opts`` that's used if nothing good is found in request.GET. 
\"\"\" def", "not redirect_id: return f(request, *args, **kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail',", "raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): #", "\"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html',", "# got a matching contribution. lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con", "ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured", "status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym):", "lambda xs: random.shuffle(xs) or xs[:3] # Get some featured add-ons with randomness. featured", "return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')),", "raise http.Http404 persona = addon.persona # this persona's categories categories = addon.categories.filter(application=request.APP.id) if", "try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if", "# details.html just returns the top half of the page for speed. 
The", "uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for uuid: %s' %", "con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result}", "trans_real as translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary", "def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users')", "import commonware.log import session_csrf from tower import ugettext as _, ugettext_lazy as _lazy", "get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if 'src' in request.GET: contribution_src = src", "request=request) if request.method == \"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.'))", "split up the add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a", "data.', 'status': '', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity:", "return extension_detail(request, addon) else: # Redirect to an app that supports this type.", "generate querysets for add-on listings. 
You have to define ``opts`` on the subclass", "if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status = '',", "@mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f", "author_addon_clicked(f): \"\"\"Decorator redirecting clicks on \"Other add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request, *args,", "data = {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required", "qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure',", "import ugettext as _, ugettext_lazy as _lazy import waffle from mobility.decorators import mobilized,", "reverse from abuse.models import send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms", "persona.author despite there being a display_username. data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not", "contribution. lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check", "Shuffle the list and get 3 items. 
rand = lambda xs: random.shuffle(xs) or", "data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return", "(option, title) pair we want according to the request.\"\"\" if key in request.GET", "self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def", "favor locale specific # promos. promo_dict = {} for feature_id, v in groups:", "= (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal", "else: page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'),", "xs[:3] # Get some featured add-ons with randomness. 
featured = Addon.featured_random(request.APP, request.LANG)[:3] #", "# l10n: {0} is the addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) #", "= request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon name", "is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just returns the", "redirect from django.utils.translation import trans_real as translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf", "= (feature.id, '') if key not in promo_dict: continue # We only want", "= {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create(", "contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src, 'contribution_src':", "Which is a worry. log.error('Check purchase failed on uuid: %s' % uuid_) status", "# Download src and contribution_src are different. src, contribution_src = page_srcs.get(page) return jingo.render(request,", "def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" # If current version is incompatible with", "http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return jingo.render(request,", "if addon.is_persona(): raise http.Http404() if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version =", "hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name))", "half of the page for speed. 
The bottom # does a lot more", "json import random from urlparse import urlparse import uuid from operator import attrgetter", "persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new(): # Remora uses persona.author despite there", "and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status = '', '', '' try:", "= 'allow' return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl',", "jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx)", "(self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter):", "return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\"", "template, data) class BaseFilter(object): \"\"\" Filters help generate querysets for add-on listings. You", "self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated',", "self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return", "add-ons, then pick 3 at random. 
qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10])", "= request.GET[key] else: opt = default if opt in self.opts_dict: title = self.opts_dict[opt]", "page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), }", "addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL,", "the top half of the page for speed. The bottom # does a", "exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user)", "attrgetter from django import http from django.conf import settings from django.db.models import Q", "items. rand = lambda xs: random.shuffle(xs) or xs[:3] # Get some featured add-ons", "contrib.save() else: log.error('No paykey present for uuid: %s' % uuid_) log.debug('Got paykey for", "a worry. log.error('Check purchase failed on uuid: %s' % uuid_) status = 'NOT-COMPLETED'", "addon=None): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download,", "http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon, status): uuid = request.GET.get('uuid') if not uuid:", "page for speed. 
The bottom # does a lot more queries we don't", "urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only supports", "the (option, title) pair we want according to the request.\"\"\" if key in", "{} self.request = request self.base_queryset = base self.key = key self.model = model", "@write @post_required def purchase(request, addon): log.debug('Starting purchase of addon: %s by user: %s'", "is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data) class", "out how to process for # anonymous users. For now we are concentrating", "'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug = addon.slug", "'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" # If current version", "# If current version is incompatible with this app, redirect. 
comp_apps = addon.compatible_apps", "'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self, request, base, key, default):", "exc_info=True) result = 'ERROR' status = 'error' log.debug('Paypal returned: %s for paykey: %s'", "paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl':", "%s by user: %s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)", "dict(self.extras) if hasattr(self, 'extras') else {} self.request = request self.base_queryset = base self.key", "order_by_translation from versions.models import Version from .forms import ContributionForm from .models import Addon,", "http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\"", "CollectionPromo from market.forms import PriceCurrencyForm import paypal from reviews.forms import ReviewForm from reviews.models", "jingo.render(request, template, data) class BaseFilter(object): \"\"\" Filters help generate querysets for add-on listings.", "this case PayPal disagreed, we should not be trusting # what get_paykey said.", "(self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def", "urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions", "filter_new = BaseFilter.filter_created def 
home(request): # Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is", "addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug paykey, status = paypal.get_paykey(", "log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view", "% addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying,", "random.shuffle(xs) or xs[:3] # Get some featured add-ons with randomness. featured = Addon.featured_random(request.APP,", "in addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured,", "= request.GET.get('version'), request.GET.get('platform') if not (platform or version): raise http.Http404 return promos(request, 'home',", "@can_be_purchased @has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon,", "result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned", "for feature in features: key = (feature.id, lang) if key not in promo_dict:", "request.\"\"\" if key in request.GET and (request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict):", "needs to have a version and be valid for this app. 
if addon.type", "addon.charity: # TODO(andym): Figure out how to get this in the addon authors", "ContributionForm({'amount': amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '',", "if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version", "this with PayPal, just to be sure nothing went wrong. if status ==", "== \"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else:", "paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an error checking purchase state')", "from discovery.views import promos version, platform = request.GET.get('version'), request.GET.get('platform') if not (platform or", "being a display_username. data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: # tags", "addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if 'src' in request.GET:", "return CollectionFeature.objects.all() def collections(self): features = self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='')", "favor locale specific collections. for feature in features: key = (feature.id, lang) if", "'result': result} # For mobile, bounce back to the details page. 
if request.MOBILE:", "addon.persona # this persona's categories categories = addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0])", "'%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or request.is_ajax(): # If there", "not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon,", "in groups: promo = v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv", "not (version and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request,", "self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads')", "default if opt in self.opts_dict: title = self.opts_dict[opt] else: title = self.extras_dict[opt] return", "AbuseForm(request=request), }) return jingo.render(request, template, data) class BaseFilter(object): \"\"\" Filters help generate querysets", "*args, **kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return", "return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this once we figure out logged", "import http from django.conf import settings from django.db.models import Q from django.shortcuts import", "# TODO(andym): again, remove this once we figure out logged out flow. 
@csrf_exempt", "return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') # This is", "slug = addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug paykey, status", "error getting the paykey, then JSON will # not have a paykey and", "functools import hashlib import json import random from urlparse import urlparse import uuid", "and request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail',", "Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting paykey, contribution", "(settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or request.is_ajax(): # If there was an", "addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source,", "featured] popular = sorted([a for a in addons if a.id in popular], key=attrgetter('average_daily_users'),", "purchased using pre-approval') log.debug('Status is completed for uuid: %s' % uuid_) if paypal.check_purchase(paykey)", "a in amo.APP_USAGE if addon.type in a.types][0] except IndexError: raise http.Http404 else: prefixer", "return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return", "or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') #", "Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased", "= ContributionForm({'amount': amount}) if not form.is_valid(): return 
http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url':", "means looking for # a for pre or post IPN contributions. If both", "an app that supports this type. try: new_app = [a for a in", "= 'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug", "%s' % uuid_) # If this was a pre-approval, it's completed already, we'll", "(settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey,", "to an app that supports this type. try: new_app = [a for a", "self.title = self.options(self.request, key, default) self.qs = self.filter(self.field) def options(self, request, key, default):", "= new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\"", "the contributors locale. name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else:", "this once we figure out how to process for # anonymous users. For", "# For mobile, bounce back to the details page. 
if request.MOBILE: url =", "write from amo.forms import AbuseForm from amo.helpers import shared_url from amo.utils import randslice,", "== 'COMPLETED': log.debug('Check purchase is completed for uuid: %s' % uuid_) contrib.type =", "= urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp():", "'status': status}) response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None):", "not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version = addon.current_version", "addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company) # Addon recommendations. recommended = Addon.objects.listed(request.APP).filter(", "return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60 * 24)", "we'll # double check this with PayPal, just to be sure nothing went", "status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s' %", "Download src and contribution_src are different. src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html',", "if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug", "# l10n: {0} is the addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval", "load. if request.is_ajax(): # Other add-ons/apps from the same author(s). ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6]", "then let's look it up. 
form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier =", "uuid_, 'Purchase', 'PURCHASE', 'A user purchased using pre-approval') log.debug('Status is completed for uuid:", "if key in request.GET and (request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict): opt", "order: return order(filter) return filter def _filter(self, field): return getattr(self, 'filter_%s' % field)()", "f = lambda: randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key() return caching.cached(f, key)", "template=None): \"\"\"Details page for Personas.\"\"\" if not addon.is_public(): raise http.Http404 persona = addon.persona", "amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s' %", "addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections':", "c return rv def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def eula(request, addon,", "request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status = '', '', '' try: paykey,", "in addons if a.id in featured] popular = sorted([a for a in addons", "http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view", "data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: # tags dev_tags, user_tags =", "= Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for performance. Kill it with ES. frozen", "type. 
try: new_app = [a for a in amo.APP_USAGE if addon.type in a.types][0]", "commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled)", "= request.GET['src'] else: page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock':", "if not request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags':", "source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s' % uuid_)", "filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated')", "key, default) self.qs = self.filter(self.field) def options(self, request, key, default): \"\"\"Get the (option,", "3 at random. qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs)", "Apps don't deserve AMO detail pages. raise http.Http404 # addon needs to have", "is part of. 
collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon': addon,", "jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60 * 24) def", "'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version =", "if not persona.is_new(): # Remora uses persona.author despite there being a display_username. data['author_gallery']", "commonware.log import session_csrf from tower import ugettext as _, ugettext_lazy as _lazy import", "\"\"\"Extensions details page.\"\"\" # If current version is incompatible with this app, redirect.", "% (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id # l10n: {0}", "process for # anonymous users. For now we are concentrating on logged in", "contribution: %s' % uuid) else: log.info('User completed contribution: %s' % uuid) response =", "base self.key = key self.model = model self.field, self.title = self.options(self.request, key, default)", "import vary_on_headers import caching.base as caching import jingo import jinja2 import commonware.log import", "def persona_detail(request, addon, template=None): \"\"\"Details page for Personas.\"\"\" if not addon.is_public(): raise http.Http404", "is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type')", "privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view", "getting the paykey') log.error('Error getting paykey, contribution for addon: %s' % addon.pk, exc_info=True)", "return getattr(self, 'filter_%s' % field)() def 
filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects,", "shoved into solitude. Temporary. form = ContributionForm({'amount': amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error':", "popular = sorted([a for a in addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True)", "get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version})", "status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased using pre-approval')", "on logged in users. @login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon):", "'suggested') is_suggested = contrib_type == 'suggested' source = request.POST.get('source', '') comment = request.POST.get('comment',", "in features: key = (feature.id, lang) if key not in promo_dict: key =", "self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {} self.request =", "en-US. 
lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company) # Addon", "('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request):", "= { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount: amount", "# addon needs to have a version and be valid for this app.", "| Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') # We", "'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login return", "is USD. amount, currency = addon.premium.get_price(), 'USD' # If tier is specified, then", "= (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id", "@addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data = {'addon':", "contribution_src = src = request.GET['src'] else: page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed':", "collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'),", "'src' in request.GET: contribution_src = src = request.GET['src'] else: page_srcs = { 'developers':", "self.extras_dict): opt = request.GET[key] else: opt = default if opt in self.opts_dict: title", "If tier is specified, then let's look it up. 
form = PriceCurrencyForm(data=request.POST, addon=addon)", "addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source =", "checking purchase state') log.error('Check purchase paypal addon: %s, user: %s, paykey: %s' %", "self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self):", "def collections(self): features = self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang)", "version}) @addon_view def privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html',", "non-Ajax fallback. if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail',", "again, remove this once we figure out logged out flow. @csrf_exempt @login_required @addon_view", "uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] = 'allow' return", "self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self): return", "request.GET. ``default`` should be a key in ``opts`` that's used if nothing good", "= Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon is part of. collections =", "{} # If we can, we favor locale specific collections. for feature in", "all going to get shoved into solitude. Temporary. 
form = ContributionForm({'amount': amount}) if", "'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request),", "error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the", "from sharing.views import share as share_redirect from stats.models import Contribution from translations.query import", "status=404) if addon.is_webapp(): # Apps don't deserve AMO detail pages. raise http.Http404 #", "'page': page, 'src': src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove this once", "{'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon,", "self.request = request self.base_queryset = base self.key = key self.model = model self.field,", "return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx'))", "response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download =", "if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) #", "extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f = lambda:", "mobilized, mobile_template import amo from amo import messages from amo.decorators import login_required, post_required,", "% uuid_) contrib.type = 
amo.CONTRIB_PURCHASE else: # In this case PayPal disagreed, we", "get shoved into solitude. Temporary. form = ContributionForm({'amount': amount}) if not form.is_valid(): return", "and split up the add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured =", "tower import ugettext as _, ugettext_lazy as _lazy import waffle from mobility.decorators import", "for a in addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html',", "return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): # Apps don't deserve AMO", "amount, currency = tier.price, tier.currency paykey, status, error = '', '', '' preapproval", "pre or post IPN contributions. If both fail, then we've not # got", "form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form':", "trusting # what get_paykey said. Which is a worry. 
log.error('Check purchase failed on", "addon.is_persona(): raise http.Http404() if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs,", "form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier() if tier: amount, currency", "version=version)) def license_redirect(request, version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view", "ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def", "import Addon, Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models", "= _category_personas(qs, limit=6) else: category_personas = None data = { 'addon': addon, 'persona':", "# what get_paykey said. Which is a worry. log.error('Check purchase failed on uuid:", "%s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_),", "@anonymous_csrf_exempt @post_required def contribute(request, addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested", "The chosen filter field is combined with the ``base`` queryset using the ``key``", "ugettext_lazy as _lazy import waffle from mobility.decorators import mobilized, mobile_template import amo from", "import shared_url from amo.utils import randslice, sorted_groupby, urlparams from amo.models import manual_order from", "then we've not # got a matching contribution. 
lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) |", "return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage',", "categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new(): # Remora uses persona.author", "{'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result} # For mobile, bounce back to", "purchase failed on uuid: %s' % uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No", "the given field.\"\"\" filter = self._filter(field) & self.base_queryset order = getattr(self, 'order_%s' %", "= get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if 'src' in request.GET: contribution_src =", "specific collections. for feature in features: key = (feature.id, lang) if key not", "up the add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for", "# promos. promo_dict = {} for feature_id, v in groups: promo = v.next()", "the subclass as a sequence of (key, title) pairs. The key is used", "%s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status = 'error' log.debug('Paypal", "(addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon,", "then pick 3 at random. qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular", "l10n: {0} is the addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default", "uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was", "the page for speed. 
The bottom # does a lot more queries we", "jingo import jinja2 import commonware.log import session_csrf from tower import ugettext as _,", "__init__(self, request, base, key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if", ".with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return", "listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False)", "webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested' source", "_filter(self, field): return getattr(self, 'filter_%s' % field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG)", "@addon_view def privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon':", "appropriately. 
return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url)", "title def all(self): \"\"\"Get a full mapping of {option: queryset}.\"\"\" return dict((field, self.filter(field))", "% (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status = 'error' log.debug('Paypal returned:", "= paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug,", "persona.author if not request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags,", "@addon_view def license(request, addon, version=None): if version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)", "by user: %s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source', '')", "request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(),", "source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon", "django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base as caching import jingo", "addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not", "http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60", "'addons/impala/details.html', ctx) 
@mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs,", "= Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src':", "exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested,", "download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies':", "= sorted_groupby(promos, 'collection_feature_id') # We key by feature_id and locale, so we can", "featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request): #", "add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a in", "= lambda xs: random.shuffle(xs) or xs[:3] # Get some featured add-ons with randomness.", "= '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or request.is_ajax(): # If", "request.GET. \"\"\" def __init__(self, request, base, key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict", "some featured add-ons with randomness. 
featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular", "'version': version}) # TODO(andym): remove this once we figure out how to process", "'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp = addon.is_webapp() contrib_type =", "and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status': status, 'result':", "'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just returns the top", "_, ugettext_lazy as _lazy import waffle from mobility.decorators import mobilized, mobile_template import amo", "\"\"\"Get the (option, title) pair we want according to the request.\"\"\" if key", "IPN contributions. If both fail, then we've not # got a matching contribution.", "a display_username. data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: # tags dev_tags,", "is used in GET parameters and the title can be used in the", "data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat':", "You have to define ``opts`` on the subclass as a sequence of (key,", "= v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv = {} #", "l10n: {0} is the addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval =", "have a paykey and the JS can cope appropriately. 
return http.HttpResponse(json.dumps({'url': url, 'paykey':", "shared_url from amo.utils import randslice, sorted_groupby, urlparams from amo.models import manual_order from amo", "= None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status", "can favor locale specific # promos. promo_dict = {} for feature_id, v in", "(key, title) pairs. The key is used in GET parameters and the title", "'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model == Addon: return", "addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon, page):", "django.utils.translation import trans_real as translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt", "django.conf import settings from django.db.models import Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect", "recommended_for__addon=addon)[:6] # Popular collections this addon is part of. collections = Collection.objects.listed().filter( addons=addon,", "}) @cache_control(max_age=60 * 60 * 24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id)", "a version and be valid for this app. 
if addon.type in request.APP.types: if", "'src': src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove this once we figure", "'suggested' source = request.POST.get('source', '') comment = request.POST.get('comment', '') amount = { 'suggested':", "mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view", "paykey for addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s'", "current version is incompatible with this app, redirect. comp_apps = addon.compatible_apps if comp_apps", "we figure out how to process for # anonymous users. For now we", "try: pattern = 'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug", "and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version", "recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon is part of. 
collections", "result = 'ERROR' status = 'error' log.debug('Paypal returned: %s for paykey: %s' %", "not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True)", "user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), })", "ctx = { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(),", "@addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if", "uuid_) log.debug('Got paykey for addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) url", "'json' or request.is_ajax(): # If there was an error getting the paykey, then", "c = promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] = c return rv", "app that supports this type. try: new_app = [a for a in amo.APP_USAGE", "the JS can cope appropriately. 
return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status':", "== amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not addon.current_version: raise http.Http404 return extension_detail(request,", "Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import paypal from reviews.forms import ReviewForm", "return jingo.render(request, template, data) class BaseFilter(object): \"\"\" Filters help generate querysets for add-on", "version=request.GET['version'])[0] else: version = addon.current_version if 'src' in request.GET: contribution_src = src =", "_lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def", "except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an error checking purchase", "request, key, default): \"\"\"Get the (option, title) pair we want according to the", "response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] = 'allow' return response", "if request.method == \"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return", "preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern", "may, or may not have come through. Which means looking for # a", "= key self.model = model self.field, self.title = self.options(self.request, key, default) self.qs =", "be used in the view. The chosen filter field is combined with the", "the ``key`` found in request.GET. 
``default`` should be a key in ``opts`` that's", "key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else", "import cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base as", "} # Download src and contribution_src are different. src, contribution_src = page_srcs.get(page) return", "users.views import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on", "was a pre-approval, it's completed already, we'll # double check this with PayPal,", "paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed for uuid: %s' % uuid_) contrib.type", "with PayPal, just to be sure nothing went wrong. if status == 'COMPLETED':", "log.debug('Storing contrib for uuid: %s' % uuid_) # If this was a pre-approval,", "log.error('Error getting paykey, contribution for addon: %s' % addon.pk, exc_info=True) if paykey: contrib", "def __init__(self, request, base, key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras)", "@login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting purchase of addon:", "= dict(self.extras) if hasattr(self, 'extras') else {} self.request = request self.base_queryset = base", "popular add-ons, then pick 3 at random. 
qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id',", "content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out how to get", "def filter(self, field): sorts = {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads',", "title = self.opts_dict[opt] else: title = self.extras_dict[opt] return opt, title def all(self): \"\"\"Get", "'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove", "else: # Redirect to an app that supports this type. try: new_app =", "amount = addon.premium.get_price() source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0}", "tier.currency paykey, status, error = '', '', '' preapproval = None if waffle.flag_is_active(request,", "nothing good is found in request.GET. \"\"\" def __init__(self, request, base, key, default,", "self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False)", "url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') # This is the non-Ajax", "status}), content_type='application/json') # This is the non-Ajax fallback. if status != 'COMPLETED': return", ".decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons')", "%s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status =", "once we figure out logged out flow. 
@csrf_exempt @login_required @addon_view @can_be_purchased @write def", "import caching.base as caching import jingo import jinja2 import commonware.log import session_csrf from", "with randomness. featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons, then pick", "return self.request.APP == amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if not addon.eula: return", "not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to get shoved", "'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status': status,", "else: prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def", "email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error:", "categories = addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else:", "data) from users.views import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request,", "import Version from .forms import ContributionForm from .models import Addon, Persona, FrozenAddon from", "Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def", "(request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict): opt = request.GET[key] else: opt =", "# double check this with PayPal, 
just to be sure nothing went wrong.", "'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail)", "{'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60 * 24) def persona_redirect(request, persona_id):", "def all(self): \"\"\"Get a full mapping of {option: queryset}.\"\"\" return dict((field, self.filter(field)) for", "= addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting clicks", "% persona.author if not request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags':", "= AbuseForm(request.POST or None, request=request) if request.method == \"POST\" and form.is_valid(): send_abuse_report(request, addon,", "figure out logged out flow. @csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon,", "''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out how to", "that uses elasticsearch.\"\"\" def __init__(self, request, base, key, default): super(ESBaseFilter, self).__init__(request, base, key,", "or may not have come through. Which means looking for # a for", "(addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status = 'error' log.debug('Paypal returned: %s", "can, we favor locale specific collections. for feature in features: key = (feature.id,", "look it up. 
form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier() if", "= self.options(self.request, key, default) self.qs = self.filter(self.field) def options(self, request, key, default): \"\"\"Get", "field): sorts = {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users',", "'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out how", "Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s'", "uuid: %s' % uuid_) # The IPN may, or may not have come", "may not have come through. Which means looking for # a for pre", "if status == 'cancel': log.info('User cancelled contribution: %s' % uuid) else: log.info('User completed", "http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): # Apps", "= list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP,", "queryset using the ``key`` found in request.GET. ``default`` should be a key in", "'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return", "we are concentrating on logged in users. 
@login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required", "reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request): from discovery.views import", "== 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased using pre-approval') log.debug('Status", "versions.models import Version from .forms import ContributionForm from .models import Addon, Persona, FrozenAddon", "we can, we favor locale specific collections. for feature in features: key =", "getting the paykey') log.error('Error getting paykey, purchase of addon: %s' % addon.pk, exc_info=True)", "'PURCHASEFAIL', 'There was an error checking purchase state') log.error('Check purchase paypal addon: %s,", "there was an error getting the paykey, then JSON will # not have", "if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not addon.current_version: raise http.Http404", "return share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request, addon, version=None): if version is", "@addon_view def paypal_result(request, addon, status): uuid = request.GET.get('uuid') if not uuid: raise http.Http404()", "user=request.amo_user) log.debug('Storing contrib for uuid: %s' % uuid_) # If this was a", "using the ``key`` found in request.GET. ``default`` should be a key in ``opts``", "redirecting clicks on \"Other add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id", "check this with PayPal, just to be sure nothing went wrong. if status", "of the page for speed. 
The bottom # does a lot more queries", "= request.amo_user.get_preapproval() paykey, error, status = '', '', '' try: paykey, status =", "import send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import", "error checking purchase state') log.error('Check purchase paypal addon: %s, user: %s, paykey: %s'", "import jinja2 import commonware.log import session_csrf from tower import ugettext as _, ugettext_lazy", "content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon, status): uuid = request.GET.get('uuid') if", "src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src,", "import manual_order from amo import urlresolvers from amo.urlresolvers import reverse from abuse.models import", "self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return", "'', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out", "'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned error') raise", "name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD. 
amount, currency =", "addon)) # TODO(andym): again, remove this once we figure out logged out flow.", "= amo.CONTRIB_PURCHASE else: # In this case PayPal disagreed, we should not be", "contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting", "else: if not addon.current_version: raise http.Http404 return extension_detail(request, addon) else: # Redirect to", "application__id=request.APP.id) ctx = { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags':", "# a for pre or post IPN contributions. If both fail, then we've", "from amo.models import manual_order from amo import urlresolvers from amo.urlresolvers import reverse from", "= '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url':", "self.opts_dict[opt] else: title = self.extras_dict[opt] return opt, title def all(self): \"\"\"Get a full", "ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request,", "self.options(self.request, key, default) self.qs = self.filter(self.field) def options(self, request, key, default): \"\"\"Get the", "filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness')", "front page. 
c = promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] = c", "& self.base_queryset order = getattr(self, 'order_%s' % field, None) if order: return order(filter)", "Fail', 'PURCHASEFAIL', 'There was an error checking purchase state') log.error('Check purchase paypal addon:", "persona_detail(request, addon, template=None): \"\"\"Details page for Personas.\"\"\" if not addon.is_public(): raise http.Http404 persona", "both fail, then we've not # got a matching contribution. lookup = (Q(uuid=uuid_,", "present for uuid: %s' % uuid_) log.debug('Got paykey for addon: %s by user:", "rand = lambda xs: random.shuffle(xs) or xs[:3] # Get some featured add-ons with", "contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for", "paykey, error, status = '', '', '' try: paykey, status = paypal.get_paykey( dict(amount=amount,", "def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return", "\"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return", "% uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for uuid: %s'", "'addons/paypal_start.html', data) from users.views import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def", "version = get_list_or_404(qs, version=version)[0] else: version = addon.current_version if not (version and version.license):", "as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 
'There was an error getting", "raise http.Http404() if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0]", "files__id=file_id) else: version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view", "page, 'src': src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove this once we", "Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data)", "return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details page for Personas.\"\"\" if", "Fail', 'PURCHASEFAIL', 'Checking purchase state returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase", "for add-on listings. You have to define ``opts`` on the subclass as a", "return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon: return", "settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to get shoved into solitude. Temporary. form", "user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request,", "for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval()", "for speed. The bottom # does a lot more queries we don't want", "# Addon recommendations. 
recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon is", "http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response", "# TODO(andym): Figure out how to get this in the addon authors #", "def _category_personas(qs, limit): f = lambda: randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key()", "caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details page for Personas.\"\"\" if not", "lang) if key not in promo_dict: key = (feature.id, '') if key not", "from operator import attrgetter from django import http from django.conf import settings from", "'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s' % uuid_) #", "= 'error' log.debug('Paypal returned: %s for paykey: %s' % (result, con.paykey[:10])) if result", "self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features)", "get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk,", "flat=True)[:10]) popular = rand(qs) # Do one query and split up the add-ons.", "django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base", "anonymous_csrf_exempt from sharing.views import share as share_redirect from stats.models import Contribution from translations.query", "this addon is part of. 
collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = {", "ReviewForm from reviews.models import Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views", "purchase(request, addon): log.debug('Starting purchase of addon: %s by user: %s' % (addon.pk, request.amo_user.pk))", "+ popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a in addons if a.id in", "field is combined with the ``base`` queryset using the ``key`` found in request.GET.", "{'addon': addon, 'page': page, 'src': src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove", "http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon, page): if addon.is_persona():", "addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user)", "category_personas = _category_personas(qs, limit=6) else: category_personas = None data = { 'addon': addon,", "so we can favor locale specific # promos. promo_dict = {} for feature_id,", "Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request): # Add-ons. 
base =", "from versions.models import Version from .forms import ContributionForm from .models import Addon, Persona,", "csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base as caching import jingo import jinja2", "'')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated():", "if 'src' in request.GET: contribution_src = src = request.GET['src'] else: page_srcs = {", "for paykey: %s' % (result, con.paykey[:10])) if result == 'COMPLETED' and con.type ==", "'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request): # Shuffle the list and get", "def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return", "Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just returns", "request.GET.get('result_type') == 'json' or request.is_ajax(): # If there was an error getting the", "nothing went wrong. 
if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A", "version): raise http.Http404 return promos(request, 'home', version, platform) class CollectionPromoBox(object): def __init__(self, request):", "django.views.decorators.vary import vary_on_headers import caching.base as caching import jingo import jinja2 import commonware.log", "%s' % uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for uuid:", "source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' %", "currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as", "name, paypal_id = addon.name, addon.paypal_id # l10n: {0} is the addon name contrib_for", "@addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data = {'addon':", "addon}) @addon_view def developers(request, addon, page): if addon.is_persona(): raise http.Http404() if 'version' in", "popular}) def homepage_promos(request): from discovery.views import promos version, platform = request.GET.get('version'), request.GET.get('platform') if", "found in request.GET. ``default`` should be a key in ``opts`` that's used if", "with this app, redirect. 
comp_apps = addon.compatible_apps if comp_apps and request.APP not in", "uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else: # In this case PayPal", "== 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}),", "context = {'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result} # For mobile, bounce", "60 * 24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id) to = reverse('addons.detail',", "a key in ``opts`` that's used if nothing good is found in request.GET.", "addon.premium.get_price(), 'USD' # If tier is specified, then let's look it up. form", "on uuid: %s' % uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present", "uuid: %s' % uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for", "\"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request, addon, version=None): if", "(addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or", "sorts = {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating':", "import AbuseForm from amo.helpers import shared_url from amo.utils import randslice, sorted_groupby, urlparams from", "\"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self, request, base, key, default): super(ESBaseFilter, self).__init__(request, base,", "popular = rand(qs) # Do one query and split up the add-ons. 
addons", "addon): \"\"\"Extensions details page.\"\"\" # If current version is incompatible with this app,", "opt = default if opt in self.opts_dict: title = self.opts_dict[opt] else: title =", "pre-approval') log.debug('Status is completed for uuid: %s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED':", "and (request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict): opt = request.GET[key] else: opt", "def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that", "default) def filter(self, field): sorts = {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular':", "by feature_id and locale, so we can favor locale specific # promos. promo_dict", "% ('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error:", "'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new(): # Remora uses", "don't deserve AMO detail pages. 
raise http.Http404 # addon needs to have a", "= promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] = c return rv def", "addon.name, addon.summary) @addon_view def license(request, addon, version=None): if version is not None: qs", "def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return", "{option: queryset}.\"\"\" return dict((field, self.filter(field)) for field in dict(self.opts)) def filter(self, field): \"\"\"Get", "= jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response @login_required @addon_view @can_be_purchased @has_purchased", "version = get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon,", "share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request, addon,", "return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 *", "options(self, request, key, default): \"\"\"Get the (option, title) pair we want according to", "groups = sorted_groupby(promos, 'collection_feature_id') # We key by feature_id and locale, so we", "uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting", "Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') #", "don't want on the initial page load. 
if request.is_ajax(): # Other add-ons/apps from", "field in dict(self.opts)) def filter(self, field): \"\"\"Get the queryset for the given field.\"\"\"", "request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_,", "lame for performance. Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections.", "pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_,", "has_not_purchased) from mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view =", "rv = {} # If we can, we favor locale specific collections. for", "= Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons, then pick 3 at random.", "key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request): from discovery.views", "CollectionPromoBox(object): def __init__(self, request): self.request = request def features(self): return CollectionFeature.objects.all() def collections(self):", "@csrf_exempt @addon_view def paypal_result(request, addon, status): uuid = request.GET.get('uuid') if not uuid: raise", "dict(addon=addon, version=version)) def license_redirect(request, version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt", "'Purchase Fail', 'PURCHASEFAIL', 'There was an error checking purchase state') log.error('Check purchase paypal", "in request.GET: contribution_src = src = request.GET['src'] else: page_srcs = { 'developers': ('developers',", ".transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 
'collection_feature_id') # We key by feature_id and locale, so", "http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def", "amo import messages from amo.decorators import login_required, post_required, write from amo.forms import AbuseForm", "str(error), 'status': status}), content_type='application/json') # This is the non-Ajax fallback. if status !=", "get satisfaction only supports en-US. lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US'", "fallback. if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon))", "%s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else: # In this case PayPal disagreed,", "if not (version and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def", "request.GET.get('uuid') if not uuid: raise http.Http404() if status == 'cancel': log.info('User cancelled contribution:", "= addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if 'src' in", "key is used in GET parameters and the title can be used in", "page for Personas.\"\"\" if not addon.is_public(): raise http.Http404 persona = addon.persona # this", "paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result", "jinja2 import commonware.log import session_csrf from tower import ugettext as _, ugettext_lazy as", "amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = 
'%s?paykey=%s'", "if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return jingo.render(request, 'addons/eula.html',", "= promo rv = {} # If we can, we favor locale specific", "commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator", "Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as share_redirect", "addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data)", "c.addons.all() & Addon.objects.public() rv[feature] = c return rv def __nonzero__(self): return self.request.APP ==", "in self.opts_dict: title = self.opts_dict[opt] else: title = self.extras_dict[opt] return opt, title def", "'updated': '-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts", "version and be valid for this app. if addon.type in request.APP.types: if addon.type", "we don't want on the initial page load. if request.is_ajax(): # Other add-ons/apps", "category_personas = None data = { 'addon': addon, 'persona': persona, 'categories': categories, 'author_personas':", "have come through. 
Which means looking for # a for pre or post", "preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL',", "addon, addon.name, addon.summary) @addon_view def license(request, addon, version=None): if version is not None:", "class BaseFilter(object): \"\"\" Filters help generate querysets for add-on listings. You have to", "= (feature.id, lang) if key not in promo_dict: key = (feature.id, '') if", "= addon.compatible_apps if comp_apps and request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app", "permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form = AbuseForm(request.POST or None, request=request) if", "is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version =", "# Apps don't deserve AMO detail pages. raise http.Http404 # addon needs to", "hasattr(self, 'extras') else {} self.request = request self.base_queryset = base self.key = key", "for pre or post IPN contributions. 
If both fail, then we've not #", "self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP,", "addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting clicks on \"Other add-ons by author\".\"\"\"", "return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid =", "Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src',", "just returns the top half of the page for speed. The bottom #", "featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas =", "[a for a in amo.APP_USAGE if addon.type in a.types][0] except IndexError: raise http.Http404", "# The IPN may, or may not have come through. Which means looking", "return order(filter) return filter def _filter(self, field): return getattr(self, 'filter_%s' % field)() def", "data) class BaseFilter(object): \"\"\" Filters help generate querysets for add-on listings. You have", "GET parameters and the title can be used in the view. 
The chosen", "paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase", "paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download':", "django.db.models import Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real", "addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error", "in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version", "file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else:", "'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon,", "chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError", "addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def", "def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id) to = reverse('addons.detail', args=[persona.addon.slug]) return http.HttpResponsePermanentRedirect(to)", "import uuid from operator import attrgetter from django import http from django.conf import", "purchase_complete(request, addon, 
status): result = '' if status == 'complete': uuid_ = request.GET.get('uuid')", "the add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a", "def _filter(self, field): return getattr(self, 'filter_%s' % field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP,", "CollectionFeature.objects.all() def collections(self): features = self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='') |", "version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version =", "page load. if request.is_ajax(): # Other add-ons/apps from the same author(s). ctx['author_addons'] =", "# Other add-ons/apps from the same author(s). ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html',", "import amo from amo import messages from amo.decorators import login_required, post_required, write from", "if nothing good is found in request.GET. 
\"\"\" def __init__(self, request, base, key,", "import session_csrf from tower import ugettext as _, ugettext_lazy as _lazy import waffle", "if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): # Apps don't", "('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request): # Add-ons.", "if request.GET.get('result_type') == 'json' or request.is_ajax(): # If there was an error getting", "self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {} self.request = request self.base_queryset =", "if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas = None", "status = 'error' log.debug('Paypal returned: %s for paykey: %s' % (result, con.paykey[:10])) if", "addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid),", "Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed)", "uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an error checking purchase state') log.error('Check purchase", "= src = request.GET['src'] else: page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install',", "pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form = AbuseForm(request.POST or", "discovery.views import promos version, platform = request.GET.get('version'), request.GET.get('platform') if not (platform or version):", "paykey, status, error = '', '', '' 
preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth')", "random. qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) # Do", "preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern", "app. if addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else:", "as a sequence of (key, title) pairs. The key is used in GET", "('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request): # Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION)", "amo import urlresolvers from amo.urlresolvers import reverse from abuse.models import send_abuse_report from bandwagon.models", "application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18]", "= '', '', '' preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval", "'extras') else {} self.request = request self.base_queryset = base self.key = key self.model", "@cache_control(max_age=60 * 60 * 24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id) to", "this once we figure out logged out flow. 
@csrf_exempt @login_required @addon_view @can_be_purchased @write", "paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def", "__nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if not addon.eula:", "@addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested')", "}.get(contrib_type, '') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going", "else: # In this case PayPal disagreed, we should not be trusting #", "comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json'", "For mobile, bounce back to the details page. if request.MOBILE: url = urlparams(shared_url('detail',", "addon.slug, permanent=True) # get satisfaction only supports en-US. 
lang = translation.to_locale(translation.get_language()) addon.has_satisfaction =", "request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login return _login(request, data=data, template='addons/paypal_start.html',", "error getting the paykey') log.error('Error getting paykey, purchase of addon: %s' % addon.pk,", "def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP,", "paykey, 'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon,", "request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request, *args, **kwargs) try: target_id = int(redirect_id)", "return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html',", "if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request,", "self.filter(self.field) def options(self, request, key, default): \"\"\"Get the (option, title) pair we want", "version=version)[0] else: version = addon.current_version if not (version and version.license): raise http.Http404 return", "\"\"\"Get a full mapping of {option: queryset}.\"\"\" return dict((field, self.filter(field)) for field in", "response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data =", "version, platform) class CollectionPromoBox(object): def __init__(self, request): self.request = request def features(self): return", "The key is used in GET parameters and the title can be used", "% uuid_) 
if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed for uuid: %s'", "webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid,", "return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon, page): if", "returned: %s for paykey: %s' % (result, con.paykey[:10])) if result == 'COMPLETED' and", "contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD. amount, currency = addon.premium.get_price(),", "paykey) if request.GET.get('result_type') == 'json' or request.is_ajax(): # If there was an error", "key in request.GET and (request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict): opt =", "if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return", "addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an error checking purchase state') log.error('Check", "@can_be_purchased def purchase_error(request, addon): data = {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html',", "from mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid)", "to see public add-ons on the front page. c = promo_dict[key].collection c.public_addons =", "be valid for this app. if addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA:", "addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def privacy(request, addon): if", "double check this with PayPal, just to be sure nothing went wrong. 
if", "not persona.is_new(): # Remora uses persona.author despite there being a display_username. data['author_gallery'] =", "random from urlparse import urlparse import uuid from operator import attrgetter from django", "source = request.POST.get('source', '') comment = request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount,", "categories categories = addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6)", "if hasattr(self, 'extras') else {} self.request = request self.base_queryset = base self.key =", "amo from amo import messages from amo.decorators import login_required, post_required, write from amo.forms", "worry. log.error('Check purchase failed on uuid: %s' % uuid_) status = 'NOT-COMPLETED' contrib.save()", "returns the top half of the page for speed. The bottom # does", "paykey') log.error('Error getting paykey, purchase of addon: %s' % addon.pk, exc_info=True) if paykey:", "addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting clicks on \"Other add-ons by", "addon.premium.get_price() source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the", "IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail',", "Figure out how to get this in the addon authors # locale, rather", "'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3],", "In this case PayPal disagreed, we should not be trusting # what get_paykey", 
"if request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error),", "== 'json' or request.is_ajax(): # If there was an error getting the paykey,", "# Redirect to an app that supports this type. try: new_app = [a", "app, redirect. comp_apps = addon.compatible_apps if comp_apps and request.APP not in comp_apps: prefixer", "promo_dict[key] = promo rv = {} # If we can, we favor locale", "ID.') return decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted:", "form.is_valid(): tier = form.get_tier() if tier: amount, currency = tier.price, tier.currency paykey, status,", "except IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse(", "self.opts_dict or request.GET[key] in self.extras_dict): opt = request.GET[key] else: opt = default if", "chosen filter field is combined with the ``base`` queryset using the ``key`` found", "'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new',", "self.base_queryset order = getattr(self, 'order_%s' % field, None) if order: return order(filter) return", "Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG,", "if key not in promo_dict: key = (feature.id, '') if key not in", "@csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon, status): result = '' if", "addon.is_public(): raise http.Http404 persona = addon.persona # this persona's categories categories = 
addon.categories.filter(application=request.APP.id)", "filter def _filter(self, field): return getattr(self, 'filter_%s' % field)() def filter_featured(self): ids =", "redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only supports en-US. lang = translation.to_locale(translation.get_language()) addon.has_satisfaction", "Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness, 'personas':", "urlparse import urlparse import uuid from operator import attrgetter from django import http", "request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request,", "reviews.forms import ReviewForm from reviews.models import Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt", "% uuid_) # The IPN may, or may not have come through. Which", "from django.utils.translation import trans_real as translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf import", "for feature_id, v in groups: promo = v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key]", "initial page load. if request.is_ajax(): # Other add-ons/apps from the same author(s). ctx['author_addons']", "% (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest()", "solitude. Temporary. form = ContributionForm({'amount': amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.',", "and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug = addon.slug if", "not have a paykey and the JS can cope appropriately. 
return http.HttpResponse(json.dumps({'url': url,", "groups: promo = v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv =", "said. Which is a worry. log.error('Check purchase failed on uuid: %s' % uuid_)", "user: %s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source', '') uuid_", ".filter(type=amo.ADDON_EXTENSION)) featured = [a for a in addons if a.id in featured] popular", "jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f = lambda: randslice(qs, limit=limit) key", "from django.views.decorators.vary import vary_on_headers import caching.base as caching import jingo import jinja2 import", "\"\"\" Filters help generate querysets for add-on listings. You have to define ``opts``", "% uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] = 'allow'", "urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if", "query and split up the add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured", "PayPal, just to be sure nothing went wrong. if status == 'COMPLETED': paypal.paypal_log_cef(request,", "Contribution from translations.query import order_by_translation from versions.models import Version from .forms import ContributionForm", "# Get some featured add-ons with randomness. featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get", "addon, status): uuid = request.GET.get('uuid') if not uuid: raise http.Http404() if status ==", "be a key in ``opts`` that's used if nothing good is found in", "of {0}').format(jinja2.escape(addon.name)) # Default is USD. 
amount, currency = addon.premium.get_price(), 'USD' # If", "return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else:", "collections}) @mobilized(home) def home(request): # Shuffle the list and get 3 items. rand", "if addon.type in a.types][0] except IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app", "new_app = [a for a in amo.APP_USAGE if addon.type in a.types][0] except IndexError:", "the title can be used in the view. The chosen filter field is", "('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src and contribution_src are different.", "'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting paykey, purchase of", "version=None): if version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0]", "'', '' preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval()", "else: log.info('User completed contribution: %s' % uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon,", "uuid: raise http.Http404() if status == 'cancel': log.info('User cancelled contribution: %s' % uuid)", "'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True)", "c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required", "paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, 
source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment,", "= request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s' % uuid_) # The IPN", "request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp =", "prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request,", "are different. src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page,", "= None data = { 'addon': addon, 'persona': persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3],", "the request.\"\"\" if key in request.GET and (request.GET[key] in self.opts_dict or request.GET[key] in", "or None, request=request) if request.method == \"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request,", "featured add-ons with randomness. 
featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons,", "addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a in addons", "an error getting the paykey') log.error('Error getting paykey, contribution for addon: %s' %", "Version from .forms import ContributionForm from .models import Addon, Persona, FrozenAddon from .decorators", "= self.opts_dict[opt] else: title = self.extras_dict[opt] return opt, title def all(self): \"\"\"Get a", "of addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source,", "%s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source', '') uuid_ =", "@addon_view def eula(request, addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version", "locale specific # promos. promo_dict = {} for feature_id, v in groups: promo", "Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real as translation", "Other add-ons/apps from the same author(s). ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx)", "page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src, 'contribution_src': contribution_src, 'version':", "going to get shoved into solitude. Temporary. form = ContributionForm({'amount': amount}) if not", "sorted([a for a in addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request,", "persona.is_new(): # Remora uses persona.author despite there being a display_username. 
data['author_gallery'] = settings.PERSONAS_USER_ROOT", "opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new", "return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only supports en-US. lang = translation.to_locale(translation.get_language())", "= addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting clicks on \"Other add-ons", "http.Http404 return promos(request, 'home', version, platform) class CollectionPromoBox(object): def __init__(self, request): self.request =", "for field in dict(self.opts)) def filter(self, field): \"\"\"Get the queryset for the given", "getting paykey, purchase of addon: %s' % addon.pk, exc_info=True) if paykey: contrib =", "see public add-ons on the front page. c = promo_dict[key].collection c.public_addons = c.addons.all()", "anonymous users. For now we are concentrating on logged in users. @login_required @addon_view", "'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure", "import promos version, platform = request.GET.get('version'), request.GET.get('platform') if not (platform or version): raise", "an error getting the paykey') log.error('Error getting paykey, purchase of addon: %s' %", "for uuid: %s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed", "session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as share_redirect from stats.models import", "deserve AMO detail pages. raise http.Http404 # addon needs to have a version", "rand(qs) # Do one query and split up the add-ons. 
addons = (Addon.objects.filter(id__in=featured", "sharing.views import share as share_redirect from stats.models import Contribution from translations.query import order_by_translation", "class CollectionPromoBox(object): def __init__(self, request): self.request = request def features(self): return CollectionFeature.objects.all() def", "= addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested' source =", "'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased @anonymous_csrf", "if form.is_valid(): tier = form.get_tier() if tier: amount, currency = tier.price, tier.currency paykey,", "%s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG,", "except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was an", "request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s' % uuid_) # The IPN may,", "status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for uuid: %s' % uuid_)", "not (platform or version): raise http.Http404 return promos(request, 'home', version, platform) class CollectionPromoBox(object):", "request.POST.get('source', '') comment = request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount',", "qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) # Do one", "django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real as translation from django.views.decorators.cache", "base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for 
performance. Kill it with ES.", "{'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request,", "page. if request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response", "promo = v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv = {}", "addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp():", "for a in amo.APP_USAGE if addon.type in a.types][0] except IndexError: raise http.Http404 else:", "(Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon:", "in request.GET. ``default`` should be a key in ``opts`` that's used if nothing", "'ERROR' status = 'error' log.debug('Paypal returned: %s for paykey: %s' % (result, con.paykey[:10]))", "'abuse_form': form, }) @cache_control(max_age=60 * 60 * 24) def persona_redirect(request, persona_id): persona =", "addon, 'page': page, 'src': src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove this", "= getattr(self, 'order_%s' % field, None) if order: return order(filter) return filter def", "(lang == 'en_US' and addon.get_satisfaction_company) # Addon recommendations. 
recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] #", "getattr(self, 'filter_%s' % field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids,", "amo.CONTRIB_PURCHASE else: # In this case PayPal disagreed, we should not be trusting", "add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if", "addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting clicks on \"Other add-ons by author\".\"\"\" @functools.wraps(f) def", "state') log.error('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk,", "different. src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src':", "(u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id #", "= request.POST.get('source', '') comment = request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount, 'onetime':", "clicks on \"Other add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id =", "{'addon': addon}) @addon_view def developers(request, addon, page): if addon.is_persona(): raise http.Http404() if 'version'", "details.html just returns the top half of the page for speed. 
The bottom", "= 'allow' return response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download =", "addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned error') raise except: paypal.paypal_log_cef(request,", "def filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def", "'-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')),", "import paypal from reviews.forms import ReviewForm from reviews.models import Review, GroupedRating from session_csrf", "= {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request,", "supports en-US. lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company) #", "{'addon': addon, 'version': version}) @addon_view def privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path())", "return promos(request, 'home', version, platform) class CollectionPromoBox(object): def __init__(self, request): self.request = request", "'') }.get(contrib_type, '') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all", "import functools import hashlib import json import random from urlparse import urlparse import", "'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting paykey,", "= (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') # We key by feature_id", "raise http.Http404 else: 
prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug]))", "If this was a pre-approval, it's completed already, we'll # double check this", "lot more queries we don't want on the initial page load. if request.is_ajax():", "# get satisfaction only supports en-US. lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang ==", "in self.extras_dict): opt = request.GET[key] else: opt = default if opt in self.opts_dict:", "import order_by_translation from versions.models import Version from .forms import ContributionForm from .models import", "return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self, request, base,", "dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary) @addon_view", "self.qs = self.filter(self.field) def options(self, request, key, default): \"\"\"Get the (option, title) pair", "def purchase_complete(request, addon, status): result = '' if status == 'complete': uuid_ =", "import urlparse import uuid from operator import attrgetter from django import http from", "if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon,", "through. 
Which means looking for # a for pre or post IPN contributions.", "return rv def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def eula(request, addon, file_id=None):", "request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if", "log.error('Error getting paykey, purchase of addon: %s' % addon.pk, exc_info=True) if paykey: contrib", "let's look it up. form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier()", "'filter_%s' % field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id')", "from users.views import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon):", "field): \"\"\"Get the queryset for the given field.\"\"\" filter = self._filter(field) & self.base_queryset", "request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if", "cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base as caching", "should be a key in ``opts`` that's used if nothing good is found", "share as share_redirect from stats.models import Contribution from translations.query import order_by_translation from versions.models", "opt in self.opts_dict: title = self.opts_dict[opt] else: title = self.extras_dict[opt] return opt, title", "be trusting # what get_paykey said. Which is a worry. 
log.error('Check purchase failed", "contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested' source = request.POST.get('source', '')", "status): uuid = request.GET.get('uuid') if not uuid: raise http.Http404() if status == 'cancel':", "version = get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if 'src' in request.GET: contribution_src", "comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only supports en-US. lang =", "sequence of (key, title) pairs. The key is used in GET parameters and", "amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness, 'personas': personas, 'src':", "import ReviewForm from reviews.models import Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from", "import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as share_redirect from stats.models import Contribution", "== 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned error')", "'error' log.debug('Paypal returned: %s for paykey: %s' % (result, con.paykey[:10])) if result ==", "this type. 
try: new_app = [a for a in amo.APP_USAGE if addon.type in", "paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error", "Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url", "import Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import paypal from reviews.forms import", "send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon':", "this app, redirect. comp_apps = addon.compatible_apps if comp_apps and request.APP not in comp_apps:", "return dict((field, self.filter(field)) for field in dict(self.opts)) def filter(self, field): \"\"\"Get the queryset", "pattern = 'apps.purchase.finished' slug = addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency,", "addon.get_satisfaction_company) # Addon recommendations. recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon", "if comp_apps and request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short", "Filters help generate querysets for add-on listings. You have to define ``opts`` on", "promos(request, 'home', version, platform) class CollectionPromoBox(object): def __init__(self, request): self.request = request def", "is all going to get shoved into solitude. Temporary. 
form = ContributionForm({'amount': amount})", "if opt in self.opts_dict: title = self.opts_dict[opt] else: title = self.extras_dict[opt] return opt,", "uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned error') raise except: paypal.paypal_log_cef(request, addon,", "with the ``base`` queryset using the ``key`` found in request.GET. ``default`` should be", "if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request,", "not # got a matching contribution. lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE))", "addon authors # locale, rather than the contributors locale. name, paypal_id = (u'%s:", "preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey", "http from django.conf import settings from django.db.models import Q from django.shortcuts import get_list_or_404,", "self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if", "a full mapping of {option: queryset}.\"\"\" return dict((field, self.filter(field)) for field in dict(self.opts))", "= paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking", "jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view", "completed contribution: %s' % uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status})", "from .decorators import (addon_view_factory, can_be_purchased, has_purchased, 
has_not_purchased) from mkt.webapps.models import Installed log =", "manual_order from amo import urlresolvers from amo.urlresolvers import reverse from abuse.models import send_abuse_report", "if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased using", "type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s,", "slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There", "return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def", "= addon.persona # this persona's categories categories = addon.categories.filter(application=request.APP.id) if categories: qs =", "paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp", "abuse.models import send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm", "found in request.GET. 
\"\"\" def __init__(self, request, base, key, default, model=Addon): self.opts_dict =", "'') comment = request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '')", "license(request, addon, version=None): if version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version =", "filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx'))", "addon.summary) @addon_view def license(request, addon, version=None): if version is not None: qs =", "'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request,", "sorted_groupby(promos, 'collection_feature_id') # We key by feature_id and locale, so we can favor", "@addon_view @can_be_purchased def purchase_error(request, addon): data = {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request,", "TODO(andym): again, remove this once we figure out logged out flow. @csrf_exempt @login_required", "the front page. 
c = promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] =", "%s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed for uuid:", "'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt']", "'home', version, platform) class CollectionPromoBox(object): def __init__(self, request): self.request = request def features(self):", "== 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s' % uuid_)", "contrib for uuid: %s' % uuid_) # The IPN may, or may not", "get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real as translation from django.views.decorators.cache import cache_control", "on the front page. c = promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature]", "ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request,", "contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or request.is_ajax():", "If current version is incompatible with this app, redirect. 
comp_apps = addon.compatible_apps if", "response['x-frame-options'] = 'allow' return response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download", "default): super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self, field): sorts = {'name': 'name_sort',", "{'featured': featured, 'popular': popular}) def homepage_promos(request): from discovery.views import promos version, platform =", "caching import jingo import jinja2 import commonware.log import session_csrf from tower import ugettext", "``base`` queryset using the ``key`` found in request.GET. ``default`` should be a key", "promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] = c return rv def __nonzero__(self):", "installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data)", "# does a lot more queries we don't want on the initial page", "import ContributionForm from .models import Addon, Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased,", "translations.query import order_by_translation from versions.models import Version from .forms import ContributionForm from .models", "the paykey, then JSON will # not have a paykey and the JS", "jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def", "addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version", "'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym):", "'homepage', 'collections': collections}) @mobilized(home) def 
home(request): # Shuffle the list and get 3", "= sorted([a for a in addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return", "users. For now we are concentrating on logged in users. @login_required @addon_view @can_be_purchased", "data = { 'addon': addon, 'persona': persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas,", "tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews':", "= addon.premium.get_price() source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is", "memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon,", "(feature.id, '') if key not in promo_dict: continue # We only want to", "= paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else 'addons'),", "version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def privacy(request,", "return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx'))", "'cancel': log.info('User cancelled contribution: %s' % uuid) else: log.info('User completed contribution: %s' %", "_lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request): #", "get_paykey said. Which is a worry. 
log.error('Check purchase failed on uuid: %s' %", "amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey':", "lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase", "and get 3 items. rand = lambda xs: random.shuffle(xs) or xs[:3] # Get", "try: result = paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail',", "redirect. comp_apps = addon.compatible_apps if comp_apps and request.APP not in comp_apps: prefixer =", "'error': str(error), 'status': status}), content_type='application/json') # This is the non-Ajax fallback. if status", "form.get_tier() if tier: amount, currency = tier.price, tier.currency paykey, status, error = '',", "tier = form.get_tier() if tier: amount, currency = tier.price, tier.currency paykey, status, error", "'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp", "http.Http404 # addon needs to have a version and be valid for this", "addon, template=None): \"\"\"Details page for Personas.\"\"\" if not addon.is_public(): raise http.Http404 persona =", "uuid) else: log.info('User completed contribution: %s' % uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon':", "else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60 *", "is_suggested = contrib_type == 'suggested' source = request.POST.get('source', '') comment = request.POST.get('comment', '')", "addon): \"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request,", "AMO detail pages. 
raise http.Http404 # addon needs to have a version and", "add-ons with randomness. featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons, then", "name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and", "contributions. If both fail, then we've not # got a matching contribution. lookup", "addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] =", "= base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html',", "return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model", "if tier: amount, currency = tier.price, tier.currency paykey, status, error = '', '',", "if request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response =", "getting paykey, contribution for addon: %s' % addon.pk, exc_info=True) if paykey: contrib =", "self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self):", "in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) #", "Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for performance. 
Kill it with", "# this persona's categories categories = addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas", "not addon.is_public(): raise http.Http404 persona = addon.persona # this persona's categories categories =", "Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon is part of. collections = Collection.objects.listed().filter(", "AbuseForm(request.POST or None, request=request) if request.method == \"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text'])", "% (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey':", "'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request): from discovery.views import promos version, platform", "__init__(self, request, base, key, default): super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self, field):", "from amo import messages from amo.decorators import login_required, post_required, write from amo.forms import", "this app. if addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon)", "request.is_ajax(): # Other add-ons/apps from the same author(s). 
ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request,", "@login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon, status): result = '' if status", "'There was an error getting the paykey') log.error('Error getting paykey, purchase of addon:", "and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request,", "else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request,", "= contrib_type == 'suggested' source = request.POST.get('source', '') comment = request.POST.get('comment', '') amount", "there being a display_username. data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: #", "not be trusting # what get_paykey said. Which is a worry. log.error('Check purchase", "a for pre or post IPN contributions. If both fail, then we've not", "form = AbuseForm(request.POST or None, request=request) if request.method == \"POST\" and form.is_valid(): send_abuse_report(request,", "key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details page for Personas.\"\"\" if not addon.is_public():", "urlresolvers from amo.urlresolvers import reverse from abuse.models import send_abuse_report from bandwagon.models import Collection,", "randslice, sorted_groupby, urlparams from amo.models import manual_order from amo import urlresolvers from amo.urlresolvers", "``opts`` that's used if nothing good is found in request.GET. 
\"\"\" def __init__(self,", "feature_id, v in groups: promo = v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] =", "http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') # This is the", "want on the initial page load. if request.is_ajax(): # Other add-ons/apps from the", "collections(self): features = self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos", "('meetthedeveloper_roadblock', 'roadblock'), } # Download src and contribution_src are different. src, contribution_src =", "'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting paykey, contribution for", "= translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company) # Addon recommendations. recommended", "# not have a paykey and the JS can cope appropriately. return http.HttpResponse(json.dumps({'url':", "from abuse.models import send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms import", "ugettext as _, ugettext_lazy as _lazy import waffle from mobility.decorators import mobilized, mobile_template", "{'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return", "(result, con.paykey[:10])) if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context =", "paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result", "contrib.type = amo.CONTRIB_PURCHASE else: # In this case PayPal disagreed, we should not", "is incompatible with this app, redirect. 
comp_apps = addon.compatible_apps if comp_apps and request.APP", "purchase state returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There", "# This is the non-Ajax fallback. if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request,", "purchase of addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount,", "# Do one query and split up the add-ons. addons = (Addon.objects.filter(id__in=featured +", "mobile, bounce back to the details page. if request.MOBILE: url = urlparams(shared_url('detail', addon),", "http.Http404 persona = addon.persona # this persona's categories categories = addon.categories.filter(application=request.APP.id) if categories:", "jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections':", "features: key = (feature.id, lang) if key not in promo_dict: key = (feature.id,", "@can_be_purchased @write def purchase_complete(request, addon, status): result = '' if status == 'complete':", "== Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx'))", "we want according to the request.\"\"\" if key in request.GET and (request.GET[key] in", "@can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting purchase of addon: %s by", "jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp = addon.is_webapp() contrib_type", "import Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) 
addon_unreviewed_view =", "default): \"\"\"Get the (option, title) pair we want according to the request.\"\"\" if", "promo rv = {} # If we can, we favor locale specific collections.", "import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons') paypal_log", "display_username. data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: # tags dev_tags, user_tags", "@addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting purchase of addon: %s", "can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal')", "request def features(self): return CollectionFeature.objects.all() def collections(self): features = self.features() lang = translation.to_language(translation.get_language())", "preapproval = request.amo_user.get_preapproval() paykey, error, status = '', '', '' try: paykey, status", "data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c =", "%s' % uuid_) # The IPN may, or may not have come through.", "if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version", "addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon,", "frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. 
collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured =", "= addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug paykey, status =", "translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company) # Addon recommendations. recommended =", "valid for this app. if addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA: return", "bottom # does a lot more queries we don't want on the initial", "locale, rather than the contributors locale. name, paypal_id = (u'%s: %s' % (addon.name,", "uuid: %s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed for", "return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version = get_object_or_404(Version, pk=version) return", "get 3 items. rand = lambda xs: random.shuffle(xs) or xs[:3] # Get some", "**kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid", "addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD. amount, currency", "def author_addon_clicked(f): \"\"\"Decorator redirecting clicks on \"Other add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request,", "ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps'", "# Popular collections this addon is part of. collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id)", "that supports this type. 
try: new_app = [a for a in amo.APP_USAGE if", "def homepage_promos(request): from discovery.views import promos version, platform = request.GET.get('version'), request.GET.get('platform') if not", "in a.types][0] except IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short", "elasticsearch.\"\"\" def __init__(self, request, base, key, default): super(ESBaseFilter, self).__init__(request, base, key, default) def", "collections. for feature in features: key = (feature.id, lang) if key not in", "(CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') # We key by feature_id and", "if addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount,", "form, }) @cache_control(max_age=60 * 60 * 24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona,", "= Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view", "data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon):", "24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id) to = reverse('addons.detail', args=[persona.addon.slug]) return", "JSON will # not have a paykey and the JS can cope appropriately.", "= request def features(self): return CollectionFeature.objects.all() def collections(self): features = self.features() lang =", "come through. Which means looking for # a for pre or post IPN", "If we can, we favor locale specific collections. 
for feature in features: key", "key, default): \"\"\"Get the (option, title) pair we want according to the request.\"\"\"", "= commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view =", "Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import Installed", "_(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval =", "'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src and contribution_src are different. src,", "'version': version}) @addon_view def privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request,", "= addon.current_version if 'src' in request.GET: contribution_src = src = request.GET['src'] else: page_srcs", "paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey')", "caching.base as caching import jingo import jinja2 import commonware.log import session_csrf from tower", "more queries we don't want on the initial page load. if request.is_ajax(): #", "part of. 
collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon': addon, 'src':", "listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular()", "'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas',", "def filter(self, field): \"\"\"Get the queryset for the given field.\"\"\" filter = self._filter(field)", "continue # We only want to see public add-ons on the front page.", "def eula(request, addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version =", "this was a pre-approval, it's completed already, we'll # double check this with", "import randslice, sorted_groupby, urlparams from amo.models import manual_order from amo import urlresolvers from", "def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return", "'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove this once we figure out how", "we should not be trusting # what get_paykey said. 
Which is a worry.", "personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness':", "'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class", "# locale, rather than the contributors locale. name, paypal_id = (u'%s: %s' %", "filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price',", "None) if not redirect_id: return f(request, *args, **kwargs) try: target_id = int(redirect_id) return", "= { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings':", "stats.models import Contribution from translations.query import order_by_translation from versions.models import Version from .forms", "if not redirect_id: return f(request, *args, **kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse(", "how to get this in the addon authors # locale, rather than the", "= (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a in addons if", "_category_personas(qs, limit): f = lambda: randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key() return", "'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just returns the top half of", "querysets for add-on listings. You have to define ``opts`` on the subclass as", "figure out how to process for # anonymous users. For now we are", "a paykey and the JS can cope appropriately. 
return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey,", "'popular': popular}) def homepage_promos(request): from discovery.views import promos version, platform = request.GET.get('version'), request.GET.get('platform')", "'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request,", "'Checking purchase state returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL',", "= addon.current_version if not (version and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon,", "from market.forms import PriceCurrencyForm import paypal from reviews.forms import ReviewForm from reviews.models import", "self.field, self.title = self.options(self.request, key, default) self.qs = self.filter(self.field) def options(self, request, key,", "request.GET.get('realurl', ''), 'status': status, 'result': result} # For mobile, bounce back to the", "request.GET[key] else: opt = default if opt in self.opts_dict: title = self.opts_dict[opt] else:", "not request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags,", "persona_detail(request, addon) else: if not addon.current_version: raise http.Http404 return extension_detail(request, addon) else: #", "user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if", "in request.GET and (request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict): opt = request.GET[key]", "title can be used in the view. 
The chosen filter field is combined", "from django.conf import settings from django.db.models import Q from django.shortcuts import get_list_or_404, get_object_or_404,", "qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if 'src'", "version}) # TODO(andym): remove this once we figure out how to process for", "# anonymous users. For now we are concentrating on logged in users. @login_required", "'featured': featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request):", "ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except", "addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting clicks on", "uuid = request.GET.get('uuid') if not uuid: raise http.Http404() if status == 'cancel': log.info('User", "else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else:", "http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With')", "filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self):", "'' preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try:", "one query and split up the 
add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION))", "try: new_app = [a for a in amo.APP_USAGE if addon.type in a.types][0] except", "in promo_dict: key = (feature.id, '') if key not in promo_dict: continue #", "contrib_type == 'suggested' source = request.POST.get('source', '') comment = request.POST.get('comment', '') amount =", "key in ``opts`` that's used if nothing good is found in request.GET. \"\"\"", "def home(request): # Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for performance.", "popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a in addons if a.id in featured]", "ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request,", "= { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } #", "addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth')", "= '', '', '' try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for,", "using pre-approval') log.debug('Status is completed for uuid: %s' % uuid_) if paypal.check_purchase(paykey) ==", "= addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas", "def purchase_error(request, addon): data = {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data)", "'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) 
@csrf_exempt @addon_view def paypal_result(request, addon, status): uuid =", "queryset}.\"\"\" return dict((field, self.filter(field)) for field in dict(self.opts)) def filter(self, field): \"\"\"Get the", "{0} is the addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is", "getting the paykey, then JSON will # not have a paykey and the", ".values_list('id', flat=True)[:10]) popular = rand(qs) # Do one query and split up the", "promo_dict = {} for feature_id, v in groups: promo = v.next() key =", "dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except", "'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details page", "= int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return", "Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas = None data = { 'addon':", "lang = translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer))", "= get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version':", "_login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\" return", "'-created', 'updated': '-last_updated', 'popular': 
'-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter):", "= urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()}", "uses elasticsearch.\"\"\" def __init__(self, request, base, key, default): super(ESBaseFilter, self).__init__(request, base, key, default)", "c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] = c return rv def __nonzero__(self): return", "def report_abuse(request, addon): form = AbuseForm(request.POST or None, request=request) if request.method == \"POST\"", "return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this", "= '' if status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for", "None, request=request) if request.method == \"POST\" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse", "% addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING,", "paypal_result(request, addon, status): uuid = request.GET.get('uuid') if not uuid: raise http.Http404() if status", "addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased using pre-approval') log.debug('Status is completed for", "default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}", "(addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id # l10n: {0} is", "author(s). 
ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] =", "pre-approval, it's completed already, we'll # double check this with PayPal, just to", "randomness. featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons, then pick 3", "@post_required def contribute(request, addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested =", "uuid_) # The IPN may, or may not have come through. Which means", "'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION", "The bottom # does a lot more queries we don't want on the", "def home(request): # Shuffle the list and get 3 items. rand = lambda", "completed for uuid: %s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is", "else {} self.request = request self.base_queryset = base self.key = key self.model =", "amo.utils import randslice, sorted_groupby, urlparams from amo.models import manual_order from amo import urlresolvers", "'There was an error checking purchase state') log.error('Check purchase paypal addon: %s, user:", "def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(),", "data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon): data = {'addon': addon, 'is_ajax': request.is_ajax()}", ".with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): 
\"\"\"BaseFilter", "request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views", "``key`` found in request.GET. ``default`` should be a key in ``opts`` that's used", "ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self, request, base, key, default): super(ESBaseFilter, self).__init__(request,", "def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request,", "'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json')", "will # not have a paykey and the JS can cope appropriately. return", "\"\"\"Get the queryset for the given field.\"\"\" filter = self._filter(field) & self.base_queryset order", "a.id in featured] popular = sorted([a for a in addons if a.id in", "addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result =", "addon) else: if not addon.current_version: raise http.Http404 return extension_detail(request, addon) else: # Redirect", ".with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self):", "not in promo_dict: continue # We only want to see public add-ons on", "list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) # Do one query and", "than the contributors locale. 
name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal)", "promo_dict: key = (feature.id, '') if key not in promo_dict: continue # We", "according to the request.\"\"\" if key in request.GET and (request.GET[key] in self.opts_dict or", "%s' % uuid_) log.debug('Got paykey for addon: %s by user: %s' % (addon.pk,", "to define ``opts`` on the subclass as a sequence of (key, title) pairs.", "only want to see public add-ons on the front page. c = promo_dict[key].collection", "status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')},", "case PayPal disagreed, we should not be trusting # what get_paykey said. Which", "!= 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again,", "% (result, con.paykey[:10])) if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context", "from translations.query import order_by_translation from versions.models import Version from .forms import ContributionForm from", "opt, title def all(self): \"\"\"Get a full mapping of {option: queryset}.\"\"\" return dict((field,", "'A user purchased using pre-approval') log.debug('Status is completed for uuid: %s' % uuid_)", "addons=addon, application__id=request.APP.id) ctx = { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'),", "= _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD. 
amount, currency = addon.premium.get_price(), 'USD'", "popular, 'featured': featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def", "= _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval", "for Personas.\"\"\" if not addon.is_public(): raise http.Http404 persona = addon.persona # this persona's", "listings. You have to define ``opts`` on the subclass as a sequence of", "to the request.\"\"\" if key in request.GET and (request.GET[key] in self.opts_dict or request.GET[key]", "TODO(andym): remove this once we figure out how to process for # anonymous", "{0} is the addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None", "'status': '', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: #", "(feature.id, lang) if key not in promo_dict: key = (feature.id, '') if key", "what get_paykey said. Which is a worry. log.error('Check purchase failed on uuid: %s'", "Addon recommendations. 
recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon is part", "purchase state') log.error('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk,", "import trans_real as translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt from", "= c.addons.all() & Addon.objects.public() rv[feature] = c return rv def __nonzero__(self): return self.request.APP", "{'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html',", "vary_on_headers import caching.base as caching import jingo import jinja2 import commonware.log import session_csrf", "'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just", ".order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) # Do one query and split up", "= Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid:", "category_personas, } if not persona.is_new(): # Remora uses persona.author despite there being a", "v in groups: promo = v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo", "return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request,", "paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased using pre-approval') log.debug('Status is completed", "key self.model = model self.field, self.title = 
self.options(self.request, key, default) self.qs = self.filter(self.field)", "We key by feature_id and locale, so we can favor locale specific #", "lookup) log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk,", "addon}, status=404) if addon.is_webapp(): # Apps don't deserve AMO detail pages. raise http.Http404", "10 popular add-ons, then pick 3 at random. qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users')", "market.forms import PriceCurrencyForm import paypal from reviews.forms import ReviewForm from reviews.models import Review,", "'', '' try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' %", "paykey, contribution for addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id,", "== Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model ==", "or version): raise http.Http404 return promos(request, 'home', version, platform) class CollectionPromoBox(object): def __init__(self,", "return persona_detail(request, addon) else: if not addon.current_version: raise http.Http404 return extension_detail(request, addon) else:", "== 'suggested' source = request.POST.get('source', '') comment = request.POST.get('comment', '') amount = {", "= 'apps.purchase.finished' slug = addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id,", "@anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax':", "amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not addon.current_version: raise http.Http404 return extension_detail(request, addon)", "= 
commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f):", "def privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon})", "'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } #", "addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested'", "import Contribution from translations.query import order_by_translation from versions.models import Version from .forms import", "hashlib import json import random from urlparse import urlparse import uuid from operator", "report_abuse(request, addon): form = AbuseForm(request.POST or None, request=request) if request.method == \"POST\" and", "public add-ons on the front page. 
c = promo_dict[key].collection c.public_addons = c.addons.all() &", "file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon':", "class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently", "return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request): from discovery.views import promos", "dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {} self.request = request self.base_queryset", "= tier.price, tier.currency paykey, status, error = '', '', '' preapproval = None", "recommendations. recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon is part of.", "qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details page for Personas.\"\"\"", "name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id =", "self._filter(field) & self.base_queryset order = getattr(self, 'order_%s' % field, None) if order: return", "data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type',", "log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]))", "redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form = AbuseForm(request.POST or None, request=request)", "user purchased using pre-approval') log.debug('Status is completed for uuid: %s' % uuid_) if", "is specified, then let's look it up. 
form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid():", "the details page. if request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon':", "prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction", "if not addon.is_public(): raise http.Http404 persona = addon.persona # this persona's categories categories", "None data = { 'addon': addon, 'persona': persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas':", "promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') # We key by", "= addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern,", "= base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured':", "'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon':", "'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def addon_detail(request,", "addon): form = AbuseForm(request.POST or None, request=request) if request.method == \"POST\" and form.is_valid():", "type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = 
base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas", "paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned error') raise except:", "used if nothing good is found in request.GET. \"\"\" def __init__(self, request, base,", "request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug = addon.slug if addon.is_webapp():", "amo.APP_USAGE if addon.type in a.types][0] except IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix()", "addon.current_version if not (version and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version))", "pair we want according to the request.\"\"\" if key in request.GET and (request.GET[key]", "developers(request, addon, page): if addon.is_persona(): raise http.Http404() if 'version' in request.GET: qs =", "purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download':", "matching contribution. 
lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup)", "from amo.utils import randslice, sorted_groupby, urlparams from amo.models import manual_order from amo import", "state returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was", "'Invalid data.', 'status': '', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if", "addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f = lambda: randslice(qs,", "request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') # This", "import messages from amo.decorators import login_required, post_required, write from amo.forms import AbuseForm from", "= [a for a in addons if a.id in featured] popular = sorted([a", "reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60", "from .models import Addon, Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased)", "locale = Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos,", "else: name, paypal_id = addon.name, addon.paypal_id # l10n: {0} is the addon name", "uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was", "(addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import Installed log = 
commonware.log.getLogger('z.addons') paypal_log =", "% field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def", "'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies':", "'collection_feature_id') # We key by feature_id and locale, so we can favor locale", "self.request = request def features(self): return CollectionFeature.objects.all() def collections(self): features = self.features() lang", "from the same author(s). ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if", "addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version = addon.current_version if not (version and", "http.Http404() if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else:", "to get shoved into solitude. Temporary. 
form = ContributionForm({'amount': amount}) if not form.is_valid():", "= urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context)", "pattern='%s.paypal' % ('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as", "for the given field.\"\"\" filter = self._filter(field) & self.base_queryset order = getattr(self, 'order_%s'", "'', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym):", "is combined with the ``base`` queryset using the ``key`` found in request.GET. ``default``", "addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon): return", "log.debug('Looking up contrib for uuid: %s' % uuid_) # The IPN may, or", "from amo import urlresolvers from amo.urlresolvers import reverse from abuse.models import send_abuse_report from", "from django import http from django.conf import settings from django.db.models import Q from", "order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self, request, base, key,", "http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" # If current", "from mobility.decorators import mobilized, mobile_template import amo from amo import messages from amo.decorators", "import urlresolvers from amo.urlresolvers import reverse from abuse.models import send_abuse_report from bandwagon.models import", "'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': 
'-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field])", "combined with the ``base`` queryset using the ``key`` found in request.GET. ``default`` should", "return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon, status): uuid = request.GET.get('uuid') if not", "extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" # If current version is incompatible with this", "dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon,", "out how to get this in the addon authors # locale, rather than", "get_object_or_404, redirect from django.utils.translation import trans_real as translation from django.views.decorators.cache import cache_control from", "addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request, addon, version=None):", "it up. form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier() if tier:", "it's completed already, we'll # double check this with PayPal, just to be", "self.model = model self.field, self.title = self.options(self.request, key, default) self.qs = self.filter(self.field) def", "request, base, key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self,", "contribution for addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id,", "a pre-approval, it's completed already, we'll # double check this with PayPal, just", "from django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real as translation from", "have a version and be valid for this app. 
if addon.type in request.APP.types:", "uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed for uuid: %s' %", "return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data", "title) pairs. The key is used in GET parameters and the title can", "# Default is USD. amount, currency = addon.premium.get_price(), 'USD' # If tier is", "# tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(),", "context) response['x-frame-options'] = 'allow' return response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon):", "def features(self): return CollectionFeature.objects.all() def collections(self): features = self.features() lang = translation.to_language(translation.get_language()) locale", "page.\"\"\" # If current version is incompatible with this app, redirect. comp_apps =", "return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f = lambda: randslice(qs, limit=limit)", "'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured',", "session_csrf from tower import ugettext as _, ugettext_lazy as _lazy import waffle from", "Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') # We key", "status, 'result': result} # For mobile, bounce back to the details page. 
if", "as translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import", "raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an error checking", "urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options']", "was an error getting the paykey') log.error('Error getting paykey, contribution for addon: %s'", "status == 'cancel': log.info('User cancelled contribution: %s' % uuid) else: log.info('User completed contribution:", "'PURCHASEFAIL', 'Checking purchase state returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail',", "manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model ==", "return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details page", "for uuid: %s' % uuid_) log.debug('Got paykey for addon: %s by user: %s'", "= (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new =", "to get this in the addon authors # locale, rather than the contributors", "the paykey') log.error('Error getting paykey, contribution for addon: %s' % addon.pk, exc_info=True) if", "addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error", "amo.helpers import shared_url from amo.utils import randslice, sorted_groupby, urlparams from amo.models import manual_order", "have to define ``opts`` on the subclass as a sequence of (key, title)", "paykey present for uuid: %s' % uuid_) 
log.debug('Got paykey for addon: %s by", "paykey, then JSON will # not have a paykey and the JS can", "We only want to see public add-ons on the front page. c =", "the ``base`` queryset using the ``key`` found in request.GET. ``default`` should be a", "'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request): # Shuffle the list", "} # details.html just returns the top half of the page for speed.", "operator import attrgetter from django import http from django.conf import settings from django.db.models", "in GET parameters and the title can be used in the view. The", "(feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv = {} # If we can, we", "remove this once we figure out how to process for # anonymous users.", "is completed for uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else: # In", "request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is", "version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon):", "suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') ==", "version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form", "be sure nothing went wrong. 
if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase',", "addon.current_version: raise http.Http404 return extension_detail(request, addon) else: # Redirect to an app that", "= Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP,", "user: %s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type')", "'en_US' and addon.get_satisfaction_company) # Addon recommendations. recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections", "base, key, default): super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self, field): sorts =", "form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid", "import mobilized, mobile_template import amo from amo import messages from amo.decorators import login_required,", "bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import paypal from reviews.forms", "base, key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras')", "the queryset for the given field.\"\"\" filter = self._filter(field) & self.base_queryset order =", "None) if order: return order(filter) return filter def _filter(self, field): return getattr(self, 'filter_%s'", "'PURCHASE', 'A user purchased using pre-approval') log.debug('Status is completed for uuid: %s' %", "was an error getting the paykey') log.error('Error getting paykey, purchase of addon: %s'", "paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = 
paypal.check_purchase(con.paykey) if result ==", "not uuid: raise http.Http404() if status == 'cancel': log.info('User cancelled contribution: %s' %", "addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting clicks on \"Other", "new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" #", "on \"Other add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select',", "addon.paypal_id # l10n: {0} is the addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name))", "now we are concentrating on logged in users. @login_required @addon_view @can_be_purchased @has_not_purchased @write", "'Purchase', 'PURCHASE', 'A user purchased using pre-approval') log.debug('Status is completed for uuid: %s'", "for uuid: %s' % uuid_) # The IPN may, or may not have", "CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import paypal from reviews.forms import ReviewForm from", "for this app. 
if addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request,", "return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return", "url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html',", "rv def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if", "source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL,", "return decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted: raise", "a.types][0] except IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return", "log.debug('Paypal returned: %s for paykey: %s' % (result, con.paykey[:10])) if result == 'COMPLETED'", "decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404", "filter = self._filter(field) & self.base_queryset order = getattr(self, 'order_%s' % field, None) if", "'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src and contribution_src are", "filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses", "'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class 
HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular',", "request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested' source = request.POST.get('source', '') comment =", "response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data", "addon, version=None): if version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs,", "of. collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon': addon, 'src': request.GET.get('src',", "import hashlib import json import random from urlparse import urlparse import uuid from", "tier: amount, currency = tier.price, tier.currency paykey, status, error = '', '', ''", "For now we are concentrating on logged in users. @login_required @addon_view @can_be_purchased @has_not_purchased", "{0}').format(jinja2.escape(addon.name)) # Default is USD. amount, currency = addon.premium.get_price(), 'USD' # If tier", "# Shuffle the list and get 3 items. 
rand = lambda xs: random.shuffle(xs)", "prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only supports en-US.", "'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') # This is the non-Ajax fallback.", "http.Http404() if status == 'cancel': log.info('User cancelled contribution: %s' % uuid) else: log.info('User", "'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status = '', '', ''", "waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug =", "'COMPLETED': log.debug('Check purchase is completed for uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE", "uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon name contrib_for = _(u'Purchase", "def filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created')", "request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] =", "this persona's categories categories = addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas =", "permanent=True) # get satisfaction only supports en-US. 
lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang", "def __init__(self, request): self.request = request def features(self): return CollectionFeature.objects.all() def collections(self): features", "| Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s, user:", "out logged out flow. @csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon, status):", "for addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount,", "if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''}),", "url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def", "[a for a in addons if a.id in featured] popular = sorted([a for", "FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import Installed log", "queryset for the given field.\"\"\" filter = self._filter(field) & self.base_queryset order = getattr(self,", "else: version = addon.current_version if not (version and version.license): raise http.Http404 return jingo.render(request,", "from django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers import", "all(self): \"\"\"Get a full mapping of {option: queryset}.\"\"\" return dict((field, self.filter(field)) for field", "status, error = '', '', '' preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and", "in the view. 
The chosen filter field is combined with the ``base`` queryset", "f(request, *args, **kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError:", "django import http from django.conf import settings from django.db.models import Q from django.shortcuts", "import attrgetter from django import http from django.conf import settings from django.db.models import", "key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv = {} # If we", "performance. Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections =", "addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas =", "'') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to", "collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just returns the top half of the", "addon is part of. collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon':", "'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src and contribution_src", "'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data) class BaseFilter(object):", "try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on", "Get some featured add-ons with randomness. 
featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10", "popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request): from", "== 'en_US' and addon.get_satisfaction_company) # Addon recommendations. recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular", "= jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] = 'allow' return response @addon_view", "featured = [a for a in addons if a.id in featured] popular =", "or xs[:3] # Get some featured add-ons with randomness. featured = Addon.featured_random(request.APP, request.LANG)[:3]", "get_list_or_404(qs, version=version)[0] else: version = addon.current_version if not (version and version.license): raise http.Http404", "anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as share_redirect from stats.models import Contribution from", "want according to the request.\"\"\" if key in request.GET and (request.GET[key] in self.opts_dict", "'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data) class BaseFilter(object): \"\"\" Filters help generate", "\"\"\"Decorator redirecting clicks on \"Other add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs):", "'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new(): # Remora uses persona.author despite", "@has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting purchase of addon: %s by user:", ".filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') # We key by feature_id and locale,", "if not uuid: raise http.Http404() if status == 'cancel': 
log.info('User cancelled contribution: %s'", "def paypal_result(request, addon, status): uuid = request.GET.get('uuid') if not uuid: raise http.Http404() if", "{'addon': addon}) def _category_personas(qs, limit): f = lambda: randslice(qs, limit=limit) key = 'cat-personas:'", "return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return", "same author(s). ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder']", "(addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() #", "out flow. @csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon, status): result =", "def __init__(self, request, base, key, default): super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self,", "request.GET and (request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict): opt = request.GET[key] else:", "import Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as", "# We only want to see public add-ons on the front page. c", "addon, 'version': version}) @addon_view def privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return", "= comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only supports en-US. 
lang", "not addon.current_version: raise http.Http404 return extension_detail(request, addon) else: # Redirect to an app", "amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return", "@mobilized(home) def home(request): # Shuffle the list and get 3 items. rand =", "self).__init__(request, base, key, default) def filter(self, field): sorts = {'name': 'name_sort', 'created': '-created',", "from amo.decorators import login_required, post_required, write from amo.forms import AbuseForm from amo.helpers import", "%s' % (result, con.paykey[:10])) if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE)", "reviews.models import Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share", "= {} # If we can, we favor locale specific collections. for feature", "import json import random from urlparse import urlparse import uuid from operator import", "_lazy import waffle from mobility.decorators import mobilized, mobile_template import amo from amo import", "base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured,", "of {option: queryset}.\"\"\" return dict((field, self.filter(field)) for field in dict(self.opts)) def filter(self, field):", "addon): log.debug('Starting purchase of addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) amount", "JS can cope appropriately. 
return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}),", "{'popular': popular, 'featured': featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home)", "Popular collections this addon is part of. collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx", "Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness =", "currency = addon.premium.get_price(), 'USD' # If tier is specified, then let's look it", "None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished'", "that's used if nothing good is found in request.GET. \"\"\" def __init__(self, request,", "def contribute(request, addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type", "'', '', '' try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal'", "= hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon name contrib_for = _(u'Purchase of", "for addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' %", "else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def", "the addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD. 
amount,", "then JSON will # not have a paykey and the JS can cope", "return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon, page): if addon.is_persona(): raise", "_('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this once we figure", "IPN may, or may not have come through. Which means looking for #", "and the JS can cope appropriately. return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error),", "contribute(request, addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type ==", "addon}) def _category_personas(qs, limit): f = lambda: randslice(qs, limit=limit) key = 'cat-personas:' +", "= request self.base_queryset = base self.key = key self.model = model self.field, self.title", "filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False)", "cancelled contribution: %s' % uuid) else: log.info('User completed contribution: %s' % uuid) response", "'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def privacy(request, addon): if not addon.privacy_policy: return", "pattern = 'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug =", "= Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save()", "recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': 
AbuseForm(request=request), }", "fail, then we've not # got a matching contribution. lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING)", "into solitude. Temporary. form = ContributionForm({'amount': amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid", "return jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True)", "with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED)", "type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s, user: %s, paykey:", "paypal_id = (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name,", "self.key = key self.model = model self.field, self.title = self.options(self.request, key, default) self.qs", "data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name,", "has_purchased, has_not_purchased) from mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view", "amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id:", "request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status = 'error' log.debug('Paypal returned: %s for", "= form.get_tier() if tier: amount, currency = tier.price, tier.currency paykey, status, error =", "''), 'status': status, 'result': result} # For mobile, bounce back to the details", "{ 'addon': addon, 'persona': persona, 'categories': categories, 'author_personas': 
persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if", "define ``opts`` on the subclass as a sequence of (key, title) pairs. The", "a in addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured':", "# If we can, we favor locale specific collections. for feature in features:", "PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier() if tier: amount, currency = tier.price,", "{0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey,", "get this in the addon authors # locale, rather than the contributors locale.", "slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There", "'addon': addon, 'persona': persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not", "annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)", "if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if", "view. The chosen filter field is combined with the ``base`` queryset using the", "and be valid for this app. 
if addon.type in request.APP.types: if addon.type ==", "= list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) # Do one query", "(self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self):", "If there was an error getting the paykey, then JSON will # not", "super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self, field): sorts = {'name': 'name_sort', 'created':", "went wrong. if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user", "addon, status): result = '' if status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking", "'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased using pre-approval') log.debug('Status is", "the non-Ajax fallback. if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return", "return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view", "return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp = addon.is_webapp()", "request self.base_queryset = base self.key = key self.model = model self.field, self.title =", "'USD' # If tier is specified, then let's look it up. 
form =", "addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id # l10n: {0} is the addon", "key by feature_id and locale, so we can favor locale specific # promos.", "filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self):", "result} # For mobile, bounce back to the details page. if request.MOBILE: url", "key not in promo_dict: continue # We only want to see public add-ons", "contribution_src, 'version': version}) # TODO(andym): remove this once we figure out how to", "addon.name, addon.paypal_id # l10n: {0} is the addon name contrib_for = _(u'Contribution for", "ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id')", "is the addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD.", "in promo_dict: continue # We only want to see public add-ons on the", "raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version = get_object_or_404(Version,", "log.debug('Got paykey for addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) url =", "up contrib for uuid: %s' % uuid_) # The IPN may, or may", "is completed for uuid: %s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase", "import settings from django.db.models import Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect from", "title) pair we want according to the request.\"\"\" if key in request.GET and", "persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new(): # Remora", "detail pages. 
raise http.Http404 # addon needs to have a version and be", "page dispatcher.\"\"\" if addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon},", "a lot more queries we don't want on the initial page load. if", "urlparse import uuid from operator import attrgetter from django import http from django.conf", "= addon.name, addon.paypal_id # l10n: {0} is the addon name contrib_for = _(u'Contribution", "if addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else: if", "satisfaction only supports en-US. lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and", "request.GET['src'] else: page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock',", "dict((field, self.filter(field)) for field in dict(self.opts)) def filter(self, field): \"\"\"Get the queryset for", "= page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src, 'contribution_src': contribution_src,", "form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html',", "error, status = '', '', '' try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id,", "'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60 * 24) def persona_redirect(request,", "request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug,", "pick 3 at random. 
qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular =", "paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status = 'error'", "return opt, title def all(self): \"\"\"Get a full mapping of {option: queryset}.\"\"\" return", "key = (feature.id, lang) if key not in promo_dict: key = (feature.id, '')", "jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src, 'contribution_src': contribution_src, 'version': version}) #", "addons if a.id in featured] popular = sorted([a for a in addons if", "purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True)", "good is found in request.GET. \"\"\" def __init__(self, request, base, key, default, model=Addon):", "'download': download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt", "{ 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id),", "platform = request.GET.get('version'), request.GET.get('platform') if not (platform or version): raise http.Http404 return promos(request,", "by user: %s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if", "request.LANG)[:3] # Get 10 popular add-ons, then pick 3 at random. 
qs =", "features(self): return CollectionFeature.objects.all() def collections(self): features = self.features() lang = translation.to_language(translation.get_language()) locale =", "as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting", "url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or request.is_ajax(): return", "settings from django.db.models import Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation", "= None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern =", "or post IPN contributions. If both fail, then we've not # got a", "filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx'))", "Do one query and split up the add-ons. addons = (Addon.objects.filter(id__in=featured + popular)", "def developers(request, addon, page): if addon.is_persona(): raise http.Http404() if 'version' in request.GET: qs", "authors # locale, rather than the contributors locale. name, paypal_id = (u'%s: %s'", "= request.GET.get('uuid') if not uuid: raise http.Http404() if status == 'cancel': log.info('User cancelled", "log.info('User cancelled contribution: %s' % uuid) else: log.info('User completed contribution: %s' % uuid)", "'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_,", "# Remora uses persona.author despite there being a display_username. 
data['author_gallery'] = settings.PERSONAS_USER_ROOT %", "if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state", "= Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness", "import login_required, post_required, write from amo.forms import AbuseForm from amo.helpers import shared_url from", "'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations':", "{} for feature_id, v in groups: promo = v.next() key = (feature_id, translation.to_language(promo.locale))", "addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login return _login(request,", "'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug paykey,", "parameters and the title can be used in the view. The chosen filter", "promos version, platform = request.GET.get('version'), request.GET.get('platform') if not (platform or version): raise http.Http404", "'status': status, 'result': result} # For mobile, bounce back to the details page.", "import jingo import jinja2 import commonware.log import session_csrf from tower import ugettext as", "we figure out logged out flow. 
@csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request,", "self.extras_dict[opt] return opt, title def all(self): \"\"\"Get a full mapping of {option: queryset}.\"\"\"", "or request.GET[key] in self.extras_dict): opt = request.GET[key] else: opt = default if opt", "request, base, key, default): super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self, field): sorts", "filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name')", "as _lazy import waffle from mobility.decorators import mobilized, mobile_template import amo from amo", "else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey", "GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as share_redirect from", "import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\"", "if not addon.current_version: raise http.Http404 return extension_detail(request, addon) else: # Redirect to an", "paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib", "request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon name contrib_for", "'apps.purchase.finished' slug = addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'),", "specified, 
then let's look it up. form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier", "return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def privacy(request, addon): if not", "def license(request, addon, version=None): if version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version", "raise http.Http404 return extension_detail(request, addon) else: # Redirect to an app that supports", "request.GET: contribution_src = src = request.GET['src'] else: page_srcs = { 'developers': ('developers', 'meet-developers'),", "extension_detail(request, addon) else: # Redirect to an app that supports this type. try:", "def options(self, request, key, default): \"\"\"Get the (option, title) pair we want according", "% uuid) else: log.info('User completed contribution: %s' % uuid) response = jingo.render(request, 'addons/paypal_result.html',", "**kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request, *args, **kwargs) try:", "rv[feature] = c return rv def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def", "'collections': collections}) @mobilized(home) def home(request): # Shuffle the list and get 3 items.", "limit=6) else: category_personas = None data = { 'addon': addon, 'persona': persona, 'categories':", "BaseFilter(object): \"\"\" Filters help generate querysets for add-on listings. You have to define", "in users. 
@login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting purchase", "filter field is combined with the ``base`` queryset using the ``key`` found in", "% (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or request.is_ajax(): # If there was", "log.info('User completed contribution: %s' % uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status':", "'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for uuid: %s' % uuid_) log.debug('Got paykey", "we can favor locale specific # promos. promo_dict = {} for feature_id, v", "of addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source", "Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s, user: %s,", "error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an error", "'%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url,", "# Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular", "contributors locale. 
name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name,", "jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon): data = {'addon': addon,", "'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies,", "'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon): data = {'addon': addon, 'is_ajax':", "is lame for performance. Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) #", "Get 10 popular add-ons, then pick 3 at random. qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION)", "download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login", "contribution_src are different. 
src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page':", "http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this once", "randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request,", "import reverse from abuse.models import send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo from", "preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error,", "'', '', '' preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval =", "purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try:", "to have a version and be valid for this app. if addon.type in", "'status': status}), content_type='application/json') # This is the non-Ajax fallback. if status != 'COMPLETED':", "con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s'", "version = addon.current_version if 'src' in request.GET: contribution_src = src = request.GET['src'] else:", "= hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out how to get this in", "url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or request.is_ajax(): #", "result = '' if status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib", "'')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c", "specific # promos. 
promo_dict = {} for feature_id, v in groups: promo =", "ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html", "currency = tier.price, tier.currency paykey, status, error = '', '', '' preapproval =", "addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG,", "= request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern =", "'allow' return response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl',", "3 items. rand = lambda xs: random.shuffle(xs) or xs[:3] # Get some featured", "send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import paypal", "as share_redirect from stats.models import Contribution from translations.query import order_by_translation from versions.models import", "(platform or version): raise http.Http404 return promos(request, 'home', version, platform) class CollectionPromoBox(object): def", "addon): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download}", "None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version = addon.current_version if", "@addon_view def developers(request, addon, page): if addon.is_persona(): raise http.Http404() if 'version' in request.GET:", "urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed,", "% uuid_) # If this was a pre-approval, it's completed 
already, we'll #", "comment = request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type,", "uuid_) contrib.type = amo.CONTRIB_PURCHASE else: # In this case PayPal disagreed, we should", "'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request),", "remove this once we figure out logged out flow. @csrf_exempt @login_required @addon_view @can_be_purchased", "addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey,", "sorted_groupby, urlparams from amo.models import manual_order from amo import urlresolvers from amo.urlresolvers import", "locale, so we can favor locale specific # promos. promo_dict = {} for", "ContributionForm from .models import Addon, Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased,", "amo.forms import AbuseForm from amo.helpers import shared_url from amo.utils import randslice, sorted_groupby, urlparams", "None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status =", "list and get 3 items. 
rand = lambda xs: random.shuffle(xs) or xs[:3] #", "def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f =", "contribution: %s' % uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options']", "addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION #", "%s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR'", "paykey') log.error('Error getting paykey, contribution for addon: %s' % addon.pk, exc_info=True) if paykey:", "USD. amount, currency = addon.premium.get_price(), 'USD' # If tier is specified, then let's", "= lambda: randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html')", "addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular':", "return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt", "from amo.forms import AbuseForm from amo.helpers import shared_url from amo.utils import randslice, sorted_groupby,", "messages.success(request, _('Abuse reported.')) return http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form,", "home(request): # Shuffle the list and get 3 items. 
rand = lambda xs:", "except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an", "addon, page): if addon.is_persona(): raise http.Http404() if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)", "addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS,", "dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form':", "slug = addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for,", "for uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else: # In this case", "= c return rv def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def eula(request,", "from amo.urlresolvers import reverse from abuse.models import send_abuse_report from bandwagon.models import Collection, CollectionFeature,", "if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl',", "'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from", "= addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies':", "decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request, *args,", "self.opts_dict: title = 
self.opts_dict[opt] else: title = self.extras_dict[opt] return opt, title def all(self):", "}) return jingo.render(request, template, data) class BaseFilter(object): \"\"\" Filters help generate querysets for", "got a matching contribution. lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con =", "the addon authors # locale, rather than the contributors locale. name, paypal_id =", "if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing", "this in the addon authors # locale, rather than the contributors locale. name,", "addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): # Apps don't deserve", ".with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self):", "content_type='application/json') # This is the non-Ajax fallback. if status != 'COMPLETED': return http.HttpResponseRedirect(url)", "str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon, status): uuid", "if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed for uuid: %s' % uuid_)", "PayPal disagreed, we should not be trusting # what get_paykey said. 
Which is", "\"Other add-ons by author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None)", "= self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def", "return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def", "request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status':", "uuid_) # If this was a pre-approval, it's completed already, we'll # double", "a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def", "def share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request,", "on the subclass as a sequence of (key, title) pairs. The key is", "_lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request): # Add-ons. 
base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) #", "(self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def", "ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons details", "addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response @login_required @addon_view", "from django.db.models import Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation import", "paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_))", "from reviews.models import Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import", "BaseFilter.filter_created def home(request): # Add-ons. 
base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for", "version, platform = request.GET.get('version'), request.GET.get('platform') if not (platform or version): raise http.Http404 return", "@mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): \"\"\"Details page for Personas.\"\"\" if not addon.is_public(): raise", "uuid from operator import attrgetter from django import http from django.conf import settings", "not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version =", "%s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey)", "share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request, addon, version=None): if version is not", "'') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon name contrib_for =", "amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to get shoved into solitude.", "'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template,", "contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:", "charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url =", "if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to get", "= { 'addon': addon, 'persona': persona, 'categories': categories, 'author_personas': 
persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, }", "redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request, *args, **kwargs) try: target_id", "self.filter(field)) for field in dict(self.opts)) def filter(self, field): \"\"\"Get the queryset for the", "paykey, purchase of addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id,", "field, None) if order: return order(filter) return filter def _filter(self, field): return getattr(self,", "returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an", "addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id # l10n: {0} is the", "= self._filter(field) & self.base_queryset order = getattr(self, 'order_%s' % field, None) if order:", "request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness, 'personas': personas,", "post IPN contributions. 
If both fail, then we've not # got a matching", "else: opt = default if opt in self.opts_dict: title = self.opts_dict[opt] else: title", "if addon.charity: # TODO(andym): Figure out how to get this in the addon", "eula(request, addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions,", "= dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {} self.request = request", "uuid: %s' % uuid_) log.debug('Got paykey for addon: %s by user: %s' %", "filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): \"\"\"BaseFilter that uses elasticsearch.\"\"\" def __init__(self, request,", "if status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s'", "'roadblock'), } # Download src and contribution_src are different. src, contribution_src = page_srcs.get(page)", ".filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) # Do one query and split", "http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version = get_object_or_404(Version, pk=version)", "request.GET.get('platform') if not (platform or version): raise http.Http404 return promos(request, 'home', version, platform)", "If both fail, then we've not # got a matching contribution. lookup =", "if addon.is_webapp(): # Apps don't deserve AMO detail pages. raise http.Http404 # addon", "'-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts =", "src and contribution_src are different. 
src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon':", "'') amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not", "con.paykey[:10]), exc_info=True) result = 'ERROR' status = 'error' log.debug('Paypal returned: %s for paykey:", "(self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self):", "hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out how to get this in the", "('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request,", "model self.field, self.title = self.options(self.request, key, default) self.qs = self.filter(self.field) def options(self, request,", "paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error", "= BaseFilter.filter_created def home(request): # Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame", "paykey: %s' % (result, con.paykey[:10])) if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING:", "@session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form = AbuseForm(request.POST or None, request=request) if request.method", "'category_personas': category_personas, } if not persona.is_new(): # Remora uses persona.author despite there being", "= rand(qs) # Do one query and split up the add-ons. 
addons =", "# If this was a pre-approval, it's completed already, we'll # double check", "tier.price, tier.currency paykey, status, error = '', '', '' preapproval = None if", "@can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon,", "def license_redirect(request, version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def", "translation.to_language(promo.locale)) promo_dict[key] = promo rv = {} # If we can, we favor", "def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if not", "dict(self.opts)) def filter(self, field): \"\"\"Get the queryset for the given field.\"\"\" filter =", "= settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to get shoved into solitude. Temporary.", "addon=addon) if form.is_valid(): tier = form.get_tier() if tier: amount, currency = tier.price, tier.currency", "Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data) class BaseFilter(object): \"\"\"", "featured, 'popular': popular}) def homepage_promos(request): from discovery.views import promos version, platform = request.GET.get('version'),", "if not (platform or version): raise http.Http404 return promos(request, 'home', version, platform) class", "status = '', '', '' try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'),", "v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv = {} # If", "complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this once we figure out", "= addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): \"\"\"Decorator redirecting 
clicks on \"Other add-ons by author\".\"\"\" @functools.wraps(f)", "= addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def privacy(request, addon):", "= get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s' %", "purchase of addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price()", "for a in addons if a.id in featured] popular = sorted([a for a", "addon needs to have a version and be valid for this app. if", "else: version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def", "ctx) @mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit):", "just to be sure nothing went wrong. if status == 'COMPLETED': paypal.paypal_log_cef(request, addon,", "uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s' % uuid_) # The", "from tower import ugettext as _, ugettext_lazy as _lazy import waffle from mobility.decorators", "'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form':", "request.GET.get('version'), request.GET.get('platform') if not (platform or version): raise http.Http404 return promos(request, 'home', version,", "is the non-Ajax fallback. 
if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete'))", "= PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier() if tier: amount, currency =", "= model self.field, self.title = self.options(self.request, key, default) self.qs = self.filter(self.field) def options(self,", "GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form':", "@addon_view def share(request, addon): \"\"\"Add-on sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary) @addon_view def", "a sequence of (key, title) pairs. The key is used in GET parameters", "'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This", "comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get", "addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result =", "concentrating on logged in users. 
@login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request,", "{ 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount: amount =", "lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company) # Addon recommendations.", "'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request): # Shuffle", "addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id)", "HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated')))", "@vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details page.\"\"\" # If current version is incompatible", "mapping of {option: queryset}.\"\"\" return dict((field, self.filter(field)) for field in dict(self.opts)) def filter(self,", "title = self.extras_dict[opt] return opt, title def all(self): \"\"\"Get a full mapping of", "only supports en-US. lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company)", "amo.decorators import login_required, post_required, write from amo.forms import AbuseForm from amo.helpers import shared_url", "response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response @login_required @addon_view @can_be_purchased", "'id') def filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False)", "subclass as a sequence of (key, title) pairs. 
The key is used in", ".with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return", "raise http.Http404 return promos(request, 'home', version, platform) class CollectionPromoBox(object): def __init__(self, request): self.request", "= {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'}", "Addon, Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import", "def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(),", "= urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon):", "qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version = addon.current_version if not", "looking for # a for pre or post IPN contributions. If both fail,", "from bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import paypal from", "``default`` should be a key in ``opts`` that's used if nothing good is", "self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model", "uses persona.author despite there being a display_username. 
data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if", "popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request,", "context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response @login_required", "if addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if", "AbuseForm(request=request), } # details.html just returns the top half of the page for", "author\".\"\"\" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id:", "in dict(self.opts)) def filter(self, field): \"\"\"Get the queryset for the given field.\"\"\" filter", "logged in users. @login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting", "we've not # got a matching contribution. lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_,", "filter(self, field): sorts = {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads', 'users':", "error = '', '', '' preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:", "The IPN may, or may not have come through. 
Which means looking for", "paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s' % uuid_) # If this was", "(Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a in addons if a.id", "error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the", "} if not persona.is_new(): # Remora uses persona.author despite there being a display_username.", "queries we don't want on the initial page load. if request.is_ajax(): # Other", "if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon):", "amo.urlresolvers import reverse from abuse.models import send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo", "Addon.objects.public() rv[feature] = c return rv def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view", "key = (feature.id, '') if key not in promo_dict: continue # We only", "users. 
@login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting purchase of", "or request.is_ajax(): # If there was an error getting the paykey, then JSON", "PriceCurrencyForm import paypal from reviews.forms import ReviewForm from reviews.models import Review, GroupedRating from", "contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out how to get this", "if version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else:", "== 'cancel': log.info('User cancelled contribution: %s' % uuid) else: log.info('User completed contribution: %s'", "messages from amo.decorators import login_required, post_required, write from amo.forms import AbuseForm from amo.helpers", "paykey) if request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error':", "%s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if result", "page. c = promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] = c return", "add-ons/apps from the same author(s). 
ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else:", "return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class", "= urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only", "import random from urlparse import urlparse import uuid from operator import attrgetter from", "if request.is_ajax(): # Other add-ons/apps from the same author(s). ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return", "http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest()", "AbuseForm from amo.helpers import shared_url from amo.utils import randslice, sorted_groupby, urlparams from amo.models", "Default is USD. amount, currency = addon.premium.get_price(), 'USD' # If tier is specified,", "default) self.qs = self.filter(self.field) def options(self, request, key, default): \"\"\"Get the (option, title)", "This is all going to get shoved into solitude. Temporary. 
form = ContributionForm({'amount':", "if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular})", "request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18]", "contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey)", "% uuid_) log.debug('Got paykey for addon: %s by user: %s' % (addon.pk, request.amo_user.pk))", "else: category_personas = None data = { 'addon': addon, 'persona': persona, 'categories': categories,", "paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey')", "can be used in the view. The chosen filter field is combined with", "= request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request, *args, **kwargs) try: target_id =", "in featured] popular = sorted([a for a in addons if a.id in popular],", "persona = addon.persona # this persona's categories categories = addon.categories.filter(application=request.APP.id) if categories: qs", "get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form = AbuseForm(request.POST", "not have come through. Which means looking for # a for pre or", "* 60 * 24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id) to =", "at random. 
qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) #", "amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to get shoved into", "Which means looking for # a for pre or post IPN contributions. If", "addon, 'persona': persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new():", "__init__(self, request): self.request = request def features(self): return CollectionFeature.objects.all() def collections(self): features =", "'-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')),", "download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if", "the addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request,", "'allow' return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path", "* 24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id) to = reverse('addons.detail', args=[persona.addon.slug])", "waffle from mobility.decorators import mobilized, mobile_template import amo from amo import messages from", "hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular,", "return response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', 
'')).path", "prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): \"\"\"Extensions details", "filter(self, field): \"\"\"Get the queryset for the given field.\"\"\" filter = self._filter(field) &", "a matching contribution. lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution,", "details page.\"\"\" # If current version is incompatible with this app, redirect. comp_apps", "status}) response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download", "log.debug('Status is completed for uuid: %s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check", "= get_list_or_404(qs, version=version)[0] else: version = addon.current_version if not (version and version.license): raise", "'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f = lambda: randslice(qs, limit=limit) key =", "@has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax':", "\"\"\" def __init__(self, request, base, key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict =", "features = self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos =", "Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons, then pick 3 at random. 
qs", "sharing\"\"\" return share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request, addon, version=None): if version", "= addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return", "addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval,", "log.error('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]),", "= settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer", "except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def addon_detail(request, addon): \"\"\"Add-ons", "def purchase(request, addon): log.debug('Starting purchase of addon: %s by user: %s' % (addon.pk,", "int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated", "once we figure out how to process for # anonymous users. For now", "``opts`` on the subclass as a sequence of (key, title) pairs. The key", "log.error('Check purchase failed on uuid: %s' % uuid_) status = 'NOT-COMPLETED' contrib.save() else:", "= addon.premium.get_price(), 'USD' # If tier is specified, then let's look it up.", "feature_id and locale, so we can favor locale specific # promos. promo_dict =", "if a.id in featured] popular = sorted([a for a in addons if a.id", "an error getting the paykey, then JSON will # not have a paykey", "_(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD. 
amount, currency = addon.premium.get_price(), 'USD' #", "Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting paykey, purchase", "share_redirect from stats.models import Contribution from translations.query import order_by_translation from versions.models import Version", "paykey, 'error': str(error), 'status': status}), content_type='application/json') # This is the non-Ajax fallback. if", "in the addon authors # locale, rather than the contributors locale. name, paypal_id", "in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request):", "request): self.request = request def features(self): return CollectionFeature.objects.all() def collections(self): features = self.features()", "= get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form =", "promos. promo_dict = {} for feature_id, v in groups: promo = v.next() key", "feature in features: key = (feature.id, lang) if key not in promo_dict: key", "up. form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier() if tier: amount,", "user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status", "paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else 'addons'), preapproval=preapproval,", "the paykey') log.error('Error getting paykey, purchase of addon: %s' % addon.pk, exc_info=True) if", "log.error('No paykey present for uuid: %s' % uuid_) log.debug('Got paykey for addon: %s", "the same author(s). 
ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp():", "@addon_view def report_abuse(request, addon): form = AbuseForm(request.POST or None, request=request) if request.method ==", "import get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real as translation from django.views.decorators.cache import", "@post_required def purchase(request, addon): log.debug('Starting purchase of addon: %s by user: %s' %" ]
[ "= [] patients_ids = [] image_file_path = [] # read annotations for label_file", "License. import glob import json import os import random import numpy as np", "image_file_path, breast_densities, groups=patients_ids ): break # just use first fold test_images = image_file_path[test_index]", "the same ratios as for challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500", "def get_indices(all_ids, search_ids): indices = [] for _id in search_ids: _indices = np.where(all_ids", "site_name = f\"site-{c+1}\" print(f\"Preprocessing training set of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices", "# like phase 2 - final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\")", "KIND, either express or implied. # See the License for the specific language", "patients in train and test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), \"Overlapping", "Unless required by applicable law or agreed to in writing, software # distributed", "removed at this point # use groups to avoid patient overlaps # test", "data \"\"\" # shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids,", "train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits =", "in label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image file", "len(img_file) == 1, f\"No unique dicom image found for {dir_name}!\" save_prefix = os.path.join(out_path,", "to avoid patient overlaps # test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) *", "images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = 
split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list,", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "= \"./data/preprocessed\" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\"", "dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\": dir_name + \".npy\", \"label\": int(density - 1),", "saved at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices = [] for _id in search_ids:", "# 2 - scattered fibroglandular density # 3 - heterogeneously dense # 4", "len(test_images) print(20 * \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test", "{len(image_file_path)}!\" ) \"\"\" split train/validation dataset for n_clients \"\"\" # Split and avoid", "will stay the same for both phases \"test1\": val_list, # like phase 1", "len( np.unique(saved_filenames) ), f\"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists", "# test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f\"Splitting into", "fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images,", "[] patients_ids = [] image_file_path = [] # read annotations for label_file in", "train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] #", "label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, 
\"calc_case_description_test_set.csv\"), ] breast_densities", "image found for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags =", "like phase 2 - final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print(", "generate splits using roughly the same ratios as for challenge data: n_train_challenge =", "split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids =", "this file except in compliance with the License. # You may obtain a", "f\"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images.\" ) # check that there", "label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split", "\"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\")", "\"\"\" random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"),", "permissions and # limitations under the License. 
import glob import json import os", "len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\"", "val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there", "saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as f: json.dump(data_set, f,", "len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate splits using roughly the same ratios as", "= f\"site-{c+1}\" print(f\"Preprocessing training set of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices =", "= [] dc_tags = [] saved_filenames = [] assert len(ids) == len(images) ==", "\"\"\" split train/validation dataset for n_clients \"\"\" # Split and avoid patient overlap", "int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f\"Splitting into {n_splits} folds for train/val splits.", "train_val_densities[val_index] # check that there is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids))", "dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\")", "# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing,", "ANY KIND, either express or implied. 
# See the License for the specific", "print(\"Preprocessing testing\") _curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames =", "if _success and density >= 1: # label can be 0 sometimes, excluding", "test cases will be removed at this point # use groups to avoid", "dataset for n_clients \"\"\" # Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids)", "the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in", "label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image file path\"], return_index=True ) print(", "_success and density >= 1: # label can be 0 sometimes, excluding those", "1. Load the label data \"\"\" random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root,", "dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags,", "_curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices],", "np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. Preprocess the images \"\"\" dc_tags =", "{site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess(", "print(f\"Data list saved at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices = [] for _id", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge ) # test cases will", "_saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of", "at this point # use groups to avoid patient overlaps # test split", "unique_indices = np.unique( label_data[\"image file path\"], return_index=True ) print( f\"including {len(unique_images)} unique images", "# 3 - heterogeneously dense # 4 - extremely dense def preprocess(dicom_root, out_path,", "patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f\"Splitting", "splits. (Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index,", "), \"Overlapping patients in train and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0", "out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\"", ") # check that there were no duplicated files assert len(saved_filenames) == len(", "print(50 * \"=\") print(\"Processed unique DICOM tags\", np.unique(dc_tags)) if __name__ == \"__main__\": main()", "out_path, ids, images, densities, process_image=True): data_list = [] dc_tags = [] saved_filenames =", "val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\")", "print(50 * \"=\") print( f\"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images.\" )", "json.dump(data_set, f, indent=4) print(f\"Data list 
saved at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices =", "sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\": dir_name + \".npy\",", "unique dicom image found for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if process_image: _success,", "{len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2. Split the data", ") print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids =", "Preprocess the images \"\"\" dc_tags = [] saved_filenames = [] for c in", "test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there is no", "length of all images {len(image_file_path)}!\" ) \"\"\" split train/validation dataset for n_clients \"\"\"", "import random import numpy as np import pandas as pd from preprocess_dicom import", "split. (Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index,", "f, indent=4) print(f\"Data list saved at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices = []", "in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use first fold train_images", "f\"Splitting into {n_splits} folds for test split. 
(Only the first fold is used.)\"", "print(f\"Found {n_patients} patients.\") # generate splits using roughly the same ratios as for", "the label data \"\"\" random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root,", "n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge / ( n_train_challenge +", "OF ANY KIND, either express or implied. # See the License for the", ": {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 *", "break # just use first fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities", "MONAI Consortium # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "= \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run preprocessing \"\"\" \"\"\"", "2. 
Split the data \"\"\" # shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path))", "print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20", "unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids,", "validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess(", "print(f\"processing {i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\",", "n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids =", "= np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate splits using roughly", "dc_tags = [] saved_filenames = [] for c in range(n_clients): site_name = f\"site-{c+1}\"", "img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True ) assert len(img_file) == 1,", "DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\" # Input folders label_root", "GroupKFold # density labels # 1 - fatty # 2 - scattered fibroglandular", "_curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root,", "), f\"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data 
lists saved wit", "test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set", "= [] saved_filenames = [] assert len(ids) == len(images) == len(densities) for i,", "BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\" # Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root", "/ (len(image_file_path) * val_ratio))) print( f\"Splitting into {n_splits} folds for train/val splits. (Only", "process_image = True # set False if dicoms have already been preprocessed out_path", "unique_images, unique_indices = np.unique( label_data[\"image file path\"], return_index=True ) print( f\"including {len(unique_images)} unique", "first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split(", "recursive=True ) assert len(img_file) == 1, f\"No unique dicom image found for {dir_name}!\"", "testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list, # will stay the", "group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break", "patients.\") # generate splits using roughly the same ratios as for challenge data:", "files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix {out_dataset_prefix}\") print(50 *", "_dc_tags = [] if _success and density >= 1: # label can be", "label_data[\"image file path\"], return_index=True ) print( f\"including {len(unique_images)} unique images of {len(label_data['image file", "as for challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000", "\"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = 
\"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run preprocessing \"\"\" \"\"\" 1.", "patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2. Split the", "_curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, )", "4 - extremely dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list =", "= get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices],", "1, f\"No unique dicom image found for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if", "the same for both phases \"test1\": val_list, # like phase 1 leaderboard \"test2\":", "os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids = []", "exist_ok=True) with open(save_datalist_file, \"w\") as f: json.dump(data_set, f, indent=4) print(f\"Data list saved at", "os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True ) assert len(img_file) == 1, f\"No unique dicom", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + \".npy\"): _success =", "preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold # density labels # 1 -", "breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: 
breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids)", "that there is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ),", "in validation and test!\" n_total = len(train_images) + len(val_images) + len(test_images) print(20 *", "\"\"\" dc_tags = [] saved_filenames = [] for c in range(n_clients): site_name =", "data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids)", "between label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)}", "== 0 ), \"Overlapping patients in validation and test!\" n_total = len(train_images) +", "of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices", "n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge +", "= n_val_challenge / ( n_val_challenge + n_test_challenge ) # test cases will be", "val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted", "_curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root,", "dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True ) assert", 
"np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. Preprocess the images \"\"\" dc_tags = [] saved_filenames =", "= preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)}", "assert len(img_file) == 1, f\"No unique dicom image found for {dir_name}!\" save_prefix =", "pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image file path\"], return_index=True ) print( f\"including {len(unique_images)}", "\"**\", \"*.dcm\"), recursive=True ) assert len(img_file) == 1, f\"No unique dicom image found", "( f\"Mismatch between label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" )", "Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run", "f\"Mismatch between label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read", "= True # set False if dicoms have already been preprocessed out_path =", "preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation", "( f\"mismatch between total split images ({n_total})\" f\" and length of all images", "patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data breast_densities =", "{len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list, #", "under 
the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f\"Splitting into {n_splits} folds for test split.", "train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames)", "split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices],", "first fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index] # train/val", "try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) ==", "Load the label data \"\"\" random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"),", "Consortium # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "\"\"\" \"\"\" 1. 
Load the label data \"\"\" random.seed(0) label_files = [ os.path.join(label_root,", "# generate splits using roughly the same ratios as for challenge data: n_train_challenge", "{len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = split_test_patients_ids[c] _curr_indices", "files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not all generated files ({len(saved_filenames)}) are", "cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\": dir_name + \".npy\", \"label\": int(density -", "patients_ids, image_file_path = zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids)", "dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list = [] dc_tags =", "required by applicable law or agreed to in writing, software # distributed under", "{len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\")", "assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f\"Mismatch between label data, breast_densities: \"", "image_file_path = zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path", "shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data)", "YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\" # Input folders", "train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = 
train_val_patients_ids[val_index] val_densities", "applicable law or agreed to in writing, software # distributed under the License", "= patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print(", "= { \"train\": train_list, # will stay the same for both phases \"test1\":", "_success = True else: _success = False _dc_tags = [] if _success and", "- scattered fibroglandular density # 3 - heterogeneously dense # 4 - extremely", "scattered fibroglandular density # 3 - heterogeneously dense # 4 - extremely dense", "if os.path.isfile(save_prefix + \".npy\"): _success = True else: _success = False _dc_tags =", "as f: json.dump(data_set, f, indent=4) print(f\"Data list saved at {save_datalist_file}\") def get_indices(all_ids, search_ids):", "np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids", "of {len(image_file_path)} images.\" ) # check that there were no duplicated files assert", "{i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"),", "* test_ratio))) print( f\"Splitting into {n_splits} folds for test split. 
(Only the first", "# read annotations for label_file in label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images,", "= [] if _success and density >= 1: # label can be 0", "or agreed to in writing, software # distributed under the License is distributed", "test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames)", "folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run preprocessing", "challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio =", "test_ratio))) print( f\"Splitting into {n_splits} folds for test split. (Only the first fold", "= patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids =", "\"Overlapping patients in train and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ),", "= train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there is", "2022 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the \"License\");", "\".npy\"): _success = True else: _success = False _dc_tags = [] if _success", "\"test1\": val_list, # like phase 1 leaderboard \"test2\": test_list, # like phase 2", "just use first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index]", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "for challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio", "{len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total : {n_total}\") assert n_total == len(image_file_path), (", "no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), \"Overlapping patients in", "# limitations under the License. import glob import json import os import random", "} write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print( f\"Successfully converted a total {len(saved_filenames)} of", "path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f\"Mismatch between label data, breast_densities:", "\"Overlapping patients in validation and test!\" n_total = len(train_images) + len(val_images) + len(test_images)", "dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run preprocessing \"\"\" \"\"\" 1. 
Load", "that there were no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not", "print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total : {n_total}\") assert n_total ==", "[] saved_filenames = [] for c in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training", "same ratios as for challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge", "splits using roughly the same ratios as for challenge data: n_train_challenge = 60_000", "search_ids): indices = [] for _id in search_ids: _indices = np.where(all_ids == _id)", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids", "print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = split_test_patients_ids[c]", "( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge", "process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids", "check that there is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0", "\"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids = [] image_file_path = [] # read", "len(val_images) + len(test_images) print(20 * \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)}", "def 
write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as f: json.dump(data_set, f, indent=4)", "f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids =", "data_set = { \"train\": train_list, # will stay the same for both phases", "pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold # density labels #", "# test cases will be removed at this point # use groups to", "saved_filenames = [] assert len(ids) == len(images) == len(densities) for i, (id, image,", "dir_name + \".npy\", \"label\": int(density - 1), } ) saved_filenames.append(dir_name + \".npy\") return", "* val_ratio))) print( f\"Splitting into {n_splits} folds for train/val splits. (Only the first", "duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not all generated files ({len(saved_filenames)})", "of {len(label_data['image file path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices])", "{len(saved_filenames)} of {len(image_file_path)} images.\" ) # check that there were no duplicated files", "data_list.append( { \"patient_id\": id, \"image\": dir_name + \".npy\", \"label\": int(density - 1), }", "= split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path,", "is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities,", "= image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits 
train_val_images =", "compliance with the License. # You may obtain a copy of the License", "found for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0],", "{len(label_data['image file path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices])", "n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge /", "total {len(saved_filenames)} of {len(image_file_path)} images.\" ) # check that there were no duplicated", "= np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. Preprocess the images \"\"\" dc_tags", "test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in validation and", "saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with", "test_patients_ids)) == 0 ), \"Overlapping patients in validation and test!\" n_total = len(train_images)", "= train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities =", "= len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate splits using roughly the same ratios", "are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed", "limitations under the License. 
import glob import json import os import random import", "overlaps # test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f\"Splitting", "= train_val_densities[val_index] # check that there is no patient overlap assert ( len(np.intersect1d(train_patients_ids,", "import dicom_preprocess from sklearn.model_selection import GroupKFold # density labels # 1 - fatty", "for the specific language governing permissions and # limitations under the License. import", "print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2. Split the data \"\"\" # shuffle data", "\"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids = [] image_file_path", "this point # use groups to avoid patient overlaps # test split n_splits", "images {len(image_file_path)}!\" ) \"\"\" split train/validation dataset for n_clients \"\"\" # Split and", "f\"site-{c+1}\" print(f\"Preprocessing training set of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids,", "process_image=process_image, ) print( f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing", "id, \"image\": dir_name + \".npy\", \"label\": int(density - 1), } ) saved_filenames.append(dir_name +", "n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge ) # test", "_success = False _dc_tags = [] if _success and density >= 1: #", "0 ), \"Overlapping patients in train and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) ==", "\"train\": train_list, # will stay the same for both phases \"test1\": val_list, #", "dicom_preprocess from sklearn.model_selection import GroupKFold # density labels # 1 - fatty #", 
"= np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image = True #", ") assert len(img_file) == 1, f\"No unique dicom image found for {dir_name}!\" save_prefix", "= [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities =", "fatty # 2 - scattered fibroglandular density # 3 - heterogeneously dense #", "and density >= 1: # label can be 0 sometimes, excluding those cases", "groups=patients_ids ): break # just use first fold test_images = image_file_path[test_index] test_patients_ids =", "2 - scattered fibroglandular density # 3 - heterogeneously dense # 4 -", "= split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path,", "{len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices =", "out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags)", "the data \"\"\" # shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities,", "\"\"\" Run preprocessing \"\"\" \"\"\" 1. 
Load the label data \"\"\" random.seed(0) label_files", "train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids", "np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids)", "both phases \"test1\": val_list, # like phase 1 leaderboard \"test2\": test_list, # like", "3 - heterogeneously dense # 4 - extremely dense def preprocess(dicom_root, out_path, ids,", "os import random import numpy as np import pandas as pd from preprocess_dicom", "language governing permissions and # limitations under the License. import glob import json", "not use this file except in compliance with the License. # You may", "# just use first fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities =", "from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold # density labels # 1", "density labels # 1 - fatty # 2 - scattered fibroglandular density #", "breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients", "test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f\"Splitting into {n_splits}", "and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in train", "print(f\"Preprocessing training set of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids)", "enumerate(zip(ids, images, densities)): if (i + 1) % 200 == 0: print(f\"processing {i+1}", "image_file_path = [] # read annotations for label_file in 
label_files: print(f\"add {label_file}\") label_data", "== len(images) == len(densities) for i, (id, image, density) in enumerate(zip(ids, images, densities)):", "_saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted {len(train_list)}", "of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list, # will", "extremely dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list = [] dc_tags", "\"patient_id\": id, \"image\": dir_name + \".npy\", \"label\": int(density - 1), } ) saved_filenames.append(dir_name", "breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\") \"\"\"", "WRITTEN HERE out_dataset_prefix = \"./data/dataset\" # Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root =", "License, Version 2.0 (the \"License\"); # you may not use this file except", "patients in validation and test!\" n_total = len(train_images) + len(val_images) + len(test_images) print(20", "/ (len(image_file_path) * test_ratio))) print( f\"Splitting into {n_splits} folds for test split. (Only", "f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2. Split", "from sklearn.model_selection import GroupKFold # density labels # 1 - fatty # 2", "+ \".npy\") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file,", "data entries.\") \"\"\" 2. 
Split the data \"\"\" # shuffle data label_data =", "1 leaderboard \"test2\": test_list, # like phase 2 - final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\",", "wit prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique DICOM tags\", np.unique(dc_tags)) if __name__", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge / ( n_train_challenge", "Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids", "zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path)", ") val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge ) # test cases", "label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image file path\"],", "test split. 
(Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for", "# Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients)", "same for both phases \"test1\": val_list, # like phase 1 leaderboard \"test2\": test_list,", "len(densities) for i, (id, image, density) in enumerate(zip(ids, images, densities)): if (i +", "{n_total}\") assert n_total == len(image_file_path), ( f\"mismatch between total split images ({n_total})\" f\"", "- 1), } ) saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file,", "been preprocessed out_path = \"./data/preprocessed\" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE", "unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate splits using", "= [] # read annotations for label_file in label_files: print(f\"add {label_file}\") label_data =", "use groups to avoid patient overlaps # test split n_splits = int(np.ceil(len(image_file_path) /", "print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames =", "copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "# you may not use this file except in compliance with the License.", "dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\")", "of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames", "phase 2 - final leaderboard } 
write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print( f\"Successfully", ") # test cases will be removed at this point # use groups", "{n_splits} folds for test split. (Only the first fold is used.)\" ) group_kfold", "agreed to in writing, software # distributed under the License is distributed on", "GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just", "random import numpy as np import pandas as pd from preprocess_dicom import dicom_preprocess", "saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames", "True # set False if dicoms have already been preprocessed out_path = \"./data/preprocessed\"", ": {n_total}\") assert n_total == len(image_file_path), ( f\"mismatch between total split images ({n_total})\"", "# set False if dicoms have already been preprocessed out_path = \"./data/preprocessed\" #", "2 - final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print( f\"Successfully converted", "(the \"License\"); # you may not use this file except in compliance with", "patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f\"Mismatch between", "as np import pandas as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import", "return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as", "use first fold test_images = image_file_path[test_index] test_patients_ids = 
patients_ids[test_index] test_densities = breast_densities[test_index] #", "fibroglandular density # 3 - heterogeneously dense # 4 - extremely dense def", "np.unique(saved_filenames) ), f\"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved", "Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids =", "f: json.dump(data_set, f, indent=4) print(f\"Data list saved at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices", "# Unless required by applicable law or agreed to in writing, software #", ") \"\"\" split train/validation dataset for n_clients \"\"\" # Split and avoid patient", "+ n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge ) #", "images of {len(label_data['image file path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast", "by applicable law or agreed to in writing, software # distributed under the", "assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), \"Overlapping patients in train and validation!\"", "training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids,", "annotations for label_file in label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices =", "n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f\"Splitting into {n_splits} folds for", "3 \"\"\" Run preprocessing \"\"\" \"\"\" 1. 
Load the label data \"\"\" random.seed(0)", "= 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge / (", "len(image_file_path), ( f\"Mismatch between label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\"", "{len(image_file_path)} images.\" ) # check that there were no duplicated files assert len(saved_filenames)", "group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use first fold train_images =", "# Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\"", "test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits train_val_images", "# Copyright 2022 MONAI Consortium # Licensed under the Apache License, Version 2.0", "group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break", "# use groups to avoid patient overlaps # test split n_splits = int(np.ceil(len(image_file_path)", "unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids,", "densities)): if (i + 1) % 200 == 0: print(f\"processing {i+1} of {len(ids)}...\")", "file except in compliance with the License. 
# You may obtain a copy", "if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + \".npy\"): _success", "in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training set of client {site_name}\") _curr_patient_ids =", "import glob import json import os import random import numpy as np import", "val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use first fold", "assert n_total == len(image_file_path), ( f\"mismatch between total split images ({n_total})\" f\" and", "patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), \"Overlapping patients in train", "json import os import random import numpy as np import pandas as pd", "phases \"test1\": val_list, # like phase 1 leaderboard \"test2\": test_list, # like phase", "# YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\" # Input", "== 1, f\"No unique dicom image found for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name)", "True else: _success = False _dc_tags = [] if _success and density >=", "License for the specific language governing permissions and # limitations under the License.", "glob import json import os import random import numpy as np import pandas", "dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags,", "to in writing, software # distributed under the License is distributed on an", "0 ), \"Overlapping patients in train and test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) ==", "({n_total})\" f\" and length of all images {len(image_file_path)}!\" ) \"\"\" split train/validation dataset", "val_ratio))) print( f\"Splitting into {n_splits} folds for train/val splits. 
(Only the first fold", "like phase 1 leaderboard \"test2\": test_list, # like phase 2 - final leaderboard", "get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image,", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing", "breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), (", "return_index=True ) print( f\"including {len(unique_images)} unique images of {len(label_data['image file path'])} image entries\"", "dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as f: json.dump(data_set,", "(i + 1) % 200 == 0: print(f\"processing {i+1} of {len(ids)}...\") dir_name =", "read annotations for label_file in label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices", "train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print(", "(Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index", "list(zip(breast_densities, patients_ids, image_file_path)) 
random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data breast_densities", "unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique", "validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in train and", "avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids)", "train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) /", "len(image_file_path), ( f\"mismatch between total split images ({n_total})\" f\" and length of all", "{len(train_list)} of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c]", "under the License. import glob import json import os import random import numpy", "for train/val splits. 
(Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits)", "n_total == len(image_file_path), ( f\"mismatch between total split images ({n_total})\" f\" and length", "preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list = [] dc_tags = [] saved_filenames", "validation and test!\" n_total = len(train_images) + len(val_images) + len(test_images) print(20 * \"-\")", "stay the same for both phases \"test1\": val_list, # like phase 1 leaderboard", "({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total : {n_total}\") assert n_total", "roughly the same ratios as for challenge data: n_train_challenge = 60_000 n_val_challenge =", "= int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f\"Splitting into {n_splits} folds for train/val", "label can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id,", "data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\")", "[ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = []", "saved wit prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique DICOM tags\", np.unique(dc_tags)) if", "already been preprocessed out_path = \"./data/preprocessed\" # YOUR DEST FOLDER SHOULD BE WRITTEN", "{n_patients} patients.\") # generate splits using roughly the same ratios as for challenge", "or implied. 
# See the License for the specific language governing permissions and", ") group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ):", "assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in validation and test!\"", "({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total : {n_total}\") assert n_total == len(image_file_path), ( f\"mismatch", "train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index]", "assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in train and test!\"", "no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not all generated files", "len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in train and test!\" assert (", "have already been preprocessed out_path = \"./data/preprocessed\" # YOUR DEST FOLDER SHOULD BE", "training set of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list,", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "train_val_densities, groups=train_val_patients_ids ): break # just use first fold train_images = train_val_images[train_index] train_patients_ids", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "= np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients)", "set of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags,", "process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = {", "= np.unique( label_data[\"image file path\"], return_index=True ) print( f\"including {len(unique_images)} unique images of", "train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\" )", "os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids = [] image_file_path = [] #", "os.path.isfile(save_prefix + \".npy\"): _success = True else: _success = False _dc_tags = []", "train and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in", "random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids", "np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image = True # set", "in writing, software # distributed under the License is distributed on an \"AS", "data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) #", "else: if os.path.isfile(save_prefix + \".npy\"): _success = True else: _success = False _dc_tags", "= 6_500 
n_test_challenge = 40_000 test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge", "n_total = len(train_images) + len(val_images) + len(test_images) print(20 * \"-\") print(f\"Train : {len(train_images)}", "\"-\") print(f\"Total : {n_total}\") assert n_total == len(image_file_path), ( f\"mismatch between total split", "range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training set of client {site_name}\") _curr_patient_ids = split_train_patients_ids[c]", "train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags)", "folds for test split. (Only the first fold is used.)\" ) group_kfold =", "governing permissions and # limitations under the License. import glob import json import", "_indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image = True", "import json import os import random import numpy as np import pandas as", "{ \"train\": train_list, # will stay the same for both phases \"test1\": val_list,", "= np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. 
Preprocess", "save_prefix) else: if os.path.isfile(save_prefix + \".npy\"): _success = True else: _success = False", "print( f\"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images.\" ) # check that", "for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just use", "_curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, )", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "{len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list, # will stay", "can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\":", "== 0: print(f\"processing {i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root,", "cases will be removed at this point # use groups to avoid patient", "point # use groups to avoid patient overlaps # test split n_splits =", "test_densities = breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities", "os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as f: json.dump(data_set, f, indent=4) print(f\"Data list saved", "_id in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main():", "label data \"\"\" random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), 
os.path.join(label_root, \"mass_case_description_test_set.csv\"),", "== len(image_file_path), ( f\"Mismatch between label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path:", "== 0 ), \"Overlapping patients in train and test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids))", "\"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run preprocessing \"\"\" \"\"\" 1. Load the label", "get_indices(all_ids, search_ids): indices = [] for _id in search_ids: _indices = np.where(all_ids ==", "len(train_images) + len(val_images) + len(test_images) print(20 * \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val", "for c in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training set of client {site_name}\")", "_dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)}", "Split the data \"\"\" # shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data)", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "out_path = \"./data/preprocessed\" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix =", "image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data breast_densities = np.array(breast_densities)", "open(save_datalist_file, \"w\") as f: json.dump(data_set, f, indent=4) print(f\"Data list saved at {save_datalist_file}\") def", "= pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image file path\"], return_index=True ) print( f\"including", "you may not use this file except in compliance with the License. 
#", "val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing", "of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = split_test_patients_ids[c] _curr_indices =", "for _id in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def", "were no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not all generated", "= dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + \".npy\"): _success = True else: _success", "train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just use first", "patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids", "# train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits", "\"image\": dir_name + \".npy\", \"label\": int(density - 1), } ) saved_filenames.append(dir_name + \".npy\")", "test_list, # like phase 2 - final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 *", "- extremely dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list = []", "{dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else:", "= np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients 
= len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate", "the images \"\"\" dc_tags = [] saved_filenames = [] for c in range(n_clients):", "np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found {n_patients} patients.\")", "n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge /", "assert len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not all generated files ({len(saved_filenames)}) are unique", "def main(): process_image = True # set False if dicoms have already been", "= np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients)", "# label can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\":", "density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f\"Mismatch", "write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print( f\"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)}", "use this file except in compliance with the License. 
# You may obtain", "print(f\"Data lists saved wit prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique DICOM tags\",", "indices.extend(_indices[0].tolist()) return indices def main(): process_image = True # set False if dicoms", "else: _success = False _dc_tags = [] if _success and density >= 1:", "\"w\") as f: json.dump(data_set, f, indent=4) print(f\"Data list saved at {save_datalist_file}\") def get_indices(all_ids,", "in train and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients", "n_val_challenge / ( n_val_challenge + n_test_challenge ) # test cases will be removed", "# check that there were no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames)", "random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ]", "print( f\"including {len(unique_images)} unique images of {len(label_data['image file path'])} image entries\" ) try:", "label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data", "% 200 == 0: print(f\"processing {i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file =", "data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge", "/ ( n_val_challenge + n_test_challenge ) # test cases will be removed at", ">= 1: # label can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append(", "avoid patient overlaps # test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio)))", "is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) 
== 0 ), \"Overlapping patients", "phase 1 leaderboard \"test2\": test_list, # like phase 2 - final leaderboard }", "= train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there is no patient overlap", ") print( f\"including {len(unique_images)} unique images of {len(label_data['image file path'])} image entries\" )", "all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix {out_dataset_prefix}\")", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "\"\"\" 2. Split the data \"\"\" # shuffle data label_data = list(zip(breast_densities, patients_ids,", "train_list, # will stay the same for both phases \"test1\": val_list, # like", "\"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids = [] image_file_path = []", "(len(image_file_path) * val_ratio))) print( f\"Splitting into {n_splits} folds for train/val splits. (Only the", "SHOULD BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\" # Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\"", "0 ), \"Overlapping patients in validation and test!\" n_total = len(train_images) + len(val_images)", "patient overlaps # test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print(", "( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in validation and test!\" n_total", "be removed at this point # use groups to avoid patient overlaps #", "print( f\"Splitting into {n_splits} folds for train/val splits. 
(Only the first fold is", "split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices],", "print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list,", "as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold # density labels", "set False if dicoms have already been preprocessed out_path = \"./data/preprocessed\" # YOUR", "\"./data/preprocessed\" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\" #", "train and test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in", "train/validation dataset for n_clients \"\"\" # Split and avoid patient overlap unique_train_patients_ids =", ": {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total : {n_total}\")", "excluding those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\": dir_name + \".npy\", \"label\":", "= get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices],", "data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as f: json.dump(data_set, f, indent=4) print(f\"Data list", "dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list, # will stay the same for", "dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted 
{len(train_list)} of {len(train_patients_ids)} training", "{len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2. Split the data \"\"\" #", "2.0 (the \"License\"); # you may not use this file except in compliance", "list saved at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices = [] for _id in", "fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path,", "first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images =", "and length of all images {len(image_file_path)}!\" ) \"\"\" split train/validation dataset for n_clients", "= np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found {n_patients}", "1: # label can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( {", "_id) indices.extend(_indices[0].tolist()) return indices def main(): process_image = True # set False if", "): break # just use first fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index]", "in train and test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients", "for label_file in label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique(", "first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split(", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "path\"], return_index=True ) print( f\"including {len(unique_images)} unique images of {len(label_data['image file path'])} image", "at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in", "is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities,", "image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f\"Mismatch between label", "= np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids)", "dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + \".npy\"): _success = True else: _success =", "{label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image file path\"], return_index=True )", "[] saved_filenames = [] assert len(ids) == len(images) == len(densities) for i, (id,", "\"\"\" 1. 
Load the label data \"\"\" random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"),", "_saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of", "({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total", "groups to avoid patient overlaps # test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path)", "\"Overlapping patients in train and test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ),", "express or implied. # See the License for the specific language governing permissions", "density >= 1: # label can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags)", "[] # read annotations for label_file in label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file)", "val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames)", "dc_tags = [] saved_filenames = [] assert len(ids) == len(images) == len(densities) for", "import numpy as np import pandas as pd from preprocess_dicom import dicom_preprocess from", "\"./data/dataset\" # Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3", "print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total :", "used.)\" ) 
group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids", "_curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, )", "# check that there is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) ==", "test_patients_ids)) == 0 ), \"Overlapping patients in train and test!\" assert ( len(np.intersect1d(val_patients_ids,", "# density labels # 1 - fatty # 2 - scattered fibroglandular density", "1) % 200 == 0: print(f\"processing {i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file", "lists saved wit prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique DICOM tags\", np.unique(dc_tags))", "_curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices],", "split train/validation dataset for n_clients \"\"\" # Split and avoid patient overlap unique_train_patients_ids", "images.\" ) # check that there were no duplicated files assert len(saved_filenames) ==", "either express or implied. 
# See the License for the specific language governing", "= image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True ) assert len(img_file)", "densities, process_image=True): data_list = [] dc_tags = [] saved_filenames = [] assert len(ids)", "# 4 - extremely dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list", "use first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images", "_dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted", "train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index]", "({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique DICOM", "the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in", "\"test2\": test_list, # like phase 2 - final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50", ") print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2. 
Split the data \"\"\" # shuffle", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "file path\"], return_index=True ) print( f\"including {len(unique_images)} unique images of {len(label_data['image file path'])}", "of all images {len(image_file_path)}!\" ) \"\"\" split train/validation dataset for n_clients \"\"\" #", "label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run preprocessing \"\"\"", "using roughly the same ratios as for challenge data: n_train_challenge = 60_000 n_val_challenge", "image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) *", "License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0", "saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames", "Run preprocessing \"\"\" \"\"\" 1. 
Load the label data \"\"\" random.seed(0) label_files =", "_success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + \".npy\"): _success = True", "data \"\"\" random.seed(0) label_files = [ os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root,", "will be removed at this point # use groups to avoid patient overlaps", "breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f\"Splitting into {n_splits} folds", "the License. import glob import json import os import random import numpy as", "# like phase 1 leaderboard \"test2\": test_list, # like phase 2 - final", "FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = \"./data/dataset\" # Input folders label_root =", "for n_clients \"\"\" # Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids", "dense # 4 - extremely dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True):", "train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use first fold train_images = train_val_images[train_index]", "= train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check", "image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found {n_patients} patients.\") #", "out_dataset_prefix = \"./data/dataset\" # Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients", "labels # 1 - fatty # 2 - scattered 
fibroglandular density # 3", "in enumerate(zip(ids, images, densities)): if (i + 1) % 200 == 0: print(f\"processing", "there is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), \"Overlapping", "n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. Preprocess the images", "{len(image_file_path)} data entries.\") \"\"\" 2. Split the data \"\"\" # shuffle data label_data", "= [] image_file_path = [] # read annotations for label_file in label_files: print(f\"add", "the License. # You may obtain a copy of the License at #", "# 1 - fatty # 2 - scattered fibroglandular density # 3 -", "glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True ) assert len(img_file) == 1, f\"No unique", "HERE out_dataset_prefix = \"./data/dataset\" # Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\"", "np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. 
Preprocess the", "- heterogeneously dense # 4 - extremely dense def preprocess(dicom_root, out_path, ids, images,", "(id, image, density) in enumerate(zip(ids, images, densities)): if (i + 1) % 200", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids =", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "\" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2.", "6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge +", "len(images) == len(densities) for i, (id, image, density) in enumerate(zip(ids, images, densities)): if", "breast_densities = [] patients_ids = [] image_file_path = [] # read annotations for", "just use first fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index]", "= preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted {len(train_list)} of", "for i, (id, image, density) in enumerate(zip(ids, images, densities)): if (i + 1)", "write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as f: json.dump(data_set, f, indent=4) print(f\"Data", "test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted", "and test!\" assert ( len(np.intersect1d(val_patients_ids, 
test_patients_ids)) == 0 ), \"Overlapping patients in validation", "val_list, # like phase 1 leaderboard \"test2\": test_list, # like phase 2 -", "* \"=\") print( f\"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images.\" ) #", "You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless", "train/val splits. (Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for", "* \"-\") print(f\"Total : {n_total}\") assert n_total == len(image_file_path), ( f\"mismatch between total", "train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio)))", "break # just use first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities", "1 - fatty # 2 - scattered fibroglandular density # 3 - heterogeneously", "folds for train/val splits. 
(Only the first fold is used.)\" ) group_kfold =", "[] for c in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training set of client", "return indices def main(): process_image = True # set False if dicoms have", "unique images of {len(label_data['image file path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException:", "n_patients = len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate splits using roughly the same", "n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f\"Splitting into {n_splits} folds for", "in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just use first fold test_images", "for both phases \"test1\": val_list, # like phase 1 leaderboard \"test2\": test_list, #", "== _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image = True # set False", "os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix +", "\".npy\", \"label\": int(density - 1), } ) saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags,", "{len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids,", "patients in train and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping", "density) in enumerate(zip(ids, images, densities)): if (i + 1) % 200 == 0:", "there were no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f\"Not all", "into {n_splits} folds for test split. (Only the first fold is used.)\" )", "# will stay the same for both phases \"test1\": val_list, # like phase", "n_clients) \"\"\" 3. 
Preprocess the images \"\"\" dc_tags = [] saved_filenames = []", "): break # just use first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index]", "preprocessing \"\"\" \"\"\" 1. Load the label data \"\"\" random.seed(0) label_files = [", "import GroupKFold # density labels # 1 - fatty # 2 - scattered", "image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index]", "with the License. # You may obtain a copy of the License at", "get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image,", "get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image,", "- final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print( f\"Successfully converted a", "= [] saved_filenames = [] for c in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing", "file path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image", "len(patients_ids) == len(image_file_path), ( f\"Mismatch between label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids: {len(patients_ids)},", "heterogeneously dense # 4 - extremely dense def preprocess(dicom_root, out_path, ids, images, densities,", "GroupKFold(n_splits=n_splits) for 
train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just", "and # limitations under the License. import glob import json import os import", "groups=train_val_patients_ids ): break # just use first fold train_images = train_val_images[train_index] train_patients_ids =", "), \"Overlapping patients in train and test!\" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0", "np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\"", "split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices],", "split images ({n_total})\" f\" and length of all images {len(image_file_path)}!\" ) \"\"\" split", "== len(patients_ids) == len(image_file_path), ( f\"Mismatch between label data, breast_densities: \" f\"{len(breast_densities)}, patients_ids:", "os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids = [] image_file_path =", "total split images ({n_total})\" f\" and length of all images {len(image_file_path)}!\" ) \"\"\"", "entries.\") \"\"\" 2. Split the data \"\"\" # shuffle data label_data = list(zip(breast_densities,", "n_clients \"\"\" # Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids =", "specific language governing permissions and # limitations under the License. 
import glob import", "patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index]", "print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image file path\"], return_index=True", "patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found", "= breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f\"Splitting into {n_splits}", "np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate splits", "+ len(val_images) + len(test_images) print(20 * \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val :", "= glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True ) assert len(img_file) == 1, f\"No", "data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\") as f:", "\"\"\" # shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path", "images ({n_total})\" f\" and length of all images {len(image_file_path)}!\" ) \"\"\" split train/validation", "0: print(f\"processing {i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name,", "entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) 
patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert", "indent=4) print(f\"Data list saved at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices = [] for", "( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in train and test!\" assert", "law or agreed to in writing, software # distributed under the License is", "image, density) in enumerate(zip(ids, images, densities)): if (i + 1) % 200 ==", "indices def main(): process_image = True # set False if dicoms have already", "the License for the specific language governing permissions and # limitations under the", "patients_ids = [] image_file_path = [] # read annotations for label_file in label_files:", "{len(unique_images)} unique images of {len(label_data['image file path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except", "split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3.", "final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print( f\"Successfully converted a total", "== 0 ), \"Overlapping patients in train and validation!\" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids))", "\"\"\" # Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids,", "leaderboard \"test2\": test_list, # like phase 2 - final leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set)", "= list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data", "overlap assert ( 
len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), \"Overlapping patients in train and", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "[] for _id in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices", "os.path.join(label_root, \"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids", "n_val_challenge + n_test_challenge ) # test cases will be removed at this point", "pandas as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold # density", "breast_densities, groups=patients_ids ): break # just use first fold test_images = image_file_path[test_index] test_patients_ids", "save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if", "http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software", "data_set) print(50 * \"=\") print( f\"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images.\"", "} ) saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file),", "= np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. 
Preprocess the images \"\"\" dc_tags = [] saved_filenames", "train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f\"Splitting into", "BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path),", "len(breast_densities) == len(patients_ids) == len(image_file_path), ( f\"Mismatch between label data, breast_densities: \" f\"{len(breast_densities)},", "== len(densities) for i, (id, image, density) in enumerate(zip(ids, images, densities)): if (i", "sklearn.model_selection import GroupKFold # density labels # 1 - fatty # 2 -", "40_000 test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio", "= np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients =", "splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path)", "label_file in label_files: print(f\"add {label_file}\") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data[\"image", "print(20 * \"-\") print(f\"Total : {n_total}\") assert n_total == len(image_file_path), ( f\"mismatch between", "images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list, # will stay the same", "] breast_densities = [] patients_ids = [] image_file_path = [] # read annotations", "validation images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing testing\") _curr_patient_ids = 
split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids)", "= int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f\"Splitting into {n_splits} folds for test", "val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there is no patient", "np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f\"Found {n_patients} patients.\") # generate splits using roughly the", "in compliance with the License. # You may obtain a copy of the", "\".npy\") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, \"w\")", "test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set =", "np.unique( label_data[\"image file path\"], return_index=True ) print( f\"including {len(unique_images)} unique images of {len(label_data['image", "images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids)", "int(density - 1), } ) saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags, saved_filenames def", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "= breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities =", "( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), \"Overlapping patients in train and validation!\" assert", ") saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True)", "len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), \"Overlapping patients in validation and test!\" n_total =", "== len( np.unique(saved_filenames) ), f\"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data", "in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image", "if dicoms have already been preprocessed out_path = \"./data/preprocessed\" # YOUR DEST FOLDER", "test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just use first fold", "+ 1) % 200 == 0: print(f\"processing {i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0]", "numpy as np import pandas as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection", "i, (id, image, density) in enumerate(zip(ids, images, densities)): if (i + 1) %", "See the License for the specific language governing permissions and # limitations under", "= GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "[] assert len(ids) == len(images) == len(densities) for i, (id, image, density) in", "train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there is no patient 
overlap assert", "of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or", "= \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients = 3 \"\"\" Run preprocessing \"\"\" \"\"\" 1. Load the", "and test!\" n_total = len(train_images) + len(val_images) + len(test_images) print(20 * \"-\") print(f\"Train", "f\"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix", "* \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)}", "\"label\": int(density - 1), } ) saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags, saved_filenames", "breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids =", "density # 3 - heterogeneously dense # 4 - extremely dense def preprocess(dicom_root,", "+ \".npy\"): _success = True else: _success = False _dc_tags = [] if", "_curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root,", "file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f\"Mismatch between label data,", "image_file_path: {len(image_file_path)}\" ) print(f\"Read {len(image_file_path)} data entries.\") \"\"\" 2. 
Split the data \"\"\"", ": {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total : {n_total}\") assert n_total == len(image_file_path),", "= image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path)", "if (i + 1) % 200 == 0: print(f\"processing {i+1} of {len(ids)}...\") dir_name", "[] if _success and density >= 1: # label can be 0 sometimes,", "for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use", "= get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices],", "for test split. (Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits)", "main(): process_image = True # set False if dicoms have already been preprocessed", "( n_val_challenge + n_test_challenge ) # test cases will be removed at this", "obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by", "indices = [] for _id in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist())", "and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids =", "= n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge", "= GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break #", "dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + \".npy\"):", "f\"Splitting into 
{n_splits} folds for train/val splits. (Only the first fold is used.)\"", "# Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids", "Copyright 2022 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the", "those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\": dir_name + \".npy\", \"label\": int(density", ") print(f\"Converted {len(test_list)} of {len(test_patients_ids)} testing images\") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { \"train\":", "(Only the first fold is used.)\" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index", "fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index]", "== len(image_file_path), ( f\"mismatch between total split images ({n_total})\" f\" and length of", "_dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + \".npy\"): _success = True else:", "image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True ) assert len(img_file) ==", "images \"\"\" dc_tags = [] saved_filenames = [] for c in range(n_clients): site_name", "breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index]", "data_list = [] dc_tags = [] saved_filenames = [] assert len(ids) == len(images)", "images, densities)): if (i + 1) % 200 == 0: print(f\"processing {i+1} of", "License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to", "dir_name, \"**\", \"*.dcm\"), recursive=True ) assert len(img_file) == 1, f\"No unique dicom image", 
"= 40_000 test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge )", "assert len(ids) == len(images) == len(densities) for i, (id, image, density) in enumerate(zip(ids,", "\"mass_case_description_train_set.csv\"), os.path.join(label_root, \"calc_case_description_train_set.csv\"), os.path.join(label_root, \"mass_case_description_test_set.csv\"), os.path.join(label_root, \"calc_case_description_test_set.csv\"), ] breast_densities = [] patients_ids =", "_curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices],", "Version 2.0 (the \"License\"); # you may not use this file except in", "f\"No unique dicom image found for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if process_image:", "= split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path,", "except in compliance with the License. 
# You may obtain a copy of", "({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix {out_dataset_prefix}\") print(50 * \"=\")", "preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f\"Converted {len(train_list)} of {len(train_patients_ids)}", "split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f\"Splitting into {n_splits} folds", "out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)} validation images\") dc_tags.extend(_dc_tags)", "ratios as for challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge =", "into {n_splits} folds for train/val splits. (Only the first fold is used.)\" )", "leaderboard } write_datalist(f\"{out_dataset_prefix}_{site_name}.json\", data_set) print(50 * \"=\") print( f\"Successfully converted a total {len(saved_filenames)}", "_dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(test_list)}", "be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\": dir_name", "with open(save_datalist_file, \"w\") as f: json.dump(data_set, f, indent=4) print(f\"Data list saved at {save_datalist_file}\")", "split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. 
Preprocess the images \"\"\" dc_tags = []", "False _dc_tags = [] if _success and density >= 1: # label can", "\"*.dcm\"), recursive=True ) assert len(img_file) == 1, f\"No unique dicom image found for", "f\"mismatch between total split images ({n_total})\" f\" and length of all images {len(image_file_path)}!\"", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #", ") print( f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\")", "f\"including {len(unique_images)} unique images of {len(label_data['image file path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices])", "may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required", "{len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True )", "import os import random import numpy as np import pandas as pd from", "len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), \"Overlapping patients in train and validation!\" assert (", "0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { \"patient_id\": id, \"image\": dir_name +", "1), } ) saved_filenames.append(dir_name + \".npy\") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set):", "+ n_test_challenge ) # test cases will be removed at this point #", "import pandas as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold #", "images, densities, process_image=True): data_list = [] dc_tags = [] saved_filenames = [] assert", "test_patients_ids = patients_ids[test_index] 
test_densities = breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids", "the specific language governing permissions and # limitations under the License. import glob", "= \"./data/dataset\" # Input folders label_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/\" dicom_root = \"/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM\" n_clients =", "converted a total {len(saved_filenames)} of {len(image_file_path)} images.\" ) # check that there were", "), \"Overlapping patients in validation and test!\" n_total = len(train_images) + len(val_images) +", "\"=\") print( f\"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images.\" ) # check", "dicom image found for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags", "200 == 0: print(f\"processing {i+1} of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file = glob.glob(", "\"\"\" 3. 
Preprocess the images \"\"\" dc_tags = [] saved_filenames = [] for", "a total {len(saved_filenames)} of {len(image_file_path)} images.\" ) # check that there were no", "val_densities = train_val_densities[val_index] # check that there is no patient overlap assert (", "testing\") _curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess(", "n_test_challenge = 40_000 test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge", "check that there were no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ),", "prefix {out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique DICOM tags\", np.unique(dc_tags)) if __name__ ==", "for {dir_name}!\" save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix)", "n_test_challenge ) # test cases will be removed at this point # use", "test!\" n_total = len(train_images) + len(val_images) + len(test_images) print(20 * \"-\") print(f\"Train :", "= True else: _success = False _dc_tags = [] if _success and density", "np import pandas as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold", "= preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f\"Converted {len(val_list)} of {len(val_patients_ids)}", "def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list = [] dc_tags = []", "train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use first", "f\" and length of all images {len(image_file_path)}!\" ) \"\"\" split train/validation dataset for", "print(f\"Total : {n_total}\") assert n_total == len(image_file_path), ( f\"mismatch between 
total split images", "3. Preprocess the images \"\"\" dc_tags = [] saved_filenames = [] for c", "{save_datalist_file}\") def get_indices(all_ids, search_ids): indices = [] for _id in search_ids: _indices =", ") try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities)", "{out_dataset_prefix}\") print(50 * \"=\") print(\"Processed unique DICOM tags\", np.unique(dc_tags)) if __name__ == \"__main__\":", "[] dc_tags = [] saved_filenames = [] assert len(ids) == len(images) == len(densities)", "# just use first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities =", "+ n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge", "preprocessed out_path = \"./data/preprocessed\" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix", "group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just use first fold test_images =", "except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices]) assert len(breast_densities) == len(patients_ids) ==", "search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image =", "print( f\"Splitting into {n_splits} folds for test split. 
(Only the first fold is", "{len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)\") print(20 * \"-\") print(f\"Total : {n_total}\") assert", "all images {len(image_file_path)}!\" ) \"\"\" split train/validation dataset for n_clients \"\"\" # Split", "image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file path\"][unique_indices])", "print( f\"Converted {len(train_list)} of {len(train_patients_ids)} training images\" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids", ") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print(\"Preprocessing validation\") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list,", "path'])} image entries\" ) try: breast_densities.extend(label_data[\"breast_density\"][unique_indices]) except BaseException: breast_densities.extend(label_data[\"breast density\"][unique_indices]) patients_ids.extend(label_data[\"patient_id\"][unique_indices]) image_file_path.extend(label_data[\"image file", "saved_filenames.extend(_saved_filenames) data_set = { \"train\": train_list, # will stay the same for both", "= os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix", "/ ( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge / (", "{ \"patient_id\": id, \"image\": dir_name + \".npy\", \"label\": int(density - 1), } )", "= 3 \"\"\" Run preprocessing \"\"\" \"\"\" 1. 
Load the label data \"\"\"", "= zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path =", ") group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ):", "print(20 * \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\") print(f\"Test :", "= [] for c in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training set of", "- fatty # 2 - scattered fibroglandular density # 3 - heterogeneously dense", "+ \".npy\", \"label\": int(density - 1), } ) saved_filenames.append(dir_name + \".npy\") return data_list,", "n_clients = 3 \"\"\" Run preprocessing \"\"\" \"\"\" 1. Load the label data", "dicoms have already been preprocessed out_path = \"./data/preprocessed\" # YOUR DEST FOLDER SHOULD", "np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids", "False if dicoms have already been preprocessed out_path = \"./data/preprocessed\" # YOUR DEST", "+ len(test_images) print(20 * \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\") print(f\"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)\")", "= len(train_images) + len(val_images) + len(test_images) print(20 * \"-\") print(f\"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)\")", "ids, images, densities, process_image=True): data_list = [] dc_tags = [] saved_filenames = []", "= [] assert len(ids) == len(images) == len(densities) for i, (id, image, density)", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "# shuffle data label_data = 
list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path =", "fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits", "c in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training set of client {site_name}\") _curr_patient_ids", "train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that", "[] image_file_path = [] # read annotations for label_file in label_files: print(f\"add {label_file}\")", "= train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids =", "of {len(ids)}...\") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, \"**\", \"*.dcm\"), recursive=True", "at {save_datalist_file}\") def get_indices(all_ids, search_ids): indices = [] for _id in search_ids: _indices", "= [] for _id in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return", "{n_splits} folds for train/val splits. 
(Only the first fold is used.)\" ) group_kfold", "= False _dc_tags = [] if _success and density >= 1: # label", "client {site_name}\") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames =", "saved_filenames = [] for c in range(n_clients): site_name = f\"site-{c+1}\" print(f\"Preprocessing training set", "process_image=True): data_list = [] dc_tags = [] saved_filenames = [] assert len(ids) ==", "generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!\" print(f\"Data lists saved wit prefix {out_dataset_prefix}\") print(50", "n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge )", "val_patients_ids)) == 0 ), \"Overlapping patients in train and validation!\" assert ( len(np.intersect1d(train_patients_ids,", "between total split images ({n_total})\" f\" and length of all images {len(image_file_path)}!\" )", "(len(image_file_path) * test_ratio))) print( f\"Splitting into {n_splits} folds for test split. (Only the", "unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) \"\"\" 3. Preprocess the images \"\"\"", "len(ids) == len(images) == len(densities) for i, (id, image, density) in enumerate(zip(ids, images," ]
[ "bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]]", "[1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } } } }", "Optional[str] username: Optional[str] email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str]", "2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false } } } } }", "import models ALL = 'all' class JwtData(TypedDict): iat: int nbf: int jti: str", "\"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\": {", "\"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\":", "ALL = 'all' class JwtData(TypedDict): iat: int nbf: int jti: str exp: str", "Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example", "\"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]},", "str fresh: str user_id: int email: str username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List,", "\"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1,", "UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str] last_name: Optional[str]", "} } } } \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: 
str label_attr:", "AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str] last_name: Optional[str] username: Optional[str] email: Optional[str]", "\"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": {", "last_name: Optional[str] username: Optional[str] email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined:", "username: Optional[str] email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs:", "Optional[str] last_name: Optional[str] username: Optional[str] email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool]", "\"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } } } } } ]", "username: str email: str is_active: bool is_staff: bool is_superuser: bool date_joined: str app_specific_configs:", "\"data\": [ { \"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\",", "= dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str] last_name: Optional[str] username:", "} } } } } } \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr:", "class JwtData(TypedDict): iat: int nbf: int jti: str exp: str type: str fresh:", "is_active: bool is_staff: bool is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [", "UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\",", "dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name: 
Optional[str] last_name: Optional[str] username: Optional[str]", "[1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false } } } }", "bool is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\": 12,", "\"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]},", "is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a user record from", "Any, Optional, Type from django.db import models ALL = 'all' class JwtData(TypedDict): iat:", "Example of a user record from kafka: { \"first_name\": \"str\", \"last_name\": \"str\", \"username\":", "idp_user_id: int first_name: Optional[str] last_name: Optional[str] username: Optional[str] email: Optional[str] is_active: Optional[bool] is_staff:", "\"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false } } } } } }", "is_staff: bool is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\":", "false } } } } } } \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]]", "bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\": 12, \"first_name\": \"str\",", "iat: int nbf: int jti: str exp: str type: str fresh: str user_id:", "dict[str, Union[bool, Any]] Role = str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id:", "date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a user record from kafka: {", "email: str is_active: bool is_staff: bool is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\"", "import TypedDict, Union, List, Any, Optional, Type from django.db import models ALL 
=", "Union[List, bool] cash_flow_projection: Union[List, bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]]", "cash_flow_projection: Union[List, bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str,", "\"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]},", "\"synchronizeDoD\": false } } } } } } \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str,", "\"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\":", "first_name: str last_name: str username: str email: str is_active: bool is_staff: bool is_superuser:", "UserRecordAppSpecificConfigs \"\"\" Example of a user record from kafka: { \"first_name\": \"str\", \"last_name\":", "\"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false", "of a user record from kafka: { \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\",", "TenantIdentifier = str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name:", "class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection: Union[List, bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict):", "first_name: Optional[str] last_name: Optional[str] username: Optional[str] email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser:", "str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection: Union[List, bool] notes_manager: 
Union[List, bool] class", "Optional[str] email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs", "\"app_specific_configs\": { \"app_identifier\": { \"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\":", "\"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false", "{ \"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\":", "\"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\":", "\"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\"", "int jti: str exp: str type: str fresh: str user_id: int email: str", "= str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id: int first_name: str last_name:", "{ \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\":", "\"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false } } } } } } \"\"\" class", "} } \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: str label_attr: str class", "=== AppIdentifier = str TenantIdentifier = str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class", "\"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\": { \"Servicer\":", "user record from kafka: { 
\"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\",", "Type[models.Model]] identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type: str record_identifier: Any", "str username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection: Union[List, bool] notes_manager: Union[List,", "{ \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\":", "List, Any, Optional, Type from django.db import models ALL = 'all' class JwtData(TypedDict):", "model: Union[str, Type[models.Model]] identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type: str", "} } } ] \"\"\" # === AppIdentifier = str TenantIdentifier = str", "class UserTenantData(TypedDict): idp_user_id: int first_name: str last_name: str username: str email: str is_active:", "user_id: int email: str username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection: Union[List,", "Union[List, bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool,", "AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id: int first_name: str last_name: str username: str email: str", "= str TenantIdentifier = str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id:", "\"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\":", "} } } \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: str label_attr: str", "\"last_name\": \"str\", 
\"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\":", "jti: str exp: str type: str fresh: str user_id: int email: str username:", "# === AppIdentifier = str TenantIdentifier = str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]]", "= 'all' class JwtData(TypedDict): iat: int nbf: int jti: str exp: str type:", "JwtData(TypedDict): iat: int nbf: int jti: str exp: str type: str fresh: str", "AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type:", "Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a user record from kafka: { \"first_name\":", "nbf: int jti: str exp: str type: str fresh: str user_id: int email:", "UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection: Union[List, bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions:", "a user record from kafka: { \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\":", "Type from django.db import models ALL = 'all' class JwtData(TypedDict): iat: int nbf:", "UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id: int first_name: str last_name: str username:", "Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]] Role =", "str exp: str type: str fresh: str user_id: int email: str username: str", "exp: str type: str fresh: str user_id: int email: str username: str class", "type: str fresh: str user_id: int email: str username: str class UserFeaturesPermissions(TypedDict): dod_manager:", "app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\": 12, 
\"first_name\": \"str\", \"last_name\": \"str\", \"username\":", "\"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": {", "\"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1,", "bool] cash_flow_projection: Union[List, bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions:", "\"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]},", "\"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier:", "bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]] Role = str", "} } ] \"\"\" # === AppIdentifier = str TenantIdentifier = str UserRecordAppSpecificConfigs", "Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a user record from kafka:", "\"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } }", "Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]] Role = str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs]", "str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type: str record_identifier: Any deleted: bool", "fresh: str user_id: int email: str username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool]", "email: str username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, 
bool] cash_flow_projection: Union[List, bool] notes_manager:", "false } } } } } ] \"\"\" # === AppIdentifier = str", "Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a user record", "str is_active: bool is_staff: bool is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\":", "'all' class JwtData(TypedDict): iat: int nbf: int jti: str exp: str type: str", "app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]] Role = str UserAppSpecificConfigs = dict[Role,", "{\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false } } }", "email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\"", "identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type: str record_identifier: Any deleted:", "{ \"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\",", "\"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\":", "\"app_identifier\": { \"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\":", "\"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\": { \"Servicer\": { \"app_entities_restrictions\":", "Optional, Type from django.db import models ALL = 'all' class JwtData(TypedDict): iat: int", "\"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\": { 
\"app_entities_restrictions\":", "\"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\":", "class UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str] last_name: Optional[str] username: Optional[str] email: Optional[str] is_active:", "{\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } } } } } ] \"\"\" # ===", "str user_id: int email: str username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection:", "str email: str is_active: bool is_staff: bool is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs", "\"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\":", "\"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": {", "[ { \"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\":", "str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id: int first_name: str last_name: str", "str TenantIdentifier = str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int", "label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type: str record_identifier: Any deleted: bool label:", "} } } } } ] \"\"\" # === AppIdentifier = str TenantIdentifier", "str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\": \"str\",", "Union[bool, Any]] Role = str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): 
idp_user_id: int", "AppIdentifier = str TenantIdentifier = str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict):", "\"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"tenant\": { \"Servicer\": {", "} } } } } \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: str", "int first_name: Optional[str] last_name: Optional[str] username: Optional[str] email: Optional[str] is_active: Optional[bool] is_staff: Optional[bool]", "from kafka: { \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\",", "dod_manager: Union[List, bool] cash_flow_projection: Union[List, bool] notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str,", "{\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } } }", "\"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\":", "int email: str username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection: Union[List, bool]", "} \"\"\" class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict):", "\"synchronizeDoD\": false } } } } } ] \"\"\" # === AppIdentifier =", "typing import TypedDict, Union, List, Any, Optional, Type from django.db import models ALL", "kafka: { \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\":", "last_name: str username: str email: str is_active: bool is_staff: bool is_superuser: bool date_joined:", "{ \"app_identifier\": { 
\"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": {", "\"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\",", "[1]}, \"synchronizeDoD\": false } } } } } } \"\"\" class AppEntityTypeConfig(TypedDict): model:", "Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a", "<filename>idp_user/typing.py from typing import TypedDict, Union, List, Any, Optional, Type from django.db import", "str type: str fresh: str user_id: int email: str username: str class UserFeaturesPermissions(TypedDict):", "from typing import TypedDict, Union, List, Any, Optional, Type from django.db import models", "UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str] last_name: Optional[str] username: Optional[str] email: Optional[str] is_active: Optional[bool]", "= str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str]", "} } } } ] \"\"\" # === AppIdentifier = str TenantIdentifier =", "models ALL = 'all' class JwtData(TypedDict): iat: int nbf: int jti: str exp:", "str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type: str record_identifier: Any deleted: bool label: Optional[str]", "Role = str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id: int first_name: str", "{\"vehicle\": [1]}, \"synchronizeDoD\": false } } } } } } \"\"\" class AppEntityTypeConfig(TypedDict):", "permission_restrictions: dict[str, Union[bool, Any]] Role = str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict):", "12, \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", 
\"is_active\": \"bool\", \"is_staff\": \"bool\",", "dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str] last_name: Optional[str] username: Optional[str] email:", "\"\"\" Example of a user record from kafka: { \"first_name\": \"str\", \"last_name\": \"str\",", "is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\": 12, \"first_name\":", "bool is_staff: bool is_superuser: bool date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ {", "\"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } } } } } ] \"\"\" #", "{ \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\",", "\"bool\", \"is_staff\": \"bool\", \"is_superuser\": \"bool\", \"date_joined\": \"datetime\" \"app_specific_configs\": { \"app_identifier\": { \"Servicer\": {", "2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } } } } }", "{ \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false } } } } } } \"\"\"", "str username: str email: str is_active: bool is_staff: bool is_superuser: bool date_joined: str", "= dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id: int first_name: str last_name: str username: str", "class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]] Role = str UserAppSpecificConfigs", "from django.db import models ALL = 'all' class JwtData(TypedDict): iat: int nbf: int", "idp_user_id: int first_name: str last_name: str username: str email: str is_active: bool is_staff:", "username: str class UserFeaturesPermissions(TypedDict): dod_manager: Union[List, bool] cash_flow_projection: Union[List, bool] notes_manager: Union[List, bool]", "dict[Role, AppSpecificConfigs] class 
UserTenantData(TypedDict): idp_user_id: int first_name: str last_name: str username: str email:", "notes_manager: Union[List, bool] class AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]] Role", "{ \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false }", "AppSpecificConfigs(TypedDict): app_entities_restrictions: Optional[dict[str, list]] permission_restrictions: dict[str, Union[bool, Any]] Role = str UserAppSpecificConfigs =", "app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a user record from kafka: { \"first_name\": \"str\",", "{ \"tenant\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\":", "[1]}, \"synchronizeDoD\": false } } } } } ] \"\"\" # === AppIdentifier", "is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of a user", "\"\"\" # === AppIdentifier = str TenantIdentifier = str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier,", "int first_name: str last_name: str username: str email: str is_active: bool is_staff: bool", "\"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\": \"bool\", \"is_staff\": \"bool\", \"is_superuser\":", "{ \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false }", "{ \"viewDoD\": {\"vehicle_ids\": [1]}, \"synchronizeDoD\": false } } } } } ] \"\"\"", "\"\"\" \"data\": [ { \"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\":", "Any]] Role = str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class UserTenantData(TypedDict): idp_user_id: int 
first_name:", "str UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]] class UserRecordDict(TypedDict): idp_user_id: int first_name: Optional[str] last_name:", "TypedDict, Union, List, Any, Optional, Type from django.db import models ALL = 'all'", "class AppEntityTypeConfig(TypedDict): model: Union[str, Type[models.Model]] identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str", "\"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\": {\"vehicle\": [1]}, \"synchronizeDoD\": false } }", "str last_name: str username: str email: str is_active: bool is_staff: bool is_superuser: bool", "record from kafka: { \"first_name\": \"str\", \"last_name\": \"str\", \"username\": \"str\", \"email\": \"str\", \"is_active\":", "UserTenantData(TypedDict): idp_user_id: int first_name: str last_name: str username: str email: str is_active: bool", "} ] \"\"\" # === AppIdentifier = str TenantIdentifier = str UserRecordAppSpecificConfigs =", "is_active: Optional[bool] is_staff: Optional[bool] is_superuser: Optional[bool] date_joined: Optional[str] app_specific_configs: UserRecordAppSpecificConfigs \"\"\" Example of", "\"app_specific_configs\": { \"app_identifier\": { \"Servicer\": { \"app_entities_restrictions\": {\"vehicle\": [1, 2]}, \"permission_restrictions\": { \"viewDoD\":", "int nbf: int jti: str exp: str type: str fresh: str user_id: int", "date_joined: str app_specific_configs: UserAppSpecificConfigs \"\"\" \"data\": [ { \"idp_user_id\": 12, \"first_name\": \"str\", \"last_name\":", "list]] permission_restrictions: dict[str, Union[bool, Any]] Role = str UserAppSpecificConfigs = dict[Role, AppSpecificConfigs] class", "django.db import models ALL = 'all' class JwtData(TypedDict): iat: int nbf: int jti:", "Union, List, Any, Optional, Type from django.db import models ALL = 'all' class", "] \"\"\" # === AppIdentifier = str TenantIdentifier = str 
UserRecordAppSpecificConfigs = dict[AppIdentifier,", "Union[str, Type[models.Model]] identifier_attr: str label_attr: str class AppEntityRecordEventDict(TypedDict): app_identifier: str app_entity_type: str record_identifier:" ]
[ "from ..const import ContextType, IssueType from .base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class", "ContextType.ADDON @property def states(self) -> List[CoreState]: \"\"\"Return a list of valid states when", "from ...jobs.decorator import Job from ..const import ContextType, IssueType from .base import CheckBase", "List, Optional from ...const import CoreState from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator", "IssueType from .base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM],", "None) -> bool: \"\"\"Approve check if it is affected by issue.\"\"\" return False", "False @property def issue(self) -> IssueType: \"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED @property", "class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def", "Optional[str] = None) -> bool: \"\"\"Approve check if it is affected by issue.\"\"\"", "not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] = None) ->", "a ContextType enum.\"\"\" return ContextType.ADDON @property def states(self) -> List[CoreState]: \"\"\"Return a list", "by issue.\"\"\" return False @property def issue(self) -> IssueType: \"\"\"Return a IssueType enum.\"\"\"", "-> List[CoreState]: \"\"\"Return a list of valid states when this check can run.\"\"\"", "Job from ..const import ContextType, IssueType from .base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned", "return ContextType.ADDON @property def states(self) -> List[CoreState]: \"\"\"Return a list of valid states", "ContextType, IssueType from .base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for 
check.\"\"\" @Job(", "\"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) ->", "@Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) -> None: \"\"\"Run check if", "-> ContextType: \"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON @property def states(self) -> List[CoreState]:", "issue(self) -> IssueType: \"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED @property def context(self) ->", "to check core security.\"\"\" from datetime import timedelta from typing import List, Optional", "def approve_check(self, reference: Optional[str] = None) -> bool: \"\"\"Approve check if it is", "IssueType enum.\"\"\" return IssueType.PWNED @property def context(self) -> ContextType: \"\"\"Return a ContextType enum.\"\"\"", "enum.\"\"\" return ContextType.ADDON @property def states(self) -> List[CoreState]: \"\"\"Return a list of valid", "import timedelta from typing import List, Optional from ...const import CoreState from ...jobs.const", "JobExecutionLimit from ...jobs.decorator import Job from ..const import ContextType, IssueType from .base import", "..const import ContextType, IssueType from .base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for", "check if it is affected by issue.\"\"\" return False @property def issue(self) ->", "ContextType: \"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON @property def states(self) -> List[CoreState]: \"\"\"Return", "return IssueType.PWNED @property def context(self) -> ContextType: \"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON", "-> IssueType: \"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED @property def context(self) -> ContextType:", "class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], 
limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) -> None:", "approve_check(self, reference: Optional[str] = None) -> bool: \"\"\"Approve check if it is affected", "CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self)", "async def approve_check(self, reference: Optional[str] = None) -> bool: \"\"\"Approve check if it", "issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] = None) -> bool: \"\"\"Approve check", "\"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED @property def context(self) -> ContextType: \"\"\"Return a", "def context(self) -> ContextType: \"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON @property def states(self)", "-> bool: \"\"\"Approve check if it is affected by issue.\"\"\" return False @property", "async def run_check(self) -> None: \"\"\"Run check if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM])", "CoreState from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import Job from ..const import", "if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] = None)", "@Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] = None) -> bool: \"\"\"Approve check if", "a IssueType enum.\"\"\" return IssueType.PWNED @property def context(self) -> ContextType: \"\"\"Return a ContextType", "context(self) -> ContextType: \"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON @property def states(self) ->", "run_check(self) -> None: \"\"\"Run check if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def", "def issue(self) -> IssueType: 
\"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED @property def context(self)", "@property def states(self) -> List[CoreState]: \"\"\"Return a list of valid states when this", "IssueType.PWNED @property def context(self) -> ContextType: \"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON @property", "def states(self) -> List[CoreState]: \"\"\"Return a list of valid states when this check", "import CoreState from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import Job from ..const", "by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] = None) -> bool: \"\"\"Approve", "\"\"\"Approve check if it is affected by issue.\"\"\" return False @property def issue(self)", "return False @property def issue(self) -> IssueType: \"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED", "@property def context(self) -> ContextType: \"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON @property def", "import List, Optional from ...const import CoreState from ...jobs.const import JobCondition, JobExecutionLimit from", "timedelta from typing import List, Optional from ...const import CoreState from ...jobs.const import", "...const import CoreState from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import Job from", "@property def issue(self) -> IssueType: \"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED @property def", "\"\"\"Return a list of valid states when this check can run.\"\"\" return [CoreState.RUNNING]", "enum.\"\"\" return IssueType.PWNED @property def context(self) -> ContextType: \"\"\"Return a ContextType enum.\"\"\" return", "...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import Job from ..const import ContextType, IssueType", "is affected by issue.\"\"\" return False @property def issue(self) -> IssueType: \"\"\"Return a", "affected by issue.\"\"\" return False 
@property def issue(self) -> IssueType: \"\"\"Return a IssueType", "check if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] =", "check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) -> None: \"\"\"Run check", "import Job from ..const import ContextType, IssueType from .base import CheckBase class CheckAddonPwned(CheckBase):", "it is affected by issue.\"\"\" return False @property def issue(self) -> IssueType: \"\"\"Return", "def run_check(self) -> None: \"\"\"Run check if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async", "throttle_period=timedelta(hours=24), ) async def run_check(self) -> None: \"\"\"Run check if not affected by", "core security.\"\"\" from datetime import timedelta from typing import List, Optional from ...const", "\"\"\"Run check if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str]", "if it is affected by issue.\"\"\" return False @property def issue(self) -> IssueType:", "security.\"\"\" from datetime import timedelta from typing import List, Optional from ...const import", "= None) -> bool: \"\"\"Approve check if it is affected by issue.\"\"\" return", "\"\"\"Helpers to check core security.\"\"\" from datetime import timedelta from typing import List,", "...jobs.decorator import Job from ..const import ContextType, IssueType from .base import CheckBase class", ".base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24),", "from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import Job from ..const import ContextType,", "Optional from ...const import 
CoreState from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import", "None: \"\"\"Run check if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference:", "datetime import timedelta from typing import List, Optional from ...const import CoreState from", "-> None: \"\"\"Run check if not affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self,", "CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async", "JobCondition, JobExecutionLimit from ...jobs.decorator import Job from ..const import ContextType, IssueType from .base", "for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) -> None: \"\"\"Run", "affected by issue.\"\"\" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] = None) -> bool:", "typing import List, Optional from ...const import CoreState from ...jobs.const import JobCondition, JobExecutionLimit", ") async def run_check(self) -> None: \"\"\"Run check if not affected by issue.\"\"\"", "from typing import List, Optional from ...const import CoreState from ...jobs.const import JobCondition,", "from .base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE,", "import ContextType, IssueType from .base import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\"", "check core security.\"\"\" from datetime import timedelta from typing import List, Optional from", "ContextType enum.\"\"\" return ContextType.ADDON @property def states(self) -> 
List[CoreState]: \"\"\"Return a list of", "IssueType: \"\"\"Return a IssueType enum.\"\"\" return IssueType.PWNED @property def context(self) -> ContextType: \"\"\"Return", "List[CoreState]: \"\"\"Return a list of valid states when this check can run.\"\"\" return", "from ...const import CoreState from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import Job", "limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) -> None: \"\"\"Run check if not affected", "from datetime import timedelta from typing import List, Optional from ...const import CoreState", "reference: Optional[str] = None) -> bool: \"\"\"Approve check if it is affected by", "\"\"\"Return a ContextType enum.\"\"\" return ContextType.ADDON @property def states(self) -> List[CoreState]: \"\"\"Return a", "import JobCondition, JobExecutionLimit from ...jobs.decorator import Job from ..const import ContextType, IssueType from", "conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) -> None: \"\"\"Run check if not", "issue.\"\"\" return False @property def issue(self) -> IssueType: \"\"\"Return a IssueType enum.\"\"\" return", "import CheckBase class CheckAddonPwned(CheckBase): \"\"\"CheckAddonPwned class for check.\"\"\" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), )", "states(self) -> List[CoreState]: \"\"\"Return a list of valid states when this check can", "bool: \"\"\"Approve check if it is affected by issue.\"\"\" return False @property def" ]
[ "for World Air Quality Index (http://aqicn.org). Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development", "setuptools based setup module.\"\"\" from setuptools import setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly", ":: MIT License', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python", "Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'License ::", "'Development Status :: 4 - Beta', 'License :: OSI Approved :: MIT License',", "setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly python API for World", "OSI Approved :: MIT License', 'Programming Language :: Python :: 3.4', 'Programming Language", "Status :: 4 - Beta', 'License :: OSI Approved :: MIT License', 'Programming", "module.\"\"\" from setuptools import setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API for", ":: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.4', 'Programming", ":: Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author = 'andrey-git', author_email", "Index (http://aqicn.org). Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status :: 4 -", "name='waqiasync', version='1.0.0', description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly python API for World Air", "- Beta', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python", "API for aqicn.org', long_description='asyncio-friendly python API for World Air Quality Index (http://aqicn.org). 
Requires", "3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6',", "long_description='asyncio-friendly python API for World Air Quality Index (http://aqicn.org). Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async',", "Quality Index (http://aqicn.org). Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status :: 4", "description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly python API for World Air Quality Index", "for aqicn.org', long_description='asyncio-friendly python API for World Air Quality Index (http://aqicn.org). Requires Python", "Approved :: MIT License', 'Programming Language :: Python :: 3.4', 'Programming Language ::", "'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming", "Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python", ":: 4 - Beta', 'License :: OSI Approved :: MIT License', 'Programming Language", "Air Quality Index (http://aqicn.org). 
Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status ::", "3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI", "Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'License", "setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly python API", "Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language", ":: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language ::", "3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author = 'andrey-git', author_email = '<EMAIL>', packages=find_packages()", "from setuptools import setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API for aqicn.org',", "aqicn.org', long_description='asyncio-friendly python API for World Air Quality Index (http://aqicn.org). 
Requires Python 3.4+',", ":: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python ::", "Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='waqi',", "\"\"\"A setuptools based setup module.\"\"\" from setuptools import setup, find_packages setup( name='waqiasync', version='1.0.0',", ":: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp',", "import setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly python", ":: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True,", "'Programming Language :: Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author =", "based setup module.\"\"\" from setuptools import setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python", "python API for aqicn.org', long_description='asyncio-friendly python API for World Air Quality Index (http://aqicn.org).", "Python :: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'],", "World Air Quality Index (http://aqicn.org). 
Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status", "Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author = 'andrey-git', author_email =", "version='1.0.0', description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly python API for World Air Quality", "license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved ::", "'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.4',", "'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ],", "License', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5',", "MIT License', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python ::", "python API for World Air Quality Index (http://aqicn.org). Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT',", "Language :: Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author = 'andrey-git',", "3.5', 'Programming Language :: Python :: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author", "Beta', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python ::", "<gh_stars>1-10 \"\"\"A setuptools based setup module.\"\"\" from setuptools import setup, find_packages setup( name='waqiasync',", "(http://aqicn.org). Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status :: 4 - Beta',", "API for World Air Quality Index (http://aqicn.org). 
Requires Python 3.4+', url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[", "setuptools import setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly", "4 - Beta', 'License :: OSI Approved :: MIT License', 'Programming Language ::", "setup module.\"\"\" from setuptools import setup, find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API", ":: 3.6', ], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author = 'andrey-git', author_email = '<EMAIL>',", "find_packages setup( name='waqiasync', version='1.0.0', description='asyncio-friendly python API for aqicn.org', long_description='asyncio-friendly python API for", "classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: MIT", "], keywords='waqi', install_requires=['aiohttp', 'async_timeout'], zip_safe=True, author = 'andrey-git', author_email = '<EMAIL>', packages=find_packages() )", "url='https://github.com/andrey-git/waqi-async', license='MIT', classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved" ]
[ "serializer): \"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the", "def get_queryset(self): \"\"\"return objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create", "= (IsAuthenticated,) def get_queryset(self): \"\"\"return objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self,", "from recipe import serializers # tag and ingredients are attributes of a recipe", "\"\"\"Manage tags in the database\"\"\" queryset = Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet):", "import Tag, Ingredient from recipe import serializers # tag and ingredients are attributes", "are attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned", "user-owned recipe attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return objects", "queryset = Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in Database\"\"\" queryset", "# tag and ingredients are attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin):", "import serializers # tag and ingredients are attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet,", "(IsAuthenticated,) def get_queryset(self): \"\"\"return objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer):", "attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): 
\"\"\"Base ViewSet for user-owned recipe", "tag and ingredients are attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base", "serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in Database\"\"\" queryset = Ingredient.objects.all() serializer_class = serializers.IngredientSerializer", "for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create a new object\"\"\" return", "tags in the database\"\"\" queryset = Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage", "BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned recipe attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes", "permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def", "of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned recipe attributes\"\"\"", "and ingredients are attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet", "perform_create(self, serializer): \"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in", "IsAuthenticated from core.models import Tag, Ingredient from recipe import serializers # tag and", "attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return objects for user", "\"\"\"Base ViewSet for user-owned recipe attributes\"\"\" 
authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def", "class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\" queryset = Tag.objects.all() serializer_class = serializers.TagSerializer", "ViewSet for user-owned recipe attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self):", "new object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\" queryset =", "in the database\"\"\" queryset = Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients", "core.models import Tag, Ingredient from recipe import serializers # tag and ingredients are", "mixins from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated from core.models import Tag,", "rest_framework import viewsets, mixins from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated from", "Ingredient from recipe import serializers # tag and ingredients are attributes of a", "return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet):", "from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated from core.models import Tag, Ingredient", "mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned recipe attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes =", "database\"\"\" queryset = Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in 
Database\"\"\"", "from rest_framework.permissions import IsAuthenticated from core.models import Tag, Ingredient from recipe import serializers", "serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\" queryset = Tag.objects.all() serializer_class =", "Tag, Ingredient from recipe import serializers # tag and ingredients are attributes of", "user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user)", "rest_framework.permissions import IsAuthenticated from core.models import Tag, Ingredient from recipe import serializers #", "import viewsets, mixins from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated from core.models", "Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in Database\"\"\" queryset = Ingredient.objects.all()", "rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated from core.models import Tag, Ingredient from", "mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned recipe attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,)", "serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in Database\"\"\" queryset = Ingredient.objects.all() serializer_class", "viewsets, mixins from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated from core.models import", "= serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in Database\"\"\" queryset = Ingredient.objects.all() serializer_class =", "from core.models import Tag, 
Ingredient from recipe import serializers # tag and ingredients", "a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned recipe attributes\"\"\" authentication_classes", "import IsAuthenticated from core.models import Tag, Ingredient from recipe import serializers # tag", "TokenAuthentication from rest_framework.permissions import IsAuthenticated from core.models import Tag, Ingredient from recipe import", "from rest_framework import viewsets, mixins from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated", "self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage", "TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\" queryset = Tag.objects.all() serializer_class = serializers.TagSerializer class", "recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned recipe attributes\"\"\" authentication_classes =", "recipe import serializers # tag and ingredients are attributes of a recipe class", "a new object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\" queryset", "the database\"\"\" queryset = Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in", "only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user) class", "def perform_create(self, serializer): \"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user) 
class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags", "= (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return objects for user only\"\"\" return", "serializers # tag and ingredients are attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,", "ingredients are attributes of a recipe class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for", "= Tag.objects.all() serializer_class = serializers.TagSerializer class IngredientViewSet(BaseRecipeAttrViewSet): \"\"\"Manage Ingredients in Database\"\"\" queryset =", "for user-owned recipe attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return", "authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return objects for user only\"\"\"", "\"\"\"Create a new object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\"", "\"\"\"return objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create a new", "import TokenAuthentication from rest_framework.permissions import IsAuthenticated from core.models import Tag, Ingredient from recipe", "class BaseRecipeAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin): \"\"\"Base ViewSet for user-owned recipe attributes\"\"\" authentication_classes = (TokenAuthentication,)", "get_queryset(self): \"\"\"return objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, serializer): \"\"\"Create a", "objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\") def perform_create(self, 
serializer): \"\"\"Create a new object\"\"\"", "object\"\"\" return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\" queryset = Tag.objects.all()", "(TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return objects for user only\"\"\" return self.queryset.filter(user=self.request.user).order_by(\"-name\")", "recipe attributes\"\"\" authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get_queryset(self): \"\"\"return objects for", "return serializer.save(user=self.request.user) class TagViewSet(BaseRecipeAttrViewSet): \"\"\"Manage tags in the database\"\"\" queryset = Tag.objects.all() serializer_class" ]
[ "= transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport = transport self.clients[transport] = None def data_received(self,", "loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address) server", "OSError: if os.path.exists(server_address): raise loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro", "self.transport.close() del self.clients[self.transport] server_address = sys.argv[1] try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise", "# print('data received: {}'.format(data.decode())) for transport in self.clients: if transport == self.transport: pass", "= {} def connection_made(self, transport): peername = transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport =", "raise loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address)", "= loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try: loop.run_forever() except KeyboardInterrupt:", "try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer,", "import os class EchoServer(asyncio.Protocol): clients = {} def connection_made(self, transport): peername = transport.get_extra_info('peername')", "asyncio import sys import os class EchoServer(asyncio.Protocol): clients = {} def connection_made(self, transport):", "None def data_received(self, data): # print('data received: {}'.format(data.decode())) for transport in self.clients: if", "{} def connection_made(self, transport): peername = transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport = 
transport", "= loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving on", "import sys import os class EchoServer(asyncio.Protocol): clients = {} def connection_made(self, transport): peername", "connection_made(self, transport): peername = transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport = transport self.clients[transport] =", "print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address = sys.argv[1] try: os.unlink(server_address) except OSError: if", "server_address = sys.argv[1] try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise loop = asyncio.get_event_loop()", "lost\") self.transport.close() del self.clients[self.transport] server_address = sys.argv[1] try: os.unlink(server_address) except OSError: if os.path.exists(server_address):", "class EchoServer(asyncio.Protocol): clients = {} def connection_made(self, transport): peername = transport.get_extra_info('peername') print('connection from", "transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport = transport self.clients[transport] = None def data_received(self, data):", "transport self.clients[transport] = None def data_received(self, data): # print('data received: {}'.format(data.decode())) for transport", "loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try: loop.run_forever() except KeyboardInterrupt: print(\"exit\")", "transport): peername = transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport = transport self.clients[transport] = None", "data_received(self, data): # print('data received: {}'.format(data.decode())) for transport in self.clients: if transport ==", "in self.clients: if transport == 
self.transport: pass #continue transport.write(data) def connection_lost(self, exc): print(\"connection", "from {}'.format(peername)) self.transport = transport self.clients[transport] = None def data_received(self, data): # print('data", "= loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try: loop.run_forever() except KeyboardInterrupt: print(\"exit\") finally: server.close() loop.close()", "print('data received: {}'.format(data.decode())) for transport in self.clients: if transport == self.transport: pass #continue", "{}'.format(data.decode())) for transport in self.clients: if transport == self.transport: pass #continue transport.write(data) def", "#!/usr/bin/env python3 import asyncio import sys import os class EchoServer(asyncio.Protocol): clients = {}", "for transport in self.clients: if transport == self.transport: pass #continue transport.write(data) def connection_lost(self,", "= sys.argv[1] try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise loop = asyncio.get_event_loop() #coro", "transport == self.transport: pass #continue transport.write(data) def connection_lost(self, exc): print(\"connection lost\") self.transport.close() del", "== self.transport: pass #continue transport.write(data) def connection_lost(self, exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport]", "connection_lost(self, exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address = sys.argv[1] try: os.unlink(server_address) except", "coro = loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try: loop.run_forever() except", "self.transport: pass #continue transport.write(data) def connection_lost(self, exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address", "except OSError: if os.path.exists(server_address): 
raise loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888)", "8888) coro = loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try: loop.run_forever()", "self.transport = transport self.clients[transport] = None def data_received(self, data): # print('data received: {}'.format(data.decode()))", "= asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address) server =", "peername = transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport = transport self.clients[transport] = None def", "exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address = sys.argv[1] try: os.unlink(server_address) except OSError:", "os.path.exists(server_address): raise loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer,", "received: {}'.format(data.decode())) for transport in self.clients: if transport == self.transport: pass #continue transport.write(data)", "= transport self.clients[transport] = None def data_received(self, data): # print('data received: {}'.format(data.decode())) for", "if transport == self.transport: pass #continue transport.write(data) def connection_lost(self, exc): print(\"connection lost\") self.transport.close()", "#continue transport.write(data) def connection_lost(self, exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address = sys.argv[1]", "self.clients[self.transport] server_address = sys.argv[1] try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise loop =", "def connection_lost(self, exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address = sys.argv[1] try: 
os.unlink(server_address)", "os class EchoServer(asyncio.Protocol): clients = {} def connection_made(self, transport): peername = transport.get_extra_info('peername') print('connection", "= None def data_received(self, data): # print('data received: {}'.format(data.decode())) for transport in self.clients:", "loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving on {}'.format(server_address))", "server_address) server = loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try: loop.run_forever() except KeyboardInterrupt: print(\"exit\") finally:", "server = loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try: loop.run_forever() except KeyboardInterrupt: print(\"exit\") finally: server.close()", "transport.write(data) def connection_lost(self, exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address = sys.argv[1] try:", "def connection_made(self, transport): peername = transport.get_extra_info('peername') print('connection from {}'.format(peername)) self.transport = transport self.clients[transport]", "os.unlink(server_address) except OSError: if os.path.exists(server_address): raise loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\",", "transport in self.clients: if transport == self.transport: pass #continue transport.write(data) def connection_lost(self, exc):", "del self.clients[self.transport] server_address = sys.argv[1] try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise loop", "python3 import asyncio import sys import os class EchoServer(asyncio.Protocol): clients = {} def", "self.clients[transport] = None def data_received(self, data): # print('data received: {}'.format(data.decode())) for transport in", "EchoServer(asyncio.Protocol): clients = {} def connection_made(self, 
transport): peername = transport.get_extra_info('peername') print('connection from {}'.format(peername))", "print('connection from {}'.format(peername)) self.transport = transport self.clients[transport] = None def data_received(self, data): #", "data): # print('data received: {}'.format(data.decode())) for transport in self.clients: if transport == self.transport:", "sys import os class EchoServer(asyncio.Protocol): clients = {} def connection_made(self, transport): peername =", "if os.path.exists(server_address): raise loop = asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro =", "self.clients: if transport == self.transport: pass #continue transport.write(data) def connection_lost(self, exc): print(\"connection lost\")", "pass #continue transport.write(data) def connection_lost(self, exc): print(\"connection lost\") self.transport.close() del self.clients[self.transport] server_address =", "asyncio.get_event_loop() #coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro)", "#coro = loop.create_server(EchoServer, \"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving", "\"127.0.0.1\", 8888) coro = loop.create_unix_server(EchoServer, server_address) server = loop.run_until_complete(coro) print('serving on {}'.format(server_address)) try:", "def data_received(self, data): # print('data received: {}'.format(data.decode())) for transport in self.clients: if transport", "{}'.format(peername)) self.transport = transport self.clients[transport] = None def data_received(self, data): # print('data received:", "import asyncio import sys import os class EchoServer(asyncio.Protocol): clients = {} def connection_made(self,", "clients = {} def connection_made(self, transport): peername = transport.get_extra_info('peername') print('connection from 
{}'.format(peername)) self.transport", "sys.argv[1] try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise loop = asyncio.get_event_loop() #coro =" ]
[ "NULL, Price DECIMAL(5, 2), Stock INTEGER ) \"\"\") if __name__ == \"__main__\": #", "51): raise Exception() except: operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let the scraping begin!", "?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the database\") current_book", "cursor = connection.cursor() # Create the books table cursor.execute(\"\"\" CREATE TABLE IF NOT", "Instantiate cursor cursor = connection.cursor() # Create the books table cursor.execute(\"\"\" CREATE TABLE", "os import time # Establish connection with database connection = sqlite3.connect(\"books.db\") # Instantiate", "books ( BookId INTEGER PRIMARY KEY, Title TEXT NOT NULL, Price DECIMAL(5, 2),", "cursor cursor = connection.cursor() # Create the books table cursor.execute(\"\"\" CREATE TABLE IF", "50.\\n\\n\" \"The higher the number, the faster the program will be done.\\n\" )", "Import required packages import sqlite3 import operations import os import time # Establish", "like to start scraping?\\n\\n\" \"Please type a number between 1 and 50.\\n\\n\" \"The", "INTO [dbo].[Books] (title, price, stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"]))", "raise Exception() except: operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let the scraping begin! 
\\U0001f600\")", "__name__ == \"__main__\": # Print welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page =", "Price DECIMAL(5, 2), Stock INTEGER ) \"\"\") if __name__ == \"__main__\": # Print", "operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1 for book in books:", "1 and 50.\\n\\n\" \"The higher the number, the faster the program will be", "cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the database\") current_book += 1", "CREATE TABLE IF NOT EXISTS books ( BookId INTEGER PRIMARY KEY, Title TEXT", "import os import time # Establish connection with database connection = sqlite3.connect(\"books.db\") #", "print(\"At what page would you like to start scraping?\\n\\n\" \"Please type a number", "Stock INTEGER ) \"\"\") if __name__ == \"__main__\": # Print welcome screen operations.clear_screen()", "user for valid page number while begin_page not in range(1, 51): try: print(\"At", "time.sleep(2) begin_page = None # Ask user for valid page number while begin_page", "page would you like to start scraping?\\n\\n\" \"Please type a number between 1", "to start scraping?\\n\\n\" \"Please type a number between 1 and 50.\\n\\n\" \"The higher", "BookId INTEGER PRIMARY KEY, Title TEXT NOT NULL, Price DECIMAL(5, 2), Stock INTEGER", "# Print welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None # Ask", "current_book = 1 for book in books: query = \"\"\" INSERT INTO [dbo].[Books]", "for book in books: query = \"\"\" INSERT INTO [dbo].[Books] (title, price, stock)", "operations import os import time # Establish connection with database connection = sqlite3.connect(\"books.db\")", "import sqlite3 import operations import os import time # Establish connection with database", "= 
operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1 for book in", "scraping?\\n\\n\" \"Please type a number between 1 and 50.\\n\\n\" \"The higher the number,", "import operations import os import time # Establish connection with database connection =", "KEY, Title TEXT NOT NULL, Price DECIMAL(5, 2), Stock INTEGER ) \"\"\") if", "time # Establish connection with database connection = sqlite3.connect(\"books.db\") # Instantiate cursor cursor", "IF NOT EXISTS books ( BookId INTEGER PRIMARY KEY, Title TEXT NOT NULL,", "begin! \\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book =", "query = \"\"\" INSERT INTO [dbo].[Books] (title, price, stock) VALUES (?, ?, ?)\"\"\"", "try: current_book = 1 for book in books: query = \"\"\" INSERT INTO", "higher the number, the faster the program will be done.\\n\" ) begin_page =", "= \"\"\" INSERT INTO [dbo].[Books] (title, price, stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query,", "the scraping begin! 
\\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try:", "print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None # Ask user for valid page number", "number, the faster the program will be done.\\n\" ) begin_page = int(input(\"Number >", "\"The higher the number, the faster the program will be done.\\n\" ) begin_page", "book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the database\") current_book += 1 time.sleep(0.1) except:", "\"\"\" INSERT INTO [dbo].[Books] (title, price, stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"],", "for valid page number while begin_page not in range(1, 51): try: print(\"At what", "\")) if begin_page not in range(1, 51): raise Exception() except: operations.clear_screen() # Start", "start scraping?\\n\\n\" \"Please type a number between 1 and 50.\\n\\n\" \"The higher the", "current_book += 1 time.sleep(0.1) except: pass # Commit data to the database connection.commit()", "book in books: query = \"\"\" INSERT INTO [dbo].[Books] (title, price, stock) VALUES", "# Establish connection with database connection = sqlite3.connect(\"books.db\") # Instantiate cursor cursor =", "{current_book}/{len(books)} to the database\") current_book += 1 time.sleep(0.1) except: pass # Commit data", "# Commit data to the database connection.commit() operations.clear_screen() print(\"Completed!\\nNow type 'open books.csv' \\U0001f600\")", "range(1, 51): raise Exception() except: operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let the scraping", "faster the program will be done.\\n\" ) begin_page = int(input(\"Number > \")) if", "( BookId INTEGER PRIMARY KEY, Title TEXT NOT NULL, Price DECIMAL(5, 2), Stock", "the faster the program will be done.\\n\" ) begin_page = int(input(\"Number > \"))", "database\") current_book += 1 time.sleep(0.1) except: pass # 
Commit data to the database", "while begin_page not in range(1, 51): try: print(\"At what page would you like", "= None # Ask user for valid page number while begin_page not in", "\"Please type a number between 1 and 50.\\n\\n\" \"The higher the number, the", "and 50.\\n\\n\" \"The higher the number, the faster the program will be done.\\n\"", "operations.clear_screen() print(\"Let the scraping begin! \\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls)", "not in range(1, 51): raise Exception() except: operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let", "try: print(\"At what page would you like to start scraping?\\n\\n\" \"Please type a", "operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1 for book in books: query = \"\"\"", "would you like to start scraping?\\n\\n\" \"Please type a number between 1 and", "the program will be done.\\n\" ) begin_page = int(input(\"Number > \")) if begin_page", "import time # Establish connection with database connection = sqlite3.connect(\"books.db\") # Instantiate cursor", "screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None # Ask user for valid", "program will be done.\\n\" ) begin_page = int(input(\"Number > \")) if begin_page not", "?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the database\") current_book +=", "the database\") current_book += 1 time.sleep(0.1) except: pass # Commit data to the", "in range(1, 51): raise Exception() except: operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let the", "print(f\"Saving book {current_book}/{len(books)} to the database\") current_book += 1 time.sleep(0.1) except: pass #", "cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS books ( BookId INTEGER PRIMARY KEY, Title", "page number 
while begin_page not in range(1, 51): try: print(\"At what page would", "scraping begin! \\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book", "Create the books table cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS books ( BookId", "time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1 for", "table cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS books ( BookId INTEGER PRIMARY KEY,", "= 1 for book in books: query = \"\"\" INSERT INTO [dbo].[Books] (title,", "VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the", "price, stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)}", "database connection = sqlite3.connect(\"books.db\") # Instantiate cursor cursor = connection.cursor() # Create the", "print(\"Let the scraping begin! 
\\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books)", "required packages import sqlite3 import operations import os import time # Establish connection", "book {current_book}/{len(books)} to the database\") current_book += 1 time.sleep(0.1) except: pass # Commit", "== \"__main__\": # Print welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None", "books table cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS books ( BookId INTEGER PRIMARY", "book_urls = operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1 for book", "operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None # Ask user for valid page", ") \"\"\") if __name__ == \"__main__\": # Print welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\")", "except: operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let the scraping begin! \\U0001f600\") time.sleep(2) book_urls", ") begin_page = int(input(\"Number > \")) if begin_page not in range(1, 51): raise", "connection with database connection = sqlite3.connect(\"books.db\") # Instantiate cursor cursor = connection.cursor() #", "Title TEXT NOT NULL, Price DECIMAL(5, 2), Stock INTEGER ) \"\"\") if __name__", "None # Ask user for valid page number while begin_page not in range(1,", "operations.write_to_csv_file(books) try: current_book = 1 for book in books: query = \"\"\" INSERT", "Ask user for valid page number while begin_page not in range(1, 51): try:", "connection = sqlite3.connect(\"books.db\") # Instantiate cursor cursor = connection.cursor() # Create the books", "operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let the scraping begin! 
\\U0001f600\") time.sleep(2) book_urls =", "(?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the database\")", "packages import sqlite3 import operations import os import time # Establish connection with", "EXISTS books ( BookId INTEGER PRIMARY KEY, Title TEXT NOT NULL, Price DECIMAL(5,", "range(1, 51): try: print(\"At what page would you like to start scraping?\\n\\n\" \"Please", "> \")) if begin_page not in range(1, 51): raise Exception() except: operations.clear_screen() #", "with database connection = sqlite3.connect(\"books.db\") # Instantiate cursor cursor = connection.cursor() # Create", "be done.\\n\" ) begin_page = int(input(\"Number > \")) if begin_page not in range(1,", "sqlite3.connect(\"books.db\") # Instantiate cursor cursor = connection.cursor() # Create the books table cursor.execute(\"\"\"", "\"__main__\": # Print welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None #", "sqlite3 import operations import os import time # Establish connection with database connection", "= sqlite3.connect(\"books.db\") # Instantiate cursor cursor = connection.cursor() # Create the books table", "1 time.sleep(0.1) except: pass # Commit data to the database connection.commit() operations.clear_screen() print(\"Completed!\\nNow", "what page would you like to start scraping?\\n\\n\" \"Please type a number between", "if __name__ == \"__main__\": # Print welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page", "INTEGER ) \"\"\") if __name__ == \"__main__\": # Print welcome screen operations.clear_screen() print(\"Hello", "= operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1 for book in books: query =", "you like to start scraping?\\n\\n\" \"Please type a number between 1 and 50.\\n\\n\"", "\"\"\") if __name__ == \"__main__\": # Print welcome 
screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2)", "# Create the books table cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS books (", "= int(input(\"Number > \")) if begin_page not in range(1, 51): raise Exception() except:", "not in range(1, 51): try: print(\"At what page would you like to start", "a number between 1 and 50.\\n\\n\" \"The higher the number, the faster the", "in books: query = \"\"\" INSERT INTO [dbo].[Books] (title, price, stock) VALUES (?,", "# Import required packages import sqlite3 import operations import os import time #", "TABLE IF NOT EXISTS books ( BookId INTEGER PRIMARY KEY, Title TEXT NOT", "to the database\") current_book += 1 time.sleep(0.1) except: pass # Commit data to", "+= 1 time.sleep(0.1) except: pass # Commit data to the database connection.commit() operations.clear_screen()", "pass # Commit data to the database connection.commit() operations.clear_screen() print(\"Completed!\\nNow type 'open books.csv'", "will be done.\\n\" ) begin_page = int(input(\"Number > \")) if begin_page not in", "connection.cursor() # Create the books table cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS books", "in range(1, 51): try: print(\"At what page would you like to start scraping?\\n\\n\"", "[dbo].[Books] (title, price, stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving", "scraping operations.clear_screen() print(\"Let the scraping begin! 
\\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books =", "# Instantiate cursor cursor = connection.cursor() # Create the books table cursor.execute(\"\"\" CREATE", "the number, the faster the program will be done.\\n\" ) begin_page = int(input(\"Number", "= connection.cursor() # Create the books table cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS", "2), Stock INTEGER ) \"\"\") if __name__ == \"__main__\": # Print welcome screen", "books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1 for book in books: query", "INTEGER PRIMARY KEY, Title TEXT NOT NULL, Price DECIMAL(5, 2), Stock INTEGER )", "welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None # Ask user for", "INSERT INTO [dbo].[Books] (title, price, stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"],", "begin_page not in range(1, 51): try: print(\"At what page would you like to", "stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to", "time.sleep(0.1) except: pass # Commit data to the database connection.commit() operations.clear_screen() print(\"Completed!\\nNow type", "books: query = \"\"\" INSERT INTO [dbo].[Books] (title, price, stock) VALUES (?, ?,", "NOT EXISTS books ( BookId INTEGER PRIMARY KEY, Title TEXT NOT NULL, Price", "(book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the database\") current_book += 1 time.sleep(0.1)", "Establish connection with database connection = sqlite3.connect(\"books.db\") # Instantiate cursor cursor = connection.cursor()", "(title, price, stock) VALUES (?, ?, ?)\"\"\" cursor.execute(query, (book[\"title\"], book[\"price\"], book[\"stock\"])) print(f\"Saving book", "\\U0001f600\") time.sleep(2) book_urls = 
operations.scrape_book_urls(begin=begin_page) books = operations.scrape_books(book_urls) operations.write_to_csv_file(books) try: current_book = 1", "# Ask user for valid page number while begin_page not in range(1, 51):", "begin_page = None # Ask user for valid page number while begin_page not", "NOT NULL, Price DECIMAL(5, 2), Stock INTEGER ) \"\"\") if __name__ == \"__main__\":", "Print welcome screen operations.clear_screen() print(\"Hello \\U0001f600\\n\") time.sleep(2) begin_page = None # Ask user", "done.\\n\" ) begin_page = int(input(\"Number > \")) if begin_page not in range(1, 51):", "1 for book in books: query = \"\"\" INSERT INTO [dbo].[Books] (title, price,", "valid page number while begin_page not in range(1, 51): try: print(\"At what page", "Exception() except: operations.clear_screen() # Start scraping operations.clear_screen() print(\"Let the scraping begin! \\U0001f600\") time.sleep(2)", "DECIMAL(5, 2), Stock INTEGER ) \"\"\") if __name__ == \"__main__\": # Print welcome", "begin_page not in range(1, 51): raise Exception() except: operations.clear_screen() # Start scraping operations.clear_screen()", "# Start scraping operations.clear_screen() print(\"Let the scraping begin! \\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page)", "Start scraping operations.clear_screen() print(\"Let the scraping begin! 
\\U0001f600\") time.sleep(2) book_urls = operations.scrape_book_urls(begin=begin_page) books", "book[\"stock\"])) print(f\"Saving book {current_book}/{len(books)} to the database\") current_book += 1 time.sleep(0.1) except: pass", "\\U0001f600\\n\") time.sleep(2) begin_page = None # Ask user for valid page number while", "except: pass # Commit data to the database connection.commit() operations.clear_screen() print(\"Completed!\\nNow type 'open", "number between 1 and 50.\\n\\n\" \"The higher the number, the faster the program", "TEXT NOT NULL, Price DECIMAL(5, 2), Stock INTEGER ) \"\"\") if __name__ ==", "PRIMARY KEY, Title TEXT NOT NULL, Price DECIMAL(5, 2), Stock INTEGER ) \"\"\")", "number while begin_page not in range(1, 51): try: print(\"At what page would you", "type a number between 1 and 50.\\n\\n\" \"The higher the number, the faster", "int(input(\"Number > \")) if begin_page not in range(1, 51): raise Exception() except: operations.clear_screen()", "if begin_page not in range(1, 51): raise Exception() except: operations.clear_screen() # Start scraping", "between 1 and 50.\\n\\n\" \"The higher the number, the faster the program will", "begin_page = int(input(\"Number > \")) if begin_page not in range(1, 51): raise Exception()", "the books table cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS books ( BookId INTEGER", "51): try: print(\"At what page would you like to start scraping?\\n\\n\" \"Please type" ]
[]
[ "mv['current_stage'] == 'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder =", "shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response =", "update[model_name] = 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage'] ==", "model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response = {k: v for k,", "0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage'] == 'Production': mv['last_updated_timestamp']", "[\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS = {} def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name)", "import shutil import boto3 from datetime import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT", "CURRENT_MODEL = model else: print(\"Downloading model...\") downlod_model(bucket, folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\")", "== model) CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using", "update = {} for model_name in REGISTERED_MODELS: model = None update[model_name] = 0", "= {} def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if", "downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key))", "for obj in 
bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global", "folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model else: print(\"Downloading", "in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL update", "== model) CURRENT_MODEL = model else: print(\"Downloading model...\") downlod_model(bucket, folder) model = os.path.join(os.path.join('./models',", "mv.items() if v} break if model: MODELS[model_name] = model return update def get_model(model_name):", "{} for model_name in REGISTERED_MODELS: model = None update[model_name] = 0 for mv", "= str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)):", "if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model {name} v{version} ({current_stage}) updated", "def update_models(): global CURRENT_MODEL update = {} for model_name in REGISTERED_MODELS: model =", "os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model {name} v{version} ({current_stage}) updated at", "= boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS", "model) CURRENT_MODEL = model else: print(\"Downloading model...\") downlod_model(bucket, folder) model = 
os.path.join(os.path.join('./models', folder),", "folder), './models') print(\"Using model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response = {k:", "obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL", "mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS = {} def downlod_model(bucket_name, remoteDirectory_name):", "S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models():", "{last_updated_timestamp}\".format(**mv)) #response = {k: v for k, v in mv.items() if v} break", "= \"Unknown\" MODELS = {} def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj", "#response = {k: v for k, v in mv.items() if v} break if", "MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage'] == 'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000)))", "global CURRENT_MODEL update = {} for model_name in REGISTERED_MODELS: model = None update[model_name]", "'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if", "folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL", "model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model 
{name} v{version} ({current_stage})", "v for k, v in mv.items() if v} break if model: MODELS[model_name] =", "datetime import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"]", "bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model", "MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS = {} def", "obj.key) def update_models(): global CURRENT_MODEL update = {} for model_name in REGISTERED_MODELS: model", "os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL update = {} for model_name in", "= mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\")", "boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS =", "shutil import boto3 from datetime import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT =", "print(\"Using model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response = {k: v for", "== 'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1]", "mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage'] == 'Production': mv['last_updated_timestamp'] = 
str(datetime.fromtimestamp(int(mv['last_updated_timestamp']", "folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model if os.path.exists('./models'):", "os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model if", "= not (CURRENT_MODEL == model) CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(),", "os import shutil import boto3 from datetime import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI'))", "mlflow import os import shutil import boto3 from datetime import datetime S3_CLIENT =", "model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL", "= model else: print(\"Downloading model...\") downlod_model(bucket, folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name]", "import os import shutil import boto3 from datetime import datetime S3_CLIENT = boto3.resource('s3')", "for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage'] == 'Production': mv['last_updated_timestamp'] =", "= S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def", "/ 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing", "not (CURRENT_MODEL == model) CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), 
folder),", "model) CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model", "import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL", "mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models',", "str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load", "(CURRENT_MODEL == model) CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models')", "if v} break if model: MODELS[model_name] = model return update def get_model(model_name): return", "S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\"", "for model_name in REGISTERED_MODELS: model = None update[model_name] = 0 for mv in", "= {} for model_name in REGISTERED_MODELS: model = None update[model_name] = 0 for", "= mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model =", "= 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage'] == 'Production':", "for k, v in mv.items() if v} break if model: MODELS[model_name] = model", "folder = 
mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model = os.path.join(os.path.join('./models', folder),", "if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL update = {}", "{name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response = {k: v for k, v", "update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model else: print(\"Downloading model...\") downlod_model(bucket,", "os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model else:", "print(\"Load existing model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL ==", "os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response", "os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not", "import boto3 from datetime import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient()", "os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL update = {} for model_name", "in REGISTERED_MODELS: model = None update[model_name] = 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv", "break if model: MODELS[model_name] = model return update def get_model(model_name): return MODELS.get(model_name, None)", 
"\"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model else: print(\"Downloading model...\")", "folder)): print(\"Load existing model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL", "= {k: v for k, v in mv.items() if v} break if model:", "bucket = S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key)", "bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL update =", "from datetime import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS =", "at {last_updated_timestamp}\".format(**mv)) #response = {k: v for k, v in mv.items() if v}", "bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL update = {} for model_name in REGISTERED_MODELS:", "model...\") downlod_model(bucket, folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL ==", "shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv))", "\"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models')", "'./models') print(\"Using model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response = {k: v", "updated at 
{last_updated_timestamp}\".format(**mv)) #response = {k: v for k, v in mv.items() if", "= os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model", "in mv.items() if v} break if model: MODELS[model_name] = model return update def", "boto3 from datetime import datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS", "downlod_model(bucket, folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model)", "= None update[model_name] = 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if", "model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL =", "import mlflow import os import shutil import boto3 from datetime import datetime S3_CLIENT", "else: print(\"Downloading model...\") downlod_model(bucket, folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not", "def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)):", "mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name]", "not (CURRENT_MODEL == model) CURRENT_MODEL = model else: print(\"Downloading model...\") downlod_model(bucket, folder) model", "update[model_name] = not (CURRENT_MODEL == model) CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models')", 
"model_name in REGISTERED_MODELS: model = None update[model_name] = 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"):", "= not (CURRENT_MODEL == model) CURRENT_MODEL = model else: print(\"Downloading model...\") downlod_model(bucket, folder)", "= [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS = {} def downlod_model(bucket_name, remoteDirectory_name): bucket =", "<reponame>frburrue/tfm<gh_stars>0 import mlflow import os import shutil import boto3 from datetime import datetime", "if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] =", "datetime S3_CLIENT = boto3.resource('s3') mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL =", "= dict(mv) if mv['current_stage'] == 'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket =", "MODELS = {} def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name):", "print(\"Downloading model...\") downlod_model(bucket, folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL", "{} def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not", "remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj in bucket.objects.filter(Prefix=remoteDirectory_name): if not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key,", "CURRENT_MODEL update = {} for model_name in REGISTERED_MODELS: model = None update[model_name] =", "= model if os.path.exists('./models'): 
shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), folder), './models') print(\"Using model {name} v{version}", "existing model...\") model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] = not (CURRENT_MODEL == model)", "in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage'] == 'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] /", "mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\") model = os.path.join(os.path.join('./models',", "None update[model_name] = 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv) if mv['current_stage']", "model = None update[model_name] = 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv = dict(mv)", "k, v in mv.items() if v} break if model: MODELS[model_name] = model return", "= mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS = {} def downlod_model(bucket_name,", "model else: print(\"Downloading model...\") downlod_model(bucket, folder) model = os.path.join(os.path.join('./models', folder), \"artifacts/model/data/model.h5\") update[model_name] =", "v} break if model: MODELS[model_name] = model return update def get_model(model_name): return MODELS.get(model_name,", "update_models(): global CURRENT_MODEL update = {} for model_name in REGISTERED_MODELS: model = None", "REGISTERED_MODELS: model = None update[model_name] = 0 for mv in MLFLOW_CLIENT.search_model_versions(f\"name='{model_name}'\"): mv =", "REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS = {} def downlod_model(bucket_name, remoteDirectory_name): bucket", "mv = dict(mv) if mv['current_stage'] == 'Production': 
mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket", "({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response = {k: v for k, v in mv.items()", "v in mv.items() if v} break if model: MODELS[model_name] = model return update", "(CURRENT_MODEL == model) CURRENT_MODEL = model else: print(\"Downloading model...\") downlod_model(bucket, folder) model =", "v{version} ({current_stage}) updated at {last_updated_timestamp}\".format(**mv)) #response = {k: v for k, v in", "{k: v for k, v in mv.items() if v} break if model: MODELS[model_name]", "1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder = mv['source'].split('//')[1].split('/')[1] if os.path.exists(os.path.join('./models', folder)): print(\"Load existing model...\")", "not os.path.exists(os.path.dirname(obj.key)): os.makedirs(os.path.dirname(obj.key)) bucket.download_file(obj.key, obj.key) def update_models(): global CURRENT_MODEL update = {} for", "\"Unknown\" MODELS = {} def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for obj in", "mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI')) MLFLOW_CLIENT = mlflow.tracking.MlflowClient() REGISTERED_MODELS = [\"Hands\"] CURRENT_MODEL = \"Unknown\" MODELS = {}", "CURRENT_MODEL = \"Unknown\" MODELS = {} def downlod_model(bucket_name, remoteDirectory_name): bucket = S3_CLIENT.Bucket(bucket_name) for", "if mv['current_stage'] == 'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0] folder", "dict(mv) if mv['current_stage'] == 'Production': mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000))) bucket = mv['source'].split('//')[1].split('/')[0]", "CURRENT_MODEL = model if os.path.exists('./models'): shutil.rmtree('./models') os.mkdir('./models') shutil.move(os.path.join(os.getcwd(), 
folder), './models') print(\"Using model {name}" ]
[ "sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity cache self._opacity_cache = OpacityCache() #", "wngrid): \"\"\" Prepares each molecular opacity by weighting them by their mixing ratio", "def prepare_each(self, model, wngrid): \"\"\" Prepares each molecular opacity by weighting them by", ".contribution import Contribution import numpy as np from taurex.cache import OpacityCache class AbsorptionContribution(Contribution):", "Computes the contribution to the optical depth occuring from molecular absorption. \"\"\" def", "depth occuring from molecular absorption. \"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache() def", "sigma_xsec[...] = 0.0 # Get the mix ratio of the gas gas_mix =", "%s opacity', gas) # Get the cross section object relating to the gas", "tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer, tp) temperature, pressure =", "gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity', gas) # Get the", "Yields ------ component: :obj:`tuple` of type (str, :obj:`array`) Name of molecule and weighted", "of type (str, :obj:`array`) Name of molecule and weighted opacity \"\"\" self.debug('Preparing model", "self._opacity_cache[gas] # Loop through the layers for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got", "enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer, tp) temperature, pressure = tp #", "# Clear sigma array sigma_xsec[...] = 0.0 # Get the mix ratio of", "tp # Place into the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] #", "molecular opacity by weighting them by their mixing ratio in the atmosphere Parameters", "array sigma_xsec[...] 
= 0.0 # Get the mix ratio of the gas gas_mix", "array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to master cross-section", "sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to master cross-section self.sigma_xsec", "component: :obj:`tuple` of type (str, :obj:`array`) Name of molecule and weighted opacity \"\"\"", "gas xsec = self._opacity_cache[gas] # Loop through the layers for idx_layer, tp in", "gases for gas in model.chemistry.activeGases: # Clear sigma array sigma_xsec[...] = 0.0 #", "\\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to master cross-section self.sigma_xsec = sigma_xsec", "to master cross-section self.sigma_xsec = sigma_xsec yield gas, sigma_xsec @property def sigma(self): \"\"\"", "sigma_xsec yield gas, sigma_xsec @property def sigma(self): \"\"\" Returns the fused weighted cross-section", "mix ratio of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity',", "tp) temperature, pressure = tp # Place into the array sigma_xsec[idx_layer] += \\", "temperature, pressure = tp # Place into the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature,", "OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the contribution to the optical depth occuring from", "object relating to the gas xsec = self._opacity_cache[gas] # Loop through the layers", "by their mixing ratio in the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model", "# Loop through all active gases for gas in model.chemistry.activeGases: # Clear sigma", "model.chemistry.activeGases: # Clear sigma array sigma_xsec[...] = 0.0 # Get the mix ratio", "the mix ratio of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s", "Clear sigma array sigma_xsec[...] 
= 0.0 # Get the mix ratio of the", "grid Yields ------ component: :obj:`tuple` of type (str, :obj:`array`) Name of molecule and", "= OpacityCache() # Loop through all active gases for gas in model.chemistry.activeGases: #", "molecular absorption. \"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache() def prepare_each(self, model, wngrid):", "into the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to", "gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity', gas) # Get the cross", "Loop through the layers for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s", "Place into the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign", "self.sigma_xsec = sigma_xsec yield gas, sigma_xsec @property def sigma(self): \"\"\" Returns the fused", "model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer, tp) temperature, pressure = tp # Place", "them by their mixing ratio in the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward", "idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer, tp) temperature, pressure", "AbsorptionContribution(Contribution): \"\"\" Computes the contribution to the optical depth occuring from molecular absorption.", "wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of type (str, :obj:`array`) Name", "= self._opacity_cache[gas] # Loop through the layers for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)):", "molecule and weighted opacity \"\"\" self.debug('Preparing model with %s', wngrid.shape) self._ngrid = wngrid.shape[0]", "def __init__(self): super().__init__('Absorption') self._opacity_cache = 
OpacityCache() def prepare_each(self, model, wngrid): \"\"\" Prepares each", "# Temporarily assign to master cross-section self.sigma_xsec = sigma_xsec yield gas, sigma_xsec @property", "active gases for gas in model.chemistry.activeGases: # Clear sigma array sigma_xsec[...] = 0.0", "pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to master cross-section self.sigma_xsec = sigma_xsec yield gas,", "from molecular absorption. \"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache() def prepare_each(self, model,", "self._opacity_cache = OpacityCache() # Loop through all active gases for gas in model.chemistry.activeGases:", "Get the mix ratio of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas", "import Contribution import numpy as np from taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\"", "self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity cache self._opacity_cache", "the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity', gas) # Get", "occuring from molecular absorption. \"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache() def prepare_each(self,", "all active gases for gas in model.chemistry.activeGases: # Clear sigma array sigma_xsec[...] 
=", "layers for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer, tp)", "numpy as np from taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the contribution", "opacity cache self._opacity_cache = OpacityCache() # Loop through all active gases for gas", "from taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the contribution to the optical", "wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity cache self._opacity_cache = OpacityCache()", "ratio of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity', gas)", "def sigma(self): \"\"\" Returns the fused weighted cross-section of all active gases \"\"\"", "import numpy as np from taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the", "wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity cache", "sigma_xsec @property def sigma(self): \"\"\" Returns the fused weighted cross-section of all active", "class AbsorptionContribution(Contribution): \"\"\" Computes the contribution to the optical depth occuring from molecular", "= sigma_xsec yield gas, sigma_xsec @property def sigma(self): \"\"\" Returns the fused weighted", "atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields ------", "\"\"\" self.debug('Preparing model with %s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0]))", "of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity', gas) #", "to the optical depth occuring from molecular absorption. 
\"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache", "---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple`", "each molecular opacity by weighting them by their mixing ratio in the atmosphere", "for gas in model.chemistry.activeGases: # Clear sigma array sigma_xsec[...] = 0.0 # Get", "gas, sigma_xsec @property def sigma(self): \"\"\" Returns the fused weighted cross-section of all", "wngrid.shape[0])) # Get the opacity cache self._opacity_cache = OpacityCache() # Loop through all", "model with %s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get", "model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity', gas) # Get the cross section object", "= wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity cache self._opacity_cache =", "yield gas, sigma_xsec @property def sigma(self): \"\"\" Returns the fused weighted cross-section of", "= np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity cache self._opacity_cache = OpacityCache() # Loop", "model, wngrid): \"\"\" Prepares each molecular opacity by weighting them by their mixing", "type (str, :obj:`array`) Name of molecule and weighted opacity \"\"\" self.debug('Preparing model with", "wngrid)*gas_mix[idx_layer] # Temporarily assign to master cross-section self.sigma_xsec = sigma_xsec yield gas, sigma_xsec", "with %s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the", "\"\"\" Returns the fused weighted cross-section of all active gases \"\"\" return self.sigma_xsec", "from .contribution import Contribution import numpy as np from taurex.cache import OpacityCache class", "# Get the cross section object relating to the gas xsec = self._opacity_cache[gas]", "Contribution import numpy as np 
from taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes", "------ component: :obj:`tuple` of type (str, :obj:`array`) Name of molecule and weighted opacity", "weighting them by their mixing ratio in the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel`", "Prepares each molecular opacity by weighting them by their mixing ratio in the", "+= \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to master cross-section self.sigma_xsec =", "self.info('Recomputing active gas %s opacity', gas) # Get the cross section object relating", ":class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of type", "weighted opacity \"\"\" self.debug('Preparing model with %s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec =", "0.0 # Get the mix ratio of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing", "__init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache() def prepare_each(self, model, wngrid): \"\"\" Prepares each molecular", "# Place into the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily", "the contribution to the optical depth occuring from molecular absorption. 
\"\"\" def __init__(self):", "of molecule and weighted opacity \"\"\" self.debug('Preparing model with %s', wngrid.shape) self._ngrid =", "Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields ------ component:", "gas) # Get the cross section object relating to the gas xsec =", "import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the contribution to the optical depth occuring", "idx_layer, tp) temperature, pressure = tp # Place into the array sigma_xsec[idx_layer] +=", ":obj:`tuple` of type (str, :obj:`array`) Name of molecule and weighted opacity \"\"\" self.debug('Preparing", "pressure = tp # Place into the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure,", "Wavenumber grid Yields ------ component: :obj:`tuple` of type (str, :obj:`array`) Name of molecule", "relating to the gas xsec = self._opacity_cache[gas] # Loop through the layers for", "# Loop through the layers for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp", "OpacityCache() # Loop through all active gases for gas in model.chemistry.activeGases: # Clear", "index,tp %s %s', idx_layer, tp) temperature, pressure = tp # Place into the", "optical depth occuring from molecular absorption. \"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache()", "gas in model.chemistry.activeGases: # Clear sigma array sigma_xsec[...] 
= 0.0 # Get the", "cache self._opacity_cache = OpacityCache() # Loop through all active gases for gas in", "as np from taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the contribution to", "# Get the mix ratio of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active", "@property def sigma(self): \"\"\" Returns the fused weighted cross-section of all active gases", "self.debug('Preparing model with %s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) #", "np from taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the contribution to the", "np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity cache self._opacity_cache = OpacityCache() # Loop through", "# Get the opacity cache self._opacity_cache = OpacityCache() # Loop through all active", "to the gas xsec = self._opacity_cache[gas] # Loop through the layers for idx_layer,", "gas %s opacity', gas) # Get the cross section object relating to the", "= 0.0 # Get the mix ratio of the gas gas_mix = model.chemistry.get_gas_mix_profile(gas)", "section object relating to the gas xsec = self._opacity_cache[gas] # Loop through the", "cross-section self.sigma_xsec = sigma_xsec yield gas, sigma_xsec @property def sigma(self): \"\"\" Returns the", "in model.chemistry.activeGases: # Clear sigma array sigma_xsec[...] 
= 0.0 # Get the mix", "opacity \"\"\" self.debug('Preparing model with %s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers,", "self.debug('Got index,tp %s %s', idx_layer, tp) temperature, pressure = tp # Place into", "(str, :obj:`array`) Name of molecule and weighted opacity \"\"\" self.debug('Preparing model with %s',", "mixing ratio in the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array`", "%s', idx_layer, tp) temperature, pressure = tp # Place into the array sigma_xsec[idx_layer]", "Forward model wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of type (str,", "the opacity cache self._opacity_cache = OpacityCache() # Loop through all active gases for", "the layers for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer,", "assign to master cross-section self.sigma_xsec = sigma_xsec yield gas, sigma_xsec @property def sigma(self):", "opacity by weighting them by their mixing ratio in the atmosphere Parameters ----------", "master cross-section self.sigma_xsec = sigma_xsec yield gas, sigma_xsec @property def sigma(self): \"\"\" Returns", "= tp # Place into the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer]", "Loop through all active gases for gas in model.chemistry.activeGases: # Clear sigma array", "through the layers for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s',", "taurex.cache import OpacityCache class AbsorptionContribution(Contribution): \"\"\" Computes the contribution to the optical depth", "the array sigma_xsec[idx_layer] += \\ xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to master", "\"\"\" Computes the contribution to the optical depth occuring from molecular absorption. 
\"\"\"", "model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of", "xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer] # Temporarily assign to master cross-section self.sigma_xsec = sigma_xsec yield", "%s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0])) # Get the opacity", "contribution to the optical depth occuring from molecular absorption. \"\"\" def __init__(self): super().__init__('Absorption')", "the cross section object relating to the gas xsec = self._opacity_cache[gas] # Loop", "cross section object relating to the gas xsec = self._opacity_cache[gas] # Loop through", "opacity', gas) # Get the cross section object relating to the gas xsec", "Name of molecule and weighted opacity \"\"\" self.debug('Preparing model with %s', wngrid.shape) self._ngrid", ":obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of type (str, :obj:`array`) Name of", "in the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid", "ratio in the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber", ":obj:`array`) Name of molecule and weighted opacity \"\"\" self.debug('Preparing model with %s', wngrid.shape)", "for idx_layer, tp in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer, tp) temperature,", "model wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of type (str, :obj:`array`)", "in enumerate(zip(model.temperatureProfile, model.pressureProfile)): self.debug('Got index,tp %s %s', idx_layer, tp) temperature, pressure = tp", "sigma array sigma_xsec[...] 
= 0.0 # Get the mix ratio of the gas", "active gas %s opacity', gas) # Get the cross section object relating to", "and weighted opacity \"\"\" self.debug('Preparing model with %s', wngrid.shape) self._ngrid = wngrid.shape[0] sigma_xsec", "self._opacity_cache = OpacityCache() def prepare_each(self, model, wngrid): \"\"\" Prepares each molecular opacity by", "super().__init__('Absorption') self._opacity_cache = OpacityCache() def prepare_each(self, model, wngrid): \"\"\" Prepares each molecular opacity", "through all active gases for gas in model.chemistry.activeGases: # Clear sigma array sigma_xsec[...]", "their mixing ratio in the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid:", "by weighting them by their mixing ratio in the atmosphere Parameters ---------- model:", "\"\"\" Prepares each molecular opacity by weighting them by their mixing ratio in", "%s %s', idx_layer, tp) temperature, pressure = tp # Place into the array", "= OpacityCache() def prepare_each(self, model, wngrid): \"\"\" Prepares each molecular opacity by weighting", "\"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache() def prepare_each(self, model, wngrid): \"\"\" Prepares", "sigma(self): \"\"\" Returns the fused weighted cross-section of all active gases \"\"\" return", "xsec = self._opacity_cache[gas] # Loop through the layers for idx_layer, tp in enumerate(zip(model.temperatureProfile,", "the gas xsec = self._opacity_cache[gas] # Loop through the layers for idx_layer, tp", "absorption. \"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache = OpacityCache() def prepare_each(self, model, wngrid): \"\"\"", "the optical depth occuring from molecular absorption. 
\"\"\" def __init__(self): super().__init__('Absorption') self._opacity_cache =", "the atmosphere Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields", "= model.chemistry.get_gas_mix_profile(gas) self.info('Recomputing active gas %s opacity', gas) # Get the cross section", "prepare_each(self, model, wngrid): \"\"\" Prepares each molecular opacity by weighting them by their", "OpacityCache() def prepare_each(self, model, wngrid): \"\"\" Prepares each molecular opacity by weighting them", "Get the cross section object relating to the gas xsec = self._opacity_cache[gas] #", "Temporarily assign to master cross-section self.sigma_xsec = sigma_xsec yield gas, sigma_xsec @property def", "Get the opacity cache self._opacity_cache = OpacityCache() # Loop through all active gases" ]
[ "face_detected, emotions=None, posture=None, fatigue=None): self.face_detected = face_detected self.emotions = emotions self.posture = posture", "Result: def __init__(self, face_detected, emotions=None, posture=None, fatigue=None): self.face_detected = face_detected self.emotions = emotions", "__init__(self, face_detected, emotions=None, posture=None, fatigue=None): self.face_detected = face_detected self.emotions = emotions self.posture =", "fatigue=None): self.face_detected = face_detected self.emotions = emotions self.posture = posture self.fatigue = fatigue", "def __init__(self, face_detected, emotions=None, posture=None, fatigue=None): self.face_detected = face_detected self.emotions = emotions self.posture", "emotions=None, posture=None, fatigue=None): self.face_detected = face_detected self.emotions = emotions self.posture = posture self.fatigue", "class Result: def __init__(self, face_detected, emotions=None, posture=None, fatigue=None): self.face_detected = face_detected self.emotions =", "posture=None, fatigue=None): self.face_detected = face_detected self.emotions = emotions self.posture = posture self.fatigue =" ]
[ "[] # grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\")", "import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers", "fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from", "\"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_fom.cost_function(z_)", "[] # grads = [] for h in hs: pi_h = solver_w.cost_function(z_ +", "np import matplotlib.pyplot as plt import dolfin as dl; dl.set_log_level(40) # ROMML imports", "self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper:", "# grads_romml = [] for h in hs: pi_h = solver_romml.cost_function(z_ + h", "sys.path.append('../') import matplotlib; matplotlib.use('macosx') import time import numpy as np import matplotlib.pyplot as", "self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) #", "# err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim()) err_model =", "dolfin as dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve import Fin from fom.thermal_fin", "# Determine location to evaluate gradient at norm = np.random.randn(len(chol)) z_ = np.exp(0.5", "= self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad return", "= dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost = None self.grad =", "y_romml = y_r + e_NN # self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2", "err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First 
Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf()", "self.solver = solver self.data = self.solver_r.data self.cost = None self.grad = None def", "np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z)", "[] for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h)", "= SolverWrapper(solver, data) # Determine direction of gradient z = dl.Function(V) norm =", "hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_", "behavior #### hs = np.linspace(0, 1, 500) pis = [] # grads =", "= solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h", "class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r", "observations z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @", "label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads,", "\"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf()", "label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() #####", "dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png',", "ROMML imports from fom.forward_solve import Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM import", "abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\",", "+ dl.assemble(self.solver.reg) 
self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self,", "-np.arange(n_eps)) err_grads = [] grads = [] pi_0 = solver_romml.cost_function(z_) for h in", "load_bn_model from gaussian_field import make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers import Adam", "# cost = cost + reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad", "(.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla()", "self.cost = None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z)", "self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V", "True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom", "= np.exp(0.5 * chol.T @ norm) # Evaluate directional derivative using ROMML dir_grad", "# self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V =", "plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs, grads) # plt.plot(hs, grads_romml) # plt.legend([\"FOM\", \"ROM\",", "eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h * eps_z) # dir_grad =", "= solver_fom.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err", "= [] for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z)", "print(f\"Directional gradient ROMML: {dir_grad}\") n_eps = 32 hs = np.power(2., -np.arange(n_eps)) err_grads =", "pi_h = solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ +", "eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) #", "solver) solver_w = RSolverWrapper(err_model, solver_r, solver) 
solver_fom = SolverWrapper(solver, data) # Determine direction", "= self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg)", "dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine function", "* np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost", "= np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A,", "def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad", "grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad", "in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) # grad =", "def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad", "self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model", "in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) # grad =", "grads_romml = [] for h in hs: pi_h = solver_romml.cost_function(z_ + h *", "__init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z = dl.Function(V)", "= [] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in hs:", "imports from fom.forward_solve import Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin", "* eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h * eps_z) # dir_grad", "cost_function(self, z_v): 
self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost =", "solver_romml.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err =", "solver_fom.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err =", "matplotlib.pyplot as plt import dolfin as dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve", "w, y, A, B, C = self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg)", "0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad,", "plt.clf() ##### ## Examine function behavior #### hs = np.linspace(0, 1, 500) pis", "self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B, C =", "label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads,", "h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) # grad", "err_grads = [] grads = [] pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z)", "hs = np.power(2., -np.arange(n_eps)) err_grads = [] grads = [] pi_0 = solver_romml.cost_function(z_)", "eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err)", "Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ##", "pi_h = solver_fom.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g)", "= solver self.data = data self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w,", "0.5 * np.linalg.norm(y - self.data)**2 # cost = cost + reg_cost return cost", "= [] grads = [] pi_0 = 
solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for", "self.solver._k.assign(self.z) # self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost =", "solver_w.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms", "plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla()", "Evaluate directional derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML: {dir_grad}\")", "using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML: {dir_grad}\") n_eps = 32", "chol = make_cov_chol(V, length=1.2) solver = Fin(V, True) # Generate synthetic observations z_true", "* eps_z) # dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls = [] #", "plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine function behavior", "Examine function behavior #### hs = np.linspace(0, 1, 500) pis = [] #", "err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs,", "+ e_NN # self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost", "= [] for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z)", "# dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = [] # grads_fom =", "e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN # self.cost = 0.5", "dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost = None self.grad = None", "solver, data): self.solver = solver self.data = data self.z = dl.Function(V) def cost_function(self,", "dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) 
plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200)", "+ h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms =", "# grads = [] for h in hs: pi_h = solver_w.cost_function(z_ + h", "self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad", "\"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs,", "in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) a_g = (pi_h -", "= solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ + h", "dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png',", "= dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals)", "self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN # self.cost =", "dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self, z_v):", "norm = np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T @ norm) # Evaluate directional", "= self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V = get_space(resolution) chol", "reg_grad return grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model", "grads_fom) # plt.plot(hs, grads) # plt.plot(hs, grads_romml) # plt.legend([\"FOM\", \"ROM\", \"ROMML\"]) # plt.savefig('grad_dir.png',", "z_v): self.z.vector().set_local(z_v) w, y, A, B, C = self.solver.forward(self.z) y = 
self.solver.qoi_operator(w) reg_cost", "58, 200, 2000, V.dim()) err_model = load_bn_model() # Initialize reduced order model phi", "np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T @ norm) # Evaluate directional derivative using", "True) # Generate synthetic observations z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals =", "z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @ norm)", "= 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r", "np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 return", "matplotlib.use('macosx') import time import numpy as np import matplotlib.pyplot as plt import dolfin", "= self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN # self.cost = 0.5 *", "- self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z)", "= 40 V = get_space(resolution) chol = make_cov_chol(V, length=1.2) solver = Fin(V, True)", "cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] #", "eps_z) # grads.append(dir_grad) pi_foms = [] # grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_),", "* chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C = solver.forward(z_true) data", "(pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\",", "= make_cov_chol(V, length=1.2) solver = Fin(V, True) # Generate synthetic observations z_true =", "from fom.forward_solve import Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin from", "Tensorflow related imports from tensorflow.keras.optimizers import Adam 
class SolverWrapper: def __init__(self, solver, data):", "self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost def", "self.err_model = err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver = solver self.data", "self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution", "# self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5", "np.power(2., -np.arange(n_eps)) err_grads = [] grads = [] pi_0 = solver_romml.cost_function(z_) for h", "h * eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ + h * eps_z) #", "z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad", "gradient at norm = np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T @ norm) #", "plt.clf() err_grads = [] grads = [] pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_),", "+ h * eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ + h * eps_z)", "= get_space(resolution) chol = make_cov_chol(V, length=1.2) solver = Fin(V, True) # Generate synthetic", "plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine function behavior #### hs = np.linspace(0, 1,", "self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return", "err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200)", "None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r =", "pi_rommls = [] # grads_romml = [] for h in hs: pi_h =", "= self.solver.qoi_operator(w) 
reg_cost = dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y - self.data)**2 #", "cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z)", "= 0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z)", "pis.append(pi_h) # grad = solver_w.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad,", "err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r,", "Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg,", "@ norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C = solver.forward(z_true) data = solver.qoi_operator(w)", "data): self.solver = solver self.data = data self.z = dl.Function(V) def cost_function(self, z_v):", "= solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h = solver_fom.cost_function(z_", "# dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf()", "plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = []", "plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs, grads) #", "gradient z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @", "plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png',", "n_eps = 32 hs = np.power(2., 
-np.arange(n_eps)) err_grads = [] grads = []", "import time import numpy as np import matplotlib.pyplot as plt import dolfin as", "z_ = np.exp(0.5 * chol.T @ norm) # Evaluate directional derivative using ROMML", "self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad =", "def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost", "{dir_grad}\") n_eps = 32 hs = np.power(2., -np.arange(n_eps)) err_grads = [] grads =", "(.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla()", "= solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ + h", "(pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g", "plt import dolfin as dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve import Fin", "imports from tensorflow.keras.optimizers import Adam class SolverWrapper: def __init__(self, solver, data): self.solver =", "solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver =", "solver) solver_fom = SolverWrapper(solver, data) # Determine direction of gradient z = dl.Function(V)", "y, A, B, C = solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL error", "* np.linalg.norm(y - self.data)**2 # cost = cost + reg_cost return cost def", "+ h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms)", "solver self.data = data self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y,", "= solver_w.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err", "eps_z) pi_rommls.append(pi_h) # grad = 
solver_romml.gradient(z_ + h * eps_z) # dir_grad =", "in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) # grad =", "label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads", "##### ## Examine function behavior #### hs = np.linspace(0, 1, 500) pis =", "from gaussian_field import make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers import Adam class", "y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 +", "norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y,", "\"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_w.cost_function(z_)", "from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol # Tensorflow related imports", "Determine direction of gradient z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5", "np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls = [] # grads_romml = [] for h", "get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import", "- dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\")", "# Evaluate directional derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML:", "err = abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs,", "dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads", "as np 
import matplotlib.pyplot as plt import dolfin as dl; dl.set_log_level(40) # ROMML", "def gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad", "AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol # Tensorflow related", "import matplotlib; matplotlib.use('macosx') import time import numpy as np import matplotlib.pyplot as plt", "np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location to evaluate gradient", "grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for h", "Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, phi, True)", "plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad", "h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g -", "nodal_vals = np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C", "= abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads,", "hs: pi_h = solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_", "y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y - self.data)**2", "0.0003, 5, 58, 200, 2000, V.dim()) err_model = load_bn_model() # Initialize reduced order", "\"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf()", "in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) a_g = (pi_h -", "at norm = np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T @ norm) # Evaluate", 
"self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN # self.cost = 0.5 * np.linalg.norm(y_romml", "dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) #", "sys sys.path.append('../') import matplotlib; matplotlib.use('macosx') import time import numpy as np import matplotlib.pyplot", "None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) #", "- pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error", "import get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field", "= None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r", "self.data)**2 # cost = cost + reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v)", "err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200)", "solver = Fin(V, True) # Generate synthetic observations z_true = dl.Function(V) norm =", "= [] # grads_romml = [] for h in hs: pi_h = solver_romml.cost_function(z_", "grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 =", "pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs,", "= data self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B,", "chol.T @ norm) # Evaluate directional derivative using ROMML dir_grad = 
np.dot(solver_romml.gradient(z_), eps_z)", "dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs, grads) # plt.plot(hs, grads_romml) #", "return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad", "grad = solver_w.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) #", "C = self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost = 0.5 *", "make_cov_chol(V, length=1.2) solver = Fin(V, True) # Generate synthetic observations z_true = dl.Function(V)", "# Tensorflow related imports from tensorflow.keras.optimizers import Adam class SolverWrapper: def __init__(self, solver,", "0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad,", "y, A, B, C = self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost", "grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs,", "plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla()", "np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost =", "h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png',", "h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = []", "Fin(V, True) # Generate synthetic observations z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals", "Determine location to evaluate gradient at norm = np.random.randn(len(chol)) z_ = np.exp(0.5 *", "eps_z 
= eps_z/eps_norm # Determine location to evaluate gradient at norm = np.random.randn(len(chol))", "self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad return grad", "self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self,", "= (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads,", "= np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls = [] # grads_romml = [] for", "a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err", "w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5 * np.linalg.norm(y_r", "= 0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z)", "synthetic observations z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T", "= cost + reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z,", "ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver, data) #", "plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad =", "self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V = get_space(resolution) chol =", "= load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim()) err_model = load_bn_model() #", "pi_0 = solver_romml.cost_function(z_) for h in hs: pi_h = solver_romml.cost_function(z_ + h *", "= abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\",", 
"np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z)", "grads_fom.append(dir_grad) pi_rommls = [] # grads_romml = [] for h in hs: pi_h", "# plt.plot(hs, grads) # plt.plot(hs, grads_romml) # plt.legend([\"FOM\", \"ROM\", \"ROMML\"]) # plt.savefig('grad_dir.png', dpi=200)", "= np.power(2., -np.arange(n_eps)) err_grads = [] grads = [] pi_0 = solver_romml.cost_function(z_) for", "eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location to evaluate gradient at", "solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h *", "+ h * eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ + h * eps_z)", "= err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver = solver self.data =", "self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v)", "* eps_z) # dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200)", "self.grad class RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r =", "class RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r", "plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads =", "# Setup DL error model # err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58,", "dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h = solver_w.cost_function(z_ + h", "C = solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL error model # err_model", "make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers import Adam class SolverWrapper: def __init__(self,", "self.grad resolution = 40 V = get_space(resolution) chol = make_cov_chol(V, length=1.2) solver =", "pi_h = 
solver_w.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g)", "h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) # grad", "pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad,", "plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_w.cost_function(z_) dir_grad =", "reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data)", "solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver,", "import Adam class SolverWrapper: def __init__(self, solver, data): self.solver = solver self.data =", "w, y, A, B, C = solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL", "eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ + h * eps_z) # dir_grad =", "pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs, grads) # plt.plot(hs,", "dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\")", "self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2", "for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) #", "matplotlib; matplotlib.use('macosx') import time import numpy as np import matplotlib.pyplot as plt import", "AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model,", "DL error model # err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000,", "RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver, data) # Determine direction of gradient z", "related imports from 
tensorflow.keras.optimizers import Adam class SolverWrapper: def __init__(self, solver, data): self.solver", "plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads =", "= np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis)", "eps_z) # grads_fom.append(dir_grad) pi_rommls = [] # grads_romml = [] for h in", "+ h * eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h * eps_z)", "return self.grad resolution = 40 V = get_space(resolution) chol = make_cov_chol(V, length=1.2) solver", "pis = [] # grads = [] for h in hs: pi_h =", "np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for h in hs: pi_h = solver_fom.cost_function(z_", "Adam, 0.0003, 5, 58, 200, 2000, V.dim()) err_model = load_bn_model() # Initialize reduced", "[] grads = [] pi_0 = solver_romml.cost_function(z_) for h in hs: pi_h =", "dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = [] # grads_fom = []", "np.exp(0.5 * chol.T @ norm) # Evaluate directional derivative using ROMML dir_grad =", "# grad = solver_romml.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z)", "= dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y - self.data)**2 # cost = cost", "norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm =", "+ h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g", "solver_w.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err =", "= self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def", "err_model = load_bn_model() # Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r =", "evaluate gradient at norm = np.random.randn(len(chol)) z_ = np.exp(0.5 
* chol.T @ norm)", "err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf()", "dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model", "plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png')", "grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in", "= AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w =", "h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) # grad", "location to evaluate gradient at norm = np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T", "gradient FOM: {dir_grad_fom}\") for h in hs: pi_h = solver_fom.cost_function(z_ + h *", "Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads =", "h * eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ + h * eps_z) #", "200, 2000, V.dim()) err_model = load_bn_model() # Initialize reduced order model phi =", "grad + reg_grad return grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model", "pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls)", "dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w,", "norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) 
eps_z = eps_z/eps_norm # Determine", "np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A, B,", "* eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad)", "deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol # Tensorflow related imports from", "np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = [] # grads_fom = [] dir_grad_fom =", "# Generate synthetic observations z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5", "plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs,", "pi_h = solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ +", "from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol", "self.solver_r = solver_r self.z = dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost", "pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h =", "dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve import Fin from fom.thermal_fin import get_space", "y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN #", "* chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z =", "resolution = 40 V = get_space(resolution) chol = make_cov_chol(V, length=1.2) solver = Fin(V,", "(.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla()", "eps_z) # dir_grad = np.dot(grad, 
eps_z) # grads_fom.append(dir_grad) pi_rommls = [] # grads_romml", "# ROMML imports from fom.forward_solve import Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM", "self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y - self.data)**2 # cost", "RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z", "np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z", "= solver_romml.cost_function(z_) for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z)", "dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls = [] # grads_romml = []", "np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C = solver.forward(z_true)", "dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B, C = self.solver.forward(self.z) y", "eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location to", "plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf()", "gaussian_field import make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers import Adam class SolverWrapper:", "self.solver_r.data self.cost = None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r =", "# grad = solver_w.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z)", "plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine", "eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for h in hs: pi_h = solver_fom.cost_function(z_ +", "dl.assemble(self.solver.reg) cost = 0.5 * 
np.linalg.norm(y - self.data)**2 # cost = cost +", "hs = np.linspace(0, 1, 500) pis = [] # grads = [] for", "- self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost", "= solver.qoi_operator(w) # Setup DL error model # err_model = load_parametric_model_avg('elu', Adam, 0.0003,", "grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r =", "1, 500) pis = [] # grads = [] for h in hs:", "dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs,", "eps_z) # dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = [] # grads_fom", "err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver", "import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol # Tensorflow", "grad = grad + reg_grad return grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r,", "# self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5", "* np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost", "abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First", "- self.data)**2 # cost = cost + reg_cost return cost def gradient(self, z_v):", "[] pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h", "A, B, C = solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL error model", "= self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # 
self.cost = 0.5 * np.linalg.norm(y_r -", "[] grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h", "as dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve import Fin from fom.thermal_fin import", "norm) # Evaluate directional derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient", "Setup DL error model # err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200,", "self.solver = solver self.data = data self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v)", "h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) a_g = (pi_h", "Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads =", "plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0", "self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return", "= np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for h in hs: pi_h =", "z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5", "SolverWrapper(solver, data) # Determine direction of gradient z = dl.Function(V) norm = np.random.randn(len(chol))", "cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B, C = self.solver.forward(self.z) y = self.solver.qoi_operator(w)", "err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf()", "cost = cost + reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad =", 
"z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg)", "- self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost", "eps_z = np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm =", "# grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for", "directional derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML: {dir_grad}\") n_eps", "self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution =", "B, C = solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL error model #", "dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML: {dir_grad}\") n_eps = 32 hs =", "0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r -", "dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V = get_space(resolution) chol = make_cov_chol(V, length=1.2)", "FOM: {dir_grad_fom}\") for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z)", "time import numpy as np import matplotlib.pyplot as plt import dolfin as dl;", "solver_romml.cost_function(z_) for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) a_g", "# grads_fom.append(dir_grad) pi_rommls = [] # grads_romml = [] for h in hs:", "self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V = get_space(resolution)", "= [] # grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM:", "gradient ROMML: {dir_grad}\") n_eps = 32 hs = np.power(2., -np.arange(n_eps)) err_grads = []", "grads, 
\"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 =", "get_space(resolution) chol = make_cov_chol(V, length=1.2) solver = Fin(V, True) # Generate synthetic observations", "to evaluate gradient at norm = np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T @", "plt.plot(hs, grads_fom) # plt.plot(hs, grads) # plt.plot(hs, grads_romml) # plt.legend([\"FOM\", \"ROM\", \"ROMML\"]) #", "solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL error model # err_model = load_parametric_model_avg('elu',", "load_bn_model() # Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model,", "abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First", "z_true.vector().set_local(nodal_vals) w, y, A, B, C = solver.forward(z_true) data = solver.qoi_operator(w) # Setup", "500) pis = [] # grads = [] for h in hs: pi_h", "load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers import", "solver_r, solver) solver_fom = SolverWrapper(solver, data) # Determine direction of gradient z =", "gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad =", "plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads = []", "np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 return", "# dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls = [] # grads_romml =", "* eps_z) # dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = [] #", "\"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") 
plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs,", "dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for h in hs: pi_h", "# Determine direction of gradient z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z =", "grad = solver_fom.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) #", "def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B, C = self.solver.forward(self.z) y =", "err_grads = [] grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z)", "SolverWrapper: def __init__(self, solver, data): self.solver = solver self.data = data self.z =", "[] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h", "hs: pi_h = solver_w.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h", "plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom)", "self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad return grad class", "= [] grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for", "plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs, grads)", "def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0]", "self.solver._k.assign(self.z) y_romml = y_r + e_NN # self.cost = 0.5 * np.linalg.norm(y_romml -", "- self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z)", "plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) # 
plt.plot(hs, grads) # plt.plot(hs, grads_romml)", "- pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g -", "= solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h = solver_w.cost_function(z_", "Adam class SolverWrapper: def __init__(self, solver, data): self.solver = solver self.data = data", "z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml", "grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf()", "phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model,", "chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C = solver.forward(z_true) data =", "grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err)", "model # err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim()) err_model", "grads = [] pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in", "self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad class", "z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg)", "# err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs,", "= None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) 
self.solver._k.assign(self.z)", "32 hs = np.power(2., -np.arange(n_eps)) err_grads = [] grads = [] pi_0 =", "import sys sys.path.append('../') import matplotlib; matplotlib.use('macosx') import time import numpy as np import", "None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN =", "# grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla()", "= self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y", "h * eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h * eps_z) #", "reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad =", "self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN", "grads = [] pi_0 = solver_romml.cost_function(z_) for h in hs: pi_h = solver_romml.cost_function(z_", "self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad", "dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm", "e_NN # self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost =", "solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ + h *", "error model # err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())", "Generate synthetic observations z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 *", "plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First 
Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png')", "pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad,", "= RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver, data) # Determine direction of gradient", "= np.linspace(0, 1, 500) pis = [] # grads = [] for h", "import Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import", "= np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location to evaluate gradient at norm", "return cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:]", "V = get_space(resolution) chol = make_cov_chol(V, length=1.2) solver = Fin(V, True) # Generate", "solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver, data) # Determine direction of", "norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C = solver.forward(z_true) data = solver.qoi_operator(w) #", "+ reg_grad return grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model =", "A, B, C = self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost =", "self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost", "err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim()) err_model = load_bn_model()", "np.linspace(0, 1, 500) pis = [] # grads = [] for h in", "= self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r +", "* np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * 
np.linalg.norm(y_romml - self.data)**2", "gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad +", "ROMML: {dir_grad}\") n_eps = 32 hs = np.power(2., -np.arange(n_eps)) err_grads = [] grads", "z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location", "dl.set_log_level(40) # ROMML imports from fom.forward_solve import Fin from fom.thermal_fin import get_space from", "= dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad return grad class ROMMLSolverWrapper: def", "import dolfin as dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve import Fin from", "5, 58, 200, 2000, V.dim()) err_model = load_bn_model() # Initialize reduced order model", "Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\")", "= solver self.data = self.solver_r.data self.cost = None self.grad = None def cost_function(self,", "plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() #", "pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\")", "self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost def", "40 V = get_space(resolution) chol = make_cov_chol(V, length=1.2) solver = Fin(V, True) #", "reg_cost = dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y - self.data)**2 # cost =", "solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver, data) # Determine", "np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient 
ROMML: {dir_grad}\") n_eps = 32 hs = np.power(2., -np.arange(n_eps))", "label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads", "fom.forward_solve import Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model", "V.dim()) err_model = load_bn_model() # Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r", "ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML: {dir_grad}\") n_eps = 32 hs", "plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla()", "[] for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h)", "= np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx))", "= 32 hs = np.power(2., -np.arange(n_eps)) err_grads = [] grads = [] pi_0", "gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad +", "plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs,", "import numpy as np import matplotlib.pyplot as plt import dolfin as dl; dl.set_log_level(40)", "load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim()) err_model = load_bn_model() # Initialize", "for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) #", "pi_foms = [] # grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient", "= np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C 
=", "np.linalg.norm(y - self.data)**2 # cost = cost + reg_cost return cost def gradient(self,", "order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml", "solver): self.err_model = err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver = solver", "self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 *", "self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad =", "self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad +", "eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200)", "= 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_romml", "solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h = solver_fom.cost_function(z_ +", "plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0", "eps_z) # dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla()", "w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r", "* eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ + h * eps_z) # dir_grad", "a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs,", "= (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err =", "solver self.data = 
self.solver_r.data self.cost = None self.grad = None def cost_function(self, z_v):", "= np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML: {dir_grad}\") n_eps = 32 hs = np.power(2.,", "as plt import dolfin as dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve import", "= solver_w.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad)", "= np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T @ norm) # Evaluate directional derivative", "Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\")", "pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad)", "eps_z) for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) a_g", "solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom =", "data) # Determine direction of gradient z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z", "np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver)", "= [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for h in", "+ dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V = get_space(resolution) chol = make_cov_chol(V,", "plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png')", "= np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = [] # grads_fom = [] dir_grad_fom", "# Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, 
phi,", "data = solver.qoi_operator(w) # Setup DL error model # err_model = load_parametric_model_avg('elu', Adam,", "= solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL error model # err_model =", "return grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r", "solver.qoi_operator(w) # Setup DL error model # err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5,", "= np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z)", "pi_h = solver_romml.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g)", "solver_r self.z = dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost = None", "# grad = grad + reg_grad return grad class ROMMLSolverWrapper: def __init__(self, err_model,", "h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) a_g = (pi_h", "from tensorflow.keras.optimizers import Adam class SolverWrapper: def __init__(self, solver, data): self.solver = solver", "err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver = solver self.data = self.solver_r.data", "plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads = []", "import matplotlib.pyplot as plt import dolfin as dl; dl.set_log_level(40) # ROMML imports from", "pi_h = solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ +", "plt.cla() plt.clf() ##### ## Examine function behavior #### hs = np.linspace(0, 1, 500)", "= self.solver_r.data self.cost = None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r", "# grads.append(dir_grad) pi_foms = [] # grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z)", "direction of gradient z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 *", "cost = 0.5 * np.linalg.norm(y - 
self.data)**2 # cost = cost + reg_cost", "# plt.plot(hs, grads_fom) # plt.plot(hs, grads) # plt.plot(hs, grads_romml) # plt.legend([\"FOM\", \"ROM\", \"ROMML\"])", "= dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z)", "np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location to evaluate gradient at norm =", "chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm", "Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\")", "= solver_r self.z = dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost =", "= [] grads = [] pi_0 = solver_romml.cost_function(z_) for h in hs: pi_h", "solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h = solver_w.cost_function(z_ +", "tensorflow.keras.optimizers import Adam class SolverWrapper: def __init__(self, solver, data): self.solver = solver self.data", "plt.clf() err_grads = [] grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_),", "for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) a_g =", "self.data = data self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A,", "err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs,", "self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r)", "dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png',", "= dl.Function(V) 
def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B, C = self.solver.forward(self.z)", "derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional gradient ROMML: {dir_grad}\") n_eps =", "\"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine function behavior #### hs = np.linspace(0,", "solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ + h *", "plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = []", "[] # grads_romml = [] for h in hs: pi_h = solver_romml.cost_function(z_ +", "self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 *", "from fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model", "eps_z) for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) a_g", "data self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B, C", "z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @ norm)", "= 0.5 * np.linalg.norm(y - self.data)**2 # cost = cost + reg_cost return", "@ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm #", "eps_z) print(f\"Directional gradient ROMML: {dir_grad}\") n_eps = 32 hs = np.power(2., -np.arange(n_eps)) err_grads", "plt.cla() plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs, grads) # plt.plot(hs, grads_romml) # plt.legend([\"FOM\",", "ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z", "= ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver, 
data)", "self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v)", "self.z = dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost = None self.grad", "dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self, z_v):", "__init__(self, solver, data): self.solver = solver self.data = data self.z = dl.Function(V) def", "## Examine function behavior #### hs = np.linspace(0, 1, 500) pis = []", "= self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution = 40", "np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm", "\"-.k\", label=\"First Order\") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf()", "length=1.2) solver = Fin(V, True) # Generate synthetic observations z_true = dl.Function(V) norm", "* eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ + h * eps_z) # dir_grad", "= [] pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in hs:", "= np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location to evaluate", "for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) a_g =", "0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_romml -", "numpy as np import matplotlib.pyplot as plt import dolfin as dl; dl.set_log_level(40) #", "h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) a_g = (pi_h", "eps_z/eps_norm # Determine location to evaluate gradient at norm = np.random.randn(len(chol)) z_ =", "cost + reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v) 
grad = self.solver.gradient(self.z, self.data)", "* chol.T @ norm) # Evaluate directional derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_),", "phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver)", "y_r + e_NN # self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg)", "h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls = []", "solver_fom = SolverWrapper(solver, data) # Determine direction of gradient z = dl.Function(V) norm", "solver_fom.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls", "plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla()", "- dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error", "- dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\")", "of gradient z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T", "+ h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls =", "[] grads = [] pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h", "grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine function behavior #### hs =", "dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad return grad class ROMMLSolverWrapper: def __init__(self,", "= load_bn_model() # Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V,", "= abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, 
(.5*err_grads[0]/hs[0])*hs, \"-.k\",", "# grad = solver_fom.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z)", "# self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self, err_model,", "hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h", "in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) a_g = (pi_h -", "err_grads = [] grads = [] pi_0 = solver_romml.cost_function(z_) for h in hs:", "\"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs,", "= grad + reg_grad return grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver):", "self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self, err_model, solver_r,", "model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml =", "B, C = self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost = 0.5", "label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads,", "= np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h = solver_fom.cost_function(z_ + h *", "solver_romml.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs,", "2000, V.dim()) err_model = load_bn_model() # Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\")", "= solver_fom.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad)", "self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = 
self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml =", "return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad", "hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_", "grads = [] for h in hs: pi_h = solver_w.cost_function(z_ + h *", "for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) #", "= solver_romml.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err", "* np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2", "+ dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model =", "= self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN # self.cost", "= eps_z/eps_norm # Determine location to evaluate gradient at norm = np.random.randn(len(chol)) z_", "solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w", "= self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self, err_model, solver_r, solver):", "self.z.vector().set_local(z_v) w, y, A, B, C = self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost =", "self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5 *", "= [] pi_0 = solver_romml.cost_function(z_) for h in hs: pi_h = solver_romml.cost_function(z_ +", "np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost =", "function behavior #### hs = np.linspace(0, 1, 
500) pis = [] # grads", "dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads", "pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h =", "{dir_grad_fom}\") for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h)", "for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) a_g =", "grad = solver_romml.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) #", "= np.loadtxt('../data/basis_nine_param.txt',delimiter=\",\") solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r,", "plt.clf() plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine function behavior ####", "import make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers import Adam class SolverWrapper: def", "= Fin(V, True) # Generate synthetic observations z_true = dl.Function(V) norm = np.random.randn(len(chol))", "def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z =", "= [] # grads = [] for h in hs: pi_h = solver_w.cost_function(z_", "+ dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self,", "return self.grad class RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r", "[] pi_0 = solver_romml.cost_function(z_) for h in hs: pi_h = solver_romml.cost_function(z_ + h", "def __init__(self, solver, data): self.solver = solver self.data = data self.z = dl.Function(V)", "err_grads.append(err) plt.loglog(hs, err_grads, \"-ob\", label=\"Error Grad\") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, \"-.k\", label=\"First Order\") plt.savefig('grad_test_ROM.png', dpi=200)", "plt.semilogx(hs, grads, \"-ob\") plt.savefig('gradients_FOM.png') plt.cla() 
plt.clf() ##### ## Examine function behavior #### hs", "grads.append(dir_grad) pi_foms = [] # grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f\"Direction", "self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) #", "self.data = self.solver_r.data self.cost = None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v)", "print(f\"Direction gradient FOM: {dir_grad_fom}\") for h in hs: pi_h = solver_fom.cost_function(z_ + h", "+ reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad", "reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad return grad class ROMMLSolverWrapper:", "= solver_romml.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad)", "rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol #", "self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y -", "dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h = solver_fom.cost_function(z_ + h", "np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png',", "= y_r + e_NN # self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 +", "#### hs = np.linspace(0, 1, 500) pis = [] # grads = []", "class SolverWrapper: def __init__(self, solver, data): self.solver = solver self.data = data self.z", "= None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN", "[] dir_grad_fom = np.dot(solver_fom.gradient(z_), 
eps_z) print(f\"Direction gradient FOM: {dir_grad_fom}\") for h in hs:", "@ norm) # Evaluate directional derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f\"Directional", "plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_w.cost_function(z_) dir_grad", "hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h", "= np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h = solver_w.cost_function(z_ + h *" ]
[ "= json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count'] assert 1 == js['cloc']['count'] assert 23", "= MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count'] assert 1 ==", "== js['bloc']['count'] assert 1 == js['cloc']['count'] assert 23 == js['loc']['count'] assert 3 ==", "TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 ==", "class TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1", "pytest from ansiblemetrics.metrics_cal import MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal() js", "import MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss'))", "def test_(self): metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count']", "import pytest from ansiblemetrics.metrics_cal import MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal()", "test_(self): metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count'] assert", "MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert", "json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count'] assert 1 == js['cloc']['count'] assert 23 ==", "json import pytest from ansiblemetrics.metrics_cal import MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal =", "js['bloc']['count'] assert 1 == js['cloc']['count'] assert 23 == js['loc']['count'] assert 3 == 
js['nun']['count']", "MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count'] assert 1 == js['cloc']['count']", "from ansiblemetrics.metrics_cal import MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal() js =", "ansiblemetrics.metrics_cal import MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml',", "import json import pytest from ansiblemetrics.metrics_cal import MetricsCal class TestRoleTaskMetrics: def test_(self): metricCal", "'atss')) assert 1 == js['bloc']['count'] assert 1 == js['cloc']['count'] assert 23 == js['loc']['count']", "js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count'] assert 1 == js['cloc']['count'] assert", "1 == js['bloc']['count'] assert 1 == js['cloc']['count'] assert 23 == js['loc']['count'] assert 3", "assert 1 == js['bloc']['count'] assert 1 == js['cloc']['count'] assert 23 == js['loc']['count'] assert", "metricCal = MetricsCal() js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss')) assert 1 == js['bloc']['count'] assert 1" ]
[ "for items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return", "dataForAgentSelection import agents_record from collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0 sum_b_val", "= lambda x: x[1],reverse=True)) dd = dict(dd) counter = 0 for items in", "agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record)", "c) Last Time The Agent was selected (b) RETURNS a LIST of addresses", "VARIABLES EXPECTED: a) Trade-Off Parameter (Alpha) b) Weight/Reputation Score (Gamma) c) Last Time", "(trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list = []", "dict(dd) counter = 0 for items in dd.keys(): if counter == number: break", "a LIST of addresses of SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection", "counter = 0 for items in dd.keys(): if counter == number: break ret_list.append(items)", "for items in dd.keys(): if counter == number: break ret_list.append(items) counter+=1 return ret_list", "items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val", "= OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True)) dd = dict(dd) counter = 0", "x: x[1],reverse=True)) dd = dict(dd) counter = 0 for items in dd.keys(): if", "(Gamma) c) Last Time The Agent was selected (b) RETURNS a LIST of", "break ret_list.append(items) 
counter+=1 return ret_list ##DRIVER## if __name__ == '__main__': print(\"The Sampled Agents", "key = lambda x: x[1],reverse=True)) dd = dict(dd) counter = 0 for items", "in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping def", "was selected (b) RETURNS a LIST of addresses of SAMPLED AGENTS ''' #agents_record", "(b) RETURNS a LIST of addresses of SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]}", "SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from collections import", "selected (b) RETURNS a LIST of addresses of SAMPLED AGENTS ''' #agents_record =", "Time The Agent was selected (b) RETURNS a LIST of addresses of SAMPLED", "Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]} trade_off = 0.6 final = calc_probabilities(agents_record,trade_off) print(sample_agents(6,final))", "{\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma =", "return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items in", "sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items", "collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0 sum_b_val = 0 for items", "from dataForAgentSelection import agents_record from collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0", "ret_list.append(items) counter+=1 return ret_list ##DRIVER## if __name__ == '__main__': print(\"The Sampled 
Agents are:\")", "= defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) +", "OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True)) dd = dict(dd) counter = 0 for", "0 for items in dd.keys(): if counter == number: break ret_list.append(items) counter+=1 return", "__name__ == '__main__': print(\"The Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]} trade_off = 0.6", "Agent was selected (b) RETURNS a LIST of addresses of SAMPLED AGENTS '''", "ret_mapping[items] = agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list = [] dd = OrderedDict(sorted(final_structure.items(),", "= dict(dd) counter = 0 for items in dd.keys(): if counter == number:", "The Agent was selected (b) RETURNS a LIST of addresses of SAMPLED AGENTS", "lambda x: x[1],reverse=True)) dd = dict(dd) counter = 0 for items in dd.keys():", "a) Trade-Off Parameter (Alpha) b) Weight/Reputation Score (Gamma) c) Last Time The Agent", "dd = dict(dd) counter = 0 for items in dd.keys(): if counter ==", "ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma))", "+ ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list = [] dd", "Parameter (Alpha) b) Weight/Reputation Score (Gamma) c) Last Time The Agent was selected", "= 0 sum_b_val = 0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val", "sample_agents(number,final_structure): ret_list = [] dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True)) dd", "ret_mapping def sample_agents(number,final_structure): 
ret_list = [] dd = OrderedDict(sorted(final_structure.items(), key = lambda x:", "calc_sum(agents_record): sum_gamma = 0 sum_b_val = 0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1]", "def sample_agents(number,final_structure): ret_list = [] dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True))", "Score (Gamma) c) Last Time The Agent was selected (b) RETURNS a LIST", "0 sum_b_val = 0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def", "calc_sum(agents_record) for items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob", "of addresses of SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record", "= 0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping", "sum_b_val = 0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param):", "agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list", "LIST of addresses of SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import", "return ret_mapping def sample_agents(number,final_structure): ret_list = [] dd = OrderedDict(sorted(final_structure.items(), key = lambda", "items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + 
((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping", "of SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from collections", "sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items in agents_record.keys():", "counter+=1 return ret_list ##DRIVER## if __name__ == '__main__': print(\"The Sampled Agents are:\") #a_record", "[] dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True)) dd = dict(dd) counter", "from collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0 sum_b_val = 0 for", "= [] dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True)) dd = dict(dd)", "Last Time The Agent was selected (b) RETURNS a LIST of addresses of", "def calc_sum(agents_record): sum_gamma = 0 sum_b_val = 0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0]", "0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping =", "= agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list = [] dd = OrderedDict(sorted(final_structure.items(), key", "##DRIVER## if __name__ == '__main__': print(\"The Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]} trade_off", "calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items in agents_record.keys(): agent_prob =", "def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items in agents_record.keys(): agent_prob", 
"((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list = [] dd =", "if counter == number: break ret_list.append(items) counter+=1 return ret_list ##DRIVER## if __name__ ==", "= {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma", "defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0 sum_b_val = 0 for items in agents_record.keys():", "if __name__ == '__main__': print(\"The Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]} trade_off =", "addresses of SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from", "sum_gamma,sum_b_val = calc_sum(agents_record) for items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items]", "sum_gamma = 0 sum_b_val = 0 for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return", "#agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from collections import defaultdict,OrderedDict def calc_sum(agents_record):", "Weight/Reputation Score (Gamma) c) Last Time The Agent was selected (b) RETURNS a", "EXPECTED: a) Trade-Off Parameter (Alpha) b) Weight/Reputation Score (Gamma) c) Last Time The", "= (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list =", "AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from collections import defaultdict,OrderedDict", 
"x[1],reverse=True)) dd = dict(dd) counter = 0 for items in dd.keys(): if counter", "''' VARIABLES EXPECTED: a) Trade-Off Parameter (Alpha) b) Weight/Reputation Score (Gamma) c) Last", "for items in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int)", "print(\"The Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]} trade_off = 0.6 final = calc_probabilities(agents_record,trade_off)", "number: break ret_list.append(items) counter+=1 return ret_list ##DRIVER## if __name__ == '__main__': print(\"The Sampled", "items in dd.keys(): if counter == number: break ret_list.append(items) counter+=1 return ret_list ##DRIVER##", "defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val))", "''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from dataForAgentSelection import agents_record from collections import defaultdict,OrderedDict def", "in dd.keys(): if counter == number: break ret_list.append(items) counter+=1 return ret_list ##DRIVER## if", "agent_prob return ret_mapping def sample_agents(number,final_structure): ret_list = [] dd = OrderedDict(sorted(final_structure.items(), key =", "dd.keys(): if counter == number: break ret_list.append(items) counter+=1 return ret_list ##DRIVER## if __name__", "return ret_list ##DRIVER## if __name__ == '__main__': print(\"The Sampled Agents are:\") #a_record =", "RETURNS a LIST of addresses of SAMPLED AGENTS ''' #agents_record = {\"ETH_ADDRESS\":[GAMMA,B_VAL]} from", "in agents_record.keys(): sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = 
defaultdict(int) sum_gamma,sum_b_val =", "Trade-Off Parameter (Alpha) b) Weight/Reputation Score (Gamma) c) Last Time The Agent was", "import agents_record from collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0 sum_b_val =", "ret_list = [] dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True)) dd =", "= calc_sum(agents_record) for items in agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] =", "agents_record.keys(): agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val)) ret_mapping[items] = agent_prob return ret_mapping def sample_agents(number,final_structure):", "(Alpha) b) Weight/Reputation Score (Gamma) c) Last Time The Agent was selected (b)", "agents_record from collections import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0 sum_b_val = 0", "b) Weight/Reputation Score (Gamma) c) Last Time The Agent was selected (b) RETURNS", "sum_gamma+=agents_record[items][0] sum_b_val+=agents_record[items][1] return sum_gamma,sum_b_val def calc_probabilities(agents_record,trade_off_param): ret_mapping = defaultdict(int) sum_gamma,sum_b_val = calc_sum(agents_record) for", "ret_list ##DRIVER## if __name__ == '__main__': print(\"The Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]}", "'__main__': print(\"The Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]} trade_off = 0.6 final =", "import defaultdict,OrderedDict def calc_sum(agents_record): sum_gamma = 0 sum_b_val = 0 for items in", "counter == number: break ret_list.append(items) counter+=1 return ret_list ##DRIVER## if __name__ == '__main__':", "== number: break ret_list.append(items) counter+=1 return ret_list ##DRIVER## if __name__ 
== '__main__': print(\"The", "== '__main__': print(\"The Sampled Agents are:\") #a_record = {\"ascaadcadcac\":[0.5,0.4],\"ssacdcdac\":[0.9,0.4],\"adscdac\":[0.8,0.9]} trade_off = 0.6 final", "= 0 for items in dd.keys(): if counter == number: break ret_list.append(items) counter+=1", "dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True)) dd = dict(dd) counter =" ]
[ "from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from msedge.selenium_tools import EdgeOptions from", "d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d:", "= driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver,", "d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r", "input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) #", "= webdriver.ChromeOptions() # option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions()", "r = requests.get(page_img_src, verify=False) if r.ok: if len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\")", "<reponame>sailinglove/personal-general from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from msedge.selenium_tools import EdgeOptions", "import WebDriverWait from msedge.selenium_tools import EdgeOptions from msedge.selenium_tools import Edge from selenium.common import", 
"driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ') #", "with open('cookies.json', 'w') as f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f: cookies", "driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box", "Headed edge_options = EdgeOptions() edge_options.use_chromium = True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver", "driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() #", "# driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option = webdriver.ChromeOptions() # option.add_argument('headless') #", "driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click()", "# cookies = driver.get_cookies() # with open('cookies.json', 'w') as f: # f.write(json.dumps(cookies)) with", 
"'志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box =", "range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if r.ok:", "temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies()", "# option = webdriver.ChromeOptions() # option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) # Headed edge_options", "10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username)", "E import requests import json username = '<EMAIL>' password = '<PASSWORD>' link =", "'<PASSWORD>' link = 'education.beijing2022.cn' # Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless #", "selenium.common import exceptions as E import requests import json username = '<EMAIL>' password", "# driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click()", "= '<PASSWORD>' link = 
'education.beijing2022.cn' # Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless", "EdgeOptions from msedge.selenium_tools import Edge from selenium.common import exceptions as E import requests", "= Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(),", "# edge_options = EdgeOptions() # edge_options.use_chromium = True # edge_options.add_argument('headless') # driver =", "webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option = webdriver.ChromeOptions() # option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option)", "d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda", "# temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies =", "in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d:", "cookies: 
driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click()", "driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver,", "10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/'))", "10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no =", "'r') as f: cookies = json.loads(f.read()) print(cookies) for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse')", 
"driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ') # temp_input_box.send_keys(otp) #", "WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d:", "f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f: cookies = json.loads(f.read()) print(cookies) for cookie in", "open('cookies.json', 'r') as f: cookies = json.loads(f.read()) print(cookies) for cookie in cookies: driver.add_cookie(cookie)", "= json.loads(f.read()) print(cookies) for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d:", "= True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() # Headless #", "selenium.webdriver.support.ui import WebDriverWait from msedge.selenium_tools import EdgeOptions from msedge.selenium_tools import Edge from selenium.common", "open('cookies.json', 'w') as f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f: cookies =", "f: cookies = json.loads(f.read()) print(cookies) for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver,", "'<EMAIL>' password = '<PASSWORD>' link = 'education.beijing2022.cn' # Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe')", "msedge.selenium_tools import Edge from selenium.common import exceptions as E import requests import json", "username 
= '<EMAIL>' password = '<PASSWORD>' link = 'education.beijing2022.cn' # Head # driver", "from selenium.common import exceptions as E import requests import json username = '<EMAIL>'", "= True # edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda", "msedge.selenium_tools import EdgeOptions from msedge.selenium_tools import Edge from selenium.common import exceptions as E", "# Headed edge_options = EdgeOptions() edge_options.use_chromium = True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") #", "import EdgeOptions from msedge.selenium_tools import Edge from selenium.common import exceptions as E import", "# driver = webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions() edge_options.use_chromium = True driver", "cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda", "driver = webdriver.Edge() # Headless # edge_options = EdgeOptions() # edge_options.use_chromium = True", "= Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() # Headless # edge_options = EdgeOptions()", "# driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ') # temp_input_box.send_keys(otp)", "driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: 
d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() #", "# Headless # option = webdriver.ChromeOptions() # option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) #", "for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver,", "from msedge.selenium_tools import Edge from selenium.common import exceptions as E import requests import", "driver = webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions() edge_options.use_chromium = True driver =", "EdgeOptions() # edge_options.use_chromium = True # edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn')", "as f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f: cookies = json.loads(f.read()) print(cookies)", "= EdgeOptions() edge_options.use_chromium = True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge()", "page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src = WebDriverWait(driver,", "= webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option = webdriver.ChromeOptions() # option.add_argument('headless') # driver =", "'w') as f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f: cookies = json.loads(f.read())", "webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions() edge_options.use_chromium = 
True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\")", "= input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div'))", "Headless # edge_options = EdgeOptions() # edge_options.use_chromium = True # edge_options.add_argument('headless') # driver", "d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r =", "open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1) else: print(\"no data\") else: print(\"not ok\") driver.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[4]').click()", "Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() # Headless # edge_options = EdgeOptions() #", "driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda", "# WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies() # with open('cookies.json',", "d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if r.ok: if len(r.content) > 0: with open(\"{}.png\".format(page+1),", "r.ok: if len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1) else:", "\"wb\") as f: f.write(r.content) 
print(page+1) else: print(\"no data\") else: print(\"not ok\") driver.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[4]').click() driver.quit()", "edge_options = EdgeOptions() # edge_options.use_chromium = True # edge_options.add_argument('headless') # driver = Edge(options=edge_options)", "executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() # Headless # edge_options = EdgeOptions() # edge_options.use_chromium", "Headless # option = webdriver.ChromeOptions() # option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) # Headed", "# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d:", "driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver,", "import exceptions as E import requests import json username = '<EMAIL>' password =", "> 0: with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1) else: print(\"no data\") else:", "as E import requests import json username = '<EMAIL>' password = '<PASSWORD>' link", "page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if r.ok: if", "from msedge.selenium_tools import EdgeOptions from msedge.selenium_tools import Edge from selenium.common import exceptions as", "import requests import json username = '<EMAIL>' 
password = '<PASSWORD>' link = 'education.beijing2022.cn'", "len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1) else: print(\"no data\")", "selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from msedge.selenium_tools import EdgeOptions from msedge.selenium_tools", "# Headless # edge_options = EdgeOptions() # edge_options.use_chromium = True # edge_options.add_argument('headless') #", "driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option = webdriver.ChromeOptions() # option.add_argument('headless') # driver", "password = '<PASSWORD>' link = 'education.beijing2022.cn' # Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') #", "d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP:", "# driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies() #", "for page in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src,", "print(cookies) for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() #", "10).until(lambda d: 
d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\")", "edge_options.use_chromium = True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() # Headless", "# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda", "10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver,", "webdriver.ChromeOptions() # option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions() edge_options.use_chromium", "d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src", "with open('cookies.json', 'r') as f: cookies = json.loads(f.read()) print(cookies) for cookie in cookies:", "edge_options = EdgeOptions() edge_options.use_chromium = True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver =", 
"'education.beijing2022.cn' # Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option = webdriver.ChromeOptions()", "page in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False)", "edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() #", "10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp", "driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies() # with", "d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for", "from selenium.webdriver.support.ui import WebDriverWait from msedge.selenium_tools import EdgeOptions from msedge.selenium_tools import Edge from", "requests import json username = '<EMAIL>' password = '<PASSWORD>' link = 'education.beijing2022.cn' #", "30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # 
cookies = driver.get_cookies() # with open('cookies.json', 'w') as", "d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if r.ok: if len(r.content) > 0: with", "otp = input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(),", "10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if r.ok: if len(r.content) > 0:", "d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver,", "if len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1) else: print(\"no", "exceptions as E import requests import json username = '<EMAIL>' password = '<PASSWORD>'", "edge_options.use_chromium = True # edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver,", "json.loads(f.read()) print(cookies) for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click()", "WebDriverWait from msedge.selenium_tools import EdgeOptions from msedge.selenium_tools 
import Edge from selenium.common import exceptions", "Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option = webdriver.ChromeOptions() # option.add_argument('headless')", "d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) #", "# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\")", "= webdriver.Edge() # Headless # edge_options = EdgeOptions() # edge_options.use_chromium = True #", "EdgeOptions() edge_options.use_chromium = True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() #", "True # edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d:", "# driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() #", "verify=False) if r.ok: if len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content)", "# driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # 
driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) #", "driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda", "WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies() # with open('cookies.json', 'w')", "webdriver from selenium.webdriver.support.ui import WebDriverWait from msedge.selenium_tools import EdgeOptions from msedge.selenium_tools import Edge", "WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no", "= requests.get(page_img_src, verify=False) if r.ok: if len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\") as", "= webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions() edge_options.use_chromium = True driver = Edge(options=edge_options,", "WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: 
d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d:", "driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() # Headless # edge_options =", "d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp =", "= int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda", "d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda", "= 'education.beijing2022.cn' # Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option =", "cookies = driver.get_cookies() # with open('cookies.json', 'w') as f: # f.write(json.dumps(cookies)) with open('cookies.json',", "= driver.get_cookies() # with open('cookies.json', 'w') as f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r')", "as f: cookies = json.loads(f.read()) print(cookies) for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') #", 
"d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page", "if r.ok: if len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1)", "= EdgeOptions() # edge_options.use_chromium = True # edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window()", "# option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions() edge_options.use_chromium =", "# otp = input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d:", "Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click()", "# driver = webdriver.Edge() # Headless # edge_options = EdgeOptions() # edge_options.use_chromium =", "int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d:", "= WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if r.ok: if len(r.content)", "import json username = '<EMAIL>' password = '<PASSWORD>' link = 
'education.beijing2022.cn' # Head", "# f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f: cookies = json.loads(f.read()) print(cookies) for cookie", "d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click() # driver.find_element_by_xpath(\"//*[@class='ivu-cascader-menu']/li[2]\").click() # driver.find_element_by_xpath(\"//li[contains(text(), '志愿者')]\").click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password)", "WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath(\"//input[@class='user']\")).send_keys(username) # driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") #", "0: with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1) else: print(\"no data\") else: print(\"not", "temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() #", "') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click() # WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies", "# driver.find_element_by_xpath(\"//input[@class='password']\").send_keys(password) # driver.find_element_by_xpath(\"//a[@class='login-btn']\").click() # temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ')", "requests.get(page_img_src, verify=False) if r.ok: if len(r.content) > 0: with open(\"{}.png\".format(page+1), \"wb\") as f:", "link = 'education.beijing2022.cn' # Head # 
driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option", "d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies() # with open('cookies.json', 'w') as f: #", "driver.get_cookies() # with open('cookies.json', 'w') as f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r') as", "# temp_input_box = driver.find_element_by_xpath(\"//input[@placeholder='请输入手机动态口令']\") # otp = input('OTP: ') # temp_input_box.send_keys(otp) # driver.find_element_by_xpath(\"/html/body/div[3]/div[2]/div[3]/form/a\").click()", "cookies = json.loads(f.read()) print(cookies) for cookie in cookies: driver.add_cookie(cookie) driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse') # WebDriverWait(driver, 10).until(lambda", "import Edge from selenium.common import exceptions as E import requests import json username", "# edge_options.use_chromium = True # edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') #", "True driver = Edge(options=edge_options, executable_path=\"D:\\Applications\\edgedriver_win64\\MicrosoftWebDriver.exe\") # driver = webdriver.Edge() # Headless # edge_options", "10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no):", "d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in range(page_no): page_img_src =", "d: 
d.find_element_by_xpath('//*[contains(text(), \"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies() # with open('cookies.json', 'w') as f:", "json username = '<EMAIL>' password = '<PASSWORD>' link = 'education.beijing2022.cn' # Head #", "with open(\"{}.png\".format(page+1), \"wb\") as f: f.write(r.content) print(page+1) else: print(\"no data\") else: print(\"not ok\")", "in range(page_no): page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if", "= '<EMAIL>' password = '<PASSWORD>' link = 'education.beijing2022.cn' # Head # driver =", "option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) # Headed edge_options = EdgeOptions() edge_options.use_chromium = True", "# Head # driver = webdriver.Chrome('D:\\Applications\\chromedriver_win32\\chromedriver.exe') # Headless # option = webdriver.ChromeOptions() #", "# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click()", "f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f: cookies = json.loads(f.read()) print(cookies) for", "WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click() page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[2]/span[3]')).text.strip('/')) for page in", "import webdriver from selenium.webdriver.support.ui import WebDriverWait from msedge.selenium_tools import 
EdgeOptions from msedge.selenium_tools import", "option = webdriver.ChromeOptions() # option.add_argument('headless') # driver = webdriver.Chrome(chrome_options=option) # Headed edge_options =", "Edge from selenium.common import exceptions as E import requests import json username =", "WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"iv-means-img\"]/div[1]/div/div[1]/img')).get_attribute(\"src\") r = requests.get(page_img_src, verify=False) if r.ok: if len(r.content) >", "\"必修课程\")]/following-sibling::div')) # cookies = driver.get_cookies() # with open('cookies.json', 'w') as f: # f.write(json.dumps(cookies))", "webdriver.Edge() # Headless # edge_options = EdgeOptions() # edge_options.use_chromium = True # edge_options.add_argument('headless')", "# edge_options.add_argument('headless') # driver = Edge(options=edge_options) driver.maximize_window() driver.get('https://education.beijing2022.cn') # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class=\"iv-login\"]')).click()", "WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click() # WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click() WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id=\"app\"]/div/div[2]/div[1]/div[2]/div')).click()", "# with open('cookies.json', 'w') as f: # f.write(json.dumps(cookies)) with open('cookies.json', 'r') as f:" ]
[ "self.is_enabled = True self.on = True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable()", "= NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5, 6,", "hit.entity == self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_27:", "= 4 if hit.entity == self.block_4_20: self.player.SPEED = 5 if hit.entity == self.block_4_21:", "4 if hit.entity == self.block_4_14: self.player.SPEED = 5 elif hit.entity == self.block_4_15: self.player.SPEED", "0)) self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26 =", "self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable()", "None # Stops the player from falling forever if self.is_enabled == True and", "-20, -10)) self.block_4_30 = JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32", "self.block_4_21: self.player.SPEED = 6.5 if hit.entity == self.block_4_22: self.player.SPEED = 7 if hit.entity", "10, 10)) self.lava = Entity(model = \"plane\", color = \"#ff6700\", collider = \"mesh\",", "self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26 = JumpBlock((-275,", "== True and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5,", "140)) self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275, -20, 40)) self.block_4_29 =", "disable(self): self.is_enabled = False self.on = False self.level.disable() self.lava.disable() 
self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable()", "NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36, 18, 217))", "= NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50, 35,", "= \"lava_level_4.obj\", color = \"#454545\", collider = \"mesh\", scale = (10, 10, 10))", "self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable()", "True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable()", "update(self): if self.is_enabled == True: self.light = DirectionalLight() self.is_enabled = False else: self.light", "2, 8)) self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11", "1.2 if hit.entity == self.block_4_30: self.player.jump_height = 1.2 elif hit.entity == self.block_4_31: self.player.jump_height", "DirectionalLight() self.is_enabled = False else: self.light = None # Stops the player from", "self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable()", "= JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55, 18,", "-20, 90)) self.block_4_28 = 
JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30", "\"#ff6700\", collider = \"mesh\", scale = (1000, 1, 1000), position = (0, -30,", "self.is_enabled = False else: self.light = None # Stops the player from falling", "self.player.jump_height = normalJump elif hit.entity == self.block_4_13: self.player.jump_height = normalJump if hit.entity ==", "self.player.jump_height = normalJump if hit.entity == self.block_4_19: self.player.SPEED = 4 if hit.entity ==", "= (0, 0, 0) self.player.count = 0.0 # What entity the player hits", "SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5, 2, 8))", "9 elif hit.entity == self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity", "NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5, 2, -96))", "def speed(self): self.player.SPEED = normalSpeed def update(self): if self.is_enabled == True: self.light =", "= raycast(self.player.position, self.player.down, distance = 2, ignore = [self.player, ]) if hit.entity ==", "self.block_4_6: self.player.SPEED = 5 elif hit.entity == self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height =", "= 5 elif hit.entity == self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if", "== self.block_4_13: self.player.jump_height = normalJump if hit.entity == self.block_4_13: self.player.SPEED = 4 if", "self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED =", "normalSpeed = 2 boostSpeed = 5 normalJump = 0.3 # Level04 class Level04(Entity):", "= [self.player, ]) if hit.entity == self.lava: self.player.SPEED = normalSpeed self.player.jump_height = normalJump", "= 1.2 if hit.entity == 
self.block_4_27: self.player.jump_height = 1.2 if hit.entity == self.block_4_28:", "= SpeedBlock((-55, 18, 217), (0, 90, 0)) self.block_4_20 = SpeedBlock((-85, 18, 217), (0,", "-25, 217)) self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19", "217)) self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90, 0)) self.block_4_20 = SpeedBlock((-85, 18,", "= SpeedBlock((-85, 18, 217), (0, 90, 0)) self.block_4_21 = SpeedBlock((-120, 18, 217), (0,", "normalSpeed def update(self): if self.is_enabled == True: self.light = DirectionalLight() self.is_enabled = False", "= NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5 = NormalBlock((5, 2,", "JumpBlock((-275, -20, 190)) self.block_4_26 = JumpBlock((-275, -20, 140)) self.block_4_27 = JumpBlock((-275, -20, 90))", "40)) self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13 =", "self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90, 0)) self.block_4_21 = SpeedBlock((-120, 18, 217),", "= JumpBlock((-275, -20, 140)) self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275, -20,", "= JumpBlock((-275, -20, 190)) self.block_4_26 = JumpBlock((-275, -20, 140)) self.block_4_27 = JumpBlock((-275, -20,", "* normalSpeed = 2 boostSpeed = 5 normalJump = 0.3 # Level04 class", "self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5 = NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5,", "self.player.jump_height = normalJump if hit.entity == self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height = 1.2", "sys sys.path.append('../Parkour/') from block import * normalSpeed = 2 boostSpeed = 5 normalJump", "= 1.2 elif hit.entity == self.block_4_12: self.player.jump_height = normalJump elif hit.entity == self.block_4_13:", "= NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90, 0)) self.block_4_20", "if hit.entity == self.block_4_6: self.player.SPEED = 5 
elif hit.entity == self.block_4_7: self.player.SPEED =", "self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable()", "1.2 elif hit.entity == self.block_4_12: self.player.jump_height = normalJump elif hit.entity == self.block_4_13: self.player.jump_height", "self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable()", "self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_11: self.player.jump_height = 1.2", "self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled = True self.on = True", "= JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275, -20,", "= SpeedBlock((-165, 18, 217), (0, 90, 0)) self.block_4_23 = SpeedBlock((-215, 18, 217), (0,", "NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33 = NormalBlock((-275, 15, -129))", "-50: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation", "1.2 if hit.entity == self.block_4_28: self.player.jump_height = 1.2 if hit.entity == self.block_4_29: self.player.jump_height", "self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_25: self.player.SPEED =", "25, -89)) self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33 = 
NormalBlock((-275, 15, -129)) self.finishBlock_4", "0)) self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90, 0)) self.block_4_23 = SpeedBlock((-215, 18,", "hit.entity == self.block_4_11: self.player.jump_height = 1.2 elif hit.entity == self.block_4_12: self.player.jump_height = normalJump", "class Level04(Entity): def __init__(self): super().__init__() self.is_enabled = False self.on = False self.level =", "217)) self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19 =", "normalJump if hit.entity == self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity", "hit.entity == self.block_4_17: self.player.jump_height = 1.2 elif hit.entity == self.block_4_18: self.player.jump_height = normalJump", "from ursina import * import sys sys.path.append('../Parkour/') from block import * normalSpeed =", "6, 115)) self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16", "35, -156)) self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200, 35, -156)) self.player", "from falling forever if self.is_enabled == True and self.player.position.y <= -50: self.player.SPEED =", "self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable()", "self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5,", "18, 217), (0, 90, 0)) self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90, 0))", "= 9 elif hit.entity == self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if", "hit.entity == self.block_4_23: self.player.SPEED = 9 elif hit.entity == self.block_4_24: self.player.SPEED = 
normalSpeed", "self.player.SPEED = 5 if hit.entity == self.block_4_21: self.player.SPEED = 6.5 if hit.entity ==", "= 0.3 # Level04 class Level04(Entity): def __init__(self): super().__init__() self.is_enabled = False self.on", "== self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_11: self.player.jump_height", "self.player.SPEED = 5 elif hit.entity == self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height = normalJump", "= (0, -30, 0)) self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5, 2,", "2, -46)) self.block_4_7 = NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9", "NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5, 2, 24))", "if hit.entity == self.block_4_14: self.player.SPEED = 5 elif hit.entity == self.block_4_15: self.player.SPEED =", "1, 1000), position = (0, -30, 0)) self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2", "== self.block_4_21: self.player.SPEED = 6.5 if hit.entity == self.block_4_22: self.player.SPEED = 7 if", "= NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36, 18,", "self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200, 35, -156)) self.player = None", "SpeedBlock((-85, 18, 217), (0, 90, 0)) self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90,", "self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275,", "1.2 if hit.entity == self.block_4_29: self.player.jump_height = 1.2 if hit.entity == self.block_4_30: self.player.jump_height", "self.block_4_7 = NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5,", "self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5,", "= True self.on = True self.level.enable() 
self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable()", "= 7 if hit.entity == self.block_4_23: self.player.SPEED = 9 elif hit.entity == self.block_4_24:", "= 1.2 elif hit.entity == self.block_4_31: self.player.jump_height = normalJump self.player.SPEED = normalSpeed if", "2, -10)) self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10", "(0, 90, 0)) self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90, 0)) self.block_4_22 =", "= False else: self.light = None # Stops the player from falling forever", "-156)) self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200, 35, -156)) self.player =", "== self.block_4_28: self.player.jump_height = 1.2 if hit.entity == self.block_4_29: self.player.jump_height = 1.2 if", "self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable()", "self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable()", "self.block_4_29: self.player.jump_height = 1.2 if hit.entity == self.block_4_30: self.player.jump_height = 1.2 elif hit.entity", "-20, 64)) self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14", "40)) self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275, -20, -60)) self.block_4_31 =", "18, 217)) self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26 
= JumpBlock((-275, -20, 140)) self.block_4_27", "0) self.player.count = 0.0 # What entity the player hits hit = raycast(self.player.position,", "== self.block_4_20: self.player.SPEED = 5 if hit.entity == self.block_4_21: self.player.SPEED = 6.5 if", "0, 0) self.player.count = 0.0 # What entity the player hits hit =", "0.3 # Level04 class Level04(Entity): def __init__(self): super().__init__() self.is_enabled = False self.on =", "JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275, 20, -109))", "hit.entity == self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height = normalJump elif hit.entity == self.block_4_16:", "-156)) self.secret_4 = NormalBlock((-200, 35, -156)) self.player = None self.disable() def disable(self): self.is_enabled", "(0, -30, 0)) self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5, 2, -112))", "10, -128) self.player.rotation = (0, 181, 0) self.player.count = 0.0 # Restart the", "= \"mesh\", scale = (1000, 1, 1000), position = (0, -30, 0)) self.block_4_1", "(1000, 1, 1000), position = (0, -30, 0)) self.block_4_1 = NormalBlock((5, 2, -128))", "= 0.0 if hit.entity == self.level: self.player.jump_height = normalJump if hit.entity == self.block_4_6:", "self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_17: self.player.jump_height =", "if hit.entity == self.block_4_28: self.player.jump_height = 1.2 if hit.entity == self.block_4_29: self.player.jump_height =", "NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5, 6, 96))", "NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200, 35, -156)) self.player = None self.disable() def", "self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() 
self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable()", "normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height =", "self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable()", "== self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_17: self.player.jump_height", "hit.entity == self.block_4_13: self.player.jump_height = normalJump if hit.entity == self.block_4_13: self.player.SPEED = 4", "self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable()", "self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable()", "= (1000, 1, 1000), position = (0, -30, 0)) self.block_4_1 = NormalBlock((5, 2,", "boostSpeed = 5 normalJump = 0.3 # Level04 class Level04(Entity): def __init__(self): super().__init__()", "self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed", 
"self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable()", "10)) self.lava = Entity(model = \"plane\", color = \"#ff6700\", collider = \"mesh\", scale", "self.block_4_11: self.player.jump_height = 1.2 elif hit.entity == self.block_4_12: self.player.jump_height = normalJump elif hit.entity", "= 5 normalJump = 0.3 # Level04 class Level04(Entity): def __init__(self): super().__init__() self.is_enabled", "and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128)", "self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled = True self.on = True self.level.enable()", "= 4 if hit.entity == self.block_4_14: self.player.SPEED = 5 elif hit.entity == self.block_4_15:", "= normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height", "self.block_4_28: self.player.jump_height = 1.2 if hit.entity == self.block_4_29: self.player.jump_height = 1.2 if hit.entity", "217)) self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26 = JumpBlock((-275, -20, 140)) self.block_4_27 =", "color = \"#454545\", collider = \"mesh\", scale = (10, 10, 10)) self.lava =", "self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable()", "elif hit.entity == self.block_4_18: self.player.jump_height = normalJump if hit.entity == self.block_4_19: self.player.SPEED =", 
"self.on = False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable()", "if hit.entity == self.block_4_19: self.player.SPEED = 4 if hit.entity == self.block_4_20: self.player.SPEED =", "6.5 if hit.entity == self.block_4_22: self.player.SPEED = 7 if hit.entity == self.block_4_23: self.player.SPEED", "NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8, -25, 217))", "= 0.0 # What entity the player hits hit = raycast(self.player.position, self.player.down, distance", "-156)) self.player = None self.disable() def disable(self): self.is_enabled = False self.on = False", "0)) self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 90, 0)) self.block_4_24 = NormalBlock((-275, 18,", "elif hit.entity == self.block_4_12: self.player.jump_height = normalJump elif hit.entity == self.block_4_13: self.player.jump_height =", "hit.entity == self.block_4_29: self.player.jump_height = 1.2 if hit.entity == self.block_4_30: self.player.jump_height = 1.2", "self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation = (0, 0, 0)", "15, -129)) self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2", "self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable()", "3, -161)) self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3", "falling forever if self.is_enabled == True and self.player.position.y <= -50: self.player.SPEED = normalSpeed", "self.block_4_5 = 
NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5,", "-109)) self.block_4_33 = NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1 =", "self.player.jump_height = normalJump if hit.entity == self.block_4_11: self.player.jump_height = 1.2 elif hit.entity ==", "0) self.player.count = 0.0 if hit.entity == self.level: self.player.jump_height = normalJump if hit.entity", "== self.level: self.player.jump_height = normalJump if hit.entity == self.block_4_6: self.player.SPEED = 5 elif", "ignore = [self.player, ]) if hit.entity == self.lava: self.player.SPEED = normalSpeed self.player.jump_height =", "self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable()", "= 6.5 if hit.entity == self.block_4_22: self.player.SPEED = 7 if hit.entity == self.block_4_23:", "normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_11: self.player.jump_height = 1.2 elif hit.entity", "normalSpeed self.player.jump_height = normalJump elif hit.entity == self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height =", "hit.entity == self.block_4_14: self.player.SPEED = 5 elif hit.entity == self.block_4_15: self.player.SPEED = normalSpeed", "1000), position = (0, -30, 0)) self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2 =", "5 if hit.entity == self.block_4_21: self.player.SPEED = 6.5 if hit.entity == self.block_4_22: self.player.SPEED", "= \"mesh\", scale = (10, 10, 10)) self.lava = Entity(model = \"plane\", color", "hit.entity == self.block_4_22: self.player.SPEED = 7 if hit.entity == self.block_4_23: self.player.SPEED = 9", "hit.entity == self.block_4_28: self.player.jump_height = 1.2 if 
hit.entity == self.block_4_29: self.player.jump_height = 1.2", "self.player.jump_height = normalJump elif hit.entity == self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height = normalJump", "speed(self): self.player.SPEED = normalSpeed def update(self): if self.is_enabled == True: self.light = DirectionalLight()", "forever if self.is_enabled == True and self.player.position.y <= -50: self.player.SPEED = normalSpeed self.player.jump_height", "if hit.entity == self.block_4_13: self.player.SPEED = 4 if hit.entity == self.block_4_14: self.player.SPEED =", "self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable()", "(0, 90, 0)) self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275, -20, 190))", "35, -156)) self.player = None self.disable() def disable(self): self.is_enabled = False self.on =", "self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable()", "True self.on = True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable()", "= NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5, 6,", "NormalBlock((-275, 20, -109)) self.block_4_33 = NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275, 3, -161))", "entity the player hits hit = raycast(self.player.position, 
self.player.down, distance = 2, ignore =", "self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable()", "self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36,", "90)) self.block_4_28 = JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30 =", "self.block_4_23: self.player.SPEED = 9 elif hit.entity == self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height =", "== self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_26: self.player.SPEED", "if hit.entity == self.block_4_20: self.player.SPEED = 5 if hit.entity == self.block_4_21: self.player.SPEED =", "if self.on == True and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position", "self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height = normalJump elif hit.entity == self.block_4_16: self.player.SPEED =", "scale = (1000, 1, 1000), position = (0, -30, 0)) self.block_4_1 = NormalBlock((5,", "self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable()", "False else: self.light = None # Stops the player from falling forever if", "JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275, -20, -60))", "<= -50: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, 
-128)", "-20, 140)) self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275, -20, 40)) self.block_4_29", "self.is_enabled = False self.on = False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable()", "-20, -60)) self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33", "self.player.position.y <= -50: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10,", "(0, 181, 0) self.player.count = 0.0 # Restart the level if self.on ==", "= normalJump self.player.position = (5, 10, -128) self.player.rotation = (0, 0, 0) self.player.count", "self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable()", "= False self.on = False self.level = Entity(model = \"lava_level_4.obj\", color = \"#454545\",", "self.disable() def disable(self): self.is_enabled = False self.on = False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable()", "NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90, 0)) self.block_4_20 =", "= Entity(model = \"plane\", color = \"#ff6700\", collider = \"mesh\", scale = (1000,", "0)) self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90, 0)) self.block_4_22 = SpeedBlock((-165, 18,", "= NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5, 2,", "hits hit = raycast(self.player.position, self.player.down, distance = 2, ignore = [self.player, ]) if", "if hit.entity == self.lava: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = 
(5,", "hit.entity == self.block_4_18: self.player.jump_height = normalJump if hit.entity == self.block_4_19: self.player.SPEED = 4", "self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90, 0)) self.block_4_20 = SpeedBlock((-85, 18, 217),", "EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100, 35, -156))", "96)) self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15 =", "-129)) self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2 =", "= NormalBlock((-275, 20, -109)) self.block_4_33 = NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275, 3,", "normalJump if hit.entity == self.block_4_17: self.player.jump_height = 1.2 elif hit.entity == self.block_4_18: self.player.jump_height", "-96)) self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5 = NormalBlock((5, 2, -64)) self.block_4_6 =", "(0, 0, 0) self.player.count = 0.0 if hit.entity == self.level: self.player.jump_height = normalJump", "if self.is_enabled == True: self.light = DirectionalLight() self.is_enabled = False else: self.light =", "= Entity(model = \"lava_level_4.obj\", color = \"#454545\", collider = \"mesh\", scale = (10,", "collider = \"mesh\", scale = (1000, 1, 1000), position = (0, -30, 0))", "self.player.SPEED = normalSpeed self.player.jump_height = normalJump elif hit.entity == self.block_4_16: self.player.SPEED = normalSpeed", "= normalJump if hit.entity == self.block_4_11: self.player.jump_height = 1.2 elif hit.entity == self.block_4_12:", "self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33 = NormalBlock((-275,", "NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26 = JumpBlock((-275, -20, 140))", "]) if hit.entity == self.lava: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position =", "self.player.count = 
0.0 if hit.entity == self.level: self.player.jump_height = normalJump if hit.entity ==", "hit.entity == self.block_4_20: self.player.SPEED = 5 if hit.entity == self.block_4_21: self.player.SPEED = 6.5", "hit.entity == self.block_4_31: self.player.jump_height = normalJump self.player.SPEED = normalSpeed if hit.entity == self.finishBlock_4:", "hit.entity == self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_11:", "18, 217), (0, 90, 0)) self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90, 0))", "self.player.rotation = (0, 0, 0) self.player.count = 0.0 # What entity the player", "sys.path.append('../Parkour/') from block import * normalSpeed = 2 boostSpeed = 5 normalJump =", "= None # Stops the player from falling forever if self.is_enabled == True", "True and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10,", "NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50, 35, -156))", "= 0.0 # Restart the level if self.on == True and held_keys[\"g\"]: self.player.SPEED", "self.player.SPEED = 4 if hit.entity == self.block_4_20: self.player.SPEED = 5 if hit.entity ==", "0) self.player.count = 0.0 # Restart the level if self.on == True and", "== self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_25: self.player.SPEED", "self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5,", "self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable()", "if hit.entity == self.block_4_21: self.player.SPEED = 
6.5 if hit.entity == self.block_4_22: self.player.SPEED =", "181, 0) self.player.count = 0.0 # Restart the level if self.on == True", "normalJump elif hit.entity == self.block_4_13: self.player.jump_height = normalJump if hit.entity == self.block_4_13: self.player.SPEED", "False self.level = Entity(model = \"lava_level_4.obj\", color = \"#454545\", collider = \"mesh\", scale", "2, -112)) self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5", "= 2 boostSpeed = 5 normalJump = 0.3 # Level04 class Level04(Entity): def", "217), (0, 90, 0)) self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90, 0)) self.block_4_23", "self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5,", "== self.lava: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128)", "normalJump self.player.position = (5, 10, -128) self.player.rotation = (0, 181, 0) self.player.count =", "SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5, -25, 201))", "-128) self.player.rotation = (0, 181, 0) self.player.count = 0.0 # Restart the level", "the level if self.on == True and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height =", "hit.entity == self.block_4_12: self.player.jump_height = normalJump elif hit.entity == self.block_4_13: self.player.jump_height = normalJump", "217), (0, 90, 0)) self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90, 0)) self.block_4_22", "= None self.disable() def disable(self): self.is_enabled = False self.on = False self.level.disable() self.lava.disable()", "if hit.entity == self.block_4_27: self.player.jump_height = 1.2 if hit.entity == self.block_4_28: self.player.jump_height =", "217), (0, 90, 0)) self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275, -20,", "18, 217), (0, 90, 0)) self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 
90, 0))", "self.level = Entity(model = \"lava_level_4.obj\", color = \"#454545\", collider = \"mesh\", scale =", "held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation", "= NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5, 2,", "None self.disable() def disable(self): self.is_enabled = False self.on = False self.level.disable() self.lava.disable() self.block_4_1.disable()", "self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable()", "self.player.jump_height = 1.2 if hit.entity == self.block_4_29: self.player.jump_height = 1.2 if hit.entity ==", "190)) self.block_4_26 = JumpBlock((-275, -20, 140)) self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28 =", "-128)) self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4 =", "# Stops the player from falling forever if self.is_enabled == True and self.player.position.y", "hit = raycast(self.player.position, self.player.down, distance = 2, ignore = [self.player, ]) if hit.entity", "== self.block_4_11: self.player.jump_height = 1.2 elif hit.entity == self.block_4_12: self.player.jump_height = normalJump elif", "hit.entity == self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_26:", "False self.on = False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable()", "self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5 = 
NormalBlock((5,", "self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def", "= DirectionalLight() self.is_enabled = False else: self.light = None # Stops the player", "= normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation = (0,", "True and self.player.position.y <= -50: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position =", "-128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 if hit.entity == self.level:", "= NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33 = NormalBlock((-275, 15,", "hit.entity == self.block_4_19: self.player.SPEED = 4 if hit.entity == self.block_4_20: self.player.SPEED = 5", "4 if hit.entity == self.block_4_20: self.player.SPEED = 5 if hit.entity == self.block_4_21: self.player.SPEED", "self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_17: self.player.jump_height = 1.2", "self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable()", "-20, 40)) self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275, -20, -60)) self.block_4_31", "0)) self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90, 0)) self.block_4_21 = SpeedBlock((-120, 18,", "color = \"#ff6700\", collider = \"mesh\", scale = (1000, 1, 1000), position =", "\"plane\", color = \"#ff6700\", collider = \"mesh\", scale = (1000, 1, 1000), position", "-80)) self.block_4_5 = 
NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7 =", "def disable(self): self.is_enabled = False self.on = False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable()", "self.player.jump_height = 1.2 if hit.entity == self.block_4_27: self.player.jump_height = 1.2 if hit.entity ==", "self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation = (0, 181, 0)", "self.on == True and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position =", "self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled = True self.on =", "-60)) self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33 =", "= SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5, 2,", "def enable(self): self.is_enabled = True self.on = True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable()", "block import * normalSpeed = 2 boostSpeed = 5 normalJump = 0.3 #", "2, 24)) self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12", "self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable()", "elif hit.entity == self.block_4_13: self.player.jump_height = normalJump if hit.entity == self.block_4_13: self.player.SPEED =", "= NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5, 2,", "== self.block_4_17: 
self.player.jump_height = 1.2 elif hit.entity == self.block_4_18: self.player.jump_height = normalJump if", "self.on = False self.level = Entity(model = \"lava_level_4.obj\", color = \"#454545\", collider =", "5 elif hit.entity == self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity", "5 normalJump = 0.3 # Level04 class Level04(Entity): def __init__(self): super().__init__() self.is_enabled =", "self.player.SPEED = 9 elif hit.entity == self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height = normalJump", "if hit.entity == self.block_4_30: self.player.jump_height = 1.2 elif hit.entity == self.block_4_31: self.player.jump_height =", "self.lava: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation", "18, 217), (0, 90, 0)) self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275,", "self.block_4_30 = JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275,", "self.block_4_22: self.player.SPEED = 7 if hit.entity == self.block_4_23: self.player.SPEED = 9 elif hit.entity", "NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200, 35, -156))", "= NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5, 2,", "<reponame>SC-HARSH/Parkour-Game from ursina import * import sys sys.path.append('../Parkour/') from block import * normalSpeed", "115)) self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16 =", "self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90, 0))", "10, -128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 # What entity", "self.block_4_30: self.player.jump_height = 1.2 elif hit.entity == self.block_4_31: self.player.jump_height = normalJump 
self.player.SPEED =", "= 5 if hit.entity == self.block_4_21: self.player.SPEED = 6.5 if hit.entity == self.block_4_22:", "self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5,", "6, 145)) self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17", "from block import * normalSpeed = 2 boostSpeed = 5 normalJump = 0.3", "= normalSpeed def update(self): if self.is_enabled == True: self.light = DirectionalLight() self.is_enabled =", "self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed def update(self): if self.is_enabled", "self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed def update(self): if", "2, -80)) self.block_4_5 = NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7", "-20, 190)) self.block_4_26 = JumpBlock((-275, -20, 140)) self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28", "self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable()", "self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_26: self.player.SPEED =", "self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275,", "JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275, -20, -10))", "self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() 
self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable()", "self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable()", "normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation = (0, 181,", "normalJump if hit.entity == self.block_4_19: self.player.SPEED = 4 if hit.entity == self.block_4_20: self.player.SPEED", "= JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5, 6,", "self.block_4_13: self.player.SPEED = 4 if hit.entity == self.block_4_14: self.player.SPEED = 5 elif hit.entity", "-25, 201)) self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18", "self.finishBlock_4.disable() def enable(self): self.is_enabled = True self.on = True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable()", "the player hits hit = raycast(self.player.position, self.player.down, distance = 2, ignore = [self.player,", "(0, 0, 0) self.player.count = 0.0 # What entity the player hits hit", "self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable()", "0.0 # What entity the player hits hit = raycast(self.player.position, self.player.down, distance =", "-64)) self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5, 2, -10)) 
self.block_4_8 =", "hit.entity == self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_17:", "self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable()", "self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100,", "self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33 = NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275,", "self.light = DirectionalLight() self.is_enabled = False else: self.light = None # Stops the", "= NormalBlock((5, 2, -80)) self.block_4_5 = NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5, 2,", "= normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_11: self.player.jump_height = 1.2 elif", "self.player.rotation = (0, 0, 0) self.player.count = 0.0 if hit.entity == self.level: self.player.jump_height", "= JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32 = NormalBlock((-275, 20,", "35, -156)) self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4", "self.player.down, distance = 2, ignore = [self.player, ]) if hit.entity == self.lava: self.player.SPEED", "== self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_27: self.player.jump_height", "normalJump = 0.3 # Level04 class Level04(Entity): def __init__(self): super().__init__() self.is_enabled = False", "Entity(model = \"plane\", color = \"#ff6700\", collider = \"mesh\", scale = (1000, 1,", "self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() 
self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable()", "normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height =", "\"lava_level_4.obj\", color = \"#454545\", collider = \"mesh\", scale = (10, 10, 10)) self.lava", "self.player.position = (5, 10, -128) self.player.rotation = (0, 181, 0) self.player.count = 0.0", "== self.block_4_23: self.player.SPEED = 9 elif hit.entity == self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height", "import * import sys sys.path.append('../Parkour/') from block import * normalSpeed = 2 boostSpeed", "= True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable()", "-128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 # What entity the", "= normalJump elif hit.entity == self.block_4_13: self.player.jump_height = normalJump if hit.entity == self.block_4_13:", "== self.block_4_12: self.player.jump_height = normalJump elif hit.entity == self.block_4_13: self.player.jump_height = normalJump if", "self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled = True self.on = True self.level.enable() self.lava.enable() self.block_4_1.enable()", "self.block_4_28 = JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275,", "90, 0)) self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26", "self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() 
self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled", "217), (0, 90, 0)) self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90, 0)) self.block_4_21", "elif hit.entity == self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity ==", "90, 0)) self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90, 0)) self.block_4_22 = SpeedBlock((-165,", "= 1.2 elif hit.entity == self.block_4_18: self.player.jump_height = normalJump if hit.entity == self.block_4_19:", "normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation = (0, 0,", "and self.player.position.y <= -50: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5,", "self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed def update(self):", "self.block_4_18: self.player.jump_height = normalJump if hit.entity == self.block_4_19: self.player.SPEED = 4 if hit.entity", "= (0, 181, 0) self.player.count = 0.0 # Restart the level if self.on", "== self.block_4_19: self.player.SPEED = 4 if hit.entity == self.block_4_20: self.player.SPEED = 5 if", "self.block_4_20: self.player.SPEED = 5 if hit.entity == self.block_4_21: self.player.SPEED = 6.5 if hit.entity", "= normalJump if hit.entity == self.block_4_13: self.player.SPEED = 4 if hit.entity == self.block_4_14:", "-156)) self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4 =", "JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275, 25, -89))", "0.0 if hit.entity == self.level: self.player.jump_height = normalJump if hit.entity == self.block_4_6: self.player.SPEED", "self.player.count = 0.0 # Restart the level if self.on == True 
and held_keys[\"g\"]:", "else: self.light = None # Stops the player from falling forever if self.is_enabled", "self.lava = Entity(model = \"plane\", color = \"#ff6700\", collider = \"mesh\", scale =", "= (10, 10, 10)) self.lava = Entity(model = \"plane\", color = \"#ff6700\", collider", "self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26 = JumpBlock((-275, -20, 140)) self.block_4_27 = JumpBlock((-275,", "self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable()", "= normalJump if hit.entity == self.block_4_19: self.player.SPEED = 4 if hit.entity == self.block_4_20:", "7 if hit.entity == self.block_4_23: self.player.SPEED = 9 elif hit.entity == self.block_4_24: self.player.SPEED", "elif hit.entity == self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity ==", "self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable()", "self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable()", "0.0 # Restart the level if self.on == True and held_keys[\"g\"]: self.player.SPEED =", "0)) self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3 =", "-10)) self.block_4_30 = 
JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275, 25, -89)) self.block_4_32 =", "= 1.2 if hit.entity == self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if", "enable(self): self.is_enabled = True self.on = True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable()", "= NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200, 35,", "= SpeedBlock((-120, 18, 217), (0, 90, 0)) self.block_4_22 = SpeedBlock((-165, 18, 217), (0,", "JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55, 18, 217),", "self.player.jump_height = normalJump if hit.entity == self.block_4_13: self.player.SPEED = 4 if hit.entity ==", "self.light = None # Stops the player from falling forever if self.is_enabled ==", "self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable()", "if hit.entity == self.block_4_23: self.player.SPEED = 9 elif hit.entity == self.block_4_24: self.player.SPEED =", "self.block_4_27: self.player.jump_height = 1.2 if hit.entity == self.block_4_28: self.player.jump_height = 1.2 if hit.entity", "= normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_17: self.player.jump_height = 1.2 elif", "self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10, -128) self.player.rotation =", "if hit.entity == self.block_4_29: self.player.jump_height = 1.2 if hit.entity == self.block_4_30: self.player.jump_height =", "SpeedBlock((-215, 18, 217), (0, 90, 0)) self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25 =", "= normalJump 
self.player.position = (5, 10, -128) self.player.rotation = (0, 181, 0) self.player.count", "self.is_enabled = False self.on = False self.level = Entity(model = \"lava_level_4.obj\", color =", "(5, 10, -128) self.player.rotation = (0, 181, 0) self.player.count = 0.0 # Restart", "NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5, 2, -10))", "if self.is_enabled == True and self.player.position.y <= -50: self.player.SPEED = normalSpeed self.player.jump_height =", "self.player.SPEED = 6.5 if hit.entity == self.block_4_22: self.player.SPEED = 7 if hit.entity ==", "= SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5, -25,", "90, 0)) self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90, 0)) self.block_4_23 = SpeedBlock((-215,", "-25, 217)) self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55, 18, 217), (0,", "self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled = True self.on = True self.level.enable() self.lava.enable()", "-30, 0)) self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3", "normalJump if hit.entity == self.block_4_11: self.player.jump_height = 1.2 elif hit.entity == self.block_4_12: self.player.jump_height", "(0, 90, 0)) self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90, 0)) self.block_4_23 =", "hit.entity == self.lava: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position = (5, 10,", "18, 217), (0, 90, 0)) self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90, 0))", "self.player.rotation = (0, 181, 0) self.player.count = 0.0 # Restart the level if", "NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150, 35, -156))", "elif hit.entity == self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height = normalJump elif hit.entity 
==", "-89)) self.block_4_32 = NormalBlock((-275, 20, -109)) self.block_4_33 = NormalBlock((-275, 15, -129)) self.finishBlock_4 =", "== self.block_4_27: self.player.jump_height = 1.2 if hit.entity == self.block_4_28: self.player.jump_height = 1.2 if", "# Restart the level if self.on == True and held_keys[\"g\"]: self.player.SPEED = normalSpeed", "self.block_4_33 = NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50,", "self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self):", "if hit.entity == self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity ==", "hit.entity == self.block_4_24: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_25:", "= 1.2 if hit.entity == self.block_4_29: self.player.jump_height = 1.2 if hit.entity == self.block_4_30:", "elif hit.entity == self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity ==", "SpeedBlock((-55, 18, 217), (0, 90, 0)) self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90,", "= SpeedBlock((-215, 18, 217), (0, 90, 0)) self.block_4_24 = NormalBlock((-275, 18, 217)) self.block_4_25", "self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable()", "self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() 
self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable()", "10, -128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 if hit.entity ==", "self.player.jump_height = 1.2 elif hit.entity == self.block_4_31: self.player.jump_height = normalJump self.player.SPEED = normalSpeed", "self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 90, 0)) self.block_4_24 = NormalBlock((-275, 18, 217))", "Level04(Entity): def __init__(self): super().__init__() self.is_enabled = False self.on = False self.level = Entity(model", "player hits hit = raycast(self.player.position, self.player.down, distance = 2, ignore = [self.player, ])", "= \"#454545\", collider = \"mesh\", scale = (10, 10, 10)) self.lava = Entity(model", "normalJump self.player.position = (5, 10, -128) self.player.rotation = (0, 0, 0) self.player.count =", "1.2 elif hit.entity == self.block_4_18: self.player.jump_height = normalJump if hit.entity == self.block_4_19: self.player.SPEED", "self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable()", "= JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275, -20, 40)) self.block_4_29 = JumpBlock((-275, -20,", "self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled =", "== self.block_4_6: self.player.SPEED = 5 elif hit.entity == self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height", "= NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8, -25,", 
"self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable()", "def __init__(self): super().__init__() self.is_enabled = False self.on = False self.level = Entity(model =", "ursina import * import sys sys.path.append('../Parkour/') from block import * normalSpeed = 2", "= NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5, -20,", "if hit.entity == self.block_4_11: self.player.jump_height = 1.2 elif hit.entity == self.block_4_12: self.player.jump_height =", "import sys sys.path.append('../Parkour/') from block import * normalSpeed = 2 boostSpeed = 5", "self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed def update(self): if self.is_enabled ==", "self.player.jump_height = 1.2 if hit.entity == self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height = 1.2", "self.block_4_12: self.player.jump_height = normalJump elif hit.entity == self.block_4_13: self.player.jump_height = normalJump if hit.entity", "self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5,", "= normalJump if hit.entity == self.block_4_25: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if", "2, 40)) self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13", "(0, 90, 0)) self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 90, 0)) self.block_4_24 =", "self.player.count = 0.0 # What entity the player hits hit = raycast(self.player.position, self.player.down,", "SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5, -25, 217))", "self.block_4_26 = JumpBlock((-275, -20, 
140)) self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275,", "self.is_enabled == True: self.light = DirectionalLight() self.is_enabled = False else: self.light = None", "= NormalBlock((-200, 35, -156)) self.player = None self.disable() def disable(self): self.is_enabled = False", "if hit.entity == self.block_4_17: self.player.jump_height = 1.2 elif hit.entity == self.block_4_18: self.player.jump_height =", "= False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable()", "== self.block_4_13: self.player.SPEED = 4 if hit.entity == self.block_4_14: self.player.SPEED = 5 elif", "JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5, 6, 115))", "self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self):", "if hit.entity == self.block_4_22: self.player.SPEED = 7 if hit.entity == self.block_4_23: self.player.SPEED =", "Restart the level if self.on == True and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height", "import * normalSpeed = 2 boostSpeed = 5 normalJump = 0.3 # Level04", "= SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5, -25,", "self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable()", 
"self.player.SPEED = 7 if hit.entity == self.block_4_23: self.player.SPEED = 9 elif hit.entity ==", "self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5,", "self.player.jump_height = 1.2 elif hit.entity == self.block_4_12: self.player.jump_height = normalJump elif hit.entity ==", "self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable()", "self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_27: self.player.jump_height =", "self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED", "145)) self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17 =", "self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5,", "self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90, 0)) self.block_4_22 = SpeedBlock((-165, 18, 217),", "= normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height", "1.2 if hit.entity == self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity", "False self.on = False self.level = Entity(model = \"lava_level_4.obj\", color = \"#454545\", collider", "self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() 
self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable()", "self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def", "217)) self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90,", "1.2 if hit.entity == self.block_4_27: self.player.jump_height = 1.2 if hit.entity == self.block_4_28: self.player.jump_height", "2 boostSpeed = 5 normalJump = 0.3 # Level04 class Level04(Entity): def __init__(self):", "self.player.jump_height = normalJump if hit.entity == self.block_4_17: self.player.jump_height = 1.2 elif hit.entity ==", "5 elif hit.entity == self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height = normalJump elif hit.entity", "8)) self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11 =", "self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable()", "def update(self): if self.is_enabled == True: self.light = DirectionalLight() self.is_enabled = False else:", "[self.player, ]) if hit.entity == self.lava: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position", "hit.entity == self.level: self.player.jump_height = normalJump if hit.entity == self.block_4_6: self.player.SPEED = 5", "normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_17: self.player.jump_height = 1.2 elif hit.entity", "self.block_4_22.enable() 
self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable()", "= False self.level = Entity(model = \"lava_level_4.obj\", color = \"#454545\", collider = \"mesh\",", "NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5, 2, -80))", "hit.entity == self.block_4_13: self.player.SPEED = 4 if hit.entity == self.block_4_14: self.player.SPEED = 5", "self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled = True self.on", "self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable() self.block_4_24.enable() self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable()", "* import sys sys.path.append('../Parkour/') from block import * normalSpeed = 2 boostSpeed =", "6, 96)) self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5, 6, 145)) self.block_4_15", "-10)) self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10 =", "self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_27: self.player.jump_height = 1.2", "self.player.SPEED = normalSpeed def update(self): if self.is_enabled == True: self.light = DirectionalLight() self.is_enabled", "self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() 
self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable()", "= 1.2 if hit.entity == self.block_4_30: self.player.jump_height = 1.2 elif hit.entity == self.block_4_31:", "level if self.on == True and held_keys[\"g\"]: self.player.SPEED = normalSpeed self.player.jump_height = normalJump", "# What entity the player hits hit = raycast(self.player.position, self.player.down, distance = 2,", "self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable()", "self.player.jump_height = normalJump if hit.entity == self.block_4_6: self.player.SPEED = 5 elif hit.entity ==", "18, 217)) self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90, 0)) self.block_4_20 = SpeedBlock((-85,", "self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_11: self.player.jump_height =", "self.level: self.player.jump_height = normalJump if hit.entity == self.block_4_6: self.player.SPEED = 5 elif hit.entity", "= NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5, 2,", "super().__init__() self.is_enabled = False self.on = False self.level = Entity(model = \"lava_level_4.obj\", color", "self.block_4_14: self.player.SPEED = 5 elif hit.entity == self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height =", "hit.entity == self.block_4_6: self.player.SPEED = 5 elif hit.entity == self.block_4_7: self.player.SPEED = normalSpeed", "self.player.jump_height = 1.2 if hit.entity == self.block_4_28: self.player.jump_height = 1.2 if hit.entity ==", "self.player.SPEED = 5 elif hit.entity == self.block_4_7: self.player.SPEED = normalSpeed self.player.jump_height = normalJump", 
"self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed def update(self): if self.is_enabled == True: self.light", "collider = \"mesh\", scale = (10, 10, 10)) self.lava = Entity(model = \"plane\",", "\"#454545\", collider = \"mesh\", scale = (10, 10, 10)) self.lava = Entity(model =", "player from falling forever if self.is_enabled == True and self.player.position.y <= -50: self.player.SPEED", "self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5,", "self.block_4_25.enable() self.block_4_26.enable() self.block_4_27.enable() self.block_4_28.enable() self.block_4_29.enable() self.block_4_30.enable() self.block_4_31.enable() self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable()", "self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable()", "self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable()", "201)) self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18 =", "self.block_4_13: self.player.jump_height = normalJump if hit.entity == self.block_4_13: self.player.SPEED = 4 if hit.entity", "raycast(self.player.position, self.player.down, distance = 2, ignore = [self.player, ]) if hit.entity == self.lava:", "= 5 elif hit.entity == self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height = normalJump elif", 
"Level04 class Level04(Entity): def __init__(self): super().__init__() self.is_enabled = False self.on = False self.level", "NormalBlock((5, 2, -80)) self.block_4_5 = NormalBlock((5, 2, -64)) self.block_4_6 = SpeedBlock((5, 2, -46))", "self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90, 0)) self.block_4_23 = SpeedBlock((-215, 18, 217),", "position = (0, -30, 0)) self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5,", "JumpBlock((-275, -20, 140)) self.block_4_27 = JumpBlock((-275, -20, 90)) self.block_4_28 = JumpBlock((-275, -20, 40))", "self.block_4_19: self.player.SPEED = 4 if hit.entity == self.block_4_20: self.player.SPEED = 5 if hit.entity", "self.player.jump_height = 1.2 elif hit.entity == self.block_4_18: self.player.jump_height = normalJump if hit.entity ==", "self.block_4_15 = NormalBlock((5, -25, 201)) self.block_4_16 = NormalBlock((5, -25, 217)) self.block_4_17 = JumpBlock((-8,", "-161)) self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3 =", "elif hit.entity == self.block_4_31: self.player.jump_height = normalJump self.player.SPEED = normalSpeed if hit.entity ==", "20, -109)) self.block_4_33 = NormalBlock((-275, 15, -129)) self.finishBlock_4 = EndBlock((-275, 3, -161)) self.secret_1", "self.player.jump_height = 1.2 if hit.entity == self.block_4_30: self.player.jump_height = 1.2 elif hit.entity ==", "self.player.SPEED = 4 if hit.entity == self.block_4_14: self.player.SPEED = 5 elif hit.entity ==", "2, -128)) self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4", "self.player.position = (5, 10, -128) self.player.rotation = (0, 0, 0) self.player.count = 0.0", "= NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200, 35, -156)) self.player = None self.disable()", "= \"plane\", color = \"#ff6700\", collider = \"mesh\", scale = (1000, 1, 1000),", "self.is_enabled == True and self.player.position.y <= -50: self.player.SPEED = 
normalSpeed self.player.jump_height = normalJump", "self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable() self.block_4_22.enable() self.block_4_23.enable()", "2, -96)) self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5 = NormalBlock((5, 2, -64)) self.block_4_6", "self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable() self.block_4_18.enable() self.block_4_19.enable() self.block_4_20.enable() self.block_4_21.enable()", "0, 0) self.player.count = 0.0 if hit.entity == self.level: self.player.jump_height = normalJump if", "(10, 10, 10)) self.lava = Entity(model = \"plane\", color = \"#ff6700\", collider =", "== True and self.player.position.y <= -50: self.player.SPEED = normalSpeed self.player.jump_height = normalJump self.player.position", "normalJump if hit.entity == self.block_4_13: self.player.SPEED = 4 if hit.entity == self.block_4_14: self.player.SPEED", "NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5 = NormalBlock((5, 2, -64))", "== self.block_4_14: self.player.SPEED = 5 elif hit.entity == self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height", "\"mesh\", scale = (10, 10, 10)) self.lava = Entity(model = \"plane\", color =", "NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5, -20, 64))", "self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable() self.secret_2.disable() self.secret_3.disable() self.secret_4.disable() self.finishBlock_4.disable() def enable(self): self.is_enabled = True", "35, -156)) self.secret_4 = NormalBlock((-200, 
35, -156)) self.player = None self.disable() def disable(self):", "= \"#ff6700\", collider = \"mesh\", scale = (1000, 1, 1000), position = (0,", "-46)) self.block_4_7 = NormalBlock((5, 2, -10)) self.block_4_8 = NormalBlock((5, 2, 8)) self.block_4_9 =", "self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable() self.block_4_33.disable() self.secret_1.disable()", "What entity the player hits hit = raycast(self.player.position, self.player.down, distance = 2, ignore", "= normalSpeed self.player.jump_height = normalJump elif hit.entity == self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height", "= 2, ignore = [self.player, ]) if hit.entity == self.lava: self.player.SPEED = normalSpeed", "NormalBlock((5, 2, 8)) self.block_4_9 = NormalBlock((5, 2, 24)) self.block_4_10 = NormalBlock((5, 2, 40))", "NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14 = SpeedBlock((5, 6, 145))", "NormalBlock((-200, 35, -156)) self.player = None self.disable() def disable(self): self.is_enabled = False self.on", "= False self.on = False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable()", "self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed def update(self): if self.is_enabled == True:", "SpeedBlock((-165, 18, 217), (0, 90, 0)) self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 90,", "24)) self.block_4_10 = NormalBlock((5, 2, 40)) self.block_4_11 = JumpBlock((5, -20, 64)) self.block_4_12 =", "self.block_4_17: self.player.jump_height = 1.2 elif hit.entity == self.block_4_18: self.player.jump_height = normalJump if hit.entity", "== 
self.block_4_29: self.player.jump_height = 1.2 if hit.entity == self.block_4_30: self.player.jump_height = 1.2 elif", "= (5, 10, -128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 #", "== self.block_4_18: self.player.jump_height = normalJump if hit.entity == self.block_4_19: self.player.SPEED = 4 if", "== self.block_4_15: self.player.SPEED = normalSpeed self.player.jump_height = normalJump elif hit.entity == self.block_4_16: self.player.SPEED", "= NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150, 35,", "= normalJump if hit.entity == self.block_4_17: self.player.jump_height = 1.2 elif hit.entity == self.block_4_18:", "\"mesh\", scale = (1000, 1, 1000), position = (0, -30, 0)) self.block_4_1 =", "90, 0)) self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 90, 0)) self.block_4_24 = NormalBlock((-275,", "217), (0, 90, 0)) self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 90, 0)) self.block_4_24", "self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150, 35, -156)) self.secret_4 = NormalBlock((-200,", "normalJump if hit.entity == self.block_4_6: self.player.SPEED = 5 elif hit.entity == self.block_4_7: self.player.SPEED", "self.player = None self.disable() def disable(self): self.is_enabled = False self.on = False self.level.disable()", "= normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_27: self.player.jump_height = 1.2 if", "SpeedBlock((-120, 18, 217), (0, 90, 0)) self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90,", "hit.entity == self.block_4_21: self.player.SPEED = 6.5 if hit.entity == self.block_4_22: self.player.SPEED = 7", "scale = (10, 10, 10)) self.lava = Entity(model = \"plane\", color = \"#ff6700\",", "True: self.light = DirectionalLight() self.is_enabled = False else: self.light = None # Stops", "self.block_4_20.disable() self.block_4_21.disable() self.block_4_22.disable() self.block_4_23.disable() self.block_4_24.disable() 
self.block_4_25.disable() self.block_4_26.disable() self.block_4_27.disable() self.block_4_28.disable() self.block_4_29.disable() self.block_4_30.disable() self.block_4_31.disable() self.block_4_32.disable()", "self.block_4_9.disable() self.block_4_10.disable() self.block_4_11.disable() self.block_4_12.disable() self.block_4_13.disable() self.block_4_14.disable() self.block_4_15.disable() self.block_4_16.disable() self.block_4_17.disable() self.block_4_18.disable() self.block_4_19.disable() self.block_4_20.disable() self.block_4_21.disable()", "= EndBlock((-275, 3, -161)) self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100, 35,", "2, ignore = [self.player, ]) if hit.entity == self.lava: self.player.SPEED = normalSpeed self.player.jump_height", "= JumpBlock((-275, -20, -10)) self.block_4_30 = JumpBlock((-275, -20, -60)) self.block_4_31 = NormalBlock((-275, 25,", "2, -64)) self.block_4_6 = SpeedBlock((5, 2, -46)) self.block_4_7 = NormalBlock((5, 2, -10)) self.block_4_8", "= (0, 0, 0) self.player.count = 0.0 if hit.entity == self.level: self.player.jump_height =", "= normalJump if hit.entity == self.block_4_6: self.player.SPEED = 5 elif hit.entity == self.block_4_7:", "(5, 10, -128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 # What", "= NormalBlock((-275, 18, 217)) self.block_4_25 = JumpBlock((-275, -20, 190)) self.block_4_26 = JumpBlock((-275, -20,", "if hit.entity == self.level: self.player.jump_height = normalJump if hit.entity == self.block_4_6: self.player.SPEED =", "self.secret_1 = NormalBlock((-50, 35, -156)) self.secret_2 = NormalBlock((-100, 35, -156)) self.secret_3 = NormalBlock((-150,", "__init__(self): super().__init__() self.is_enabled = False self.on = False self.level = Entity(model = \"lava_level_4.obj\",", "False self.level.disable() self.lava.disable() self.block_4_1.disable() self.block_4_2.disable() self.block_4_3.disable() self.block_4_4.disable() self.block_4_5.disable() self.block_4_6.disable() 
self.block_4_7.disable() self.block_4_8.disable() self.block_4_9.disable() self.block_4_10.disable()", "if hit.entity == self.block_4_26: self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity ==", "self.block_4_1 = NormalBlock((5, 2, -128)) self.block_4_2 = NormalBlock((5, 2, -112)) self.block_4_3 = NormalBlock((5,", "self.block_4_17 = JumpBlock((-8, -25, 217)) self.block_4_18 = NormalBlock((-36, 18, 217)) self.block_4_19 = SpeedBlock((-55,", "90, 0)) self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90, 0)) self.block_4_21 = SpeedBlock((-120,", "hit.entity == self.block_4_27: self.player.jump_height = 1.2 if hit.entity == self.block_4_28: self.player.jump_height = 1.2", "= (5, 10, -128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 if", "distance = 2, ignore = [self.player, ]) if hit.entity == self.lava: self.player.SPEED =", "== True: self.light = DirectionalLight() self.is_enabled = False else: self.light = None #", "self.on = True self.level.enable() self.lava.enable() self.block_4_1.enable() self.block_4_2.enable() self.block_4_3.enable() self.block_4_4.enable() self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable()", "self.block_4_5.enable() self.block_4_6.enable() self.block_4_7.enable() self.block_4_8.enable() self.block_4_9.enable() self.block_4_10.enable() self.block_4_11.enable() self.block_4_12.enable() self.block_4_13.enable() self.block_4_14.enable() self.block_4_15.enable() self.block_4_16.enable() self.block_4_17.enable()", "= normalJump elif hit.entity == self.block_4_16: self.player.SPEED = normalSpeed self.player.jump_height = normalJump if", "Stops the player from falling forever if self.is_enabled == True and self.player.position.y <=", "== self.block_4_22: self.player.SPEED = 7 if hit.entity == self.block_4_23: self.player.SPEED = 9 elif", "= (5, 10, -128) self.player.rotation = (0, 181, 0) self.player.count = 0.0 #", "normalJump elif hit.entity == self.block_4_16: 
self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity", "self.player.SPEED = normalSpeed self.player.jump_height = normalJump if hit.entity == self.block_4_25: self.player.SPEED = normalSpeed", "1.2 elif hit.entity == self.block_4_31: self.player.jump_height = normalJump self.player.SPEED = normalSpeed if hit.entity", "-112)) self.block_4_3 = NormalBlock((5, 2, -96)) self.block_4_4 = NormalBlock((5, 2, -80)) self.block_4_5 =", "= 1.2 if hit.entity == self.block_4_28: self.player.jump_height = 1.2 if hit.entity == self.block_4_29:", "== self.block_4_30: self.player.jump_height = 1.2 elif hit.entity == self.block_4_31: self.player.jump_height = normalJump self.player.SPEED", "64)) self.block_4_12 = NormalBlock((5, 6, 96)) self.block_4_13 = SpeedBlock((5, 6, 115)) self.block_4_14 =", "hit.entity == self.block_4_30: self.player.jump_height = 1.2 elif hit.entity == self.block_4_31: self.player.jump_height = normalJump", "== self.block_4_31: self.player.jump_height = normalJump self.player.SPEED = normalSpeed if hit.entity == self.finishBlock_4: destroy(self.light)", "normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_27: self.player.jump_height = 1.2 if hit.entity", "self.block_4_32.enable() self.block_4_33.enable() self.secret_1.enable() self.secret_2.enable() self.secret_3.enable() self.secret_4.enable() self.finishBlock_4.enable() def speed(self): self.player.SPEED = normalSpeed def", "# Level04 class Level04(Entity): def __init__(self): super().__init__() self.is_enabled = False self.on = False", "(0, 90, 0)) self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90, 0)) self.block_4_21 =", "the player from falling forever if self.is_enabled == True and self.player.position.y <= -50:", "(5, 10, -128) self.player.rotation = (0, 0, 0) self.player.count = 0.0 if hit.entity", "self.player.SPEED = normalSpeed self.player.jump_height = 1.2 if hit.entity == self.block_4_26: self.player.SPEED = normalSpeed", "self.secret_4 = 
NormalBlock((-200, 35, -156)) self.player = None self.disable() def disable(self): self.is_enabled =", "Entity(model = \"lava_level_4.obj\", color = \"#454545\", collider = \"mesh\", scale = (10, 10," ]
[ "def get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL +", "= \"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri)", "os from pathlib import Path def get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL =", "Path def get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL", "import Path def get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT =", "import os from pathlib import Path def get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL", "+ \"localhost:3000\" ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string): file_path", "-> Path: return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR", "PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root() def download(uri):", "return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root()", "import urllib.request import os from pathlib import Path def get_project_root() -> Path: return", "from pathlib import Path def get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL = \"http://\"", "get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\"", "Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root() def", "Path: return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR =", "urllib.request import os from 
pathlib import Path def get_project_root() -> Path: return Path(__file__).parent.parent", "pathlib import Path def get_project_root() -> Path: return Path(__file__).parent.parent PROTOCOL = \"http://\" FILES_URL_ROOT", "= PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def", "PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string):", "\"localhost:3000\" ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string): file_path =", "download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string): file_path = '/'.join(path_string.split(\"/\")[1:-1]) if not os.path.isdir(file_path): os.makedirs(file_path)", "def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string): file_path = '/'.join(path_string.split(\"/\")[1:-1]) if not os.path.isdir(file_path):", "FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\")", "= get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string): file_path = '/'.join(path_string.split(\"/\")[1:-1]) if", "\"http://\" FILES_URL_ROOT = PROTOCOL + \"localhost:3000\" ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri) 
urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\",", "ROOT_DIR = get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string): file_path = '/'.join(path_string.split(\"/\")[1:-1])", "get_project_root() def download(uri): create_dirs_from_uri(uri) urllib.request.urlretrieve(f\"{FILES_URL_ROOT}{uri}\", f\"{ROOT_DIR}{uri}\") def create_dirs_from_uri(path_string): file_path = '/'.join(path_string.split(\"/\")[1:-1]) if not" ]
[ "import PDFtoXMLConverter from .data import PDFnXMLPath, PDFnXMLElement __all__ = [ \"PDFtoXMLConverter\", \"PDFnXMLPath\", \"PDFnXMLElement\",", "from .converter import PDFtoXMLConverter from .data import PDFnXMLPath, PDFnXMLElement __all__ = [ \"PDFtoXMLConverter\",", ".converter import PDFtoXMLConverter from .data import PDFnXMLPath, PDFnXMLElement __all__ = [ \"PDFtoXMLConverter\", \"PDFnXMLPath\",", "<gh_stars>1-10 from .converter import PDFtoXMLConverter from .data import PDFnXMLPath, PDFnXMLElement __all__ = [", "PDFtoXMLConverter from .data import PDFnXMLPath, PDFnXMLElement __all__ = [ \"PDFtoXMLConverter\", \"PDFnXMLPath\", \"PDFnXMLElement\", ]" ]
[ "ângulo. import math angulo = float(input('Digite o valor do angulo:')) coseno = math.cos(math.radians(angulo))", "= float(input('Digite o valor do angulo:')) coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente", "seno, cosseno e tangente desse ângulo. import math angulo = float(input('Digite o valor", "tela o valor do seno, cosseno e tangente desse ângulo. import math angulo", "e tangente desse ângulo. import math angulo = float(input('Digite o valor do angulo:'))", "= math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno de {}", "= math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno))", "programa que leia um ângulo qualquer e mostre na tela o valor do", "angulo:')) coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de {}", "valor do angulo:')) coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno", "um programa que leia um ângulo qualquer e mostre na tela o valor", "# Faça um programa que leia um ângulo qualquer e mostre na tela", "print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno de {} é {:.2f}'.format(angulo,seno)) print('Tangente de {}", "math angulo = float(input('Digite o valor do angulo:')) coseno = math.cos(math.radians(angulo)) seno =", "math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno de {} é", "o valor do seno, cosseno e tangente desse ângulo. import math angulo =", "desse ângulo. 
import math angulo = float(input('Digite o valor do angulo:')) coseno =", "o valor do angulo:')) coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo))", "ângulo qualquer e mostre na tela o valor do seno, cosseno e tangente", "do seno, cosseno e tangente desse ângulo. import math angulo = float(input('Digite o", "math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno", "e mostre na tela o valor do seno, cosseno e tangente desse ângulo.", "leia um ângulo qualquer e mostre na tela o valor do seno, cosseno", "na tela o valor do seno, cosseno e tangente desse ângulo. import math", "que leia um ângulo qualquer e mostre na tela o valor do seno,", "import math angulo = float(input('Digite o valor do angulo:')) coseno = math.cos(math.radians(angulo)) seno", "math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno de {} é {:.2f}'.format(angulo,seno)) print('Tangente de", "float(input('Digite o valor do angulo:')) coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente =", "seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno de", "tangente = math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno de {} é {:.2f}'.format(angulo,seno))", "coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de {} é", "Faça um programa que leia um ângulo qualquer e mostre na tela o", "mostre na tela o valor do seno, cosseno e tangente desse ângulo. import", "tangente desse ângulo. 
import math angulo = float(input('Digite o valor do angulo:')) coseno", "= math.tan(math.radians(angulo)) print('Coseno de {} é {:.2f}'.format(angulo,coseno)) print('Seno de {} é {:.2f}'.format(angulo,seno)) print('Tangente", "de {} é {:.2f}'.format(angulo,coseno)) print('Seno de {} é {:.2f}'.format(angulo,seno)) print('Tangente de {} é", "cosseno e tangente desse ângulo. import math angulo = float(input('Digite o valor do", "do angulo:')) coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo)) tangente = math.tan(math.radians(angulo)) print('Coseno de", "angulo = float(input('Digite o valor do angulo:')) coseno = math.cos(math.radians(angulo)) seno = math.sin(math.radians(angulo))", "um ângulo qualquer e mostre na tela o valor do seno, cosseno e", "valor do seno, cosseno e tangente desse ângulo. import math angulo = float(input('Digite", "{} é {:.2f}'.format(angulo,coseno)) print('Seno de {} é {:.2f}'.format(angulo,seno)) print('Tangente de {} é {:.2f}'.format(angulo,tangente))", "qualquer e mostre na tela o valor do seno, cosseno e tangente desse" ]
[ "not config.get('compensateMissedBlocks'): mask |= 1024 if config.get('payForEndorsements'): mask |= 2 if not config.get('compensateLowPriorityEndorsementLoss'):", "config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if not config.get('compensateMissedEndorsements'): mask |= 4096 if config.get('payGainedFees'): mask", "int(Decimal(value) * factor) elif isinstance(value, int): res = value else: assert False, value", "import Decimal def decode_mutez(value): return Decimal(value) / 10000 def decode_percent(value, decimals=2): return Decimal(value)", "data.get('split'): res = int(data['split']) elif data.get('fee'): res = 10000 - encode_percent(data['fee'], decimals=4) else:", "* factor) elif isinstance(value, int): res = value else: assert False, value assert", "0 }, 'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])), 'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'], 'reporterAccount': info['reporterAccount'] } def try_hex_encode(data): if re.match('^[0-9a-f]$',", "|= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask |= 512 return default def encode_mutez(value): if isinstance(value,", "|= 2 if not config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if not config.get('compensateMissedEndorsements'): mask |=", "& 8 > 0, 'subtractLostDepositsWhenAccused': data['paymentConfigMask'] & 16 > 0, 'subtractLostRewardsWhenAccused': data['paymentConfigMask'] &", "'')), 'openForDelegation': info.get('openForDelegation', True), 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')), 'split': encode_split(info), 'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []), 'minDelegation':", "> 0, 'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] & 256 > 0, 'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512 >", "config.get('subtractLostFeesWhenAccused'): mask |= 64 if config.get('payForRevelation'): mask |= 
128 if config.get('subtractLostRewardsWhenMissRevelation'): mask |=", "encode_split(info), 'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []), 'minDelegation': encode_mutez(info.get('minDelegation', 0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True), 'payoutDelay': info.get('payoutDelay', 0),", "0, 'payForRevelation': data['paymentConfigMask'] & 128 > 0, 'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] & 256 > 0,", "assert False, value assert 0 <= res <= factor, f'Should be between 0", "|= 1 if config.get('payForStolenBlocks'): mask |= 2048 if not config.get('compensateMissedBlocks'): mask |= 1024", "res = 10000 return res def encode_info(info): return { 'data': {'bakerName': try_hex_encode(info.get('bakerName', '')),", "between 0 and {factor}' return res def encode_split(data): if data.get('split'): res = int(data['split'])", "'payoutFrequency': info.get('payoutFrequency', 1), 'minPayout': encode_mutez(info.get('minPayout', 0)), 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False), 'paymentConfigMask': encode_config_mask(info, 16383), 'overDelegationThreshold':", "'subtractLostFeesWhenAccused': data['paymentConfigMask'] & 64 > 0, 'payForRevelation': data['paymentConfigMask'] & 128 > 0, 'subtractLostRewardsWhenMissRevelation':", "{factor}' return res def encode_split(data): if data.get('split'): res = int(data['split']) elif data.get('fee'): res", "decimals=4) def decode_hex(value): return value.decode() def decode_info(info): data = info['data'] return { 'bakerName':", "return data.encode() def encode_config_mask(data, default): if data.get('paymentConfigMask'): return int(data['paymentConfigMask']) if data.get('paymentConfig'): mask =", "return res def encode_percent(value, decimals=2): factor = 10 ** decimals if isinstance(value, str):", "'Cannot be negative' return res def encode_percent(value, decimals=2): factor = 10 ** decimals", "def encode_mutez(value): if isinstance(value, 
str): res = int(Decimal(value) * 10000) elif isinstance(value, int):", "res = int(Decimal(value) * factor) elif isinstance(value, int): res = value else: assert", "== 0, 'payForEndorsements': data['paymentConfigMask'] & 2 > 0, 'compensateLowPriorityEndorsementLoss': data['paymentConfigMask'] & 8192 ==", "data['paymentConfigMask'] & 4096 == 0, 'payGainedFees': data['paymentConfigMask'] & 4 > 0, 'payForAccusationGains': data['paymentConfigMask']", "if isinstance(value, str): res = int(Decimal(value) * factor) elif isinstance(value, int): res =", "if config.get('subtractLostDepositsWhenAccused'): mask |= 16 if config.get('subtractLostRewardsWhenAccused'): mask |= 32 if config.get('subtractLostFeesWhenAccused'): mask", "<= res <= factor, f'Should be between 0 and {factor}' return res def", "0, 'compensateMissedEndorsements': data['paymentConfigMask'] & 4096 == 0, 'payGainedFees': data['paymentConfigMask'] & 4 > 0,", "& 32 > 0, 'subtractLostFeesWhenAccused': data['paymentConfigMask'] & 64 > 0, 'payForRevelation': data['paymentConfigMask'] &", "'reporterAccount': info['reporterAccount'] } def try_hex_encode(data): if re.match('^[0-9a-f]$', data) and len(data) % 2 ==", "default def encode_mutez(value): if isinstance(value, str): res = int(Decimal(value) * 10000) elif isinstance(value,", "from decimal import Decimal def decode_mutez(value): return Decimal(value) / 10000 def decode_percent(value, decimals=2):", "int(data['paymentConfigMask']) if data.get('paymentConfig'): mask = 0 config = data['paymentConfig'] if config.get('payForOwnBlocks'): mask |=", "config.get('payForRevelation'): mask |= 128 if config.get('subtractLostRewardsWhenMissRevelation'): mask |= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask |=", "res = value else: assert False, value assert res >= 0, 'Cannot be", "return default def encode_mutez(value): if isinstance(value, str): res = int(Decimal(value) * 10000) elif", "0, 'subtractLostRewardsWhenAccused': data['paymentConfigMask'] & 32 > 0, 
'subtractLostFeesWhenAccused': data['paymentConfigMask'] & 64 > 0,", "res = 10000 - encode_percent(data['fee'], decimals=4) else: res = 10000 return res def", "config.get('subtractLostFeesWhenMissRevelation'): mask |= 512 return default def encode_mutez(value): if isinstance(value, str): res =", "data.get('fee'): res = 10000 - encode_percent(data['fee'], decimals=4) else: res = 10000 return res", "data.get('paymentConfigMask'): return int(data['paymentConfigMask']) if data.get('paymentConfig'): mask = 0 config = data['paymentConfig'] if config.get('payForOwnBlocks'):", "info.get('payoutFrequency', 1), 'minPayout': encode_mutez(info.get('minPayout', 0)), 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False), 'paymentConfigMask': encode_config_mask(info, 16383), 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold',", "def decode_mutez(value): return Decimal(value) / 10000 def decode_percent(value, decimals=2): return Decimal(value) / 10", "|= 128 if config.get('subtractLostRewardsWhenMissRevelation'): mask |= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask |= 512 return", "0), 'payoutFrequency': info.get('payoutFrequency', 1), 'minPayout': encode_mutez(info.get('minPayout', 0)), 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False), 'paymentConfigMask': encode_config_mask(info, 16383),", "> 0, 'compensateMissedBlocks': data['paymentConfigMask'] & 1024 == 0, 'payForEndorsements': data['paymentConfigMask'] & 2 >", "info['data'] return { 'bakerName': decode_hex(data['bakerName']), 'openForDelegation': data['openForDelegation'], 'bakerOffchainRegistryUrl': decode_hex(data['bakerOffchainRegistryUrl']), 'fee': str(decode_split(data['split'])), 'bakerPaysFromAccounts': data['bakerPaysFromAccounts'],", "if isinstance(value, str): res = int(Decimal(value) * 10000) elif isinstance(value, int): res =", "int): res = value else: assert False, value assert 0 <= res <=", "encode_info(info): return { 
'data': {'bakerName': try_hex_encode(info.get('bakerName', '')), 'openForDelegation': info.get('openForDelegation', True), 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')),", "'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] & 256 > 0, 'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512 > 0 },", "'payForAccusationGains': data['paymentConfigMask'] & 8 > 0, 'subtractLostDepositsWhenAccused': data['paymentConfigMask'] & 16 > 0, 'subtractLostRewardsWhenAccused':", "|= 4096 if config.get('payGainedFees'): mask |= 4 if config.get('payForAccusationGains'): mask |= 8 if", "& 256 > 0, 'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512 > 0 }, 'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])),", "& 64 > 0, 'payForRevelation': data['paymentConfigMask'] & 128 > 0, 'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] &", "if data.get('split'): res = int(data['split']) elif data.get('fee'): res = 10000 - encode_percent(data['fee'], decimals=4)", "and len(data) % 2 == 0: return bytes.fromhex(data) else: return data.encode() def encode_config_mask(data,", "data.encode() def encode_config_mask(data, default): if data.get('paymentConfigMask'): return int(data['paymentConfigMask']) if data.get('paymentConfig'): mask = 0", "mask |= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask |= 512 return default def encode_mutez(value): if", "= data['paymentConfig'] if config.get('payForOwnBlocks'): mask |= 1 if config.get('payForStolenBlocks'): mask |= 2048 if", "mask |= 2048 if not config.get('compensateMissedBlocks'): mask |= 1024 if config.get('payForEndorsements'): mask |=", "0, 'Cannot be negative' return res def encode_percent(value, decimals=2): factor = 10 **", "64 > 0, 'payForRevelation': data['paymentConfigMask'] & 128 > 0, 'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] & 256", "8 if 
config.get('subtractLostDepositsWhenAccused'): mask |= 16 if config.get('subtractLostRewardsWhenAccused'): mask |= 32 if config.get('subtractLostFeesWhenAccused'):", "if config.get('subtractLostFeesWhenAccused'): mask |= 64 if config.get('payForRevelation'): mask |= 128 if config.get('subtractLostRewardsWhenMissRevelation'): mask", "mask |= 2 if not config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if not config.get('compensateMissedEndorsements'): mask", "info.get('subtractRewardsFromUninvitedDelegation', True)}, 'reporterAccount': info['reporterAccount'] } def decode_snapshot(snapshot: dict): return dict(map(lambda x: (x[0], decode_info(x[1])),", "return value.decode() def decode_info(info): data = info['data'] return { 'bakerName': decode_hex(data['bakerName']), 'openForDelegation': data['openForDelegation'],", "}, 'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])), 'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'], 'reporterAccount': info['reporterAccount'] } def try_hex_encode(data): if re.match('^[0-9a-f]$', data)", "decimal import Decimal def decode_mutez(value): return Decimal(value) / 10000 def decode_percent(value, decimals=2): return", "'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []), 'minDelegation': encode_mutez(info.get('minDelegation', 0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True), 'payoutDelay': info.get('payoutDelay', 0), 'payoutFrequency':", "'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)}, 'reporterAccount': info['reporterAccount'] } def decode_snapshot(snapshot: dict): return dict(map(lambda x: (x[0],", "def encode_split(data): if data.get('split'): res = int(data['split']) elif data.get('fee'): res = 10000 -", "encode_mutez(info.get('minPayout', 0)), 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False), 'paymentConfigMask': 
encode_config_mask(info, 16383), 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation',", "32 > 0, 'subtractLostFeesWhenAccused': data['paymentConfigMask'] & 64 > 0, 'payForRevelation': data['paymentConfigMask'] & 128", "512 > 0 }, 'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])), 'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'], 'reporterAccount': info['reporterAccount'] } def try_hex_encode(data):", "data['payoutFrequency'], 'minPayout': str(decode_mutez(data['minPayout'])), 'bakerChargesTransactionFee': data['bakerChargesTransactionFee'], 'paymentConfig': { 'payForOwnBlocks': data['paymentConfigMask'] & 1 > 0,", "if data.get('paymentConfigMask'): return int(data['paymentConfigMask']) if data.get('paymentConfig'): mask = 0 config = data['paymentConfig'] if", "'')), 'split': encode_split(info), 'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []), 'minDelegation': encode_mutez(info.get('minDelegation', 0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True), 'payoutDelay':", "'subtractLostDepositsWhenAccused': data['paymentConfigMask'] & 16 > 0, 'subtractLostRewardsWhenAccused': data['paymentConfigMask'] & 32 > 0, 'subtractLostFeesWhenAccused':", "'minPayout': encode_mutez(info.get('minPayout', 0)), 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False), 'paymentConfigMask': encode_config_mask(info, 16383), 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation':", "/ 10 ** decimals def decode_split(value): return 1 - decode_percent(value, decimals=4) def decode_hex(value):", "def decode_split(value): return 1 - decode_percent(value, decimals=4) def decode_hex(value): return value.decode() def decode_info(info):", "if 
def decode_info(info):
    """Translate a raw registry entry into its readable JSON-style layout.

    ``info`` must hold a ``'data'`` mapping with the raw on-chain fields plus a
    top-level ``'reporterAccount'``.
    """
    data = info['data']
    mask = data['paymentConfigMask']
    # (key, bit, set_means_on): for the `compensate*` flags a SET bit means the
    # compensation is turned OFF, hence the inverted test below.
    flag_spec = (
        ('payForOwnBlocks', 1, True),
        ('payForStolenBlocks', 2048, True),
        ('compensateMissedBlocks', 1024, False),
        ('payForEndorsements', 2, True),
        ('compensateLowPriorityEndorsementLoss', 8192, False),
        ('compensateMissedEndorsements', 4096, False),
        ('payGainedFees', 4, True),
        ('payForAccusationGains', 8, True),
        ('subtractLostDepositsWhenAccused', 16, True),
        ('subtractLostRewardsWhenAccused', 32, True),
        ('subtractLostFeesWhenAccused', 64, True),
        ('payForRevelation', 128, True),
        ('subtractLostRewardsWhenMissRevelation', 256, True),
        ('subtractLostFeesWhenMissRevelation', 512, True),
    )
    payment_config = {
        key: (mask & bit > 0) if set_means_on else (mask & bit == 0)
        for key, bit, set_means_on in flag_spec
    }
    return {
        'bakerName': decode_hex(data['bakerName']),
        'openForDelegation': data['openForDelegation'],
        'bakerOffchainRegistryUrl': decode_hex(data['bakerOffchainRegistryUrl']),
        'fee': str(decode_split(data['split'])),
        'bakerPaysFromAccounts': data['bakerPaysFromAccounts'],
        'minDelegation': str(decode_mutez(data['minDelegation'])),
        'subtractPayoutsLessThanMin': data['subtractPayoutsLessThanMin'],
        'payoutDelay': data['payoutDelay'],
        'payoutFrequency': data['payoutFrequency'],
        'minPayout': str(decode_mutez(data['minPayout'])),
        'bakerChargesTransactionFee': data['bakerChargesTransactionFee'],
        'paymentConfig': payment_config,
        'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])),
        'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'],
        'reporterAccount': info['reporterAccount'],
    }
'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'], 'reporterAccount': info['reporterAccount'] }", "True), 'payoutDelay': info.get('payoutDelay', 0), 'payoutFrequency': info.get('payoutFrequency', 1), 'minPayout': encode_mutez(info.get('minPayout', 0)), 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False),", "assert res >= 0, 'Cannot be negative' return res def encode_percent(value, decimals=2): factor", "return { 'data': {'bakerName': try_hex_encode(info.get('bakerName', '')), 'openForDelegation': info.get('openForDelegation', True), 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')), 'split':", "and {factor}' return res def encode_split(data): if data.get('split'): res = int(data['split']) elif data.get('fee'):", "& 2048 > 0, 'compensateMissedBlocks': data['paymentConfigMask'] & 1024 == 0, 'payForEndorsements': data['paymentConfigMask'] &", "'bakerName': decode_hex(data['bakerName']), 'openForDelegation': data['openForDelegation'], 'bakerOffchainRegistryUrl': decode_hex(data['bakerOffchainRegistryUrl']), 'fee': str(decode_split(data['split'])), 'bakerPaysFromAccounts': data['bakerPaysFromAccounts'], 'minDelegation': str(decode_mutez(data['minDelegation'])), 'subtractPayoutsLessThanMin':", "info.get('bakerPaysFromAccounts', []), 'minDelegation': encode_mutez(info.get('minDelegation', 0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True), 'payoutDelay': info.get('payoutDelay', 0), 'payoutFrequency': info.get('payoutFrequency',", "16383), 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)}, 'reporterAccount': info['reporterAccount'] } def decode_snapshot(snapshot: dict):", "if config.get('payForEndorsements'): mask |= 2 if not config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if not", "& 128 > 0, 
def encode_config_mask(data, default):
    """Resolve the integer ``paymentConfigMask`` for a registry entry.

    Precedence: an explicit (truthy) 'paymentConfigMask' wins; otherwise a
    'paymentConfig' mapping of booleans is folded into the bitmask; otherwise
    ``default`` is returned.

    Note the inverted bits: for the ``compensate*`` options a SET bit means
    the compensation is DISABLED.
    """
    if data.get('paymentConfigMask'):
        return int(data['paymentConfigMask'])
    if data.get('paymentConfig'):
        mask = 0
        config = data['paymentConfig']
        if config.get('payForOwnBlocks'):
            mask |= 1
        if config.get('payForStolenBlocks'):
            mask |= 2048
        if not config.get('compensateMissedBlocks'):
            mask |= 1024
        if config.get('payForEndorsements'):
            mask |= 2
        if not config.get('compensateLowPriorityEndorsementLoss'):
            mask |= 8192
        if not config.get('compensateMissedEndorsements'):
            mask |= 4096
        if config.get('payGainedFees'):
            mask |= 4
        if config.get('payForAccusationGains'):
            mask |= 8
        if config.get('subtractLostDepositsWhenAccused'):
            mask |= 16
        if config.get('subtractLostRewardsWhenAccused'):
            mask |= 32
        if config.get('subtractLostFeesWhenAccused'):
            mask |= 64
        if config.get('payForRevelation'):
            mask |= 128
        if config.get('subtractLostRewardsWhenMissRevelation'):
            mask |= 256
        if config.get('subtractLostFeesWhenMissRevelation'):
            mask |= 512
        # BUG FIX: the computed mask was previously discarded — control fell
        # through to `return default`, making the whole loop above dead code.
        return mask
    return default
default): if data.get('paymentConfigMask'): return int(data['paymentConfigMask']) if data.get('paymentConfig'):", "1 > 0, 'payForStolenBlocks': data['paymentConfigMask'] & 2048 > 0, 'compensateMissedBlocks': data['paymentConfigMask'] & 1024", "10000 def decode_percent(value, decimals=2): return Decimal(value) / 10 ** decimals def decode_split(value): return", "decode_split(value): return 1 - decode_percent(value, decimals=4) def decode_hex(value): return value.decode() def decode_info(info): data", "config = data['paymentConfig'] if config.get('payForOwnBlocks'): mask |= 1 if config.get('payForStolenBlocks'): mask |= 2048", "data['paymentConfigMask'] & 256 > 0, 'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512 > 0 }, 'overDelegationThreshold':", "isinstance(value, int): res = value else: assert False, value assert 0 <= res", "False), 'paymentConfigMask': encode_config_mask(info, 16383), 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)}, 'reporterAccount': info['reporterAccount'] }", "{'bakerName': try_hex_encode(info.get('bakerName', '')), 'openForDelegation': info.get('openForDelegation', True), 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')), 'split': encode_split(info), 'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts',", "'bakerPaysFromAccounts': data['bakerPaysFromAccounts'], 'minDelegation': str(decode_mutez(data['minDelegation'])), 'subtractPayoutsLessThanMin': data['subtractPayoutsLessThanMin'], 'payoutDelay': data['payoutDelay'], 'payoutFrequency': data['payoutFrequency'], 'minPayout': str(decode_mutez(data['minPayout'])), 'bakerChargesTransactionFee':", "> 0, 'compensateLowPriorityEndorsementLoss': data['paymentConfigMask'] & 8192 == 0, 'compensateMissedEndorsements': data['paymentConfigMask'] & 4096 ==", "'paymentConfig': { 'payForOwnBlocks': 
data['paymentConfigMask'] & 1 > 0, 'payForStolenBlocks': data['paymentConfigMask'] & 2048 >", "config.get('payForEndorsements'): mask |= 2 if not config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if not config.get('compensateMissedEndorsements'):", "mask |= 512 return default def encode_mutez(value): if isinstance(value, str): res = int(Decimal(value)", "'openForDelegation': info.get('openForDelegation', True), 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')), 'split': encode_split(info), 'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []), 'minDelegation': encode_mutez(info.get('minDelegation',", "mask |= 32 if config.get('subtractLostFeesWhenAccused'): mask |= 64 if config.get('payForRevelation'): mask |= 128", "= int(data['split']) elif data.get('fee'): res = 10000 - encode_percent(data['fee'], decimals=4) else: res =", "config.get('payForStolenBlocks'): mask |= 2048 if not config.get('compensateMissedBlocks'): mask |= 1024 if config.get('payForEndorsements'): mask", "not config.get('compensateMissedEndorsements'): mask |= 4096 if config.get('payGainedFees'): mask |= 4 if config.get('payForAccusationGains'): mask", "{ 'payForOwnBlocks': data['paymentConfigMask'] & 1 > 0, 'payForStolenBlocks': data['paymentConfigMask'] & 2048 > 0,", "decode_hex(data['bakerName']), 'openForDelegation': data['openForDelegation'], 'bakerOffchainRegistryUrl': decode_hex(data['bakerOffchainRegistryUrl']), 'fee': str(decode_split(data['split'])), 'bakerPaysFromAccounts': data['bakerPaysFromAccounts'], 'minDelegation': str(decode_mutez(data['minDelegation'])), 'subtractPayoutsLessThanMin': data['subtractPayoutsLessThanMin'],", "encode_mutez(value): if isinstance(value, str): res = int(Decimal(value) * 10000) elif isinstance(value, int): res", "'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)}, 
'reporterAccount': info['reporterAccount'] } def decode_snapshot(snapshot: dict): return", "assert 0 <= res <= factor, f'Should be between 0 and {factor}' return", "isinstance(value, int): res = value else: assert False, value assert res >= 0,", "return res def encode_split(data): if data.get('split'): res = int(data['split']) elif data.get('fee'): res =", "elif isinstance(value, int): res = value else: assert False, value assert res >=", "data) and len(data) % 2 == 0: return bytes.fromhex(data) else: return data.encode() def", "encode_config_mask(info, 16383), 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)}, 'reporterAccount': info['reporterAccount'] } def decode_snapshot(snapshot:", "elif data.get('fee'): res = 10000 - encode_percent(data['fee'], decimals=4) else: res = 10000 return", "def decode_hex(value): return value.decode() def decode_info(info): data = info['data'] return { 'bakerName': decode_hex(data['bakerName']),", "decimals def decode_split(value): return 1 - decode_percent(value, decimals=4) def decode_hex(value): return value.decode() def", "config.get('subtractLostRewardsWhenMissRevelation'): mask |= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask |= 512 return default def encode_mutez(value):", "data['paymentConfigMask'] & 8 > 0, 'subtractLostDepositsWhenAccused': data['paymentConfigMask'] & 16 > 0, 'subtractLostRewardsWhenAccused': data['paymentConfigMask']", "encode_mutez(info.get('minDelegation', 0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True), 'payoutDelay': info.get('payoutDelay', 0), 'payoutFrequency': info.get('payoutFrequency', 1), 'minPayout': encode_mutez(info.get('minPayout',", "== 0, 'payGainedFees': data['paymentConfigMask'] & 4 > 0, 'payForAccusationGains': data['paymentConfigMask'] & 8 >", "0, 'subtractLostFeesWhenAccused': data['paymentConfigMask'] & 
def encode_info(info):
    """Build the raw storage representation of a registry entry from JSON input.

    Missing fields fall back to permissive defaults: open for delegation, all
    14 payment-mask bits set (16383), 100% over-delegation threshold, zero
    minimums.  'reporterAccount' is required.
    """
    data = {
        'bakerName': try_hex_encode(info.get('bakerName', '')),
        'openForDelegation': info.get('openForDelegation', True),
        'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')),
        'split': encode_split(info),
        'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []),
        'minDelegation': encode_mutez(info.get('minDelegation', 0)),
        'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True),
        'payoutDelay': info.get('payoutDelay', 0),
        'payoutFrequency': info.get('payoutFrequency', 1),
        'minPayout': encode_mutez(info.get('minPayout', 0)),
        'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False),
        'paymentConfigMask': encode_config_mask(info, 16383),
        'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)),
        'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True),
    }
    return {'data': data, 'reporterAccount': info['reporterAccount']}
def decode_snapshot(snapshot: dict):
    """Decode every raw entry of a registry snapshot, keeping the input's keys."""
    return {key: decode_info(raw) for key, raw in snapshot.items()}
data['paymentConfigMask'] & 1024 == 0,", "'payGainedFees': data['paymentConfigMask'] & 4 > 0, 'payForAccusationGains': data['paymentConfigMask'] & 8 > 0, 'subtractLostDepositsWhenAccused':", "encode_config_mask(data, default): if data.get('paymentConfigMask'): return int(data['paymentConfigMask']) if data.get('paymentConfig'): mask = 0 config =", "10 ** decimals def decode_split(value): return 1 - decode_percent(value, decimals=4) def decode_hex(value): return", "'data': {'bakerName': try_hex_encode(info.get('bakerName', '')), 'openForDelegation': info.get('openForDelegation', True), 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')), 'split': encode_split(info), 'bakerPaysFromAccounts':", "'compensateMissedBlocks': data['paymentConfigMask'] & 1024 == 0, 'payForEndorsements': data['paymentConfigMask'] & 2 > 0, 'compensateLowPriorityEndorsementLoss':", "return 1 - decode_percent(value, decimals=4) def decode_hex(value): return value.decode() def decode_info(info): data =", "& 16 > 0, 'subtractLostRewardsWhenAccused': data['paymentConfigMask'] & 32 > 0, 'subtractLostFeesWhenAccused': data['paymentConfigMask'] &", "128 > 0, 'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] & 256 > 0, 'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512", "config.get('compensateMissedBlocks'): mask |= 1024 if config.get('payForEndorsements'): mask |= 2 if not config.get('compensateLowPriorityEndorsementLoss'): mask", "128 if config.get('subtractLostRewardsWhenMissRevelation'): mask |= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask |= 512 return default", "data['subtractRewardsFromUninvitedDelegation'], 'reporterAccount': info['reporterAccount'] } def try_hex_encode(data): if re.match('^[0-9a-f]$', data) and len(data) % 2", "mask |= 64 if config.get('payForRevelation'): mask |= 128 if config.get('subtractLostRewardsWhenMissRevelation'): mask |= 256", "factor = 10 ** decimals if isinstance(value, str): res 
= int(Decimal(value) * factor)", "encode_percent(data['fee'], decimals=4) else: res = 10000 return res def encode_info(info): return { 'data':", "'paymentConfigMask': encode_config_mask(info, 16383), 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)}, 'reporterAccount': info['reporterAccount'] } def", "'bakerChargesTransactionFee': data['bakerChargesTransactionFee'], 'paymentConfig': { 'payForOwnBlocks': data['paymentConfigMask'] & 1 > 0, 'payForStolenBlocks': data['paymentConfigMask'] &", "int): res = value else: assert False, value assert res >= 0, 'Cannot", "'payoutFrequency': data['payoutFrequency'], 'minPayout': str(decode_mutez(data['minPayout'])), 'bakerChargesTransactionFee': data['bakerChargesTransactionFee'], 'paymentConfig': { 'payForOwnBlocks': data['paymentConfigMask'] & 1 >", "mask |= 16 if config.get('subtractLostRewardsWhenAccused'): mask |= 32 if config.get('subtractLostFeesWhenAccused'): mask |= 64", "else: assert False, value assert 0 <= res <= factor, f'Should be between", "re.match('^[0-9a-f]$', data) and len(data) % 2 == 0: return bytes.fromhex(data) else: return data.encode()", "'minDelegation': str(decode_mutez(data['minDelegation'])), 'subtractPayoutsLessThanMin': data['subtractPayoutsLessThanMin'], 'payoutDelay': data['payoutDelay'], 'payoutFrequency': data['payoutFrequency'], 'minPayout': str(decode_mutez(data['minPayout'])), 'bakerChargesTransactionFee': data['bakerChargesTransactionFee'], 'paymentConfig':", "mask |= 1 if config.get('payForStolenBlocks'): mask |= 2048 if not config.get('compensateMissedBlocks'): mask |=", "2 if not config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if not config.get('compensateMissedEndorsements'): mask |= 4096", "decode_hex(data['bakerOffchainRegistryUrl']), 'fee': str(decode_split(data['split'])), 'bakerPaysFromAccounts': data['bakerPaysFromAccounts'], 
'minDelegation': str(decode_mutez(data['minDelegation'])), 'subtractPayoutsLessThanMin': data['subtractPayoutsLessThanMin'], 'payoutDelay': data['payoutDelay'], 'payoutFrequency': data['payoutFrequency'],", "Decimal def decode_mutez(value): return Decimal(value) / 10000 def decode_percent(value, decimals=2): return Decimal(value) /", "'payoutDelay': info.get('payoutDelay', 0), 'payoutFrequency': info.get('payoutFrequency', 1), 'minPayout': encode_mutez(info.get('minPayout', 0)), 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False), 'paymentConfigMask':", "|= 32 if config.get('subtractLostFeesWhenAccused'): mask |= 64 if config.get('payForRevelation'): mask |= 128 if", "2048 > 0, 'compensateMissedBlocks': data['paymentConfigMask'] & 1024 == 0, 'payForEndorsements': data['paymentConfigMask'] & 2", "def encode_info(info): return { 'data': {'bakerName': try_hex_encode(info.get('bakerName', '')), 'openForDelegation': info.get('openForDelegation', True), 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl',", "data['paymentConfigMask'] & 64 > 0, 'payForRevelation': data['paymentConfigMask'] & 128 > 0, 'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask']", "data['bakerChargesTransactionFee'], 'paymentConfig': { 'payForOwnBlocks': data['paymentConfigMask'] & 1 > 0, 'payForStolenBlocks': data['paymentConfigMask'] & 2048", "if not config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if not config.get('compensateMissedEndorsements'): mask |= 4096 if", "data['subtractPayoutsLessThanMin'], 'payoutDelay': data['payoutDelay'], 'payoutFrequency': data['payoutFrequency'], 'minPayout': str(decode_mutez(data['minPayout'])), 'bakerChargesTransactionFee': data['bakerChargesTransactionFee'], 'paymentConfig': { 'payForOwnBlocks': data['paymentConfigMask']", "int(data['split']) elif data.get('fee'): res = 10000 - encode_percent(data['fee'], decimals=4) else: res = 10000", "10000 return res def encode_info(info): 
return { 'data': {'bakerName': try_hex_encode(info.get('bakerName', '')), 'openForDelegation': info.get('openForDelegation',", "if re.match('^[0-9a-f]$', data) and len(data) % 2 == 0: return bytes.fromhex(data) else: return", "1 if config.get('payForStolenBlocks'): mask |= 2048 if not config.get('compensateMissedBlocks'): mask |= 1024 if", "if config.get('subtractLostRewardsWhenMissRevelation'): mask |= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask |= 512 return default def", "if config.get('subtractLostRewardsWhenAccused'): mask |= 32 if config.get('subtractLostFeesWhenAccused'): mask |= 64 if config.get('payForRevelation'): mask", "def decode_percent(value, decimals=2): return Decimal(value) / 10 ** decimals def decode_split(value): return 1", "return res def encode_info(info): return { 'data': {'bakerName': try_hex_encode(info.get('bakerName', '')), 'openForDelegation': info.get('openForDelegation', True),", "res def encode_split(data): if data.get('split'): res = int(data['split']) elif data.get('fee'): res = 10000", "'minPayout': str(decode_mutez(data['minPayout'])), 'bakerChargesTransactionFee': data['bakerChargesTransactionFee'], 'paymentConfig': { 'payForOwnBlocks': data['paymentConfigMask'] & 1 > 0, 'payForStolenBlocks':", "try_hex_encode(info.get('bakerOffchainRegistryUrl', '')), 'split': encode_split(info), 'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []), 'minDelegation': encode_mutez(info.get('minDelegation', 0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True),", "= int(Decimal(value) * factor) elif isinstance(value, int): res = value else: assert False,", "def decode_info(info): data = info['data'] return { 'bakerName': decode_hex(data['bakerName']), 'openForDelegation': data['openForDelegation'], 'bakerOffchainRegistryUrl': decode_hex(data['bakerOffchainRegistryUrl']),", "10000) elif isinstance(value, int): res = value else: assert False, value assert res", "= int(Decimal(value) * 10000) elif 
isinstance(value, int): res = value else: assert False,", "- decode_percent(value, decimals=4) def decode_hex(value): return value.decode() def decode_info(info): data = info['data'] return", "if config.get('payForRevelation'): mask |= 128 if config.get('subtractLostRewardsWhenMissRevelation'): mask |= 256 if config.get('subtractLostFeesWhenMissRevelation'): mask", "1024 if config.get('payForEndorsements'): mask |= 2 if not config.get('compensateLowPriorityEndorsementLoss'): mask |= 8192 if", "> 0, 'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512 > 0 }, 'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])), 'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'],", "if not config.get('compensateMissedEndorsements'): mask |= 4096 if config.get('payGainedFees'): mask |= 4 if config.get('payForAccusationGains'):", "encode_percent(value, decimals=2): factor = 10 ** decimals if isinstance(value, str): res = int(Decimal(value)", "0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True), 'payoutDelay': info.get('payoutDelay', 0), 'payoutFrequency': info.get('payoutFrequency', 1), 'minPayout': encode_mutez(info.get('minPayout', 0)),", "= 10 ** decimals if isinstance(value, str): res = int(Decimal(value) * factor) elif", "|= 8 if config.get('subtractLostDepositsWhenAccused'): mask |= 16 if config.get('subtractLostRewardsWhenAccused'): mask |= 32 if", "'minDelegation': encode_mutez(info.get('minDelegation', 0)), 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True), 'payoutDelay': info.get('payoutDelay', 0), 'payoutFrequency': info.get('payoutFrequency', 1), 'minPayout':", "encode_percent(info.get('overDelegationThreshold', 100)), 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)}, 'reporterAccount': info['reporterAccount'] } def decode_snapshot(snapshot: dict): return dict(map(lambda" ]
[ "str ownership: str ARTIFACT_COLUMNS = [f.name for f in fields(Artifact)] class DB: IN_MEMORY", "str path: str deposition_repo: str ownership: str ARTIFACT_COLUMNS = [f.name for f in", "cur.fetchall()] def insert_artifact(self, artifact: Artifact): with self.connect() as conn: cur = conn.cursor() cur.execute(", "def build_schema(self): with resources.open_text(__package__, 'db_schema.sql') as f: with self.connect() as conn: conn.executescript(f.read()) def", "ARTIFACT_COLUMNS = [f.name for f in fields(Artifact)] class DB: IN_MEMORY = ':memory:' def", "found at %s', path) elif found and found[0][0] is not None: raise DuplicateArtifactError(", "os import sqlite3 from .exception import ArtifactNotFoundError, DuplicateArtifactError import logging LOG = logging.getLogger(__name__)", "with self.connect() as conn: conn.executescript(f.read()) def reset(self): with self.connect() as conn: cur =", "ARTIFACT_COLUMNS]) cur.execute(f'update artifacts set {updates} where path=?', astuple(artifact) + (path,)) def connect(self) ->", "= conn.cursor() cur.execute('delete from artifacts') def list_artifacts(self): with self.connect() as conn: cur =", "%s', path) elif found and found[0][0] is not None: raise DuplicateArtifactError( 'Would create", "artifact at %s: %s', path, found[0][0]) elif not found: raise ArtifactNotFoundError( 'Cannot find", "'Cannot find artifact at %s', path) updates = ','.join([f'{col}=?' 
for col in ARTIFACT_COLUMNS])", ".exception import ArtifactNotFoundError, DuplicateArtifactError import logging LOG = logging.getLogger(__name__) DATABASE_NAME = 'chameleon' @dataclass", "database self._conn = None def build_schema(self): with resources.open_text(__package__, 'db_schema.sql') as f: with self.connect()", "?', (path,)) found = cur.fetchall() if len(found) > 1: raise DuplicateArtifactError( 'Multiple artifacts", "(path,)) found = cur.fetchall() if len(found) > 1: raise DuplicateArtifactError( 'Multiple artifacts already", "astuple, dataclass, fields from importlib import resources import os import sqlite3 from .exception", "path=?', astuple(artifact) + (path,)) def connect(self) -> sqlite3.Connection: if not self._conn: self._conn =", "from artifacts') def list_artifacts(self): with self.connect() as conn: cur = conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)}", "> 1: raise DuplicateArtifactError( 'Multiple artifacts already found at %s', path) elif found", "resources import os import sqlite3 from .exception import ArtifactNotFoundError, DuplicateArtifactError import logging LOG", "artifact.path with self.connect() as conn: cur = conn.cursor() cur.execute('select id from artifacts where", "at %s: %s', path, found[0][0]) elif not found: raise ArtifactNotFoundError( 'Cannot find artifact", "LOG.exception(f'Failed to lazy-create DB path {database}') self.database = database self._conn = None def", "%s', path, found[0][0]) elif not found: raise ArtifactNotFoundError( 'Cannot find artifact at %s',", "artifacts where path = ?', (path,)) found = cur.fetchall() if len(found) > 1:", "[f.name for f in fields(Artifact)] class DB: IN_MEMORY = ':memory:' def __init__(self, database=None):", "database path is required') if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError:", "f: with self.connect() as conn: conn.executescript(f.read()) def reset(self): with self.connect() as conn: cur", "and 
found[0][0] is not None: raise DuplicateArtifactError( 'Would create duplicate artifact at %s:", "Artifact: id: str path: str deposition_repo: str ownership: str ARTIFACT_COLUMNS = [f.name for", "conn: cur = conn.cursor() cur.execute( (f'insert into artifacts ({\",\".join(ARTIFACT_COLUMNS)}) ' 'values (?, ?,", "= [f.name for f in fields(Artifact)] class DB: IN_MEMORY = ':memory:' def __init__(self,", "with self.connect() as conn: cur = conn.cursor() cur.execute('delete from artifacts') def list_artifacts(self): with", "raise ValueError('A database path is required') if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True)", "where path = ?', (path,)) found = cur.fetchall() if len(found) > 1: raise", "%s', path) updates = ','.join([f'{col}=?' for col in ARTIFACT_COLUMNS]) cur.execute(f'update artifacts set {updates}", "not found: raise ArtifactNotFoundError( 'Cannot find artifact at %s', path) updates = ','.join([f'{col}=?'", "self.connect() as conn: cur = conn.cursor() cur.execute( (f'insert into artifacts ({\",\".join(ARTIFACT_COLUMNS)}) ' 'values", "dataclasses import astuple, dataclass, fields from importlib import resources import os import sqlite3", "conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)} from artifacts') return [Artifact(*row) for row in cur.fetchall()] def insert_artifact(self,", "Artifact): with self.connect() as conn: cur = conn.cursor() cur.execute( (f'insert into artifacts ({\",\".join(ARTIFACT_COLUMNS)})", "lazy-create DB path {database}') self.database = database self._conn = None def build_schema(self): with", "path is required') if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed", "DuplicateArtifactError( 'Would create duplicate artifact at %s: %s', path, found[0][0]) elif not found:", "{updates} where path=?', astuple(artifact) + (path,)) def connect(self) -> sqlite3.Connection: if not self._conn:", 
"cur.execute('select id from artifacts where path = ?', (path,)) found = cur.fetchall() if", "OSError: LOG.exception(f'Failed to lazy-create DB path {database}') self.database = database self._conn = None", "conn: cur = conn.cursor() cur.execute('delete from artifacts') def list_artifacts(self): with self.connect() as conn:", "is not None: raise DuplicateArtifactError( 'Would create duplicate artifact at %s: %s', path,", "= database self._conn = None def build_schema(self): with resources.open_text(__package__, 'db_schema.sql') as f: with", "= cur.fetchall() if len(found) > 1: raise DuplicateArtifactError( 'Multiple artifacts already found at", "len(found) > 1: raise DuplicateArtifactError( 'Multiple artifacts already found at %s', path) elif", "logging.getLogger(__name__) DATABASE_NAME = 'chameleon' @dataclass class Artifact: id: str path: str deposition_repo: str", "= conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)} from artifacts') return [Artifact(*row) for row in cur.fetchall()] def", "cur.execute( (f'insert into artifacts ({\",\".join(ARTIFACT_COLUMNS)}) ' 'values (?, ?, ?, ?)'), astuple(artifact)) def", "astuple(artifact)) def update_artifact(self, artifact: Artifact): path = artifact.path with self.connect() as conn: cur", "for col in ARTIFACT_COLUMNS]) cur.execute(f'update artifacts set {updates} where path=?', astuple(artifact) + (path,))", "!= DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed to lazy-create DB path {database}')", "LOG = logging.getLogger(__name__) DATABASE_NAME = 'chameleon' @dataclass class Artifact: id: str path: str", "database=None): if not database: raise ValueError('A database path is required') if database !=", "IN_MEMORY = ':memory:' def __init__(self, database=None): if not database: raise ValueError('A database path", "self.connect() as conn: conn.executescript(f.read()) def reset(self): with self.connect() as conn: cur = conn.cursor()", "fields from 
importlib import resources import os import sqlite3 from .exception import ArtifactNotFoundError,", "path {database}') self.database = database self._conn = None def build_schema(self): with resources.open_text(__package__, 'db_schema.sql')", "artifacts already found at %s', path) elif found and found[0][0] is not None:", "path) elif found and found[0][0] is not None: raise DuplicateArtifactError( 'Would create duplicate", "'Multiple artifacts already found at %s', path) elif found and found[0][0] is not", "Artifact): path = artifact.path with self.connect() as conn: cur = conn.cursor() cur.execute('select id", "from dataclasses import astuple, dataclass, fields from importlib import resources import os import", "conn: cur = conn.cursor() cur.execute('select id from artifacts where path = ?', (path,))", "in fields(Artifact)] class DB: IN_MEMORY = ':memory:' def __init__(self, database=None): if not database:", "is required') if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed to", "as f: with self.connect() as conn: conn.executescript(f.read()) def reset(self): with self.connect() as conn:", "as conn: cur = conn.cursor() cur.execute( (f'insert into artifacts ({\",\".join(ARTIFACT_COLUMNS)}) ' 'values (?,", "raise DuplicateArtifactError( 'Would create duplicate artifact at %s: %s', path, found[0][0]) elif not", "as conn: cur = conn.cursor() cur.execute('delete from artifacts') def list_artifacts(self): with self.connect() as", "self.connect() as conn: cur = conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)} from artifacts') return [Artifact(*row) for", "path) updates = ','.join([f'{col}=?' 
for col in ARTIFACT_COLUMNS]) cur.execute(f'update artifacts set {updates} where", "str ARTIFACT_COLUMNS = [f.name for f in fields(Artifact)] class DB: IN_MEMORY = ':memory:'", "fields(Artifact)] class DB: IN_MEMORY = ':memory:' def __init__(self, database=None): if not database: raise", "__init__(self, database=None): if not database: raise ValueError('A database path is required') if database", "cur = conn.cursor() cur.execute( (f'insert into artifacts ({\",\".join(ARTIFACT_COLUMNS)}) ' 'values (?, ?, ?,", "None: raise DuplicateArtifactError( 'Would create duplicate artifact at %s: %s', path, found[0][0]) elif", "DB path {database}') self.database = database self._conn = None def build_schema(self): with resources.open_text(__package__,", "conn: cur = conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)} from artifacts') return [Artifact(*row) for row in", "artifact: Artifact): with self.connect() as conn: cur = conn.cursor() cur.execute( (f'insert into artifacts", "self.connect() as conn: cur = conn.cursor() cur.execute('select id from artifacts where path =", "conn.cursor() cur.execute('select id from artifacts where path = ?', (path,)) found = cur.fetchall()", "duplicate artifact at %s: %s', path, found[0][0]) elif not found: raise ArtifactNotFoundError( 'Cannot", "raise DuplicateArtifactError( 'Multiple artifacts already found at %s', path) elif found and found[0][0]", "+ (path,)) def connect(self) -> sqlite3.Connection: if not self._conn: self._conn = sqlite3.connect(self.database) return", "import ArtifactNotFoundError, DuplicateArtifactError import logging LOG = logging.getLogger(__name__) DATABASE_NAME = 'chameleon' @dataclass class", "found and found[0][0] is not None: raise DuplicateArtifactError( 'Would create duplicate artifact at", "artifact at %s', path) updates = ','.join([f'{col}=?' 
for col in ARTIFACT_COLUMNS]) cur.execute(f'update artifacts", "None def build_schema(self): with resources.open_text(__package__, 'db_schema.sql') as f: with self.connect() as conn: conn.executescript(f.read())", "= conn.cursor() cur.execute('select id from artifacts where path = ?', (path,)) found =", "in cur.fetchall()] def insert_artifact(self, artifact: Artifact): with self.connect() as conn: cur = conn.cursor()", "with resources.open_text(__package__, 'db_schema.sql') as f: with self.connect() as conn: conn.executescript(f.read()) def reset(self): with", "cur = conn.cursor() cur.execute('delete from artifacts') def list_artifacts(self): with self.connect() as conn: cur", "database: raise ValueError('A database path is required') if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database),", "'values (?, ?, ?, ?)'), astuple(artifact)) def update_artifact(self, artifact: Artifact): path = artifact.path", "artifacts set {updates} where path=?', astuple(artifact) + (path,)) def connect(self) -> sqlite3.Connection: if", "if len(found) > 1: raise DuplicateArtifactError( 'Multiple artifacts already found at %s', path)", "list_artifacts(self): with self.connect() as conn: cur = conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)} from artifacts') return", "def list_artifacts(self): with self.connect() as conn: cur = conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)} from artifacts')", "class Artifact: id: str path: str deposition_repo: str ownership: str ARTIFACT_COLUMNS = [f.name", "ValueError('A database path is required') if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except", "col in ARTIFACT_COLUMNS]) cur.execute(f'update artifacts set {updates} where path=?', astuple(artifact) + (path,)) def", "DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed to lazy-create DB path {database}') self.database", "as conn: cur = 
conn.cursor() cur.execute(f'select {\",\".join(ARTIFACT_COLUMNS)} from artifacts') return [Artifact(*row) for row", "os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed to lazy-create DB path {database}') self.database = database", "%s: %s', path, found[0][0]) elif not found: raise ArtifactNotFoundError( 'Cannot find artifact at", "with self.connect() as conn: cur = conn.cursor() cur.execute( (f'insert into artifacts ({\",\".join(ARTIFACT_COLUMNS)}) '", "if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed to lazy-create DB", "ArtifactNotFoundError, DuplicateArtifactError import logging LOG = logging.getLogger(__name__) DATABASE_NAME = 'chameleon' @dataclass class Artifact:", "def update_artifact(self, artifact: Artifact): path = artifact.path with self.connect() as conn: cur =", "deposition_repo: str ownership: str ARTIFACT_COLUMNS = [f.name for f in fields(Artifact)] class DB:", "cur.execute(f'update artifacts set {updates} where path=?', astuple(artifact) + (path,)) def connect(self) -> sqlite3.Connection:", "elif found and found[0][0] is not None: raise DuplicateArtifactError( 'Would create duplicate artifact", "DB: IN_MEMORY = ':memory:' def __init__(self, database=None): if not database: raise ValueError('A database", "updates = ','.join([f'{col}=?' 
for col in ARTIFACT_COLUMNS]) cur.execute(f'update artifacts set {updates} where path=?',", "insert_artifact(self, artifact: Artifact): with self.connect() as conn: cur = conn.cursor() cur.execute( (f'insert into", "build_schema(self): with resources.open_text(__package__, 'db_schema.sql') as f: with self.connect() as conn: conn.executescript(f.read()) def reset(self):", "self.connect() as conn: cur = conn.cursor() cur.execute('delete from artifacts') def list_artifacts(self): with self.connect()", "database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed to lazy-create DB path", "cur.fetchall() if len(found) > 1: raise DuplicateArtifactError( 'Multiple artifacts already found at %s',", "def __init__(self, database=None): if not database: raise ValueError('A database path is required') if", "cur.execute('delete from artifacts') def list_artifacts(self): with self.connect() as conn: cur = conn.cursor() cur.execute(f'select", "'db_schema.sql') as f: with self.connect() as conn: conn.executescript(f.read()) def reset(self): with self.connect() as", "?, ?)'), astuple(artifact)) def update_artifact(self, artifact: Artifact): path = artifact.path with self.connect() as", "path = artifact.path with self.connect() as conn: cur = conn.cursor() cur.execute('select id from", "found: raise ArtifactNotFoundError( 'Cannot find artifact at %s', path) updates = ','.join([f'{col}=?' 
from dataclasses import astuple, dataclass, fields
from importlib import resources
import logging
import os
import sqlite3

from .exception import ArtifactNotFoundError, DuplicateArtifactError

LOG = logging.getLogger(__name__)

DATABASE_NAME = 'chameleon'


@dataclass
class Artifact:
    """One row of the ``artifacts`` table."""
    id: str
    path: str
    deposition_repo: str
    ownership: str


# SQL column order mirrors the dataclass field order, which matches the
# value order produced by ``astuple(artifact)`` in INSERT/UPDATE below.
ARTIFACT_COLUMNS = [f.name for f in fields(Artifact)]


class DB:
    """Minimal sqlite3-backed store for :class:`Artifact` rows.

    The underlying connection is created lazily on first use and cached
    for the lifetime of this object (see :meth:`connect`).
    """

    # Special sqlite path that keeps the whole database in RAM.
    IN_MEMORY = ':memory:'

    def __init__(self, database=None):
        """
        :param database: filesystem path of the sqlite database file, or
            :attr:`IN_MEMORY`. Required.
        :raises ValueError: if no database path was given.
        """
        if not database:
            raise ValueError('A database path is required')
        if database != DB.IN_MEMORY:
            try:
                # Lazily create the parent directory. A failure here is
                # logged but not fatal -- sqlite3.connect will raise later
                # if the path is truly unusable.
                os.makedirs(os.path.dirname(database), exist_ok=True)
            except OSError:
                # Lazy %-style args so the message is only formatted when
                # the record is actually emitted.
                LOG.exception('Failed to lazy-create DB path %s', database)
        self.database = database
        self._conn = None

    def build_schema(self):
        """Create the schema by executing the packaged ``db_schema.sql``."""
        with resources.open_text(__package__, 'db_schema.sql') as f, \
                self.connect() as conn:
            conn.executescript(f.read())

    def reset(self):
        """Delete every row from the artifacts table."""
        with self.connect() as conn:
            conn.cursor().execute('delete from artifacts')

    def list_artifacts(self):
        """Return all stored rows as :class:`Artifact` instances."""
        with self.connect() as conn:
            cur = conn.cursor()
            cur.execute(f'select {",".join(ARTIFACT_COLUMNS)} from artifacts')
            return [Artifact(*row) for row in cur.fetchall()]

    def insert_artifact(self, artifact: Artifact):
        """Insert ``artifact`` as a new row."""
        # Derive the placeholder list from ARTIFACT_COLUMNS instead of
        # hard-coding "?, ?, ?, ?" so adding a field to Artifact cannot
        # silently desynchronize the INSERT statement.
        placeholders = ','.join('?' * len(ARTIFACT_COLUMNS))
        with self.connect() as conn:
            conn.cursor().execute(
                (f'insert into artifacts ({",".join(ARTIFACT_COLUMNS)}) '
                 f'values ({placeholders})'),
                astuple(artifact))

    def update_artifact(self, artifact: Artifact):
        """Overwrite the row whose path matches ``artifact.path``.

        :raises DuplicateArtifactError: if several rows share the path,
            or the single matching row already carries a non-null id.
        :raises ArtifactNotFoundError: if no row has that path.
        """
        path = artifact.path
        with self.connect() as conn:
            cur = conn.cursor()
            cur.execute('select id from artifacts where path = ?', (path,))
            found = cur.fetchall()
            # NOTE: messages are interpolated eagerly here. The previous
            # code passed logging-style ('%s', arg) pairs, which exception
            # classes never format, so str(exc) showed the raw tuple.
            if len(found) > 1:
                raise DuplicateArtifactError(
                    f'Multiple artifacts already found at {path}')
            if not found:
                raise ArtifactNotFoundError(
                    f'Cannot find artifact at {path}')
            if found[0][0] is not None:
                raise DuplicateArtifactError(
                    f'Would create duplicate artifact at {path}: '
                    f'{found[0][0]}')
            updates = ','.join(f'{col}=?' for col in ARTIFACT_COLUMNS)
            cur.execute(f'update artifacts set {updates} where path=?',
                        astuple(artifact) + (path,))

    def connect(self) -> sqlite3.Connection:
        """Return the cached connection, opening it on first call."""
        if not self._conn:
            self._conn = sqlite3.connect(self.database)
        return self._conn