id
stringlengths
1
7
text
stringlengths
6
1.03M
dataset_id
stringclasses
1 value
1752576
<gh_stars>0 from .perceptron import * from .ilayer import *
StarcoderdataPython
1718199
import os
import glob
import shutil
import argparse

import cv2
from tqdm import tqdm


def parsePaths(path):
    """Derive the mask and real-image paths from a composite-image path.

    iHarmony4 composites are named ``<name>_<fg>_<variant>.jpg``: the mask
    drops the trailing ``_<variant>`` (and lives under ``masks/``), while the
    real image drops ``_<fg>_<variant>`` (and lives under ``real_images/``).

    Returns:
        (composite_path, mask_path, real_path) relative-path triple.
    """
    name_parts = path.split('_')
    mask_path = path.replace('composite_images', 'masks')
    mask_path = mask_path.replace(('_' + name_parts[-1]), '.png')
    target_path = path.replace('composite_images', 'real_images')
    target_path = target_path.replace(('_' + name_parts[-2] + '_' + name_parts[-1]), '.jpg')
    return path, mask_path, target_path


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir_iharmony4', type=str, required=True,
                        help='data directory to iHarmony4 dataset')
    parser.add_argument('--save_dir', type=str, default='none',
                        help='data directory to save the resized images. Default none')
    parser.add_argument('--image_size', type=int, default=512,
                        help='image size to save in the local device. Default 512')
    parser.add_argument('--keep_aspect_ratio', action='store_true',
                        help='if keep the height-width aspect ratio unchanged. Default False')
    args = parser.parse_args()

    # Load the train/test split lists (one relative composite path per line).
    with open(os.path.join(args.dir_iharmony4, 'IHD_train.txt'), 'r') as f:
        train_list = [item.strip() for item in f.readlines()]
    with open(os.path.join(args.dir_iharmony4, 'IHD_test.txt'), 'r') as f:
        test_list = [item.strip() for item in f.readlines()]

    sub_dataset = ['HAdobe5k', 'HCOCO', 'Hday2night', 'HFlickr']
    data_root = args.dir_iharmony4
    if args.save_dir == 'none':
        save_path = os.path.join(os.path.split(data_root)[0], 'iHarmony4Resized')
    else:
        save_path = args.save_dir
    os.makedirs(save_path, exist_ok=True)
    # BUGFIX: replaced `os.system('cp dir/IHD_* dest')`, which depends on a
    # POSIX shell and silently ignores failures, with an explicit copy loop.
    for split_file in glob.glob(os.path.join(args.dir_iharmony4, 'IHD_*')):
        shutil.copy(split_file, save_path)

    target_size = args.image_size
    keep_aspect_ratio = args.keep_aspect_ratio
    # Pre-create the output directory tree (parents included).
    for item in sub_dataset:
        i_save_path = os.path.join(save_path, item)
        os.makedirs(os.path.join(i_save_path, 'composite_images'), exist_ok=True)
        os.makedirs(os.path.join(i_save_path, 'masks'), exist_ok=True)
        os.makedirs(os.path.join(i_save_path, 'real_images'), exist_ok=True)

    # Start to read and rewrite the images.
    for running_list in [train_list, test_list]:
        for item in tqdm(running_list):
            comp_path, mask_path, real_path = parsePaths(item)
            save_comp_path = os.path.join(save_path, comp_path)  # target path of the composite image
            save_mask_path = os.path.join(save_path, mask_path)  # target path of the mask image
            save_real_path = os.path.join(save_path, real_path)  # target path of the real image
            # If the image was saved by a previous run, skip it.
            if os.path.exists(save_comp_path):
                continue
            comp = cv2.imread(os.path.join(data_root, comp_path))
            size = comp.shape[:2]
            if keep_aspect_ratio:
                # Scale so the shorter side becomes target_size, keeping the ratio.
                scale = target_size / min(size)
                new_size = (int(scale * size[0]), int(scale * size[1]))
            else:
                new_size = (target_size, target_size)
            # BUGFIX: cv2.resize's third positional parameter is `dst`, not the
            # interpolation mode — the original passed cv2.INTER_CUBIC there.
            # The interpolation flag must be given by keyword.
            comp = cv2.resize(comp, (new_size[1], new_size[0]), interpolation=cv2.INTER_CUBIC)
            mask = cv2.imread(os.path.join(data_root, mask_path))
            mask = cv2.resize(mask, (new_size[1], new_size[0]), interpolation=cv2.INTER_NEAREST)
            real = cv2.imread(os.path.join(data_root, real_path))
            real = cv2.resize(real, (new_size[1], new_size[0]), interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(save_comp_path, comp)
            cv2.imwrite(save_mask_path, mask)
            cv2.imwrite(save_real_path, real)
StarcoderdataPython
1714954
<gh_stars>0
import random
import sys


class Node:
    # NOTE(review): this class defines __init__ twice; the second definition
    # silently replaces the first, so Node(value) — as called by
    # NodeMgmt.insert — raises TypeError. Only the 3-argument form (used by
    # tree_1991/tree_2250) is actually live.
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def __init__(self, value, left, right):
        # parent == -1 marks "no parent yet"; tree_2250 uses it to find the root.
        self.parent = -1
        self.data = value
        self.left_node = left
        self.right_node = right


class NodeMgmt:
    """Binary-search-tree management: insert / search / delete from a root node."""

    def __init__(self, head):
        self.head = head

    def insert(self, value):
        # Walk down from the root until a free child slot is found,
        # going left for smaller values and right otherwise.
        self.current_node = self.head
        while True:
            if value < self.current_node.value:
                if self.current_node.left is not None:
                    self.current_node = self.current_node.left
                else:
                    self.current_node.left = Node(value)
                    break
            else:
                if self.current_node.right is not None:
                    self.current_node = self.current_node.right
                else:
                    self.current_node.right = Node(value)
                    break

    def search(self, value):
        # Standard iterative BST lookup; True iff value is present.
        self.current_node = self.head
        while self.current_node:
            if self.current_node.value == value:
                return True
            elif value < self.current_node.value:
                self.current_node = self.current_node.left
            else:
                self.current_node = self.current_node.right
        return False

    def delete(self, value):
        """Remove `value` from the tree; returns False if it is absent."""
        # Phase 1: locate the node to delete and remember its parent.
        searched = False
        self.current_node = self.head
        self.parent = self.head
        while self.current_node:
            if self.current_node.value == value:
                searched = True
                break
            elif value < self.current_node.value:
                self.parent = self.current_node
                self.current_node = self.current_node.left
            else:
                self.parent = self.current_node
                self.current_node = self.current_node.right
        if not searched:
            return False
        # Case 1: leaf node — just unlink it from its parent.
        if self.current_node.left is None and self.current_node.right is None:
            if value < self.parent.value:
                self.parent.left = None
            else:
                self.parent.right = None
            del self.current_node
        # Case 2: only a left child — splice the child into the parent's slot.
        elif self.current_node.left is not None and self.current_node.right is None:
            if value < self.parent.value:
                self.parent.left = self.current_node.left
            else:
                self.parent.right = self.current_node.left
        # Case 3: only a right child — same splice, mirrored.
        elif self.current_node.left is None and self.current_node.right is not None:
            if value < self.parent.value:
                self.parent.left = self.current_node.right
            else:
                self.parent.right = self.current_node.right
        # Case 4: two children — replace with the leftmost node (in-order
        # successor) of the right subtree.
        elif self.current_node.left is not None and self.current_node.right is not None:
            if value < self.parent.value:
                self.change_node = self.current_node.right
                self.change_node_parent = self.current_node.right
                while self.change_node.left is not None:
                    self.change_node_parent = self.change_node
                    self.change_node = self.change_node.left
                # Detach the successor, preserving its right subtree.
                if self.change_node.right is not None:
                    self.change_node_parent.left = self.change_node.right
                else:
                    self.change_node_parent.left = None
                self.parent.left = self.change_node
                self.change_node.right = self.current_node.right
                # NOTE(review): self-assignment — presumably this was meant to
                # be `self.change_node.left = self.current_node.left` (as in
                # the mirrored branch below); verify before relying on delete().
                self.change_node.left = self.change_node.left
            else:
                self.change_node = self.current_node.right
                self.change_node_parent = self.current_node.right
                while self.change_node.left is not None:
                    self.change_node_parent = self.change_node
                    self.change_node = self.change_node.left
                if self.change_node.right is not None:
                    self.change_node_parent.left = self.change_node.right
                else:
                    self.change_node_parent.left = None
                self.parent.right = self.change_node
                self.change_node.right = self.current_node.right
                self.change_node.left = self.current_node.left
        return True


# Shared state for the BOJ-style tree problems below.
tree = {}            # node id -> Node
level_min = []       # per-level smallest in-order column (tree_2250)
level_max = [0]      # per-level largest in-order column (tree_2250)
x = 1                # running in-order column counter
level_depth = 1      # deepest level seen by in_order(node, level)


def pre_order(node):
    # Root, then left subtree, then right; '.' marks a missing child.
    sys.stdout.write(f"{node.data}")
    if node.left_node != '.':
        pre_order(tree[node.left_node])
    if node.right_node != '.':
        pre_order(tree[node.right_node])


def in_order(node):
    # NOTE(review): shadowed by the 2-argument in_order defined right below,
    # so tree_1991's in_order(tree['A']) call raises TypeError at runtime.
    if node.left_node != '.':
        in_order(tree[node.left_node])
    sys.stdout.write(f"{node.data}")
    if node.right_node != '.':
        in_order(tree[node.right_node])


def in_order(node, level):
    """In-order walk recording, per level, the min/max column index (problem 2250)."""
    global level_depth, x
    level_depth = max(level_depth, level)
    if node.left_node != -1:
        in_order(tree[node.left_node], level + 1)
    level_min[level] = min(level_min[level], x)
    level_max[level] = max(level_max[level], x)
    x += 1
    if node.right_node != -1:
        in_order(tree[node.right_node], level + 1)


def post_order(node):
    # Left subtree, right subtree, then root.
    if node.left_node != '.':
        post_order(tree[node.left_node])
    if node.right_node != '.':
        post_order(tree[node.right_node])
    sys.stdout.write(f"{node.data}")


def tree_1991():
    """BOJ 1991: read a lettered tree and print pre/in/post-order traversals."""
    n = int(sys.stdin.readline())
    for i in range(n):
        data, left_node, right_node = sys.stdin.readline().split()
        tree[data] = Node(data, left_node, right_node)
    pre_order(tree['A'])
    sys.stdout.write(f"\n")
    in_order(tree['A'])
    sys.stdout.write(f"\n")
    post_order(tree['A'])


def tree_2250():
    """BOJ 2250: print the level with the widest in-order span and its width."""
    n = int(sys.stdin.readline())
    root = -1
    level_min.append(n)
    for i in range(1, n + 1):
        tree[i] = Node(i, -1, -1)
        level_min.append(n)
        level_max.append(0)
    for _ in range(n):
        number, left_node, right_node = map(int, sys.stdin.readline().split())
        tree[number].left_node = left_node
        tree[number].right_node = right_node
        if left_node != -1:
            tree[left_node].parent = number
        if right_node != -1:
            tree[right_node].parent = number
    # The root is the only node that never got a parent assigned.
    for i in range(1, n + 1):
        if tree[i].parent == -1:
            root = i
    in_order(tree[root], 1)
    result_level = 1
    result_width = level_max[1] - level_min[1] + 1
    for i in range(2, level_depth + 1):
        width = level_max[i] - level_min[i] + 1
        if result_width < width:
            result_level = i
            result_width = width
    sys.stdout.write(f"{result_level} {result_width}")


if __name__ == "__main__":
    # bst_nums = set()
    # for num in range(100):
    #     bst_nums.add(num)
    # head = Node(50)
    # binary_tree = NodeMgmt(head)
    # for num in bst_nums:
    #     binary_tree.insert(num)
    # for num in bst_nums:
    #     if not binary_tree.search(num):
    #         print('search failed', num)
    # delete_nums = set()
    # bst_nums = list(bst_nums)
    # while len(delete_nums) != 10:
    #     delete_nums.add(bst_nums[random.randint(0, 99)])
    # for before_delete_num in delete_nums:
    #     print(f"{before_delete_num} in tree : {binary_tree.search(before_delete_num)}")
    # for del_num in delete_nums:
    #     if not binary_tree.delete(del_num):
    #         print('delete failed', del_num)
    #     print(f"{del_num} in tree : {binary_tree.search(del_num)}")
    # tree_1991()
    tree_2250()
StarcoderdataPython
3242486
<reponame>linusbrogan/edurange-flask<filename>tests/test_functional.py
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.

See: http://webtest.readthedocs.org/
"""
from flask import url_for

from edurange_refactored.user.models import StudentGroups, User, GroupUsers

# NOTE(review): GroupFactory is imported but unused in this module.
from .factories import GroupFactory, UserFactory


class TestLoggingIn:
    """Login."""

    def test_can_log_in_returns_200(self, user, testapp):
        """Login successful."""
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form in navbar
        form = res.forms["loginForm"]
        form["username"] = user.username
        form["password"] = "<PASSWORD>"
        # Submits
        res = form.submit().follow()
        assert res.status_code == 200

    def test_sees_alert_on_log_out(self, user, testapp):
        """Show alert on logout."""
        res = testapp.get("/")
        # Fills out login form in navbar
        form = res.forms["loginForm"]
        form["username"] = user.username
        form["password"] = "<PASSWORD>"
        # Submits
        res = form.submit().follow()
        res = testapp.get(url_for("public.logout")).follow()
        # sees alert
        assert "You are logged out." in res

    def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
        """Show error if password is incorrect."""
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form, password incorrect
        form = res.forms["loginForm"]
        form["username"] = user.username
        form["password"] = "<PASSWORD>"
        # Submits
        res = form.submit()
        # sees error
        assert "Invalid password" in res

    def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
        """Show error if username doesn't exist."""
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form, password incorrect
        form = res.forms["loginForm"]
        form["username"] = "unknown"
        form["password"] = "<PASSWORD>"
        # Submits
        res = form.submit()
        # sees error
        assert "Unknown user" in res


class TestRegistering:
    """Register a user."""

    def test_can_register(self, user, testapp):
        """Register a new user."""
        # Goes to homepage
        res = testapp.get("/")
        # Check Number of Users
        old_count = len(User.query.all())
        # Clicks Create Account button
        res = res.click("Create account")
        # Fills out the form
        form = res.forms["registerForm"]
        form["username"] = "foobar"
        form["email"] = "<EMAIL>"
        form["password"] = "<PASSWORD>"
        form["confirm"] = "<PASSWORD>"
        # Submits
        res = form.submit().follow()
        assert res.status_code == 200
        # A new user was created
        assert len(User.query.all()) == old_count + 1

    def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
        """Show error if passwords don't match."""
        # Goes to registration page
        res = testapp.get(url_for("public.register"))
        # Fills out form, but passwords don't match
        form = res.forms["registerForm"]
        form["username"] = "foobar"
        form["email"] = "<EMAIL>"
        form["password"] = "<PASSWORD>"
        form["confirm"] = "secrets"
        # Submits
        res = form.submit()
        # sees error message
        assert "Passwords must match" in res

    def test_sees_error_message_if_user_already_registered(self, user, testapp):
        """Show error if user already registered."""
        user = UserFactory(active=True)  # A registered user
        user.save()
        # Goes to registration page
        res = testapp.get(url_for("public.register"))
        # Fills out form, but username is already registered
        form = res.forms["registerForm"]
        form["username"] = user.username
        form["email"] = "<EMAIL>"
        form["password"] = "<PASSWORD>"
        form["confirm"] = "secret"
        # Submits
        res = form.submit()
        # sees error
        assert "Username already registered" in res


class TestGroupManagement:
    # Covers both the plain form POST and the AJAX path of group creation.

    def test_can_create_group(self, admin, testapp):
        """Can create a group"""
        # Test group creation without ajax
        res = testapp.get(url_for("dashboard.admin"))
        old_count = len(StudentGroups.query.all())
        form = res.forms["createGroup"]
        form["name"] = "Test Group 1"
        form["size"] = 0
        res = form.submit(name="create")
        assert res.status_code == 200
        assert len(StudentGroups.query.all()) == old_count + 1
        assert "*Save these pairs" not in res
        # Test group creation with ajax
        old_count = len(StudentGroups.query.all())
        data = {"name": "Test Group 2", "create": "Create", "size": 0}  # data packaged as it is in javascript on admin page
        headers = [('X-Requested-With', 'XMLHttpRequest')]  # headers for ajax request
        res = testapp.post(url_for("dashboard.admin"), data, headers)
        assert res.status_code == 200
        assert len(StudentGroups.query.all()) == old_count + 1
        assert "*Save these pairs" not in res

    def test_can_generate_group(self, admin, testapp):
        """Can generate group of x temporary accounts"""
        # Test group generation without ajax
        res = testapp.get(url_for("dashboard.admin"))
        old_count = len(StudentGroups.query.all())
        form = res.forms["createGroup"]
        form["name"] = "Test Group 1"
        form[None] = True  # check box for group generation
        form["size"] = 5  # generate group of size 5
        res = form.submit(name="create")
        assert res.status_code == 200
        assert len(StudentGroups.query.all()) == old_count + 1
        gid = StudentGroups.query.filter_by(name='Test Group 1').first()
        gid = gid.id
        assert len(GroupUsers.query.filter_by(group_id=gid).all()) == 5
        assert "*Save these pairs" in res
        # Test group generation with ajax
        old_count = len(StudentGroups.query.all())
        data = {"name": "Test Group 2", "create": "Create", "size": 10}
        headers = [('X-Requested-With', 'XMLHttpRequest')]
        res = testapp.post(url_for("dashboard.admin"), data, headers)
        assert res.status_code == 200
        assert len(StudentGroups.query.all()) == old_count + 1
        gid = StudentGroups.query.filter_by(name='Test Group 2').first()
        gid = gid.id
        assert len(GroupUsers.query.filter_by(group_id=gid).all()) == 10
        assert "*Save these pairs" in res
StarcoderdataPython
1625926
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from ..model_builder import ModelBuilder


def mobilenet(args):
    """MobileNet-like network built through ModelBuilder.

    One stem 3x3 conv cell, one expansion-1 inverted-residual cell, sixteen
    inverted-residual cells, and a final 1x1 conv cell — each cell holding a
    single block fed by the previous cell's output.
    """
    filt_list = [32, 16, 24, 24, 32, 32, 32, 64, 64, 64, 64,
                 96, 96, 96, 160, 160, 160, 320, 1280]
    strides_list = [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1]
    n_blocks = 1
    n_blocks_per_block = 1

    # Operation per cell, in order; the settings list is generated from it
    # instead of being written out literally.
    op_sequence = ['3xconv', '3xinvmobilex1'] + ['3xinvmobile'] * 16 + ['1xconv']
    cells_settings = []
    for idx, op in enumerate(op_sequence):
        feed = 'model_input' if idx == 0 else 'cell_%d_out' % (idx - 1)
        cells_settings.append({'blocks': [{'ID': '0', 'in': [feed], 'ops': [op]}]})

    model_helper = ModelBuilder(cells_settings=cells_settings,
                                filters_list=filt_list,
                                strides_list=strides_list,
                                settings=args,
                                n_blocks=n_blocks,
                                n_blocks_per_block=n_blocks_per_block)
    return model_helper.get_model()
def vgg(args):
    """VGG-like network: alternating 3x3 conv / max-pool cells, then two 1x1 convs."""
    filt_list = [64, 64, 128, 128, 256, 256, 512, 512, 1024, 1024]
    strides_list = [1, 2, 1, 2, 1, 2, 1, 2, 1, 1]
    n_blocks = 1
    n_blocks_per_block = 1

    # Four conv/pool pairs followed by two 1x1 conv cells; each cell is a
    # single block chained to the previous cell's output.
    op_sequence = ['3xconv', '3xmax'] * 4 + ['1xconv', '1xconv']
    cells_settings = []
    for idx, op in enumerate(op_sequence):
        feed = 'model_input' if idx == 0 else 'cell_%d_out' % (idx - 1)
        cells_settings.append({'blocks': [{'ID': '0', 'in': [feed], 'ops': [op]}]})

    model_helper = ModelBuilder(cells_settings=cells_settings,
                                filters_list=filt_list,
                                strides_list=strides_list,
                                settings=args,
                                n_blocks=n_blocks,
                                n_blocks_per_block=n_blocks_per_block)
    return model_helper.get_model()


def vgg_fashion(args):
    """VGG-like network for Fashion-MNIST: ten 3x3 conv cells, then two 1x1 convs."""
    filt_list = [48, 48, 48, 96, 96, 96, 96, 96, 96, 192, 192, 192]
    strides_list = [1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1]
    n_blocks = 1
    n_blocks_per_block = 1

    op_sequence = ['3xconv'] * 10 + ['1xconv'] * 2
    cells_settings = []
    for idx, op in enumerate(op_sequence):
        feed = 'model_input' if idx == 0 else 'cell_%d_out' % (idx - 1)
        cells_settings.append({'blocks': [{'ID': '0', 'in': [feed], 'ops': [op]}]})

    model_helper = ModelBuilder(cells_settings=cells_settings,
                                filters_list=filt_list,
                                strides_list=strides_list,
                                settings=args,
                                n_blocks=n_blocks,
                                n_blocks_per_block=n_blocks_per_block)
    return model_helper.get_model()


def resnet_fashion(args):
    """ResNet-like network: every cell pairs a conv block with an identity skip."""
    filt_list = [64, 64, 128, 128, 256, 256]
    strides_list = [1, 1, 2, 1, 2, 1]
    n_blocks = 2
    n_blocks_per_block = 1

    # Five 3x3 conv cells and a closing 1x1 conv cell; each cell carries a
    # second, parallel identity block reading the same input (the skip path).
    op_sequence = ['3xconv'] * 5 + ['1xconv']
    cells_settings = []
    for idx, op in enumerate(op_sequence):
        feed = 'model_input' if idx == 0 else 'cell_%d_out' % (idx - 1)
        cells_settings.append({'blocks': [
            {'ID': '0', 'in': [feed], 'ops': [op]},
            {'ID': '1', 'in': [feed], 'ops': ['identity']},
        ]})

    model_helper = ModelBuilder(cells_settings=cells_settings,
                                filters_list=filt_list,
                                strides_list=strides_list,
                                settings=args,
                                n_blocks=n_blocks,
                                n_blocks_per_block=n_blocks_per_block)
    return model_helper.get_model()
StarcoderdataPython
3376768
#!/usr/bin/env python
"""Mark every subproblem's OTT id as constrained/uncontested in an annotations JSON.

Usage: script subproblem_ids_file in_annotations_file out_annotations_file
"""
import codecs
import json
import os
import sys

from peyotl import read_as_json

try:
    subproblem_ids_file, in_annotations_file, out_annotations_file = sys.argv[1:]
except ValueError:
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a wrong argument count should trigger this.
    sys.exit('Expecting 3 arguments:\n subproblem_ids_file, in_annotations_file, out_annotations_file')

# document_outputs lives next to this script, not on sys.path.
bin_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.append(os.path.join(bin_dir))
from document_outputs import stripped_nonempty_lines

# Each non-empty line names a subproblem tree file; strip the '.tre' suffix
# to recover the OTT id.
subproblems = []
for s in stripped_nonempty_lines(subproblem_ids_file):
    assert s.endswith('.tre')
    subproblems.append(s[:-4])

jsonblob = read_as_json(in_annotations_file)
nodes_dict = jsonblob['nodes']
for ott_id in subproblems:
    # Create the node entry if absent, then flag it.
    d = nodes_dict.setdefault(ott_id, {})
    d['was_constrained'] = True
    d['was_uncontested'] = True

with codecs.open(out_annotations_file, 'w', encoding='utf-8') as out_stream:
    json.dump(jsonblob, out_stream, indent=2, sort_keys=True, separators=(',', ': '))
StarcoderdataPython
195628
import collectd
from CinderMetrics import CinderMetrics
from NeutronMetrics import NeutronMetrics
from NovaMetrics import NovaMetrics


def config_callback(conf):
    """Receive configuration block"""
    # Defaults; overridden by the collectd <Module> configuration below.
    project_name = "demo"
    project_domainid = "default"
    user_domainid = "default"
    region_name = None
    interval = 10
    testing = False  # NOTE(review): never set to True anywhere visible here.
    ssl_verify = True
    OPENSTACK_CLIENT = {}
    plugin_conf = {}
    custom_dimensions = {}
    required_keys = frozenset(("authurl", "username", "password"))
    # Walk the collectd config nodes; keys are matched case-insensitively.
    for node in conf.children:
        try:
            if node.key.lower() in required_keys:
                plugin_conf[node.key.lower()] = node.values[0]
            elif node.key.lower() == "projectname":
                project_name = node.values[0]
            elif node.key.lower() == "projectdomainid":
                project_domainid = node.values[0]
            elif node.key.lower() == "userdomainid":
                user_domainid = node.values[0]
            elif node.key.lower() == "regionname":
                if node.values[0]:
                    region_name = node.values[0]
            elif node.key.lower() == "dimension":
                # A dimension must be a key/value pair.
                if len(node.values) == 2:
                    custom_dimensions.update({node.values[0]: node.values[1]})
                else:
                    collectd.warning("WARNING: Check configuration setting for %s" % node.key)
            elif node.key.lower() == "interval":
                interval = node.values[0]
            elif node.key.lower() == "sslverify":
                ssl_verify = node.values[0]
        except Exception as e:
            collectd.error("Failed to load the configuration {0} due to {1}".format(node.key, e))
            raise e
    # Fail fast if any mandatory setting is missing.
    for key in required_keys:
        try:
            plugin_conf[key]
        except KeyError:
            raise KeyError("Missing required config setting: %s" % key)
    if testing:
        # Used by unit tests: return the parsed config instead of registering.
        return plugin_conf
    try:
        novametrics = NovaMetrics(
            plugin_conf["authurl"],
            plugin_conf["username"],
            plugin_conf["password"],
            project_name,
            project_domainid,
            user_domainid,
            region_name,
            ssl_verify
        )
        OPENSTACK_CLIENT["nova"] = novametrics
        cindermetrics = CinderMetrics(
            plugin_conf["authurl"],
            plugin_conf["username"],
            plugin_conf["password"],
            project_name,
            project_domainid,
            user_domainid,
            region_name,
            ssl_verify
        )
        OPENSTACK_CLIENT["cinder"] = cindermetrics
        neutronmetrics = NeutronMetrics(
            plugin_conf["authurl"],
            plugin_conf["username"],
            plugin_conf["password"],
            project_name,
            project_domainid,
            user_domainid,
            region_name,
            ssl_verify
        )
        OPENSTACK_CLIENT["neutron"] = neutronmetrics
        OPENSTACK_CLIENT["custdims"] = custom_dimensions
    except Exception as e:
        collectd.error("Failed to authenticate Openstack client due to {0}".format(e))
    # Register the read callback with the clients as its payload.
    collectd.register_read(read_callback, interval, data=OPENSTACK_CLIENT, name=project_name)


def read_callback(data):
    """Collect all OpenStack metrics and dispatch them to collectd."""
    try:
        hypervisorMetrics = data["nova"].collect_hypervisor_metrics()
        serverMetrics = data["nova"].collect_server_metrics()
        limitMetrics = data["nova"].collect_limit_metrics()
        blockStorageMetrics = data["cinder"].collect_cinder_metrics()
        networkMetrics = data["neutron"].collect_neutron_metrics()
        # Per-server metrics whose 4th dotted component marks a counter type.
        serverCounterMetrics = ["cpu0_time", "cpu1_time", "rx", "rx_packets", "tx", "tx_packets"]
        for hypervisor in hypervisorMetrics:
            metrics, dims, props = hypervisorMetrics[hypervisor]
            for (metric, value) in metrics:
                dispatch_values(metric, value, dims, props, data["custdims"])
        for server in serverMetrics:
            metrics, dims, props = serverMetrics[server]
            for (metric, value) in metrics:
                if metric.split(".")[3] in serverCounterMetrics:
                    dispatch_values(metric, value, dims, props, data["custdims"], "counter")
                else:
                    dispatch_values(metric, value, dims, props, data["custdims"])
        for limit in limitMetrics:
            metrics, dims, props = limitMetrics[limit]
            for (metric, value) in metrics:
                dispatch_values(metric, value, dims, props, data["custdims"])
        for storage in blockStorageMetrics:
            metrics, dims, props = blockStorageMetrics[storage]
            for (metric, value) in metrics:
                dispatch_values(metric, value, dims, props, data["custdims"])
        for network in networkMetrics:
            metrics, dims, props = networkMetrics[network]
            for (metric, value) in metrics:
                dispatch_values(metric, value, dims, props, data["custdims"])
    except Exception as e:
        collectd.error("Failed to fetch Openstack metrics due to {0}".format(e))


def prepare_dims(dims, custdims):
    """Merge custom dimensions into `dims` (mutates and returns `dims`)."""
    if bool(custdims) is False:
        return dims
    for (key, val) in custdims.items():
        dims[key] = val
    return dims


def _formatDimsForSignalFx(dims):
    # Render dimensions as "[k1=v1,k2=v2]"; empty string when there are none.
    formatted = ",".join(["{0}={1}".format(d, dims[d]) for d in dims])
    return "[{0}]".format(formatted) if formatted != "" else ""


def dispatch_values(metric, value, dims, props, custdims, metric_type="gauge"):
    """Emit one metric value to collectd with SignalFx-style dimension encoding."""
    dims = prepare_dims(dims, custdims)
    val = collectd.Values(type=metric_type)
    val.type_instance = "{0}{1}".format(metric, _formatDimsForSignalFx(dims))
    val.plugin = "openstack"
    val.plugin_instance = _formatDimsForSignalFx(props)
    val.values = [float(value)]
    val.dispatch()


if __name__ == "__main__":
    # run standalone
    pass
else:
    collectd.register_config(config_callback)
StarcoderdataPython
4811904
<filename>custom_components/nicehash_excavator/excavator.py
"""Nicehash Excavator API"""
from __future__ import annotations

import logging

import aiohttp
from aiohttp.client_reqrep import ClientResponse

from .data_containers import Algorithm, GraphicsCard, RigInfo, Worker

_LOGGER = logging.getLogger(__name__)


class ExcavatorAPI:
    """Excavator API Implementation."""

    def __init__(
        self, host_address: str, host_port: int, enable_debug_logging: bool = False
    ) -> None:
        """Init ExcavatorAPI."""
        self.host_address = self.format_host_address(host_address)
        self._host_port = host_port
        self._enable_debug_logging = enable_debug_logging

    async def request(self, query: str) -> ClientResponse | None:
        """Excavator API Request.

        NOTE(review): despite the annotation, on success this returns the
        parsed JSON body (``await response.json()``), not a ClientResponse;
        on any failure it logs (if enabled) and returns None.
        """
        url = f"{self.host_address}:{self._host_port}/api?command={query}"
        if self._enable_debug_logging:
            _LOGGER.info("GET %s", url)
        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(url) as response:
                    if response.status == 200:
                        return await response.json()
                    # Non-200: raise with as much detail as is available so the
                    # except-branch below logs it uniformly.
                    if response.content:
                        raise Exception(
                            str(response.status)
                            + ": "
                            + response.reason
                            + ": "
                            + str(await response.text())
                        )
                    raise Exception(str(response.status) + ": " + response.reason)
            except Exception:
                if self._enable_debug_logging:
                    _LOGGER.warning("Error while getting data from %s", url)
                return None

    async def test_connection(self) -> bool:
        """Test connectivity"""
        query = '{"id":1,"method":"info","params":[]}'
        response = await self.request(query)
        if response is not None:
            return True
        return False

    async def get_rig_info(self) -> RigInfo:
        """Get Rig Information"""
        query = '{"id":1,"method":"info","params":[]}'
        response = await self.request(query)
        if response is not None:
            return RigInfo(response)
        return None

    async def get_devices(self) -> dict[int, GraphicsCard]:
        """Get the devices"""
        query = '{"id":1,"method":"devices.get","params":[]}'
        response = await self.request(query)
        if response is not None:
            # Key the cards by their device id for O(1) lookup by callers.
            devices = {}
            for device_data in response.get("devices"):
                card = GraphicsCard(device_data)
                devices[card.id] = card
            return devices
        return {}

    async def get_algorithms(self) -> dict[int, Algorithm]:
        """Get the Algorithms"""
        query = '{"id":1,"method":"algorithm.list","params":[]}'
        response = await self.request(query)
        if response is not None:
            algorithms = {}
            for algorithm_data in response.get("algorithms"):
                algorithm = Algorithm(algorithm_data)
                algorithms[algorithm.id] = algorithm
            return algorithms
        return {}

    async def get_workers(self) -> dict[int, Worker]:
        """Get the workers"""
        query = '{"id":1,"method":"worker.list","params":[]}'
        response = await self.request(query)
        if response is not None:
            workers = {}
            for worker_data in response.get("workers"):
                worker = Worker(worker_data)
                workers[worker.id] = worker
            return workers
        return {}

    @staticmethod
    def format_host_address(host_address: str) -> str:
        """Add http if missing"""
        if not host_address.startswith("http://") and not host_address.startswith(
            "https://"
        ):
            host_address = "http://" + host_address
        return host_address
StarcoderdataPython
191628
class AppNotFoundError(Exception):
    """Raised when a requested application cannot be located."""


class ClassNotFoundError(Exception):
    """Raised when a requested class cannot be located."""
StarcoderdataPython
1745804
<filename>data_utils/augmentor/spec_augment.py
import random

import numpy as np
from PIL import Image
from PIL.Image import BICUBIC

from ppasr.data_utils.augmentor.base import AugmentorBase


class SpecAugmentor(AugmentorBase):
    """Augmentation model for Time warping, Frequency masking, Time masking.

    SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition
    https://arxiv.org/abs/1904.08779

    SpecAugment on Large Scale Datasets
    https://arxiv.org/abs/1912.05533
    """

    def __init__(self,
                 rng,
                 F,
                 T,
                 n_freq_masks,
                 n_time_masks,
                 p=1.0,
                 W=40,
                 adaptive_number_ratio=0,
                 adaptive_size_ratio=0,
                 max_n_time_masks=20,
                 replace_with_zero=True):
        """SpecAugment class.

        Args:
        :param F: frequency-mask width parameter
        :type F: int
        :param T: time-mask width parameter
        :type T: int
        :param n_freq_masks: number of frequency masks
        :type n_freq_masks: int
        :param n_time_masks: number of time masks
        :type n_time_masks: int
        :param p: upper-bound ratio of a time mask relative to the utterance length
        :type p: float
        :param W: time-warp window parameter
        :type W: int
        :param adaptive_number_ratio: adaptive multiplicity ratio for time masking
        :type adaptive_number_ratio: float
        :param adaptive_size_ratio: adaptive size ratio for time masking
        :type adaptive_size_ratio: float
        :param max_n_time_masks: maximum number of time masks
        :type max_n_time_masks: int
        :param replace_with_zero: if True, fill masked regions with zeros;
            otherwise fill with the spectrogram mean
        :type replace_with_zero: bool
        """
        super().__init__()
        self._rng = rng
        self.inplace = True
        self.replace_with_zero = replace_with_zero

        self.W = W
        self.F = F
        self.T = T
        self.n_freq_masks = n_freq_masks
        self.n_time_masks = n_time_masks
        self.p = p

        # adaptive SpecAugment
        self.adaptive_number_ratio = adaptive_number_ratio
        self.adaptive_size_ratio = adaptive_size_ratio
        self.max_n_time_masks = max_n_time_masks

        # Adaptive modes override the corresponding fixed parameters.
        if adaptive_number_ratio > 0:
            self.n_time_masks = 0
        if adaptive_size_ratio > 0:
            self.T = 0

        self._freq_mask = None
        self._time_mask = None

    @property
    def freq_mask(self):
        # Last applied frequency mask as an (f_start, f_end) pair, or None.
        return self._freq_mask

    @property
    def time_mask(self):
        # Last applied time mask as a (t_start, t_end) pair, or None.
        return self._time_mask

    def __repr__(self):
        return f"specaug: F-{self.F}, T-{self.T}, F-n-{self.n_freq_masks}, T-n-{self.n_time_masks}"

    def time_warp(self, x, mode='PIL'):
        """time warp for spec augment
        move random center frame by the random width ~ uniform(-window, window)

        Args:
            x (np.ndarray): spectrogram (time, freq)
            mode (str): PIL or sparse_image_warp

        Raises:
            NotImplementedError: [description]
            NotImplementedError: [description]

        Returns:
            np.ndarray: time warped spectrogram (time, freq)
        """
        window = self.W
        if window == 0:
            return x
        t = x.shape[0]
        # Too short to pick a center strictly inside the warp window: skip.
        if t - window <= window:
            return x
        # NOTE: randrange(a, b) emits a, a + 1, ..., b - 1
        center = random.randrange(window, t - window)
        warped = random.randrange(center - window, center + window) + 1  # 1 ... t - 1
        # Stretch the [0, center) segment to `warped` frames and squeeze the
        # remainder, using PIL bicubic resampling on each half.
        left = Image.fromarray(x[:center]).resize((x.shape[1], warped), BICUBIC)
        right = Image.fromarray(x[center:]).resize((x.shape[1], t - warped), BICUBIC)
        if self.inplace:
            x[:warped] = left
            x[warped:] = right
            return x
        return np.concatenate((left, right), 0)

    def mask_freq(self, x, replace_with_zero=False):
        """freq mask

        Args:
            x (np.ndarray): spectrogram (time, freq)
            replace_with_zero (bool, optional): Defaults to False.

        Returns:
            np.ndarray: freq mask spectrogram (time, freq)
        """
        n_bins = x.shape[1]
        for i in range(0, self.n_freq_masks):
            # Draw mask width f in [0, F) and start bin f_0 in [0, n_bins - f).
            f = int(self._rng.uniform(a=0, b=self.F))
            f_0 = int(self._rng.uniform(a=0, b=n_bins - f))
            assert f_0 <= f_0 + f
            if replace_with_zero:
                x[:, f_0:f_0 + f] = 0
            else:
                x[:, f_0:f_0 + f] = x.mean()
            self._freq_mask = (f_0, f_0 + f)
        return x

    def mask_time(self, x, replace_with_zero=False):
        """time mask

        Args:
            x (np.ndarray): spectrogram (time, freq)
            replace_with_zero (bool, optional): Defaults to False.

        Returns:
            np.ndarray: time mask spectrogram (time, freq)
        """
        n_frames = x.shape[0]

        # Adaptive variant scales mask count/size with utterance length.
        if self.adaptive_number_ratio > 0:
            n_masks = int(n_frames * self.adaptive_number_ratio)
            n_masks = min(n_masks, self.max_n_time_masks)
        else:
            n_masks = self.n_time_masks

        if self.adaptive_size_ratio > 0:
            T = self.adaptive_size_ratio * n_frames
        else:
            T = self.T

        for i in range(n_masks):
            # Mask length t is capped at p * n_frames.
            t = int(self._rng.uniform(a=0, b=T))
            t = min(t, int(n_frames * self.p))
            t_0 = int(self._rng.uniform(a=0, b=n_frames - t))
            assert t_0 <= t_0 + t
            if replace_with_zero:
                x[t_0:t_0 + t, :] = 0
            else:
                x[t_0:t_0 + t, :] = x.mean()
            self._time_mask = (t_0, t_0 + t)
        return x

    def __call__(self, x, train=True):
        # Augmentation is a no-op at evaluation time.
        if not train:
            return x
        return self.transform_feature(x)

    def transform_feature(self, x: np.ndarray):
        """
        Args:
            x (np.ndarray): `[T, F]`
        Returns:
            x (np.ndarray): `[T, F]`
        """
        assert isinstance(x, np.ndarray)
        assert x.ndim == 2
        x = self.time_warp(x)
        x = self.mask_freq(x, self.replace_with_zero)
        x = self.mask_time(x, self.replace_with_zero)
        return x
StarcoderdataPython
1791642
from Bio import SeqIO
import sys

# Create a "genome file" (chromosome name and length, tab-separated, one per
# line) for use with bedtools.
# Usage: script.py <input.fasta> <output.genome>
fasta_path = sys.argv[1]
genome_path = sys.argv[2]

print("Extracting chromosome lengths from %s ..." % fasta_path)
print("Writing lengths to %s ..." % genome_path)
# Stream each record straight to the output file instead of first collecting
# two parallel lists; FASTA inputs can be large. The `with` block closes the
# file — the previous explicit f.close() inside it was redundant.
with open(genome_path, "w") as out:
    for rec in SeqIO.parse(fasta_path, "fasta"):
        out.write("%s\t%d\n" % (rec.id, len(rec)))
print("Extraction complete.")
StarcoderdataPython
32132
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import json
import os
import struct

from .ModelNodeConvert import ModelNodeVertexStream, ModelNodeGeometryData, addModelType
from .SceneResourcesConvert import modelVertexAttribEnum


class Object:
	"""Empty attribute holder used while assembling intermediate GLTF data."""
	pass


# Maps glTF mesh primitive attribute names to DeepSea vertex attribute enums.
# NOTE: the glTF 2.0 spec names indexed attribute sets with an underscore
# (TEXCOORD_0 .. TEXCOORD_7); the previous keys for sets 1-7 used a backtick
# ('TEXCOORD`1'), so those attributes could never match and were rejected as
# unsupported. Fixed to the spec-mandated names.
gltfVertexAttribEnum = {
	'POSITION': modelVertexAttribEnum['Position'],
	'NORMAL': modelVertexAttribEnum['Normal'],
	'TANGENT': modelVertexAttribEnum['Tangent'],
	'TEXCOORD_0': modelVertexAttribEnum['TexCoord0'],
	'TEXCOORD_1': modelVertexAttribEnum['TexCoord1'],
	'TEXCOORD_2': modelVertexAttribEnum['TexCoord2'],
	'TEXCOORD_3': modelVertexAttribEnum['TexCoord3'],
	'TEXCOORD_4': modelVertexAttribEnum['TexCoord4'],
	'TEXCOORD_5': modelVertexAttribEnum['TexCoord5'],
	'TEXCOORD_6': modelVertexAttribEnum['TexCoord6'],
	'TEXCOORD_7': modelVertexAttribEnum['TexCoord7'],
	'COLOR_0': modelVertexAttribEnum['Color0'],
	'COLOR_1': modelVertexAttribEnum['Color1'],
	'JOINTS_0': modelVertexAttribEnum['BlendIndices'],
	'WEIGHTS_0': modelVertexAttribEnum['BlendWeights'],
}

# Maps (glTF accessor type, componentType) to a (DeepSea format, decorator)
# pair. Component type codes are the GL constants from the glTF 2.0 spec
# (5120 BYTE, 5121 UNSIGNED_BYTE, 5122 SHORT, 5123 UNSIGNED_SHORT,
# 5125 UNSIGNED_INT, 5126 FLOAT).
gltfTypeMap = {
	('SCALAR', 5120): ('X8', 'Int'),
	('SCALAR', 5121): ('X8', 'UInt'),
	('SCALAR', 5122): ('X16', 'Int'),
	('SCALAR', 5123): ('X16', 'UInt'),
	('SCALAR', 5125): ('X32', 'UInt'),
	('SCALAR', 5126): ('X32', 'Float'),
	('VEC2', 5120): ('X8Y8', 'Int'),
	('VEC2', 5121): ('X8Y8', 'UInt'),
	('VEC2', 5122): ('X16Y16', 'Int'),
	('VEC2', 5123): ('X16Y16', 'UInt'),
	('VEC2', 5126): ('X32Y32', 'Float'),
	('VEC3', 5120): ('X8Y8Z8', 'Int'),
	('VEC3', 5121): ('X8Y8Z8', 'UInt'),
	('VEC3', 5122): ('X16Y16Z16', 'Int'),
	('VEC3', 5123): ('X16Y16Z16', 'UInt'),
	('VEC3', 5126): ('X32Y32Z32', 'Float'),
	('VEC4', 5120): ('X8Y8Z8W8', 'Int'),
	('VEC4', 5121): ('X8Y8Z8W8', 'UInt'),
	('VEC4', 5122): ('X16Y16Z16W16', 'Int'),
	('VEC4', 5123): ('X16Y16Z16W16', 'UInt'),
	('VEC4', 5126): ('X32Y32Z32W32', 'Float')
}

# Indexed by the glTF primitive "mode" value (0-6).
# NOTE(review): glTF mode 2 is LINE_LOOP; it maps to 'LineStrip' here,
# presumably as a lossy fallback — confirm DeepSea has no line-loop type.
gltfPrimitiveTypeMap = [
	'PointList',
	'LineList',
	'LineStrip',
	'LineStrip',
	'TriangleList',
	'TriangleStrip',
	'TriangleFan'
]

def convertGLTFModel(convertContext, path):
	"""
	Converts an GLTF model for use with ModelNodeConvert.

	If the "name" element is provided for a mesh, it will be used for the name of the model
	geometry. Otherwise, the name will be "mesh#", where # is the index of the mesh. If multiple
	sets of primitives are used, the index will be appended to the name, separated with '.'.

	Limitations:
	- Only meshes and dependent data (accessors, buffer views, and buffers) are extracted. All
	  other parts of the scene are ignored, including transforms.
	- Morph targets aren't supported.
	- Materials aren't read, and are instead provided in the DeepSea scene configuration.
	- Buffer data may either be embedded or a file path relative to the main model file. General
	  URIs are not supported.

	:param convertContext: conversion context (unused here, kept for the model-type callback
		signature expected by addModelType)
	:param path: path to the .gltf file
	:return: list of ModelNodeGeometryData, one per mesh primitive
	:raises Exception: on any malformed or unsupported input
	"""
	with open(path) as f:
		try:
			data = json.load(f)
		# json.load raises ValueError (JSONDecodeError) on malformed JSON.
		except ValueError:
			raise Exception('Invalid GLTF file "' + path + '".')

	parentDir = os.path.dirname(path)
	try:
		# Read the buffers. Each is either base64-embedded in the URI or a
		# file path relative to the model file.
		buffers = []
		bufferInfos = data['buffers']
		dataPrefix = 'data:application/octet-stream;base64,'
		try:
			for bufferInfo in bufferInfos:
				uri = bufferInfo['uri']
				if uri.startswith(dataPrefix):
					try:
						buffers.append(base64.b64decode(uri[len(dataPrefix):]))
					except Exception:
						raise Exception('Invalid buffer data for GLTF file "' +
							path + '".')
				else:
					with open(os.path.join(parentDir, uri), 'rb') as f:
						buffers.append(f.read())
		except (TypeError, ValueError):
			raise Exception('Buffers must be an array of objects for GLTF file "' +
				path + '".')
		except KeyError as e:
			raise Exception('Buffer doesn\'t contain element "' + str(e) +
				'" for GLTF file "' + path + '".')

		# Read the buffer views: byte ranges into the buffers above.
		bufferViews = []
		bufferViewInfos = data['bufferViews']
		try:
			for bufferViewInfo in bufferViewInfos:
				bufferView = Object()
				try:
					bufferData = buffers[bufferViewInfo['buffer']]
				except (IndexError, TypeError):
					raise Exception('Invalid buffer index for GLTF file "' + path + '".')

				offset = bufferViewInfo['byteOffset']
				length = bufferViewInfo['byteLength']
				try:
					bufferView.buffer = bufferData[offset:offset + length]
				except (IndexError, TypeError):
					raise Exception('Invalid buffer view range for GLTF file "' +
						path + '".')
				bufferViews.append(bufferView)
		except (TypeError, ValueError):
			raise Exception(
				'Buffer views must be an array of objects for GLTF file "' + path + '".')
		except KeyError as e:
			raise Exception('Buffer view doesn\'t contain element "' + str(e) +
				'" for GLTF file "' + path + '".')

		# Read the accessors: typed views into the buffer views.
		accessors = []
		accessorInfos = data['accessors']
		try:
			for accessorInfo in accessorInfos:
				accessor = Object()
				try:
					accessor.bufferView = bufferViews[accessorInfo['bufferView']]
				except (IndexError, TypeError):
					raise Exception('Invalid buffer view index for GLTF file "' +
						path + '".')

				gltfType = accessorInfo['type']
				componentType = accessorInfo['componentType']
				try:
					accessorType, decorator = gltfTypeMap[(gltfType, componentType)]
				except (KeyError, TypeError):
					raise Exception('Invalid accessor type (' + str(gltfType) + ', ' +
						str(componentType) + ') for GLTF file "' + path + '".')

				accessor.type = accessorType
				accessor.decorator = decorator
				accessor.count = accessorInfo['count']
				accessors.append(accessor)
		except (TypeError, ValueError):
			raise Exception('Accessors must be an array of objects for GLTF file "' +
				path + '".')
		except KeyError as e:
			raise Exception('Accessor doesn\'t contain element "' + str(e) +
				'" for GLTF file "' + path + '".')

		# Read the meshes. Each primitive becomes its own entry.
		meshes = []
		meshInfos = data['meshes']
		try:
			meshIndex = 0
			for meshInfo in meshInfos:
				meshName = meshInfo.get('name', 'mesh' + str(meshIndex))
				primitiveInfos = meshInfo['primitives']
				try:
					primitiveIndex = 0
					for primitiveInfo in primitiveInfos:
						mesh = Object()
						mesh.attributes = []
						mesh.name = meshName
						# Disambiguate primitives within the same mesh.
						if len(primitiveInfos) > 1:
							mesh.name += '.' + str(primitiveIndex)
						primitiveIndex += 1

						try:
							for attrib, index in primitiveInfo['attributes'].items():
								if attrib not in gltfVertexAttribEnum:
									raise Exception('Unsupported attribute "' + str(attrib) +
										'" for GLTF file "' + path + '".')

								try:
									mesh.attributes.append(
										(gltfVertexAttribEnum[attrib], accessors[index]))
								except (IndexError, TypeError):
									raise Exception('Invalid accessor index for GLTF file "' +
										path + '".')
						except (TypeError, ValueError):
							raise Exception(
								'Mesh primitives attributes must be an object containing '
								'attribute mappings for GLTF file "' + path + '".')

						# Indices are optional; None means non-indexed geometry.
						if 'indices' in primitiveInfo:
							try:
								mesh.indices = accessors[primitiveInfo['indices']]
							except (IndexError, TypeError):
								raise Exception(
									'Invalid accessor index for GLTF file "' + path + '".')
						else:
							mesh.indices = None

						# Mode defaults to 4 (TRIANGLES) per the glTF spec.
						mode = primitiveInfo.get('mode', 4)
						try:
							mesh.primitiveType = gltfPrimitiveTypeMap[mode]
						except (IndexError, TypeError):
							raise Exception('Unsupported primitive mode for GLTF file "' +
								path + '".')

						meshes.append(mesh)
				except (TypeError, ValueError):
					raise Exception(
						'Mesh primitives must be an array of objects for GLTF file "' +
						path + '".')
				except KeyError as e:
					raise Exception('Mesh primitives doesn\'t contain element "' + str(e) +
						'" for GLTF file "' + path + '".')
				meshIndex += 1
		except (TypeError, ValueError):
			raise Exception('Meshes must be an array of objects for GLTF file "' +
				path + '".')
		except KeyError as e:
			raise Exception('Mesh doesn\'t contain element "' + str(e) +
				'" for GLTF file "' + path + '".')
	except (TypeError, ValueError):
		raise Exception('Root value in GLTF file "' + path + '" must be an object.')
	except KeyError as e:
		raise Exception('GLTF file "' + path + '" doesn\'t contain element "' + str(e) +
			'".')

	# Convert meshes to geometry list. GLTF uses separate vertex streams rather than interleved
	# vertices, so the index buffer will need to be separate for each. This will have some
	# data duplication during processing, but isn't expected to be a large amount in practice.
	geometry = []
	for mesh in meshes:
		if mesh.indices:
			indexData = mesh.indices.bufferView.buffer
			if mesh.indices.type == 'X16':
				indexSize = 2
			elif mesh.indices.type == 'X32':
				indexSize = 4
			else:
				raise Exception('Unsupported index type "' + mesh.indices.type +
					'" for GLTF file "' + path + '".')
		else:
			indexData = None
			indexSize = 0

		vertexStreams = []
		for attrib, accessor in mesh.attributes:
			vertexFormat = [(attrib, accessor.type, accessor.decorator)]
			vertexStreams.append(ModelNodeVertexStream(vertexFormat,
				accessor.bufferView.buffer, indexSize, indexData))
		geometry.append(ModelNodeGeometryData(mesh.name, vertexStreams, mesh.primitiveType))
	return geometry

def registerGLTFModelType(convertContext):
	"""
	Registers the GLTF model type under the name "gltf".
	"""
	addModelType(convertContext, 'gltf', convertGLTFModel)
StarcoderdataPython
3237859
from pages.browser import Browser
from selenium import webdriver
from locators import *
import time


class LoginPage(Browser):
    """Page object for the login page and the links it exposes."""

    def __init__(self):
        # Path of the login page relative to the base URL.
        self.LOGIN = '/'

    def log_in_as(self, username, password):
        """
        Opens the login page, enters the given credentials and submits
        the form.
        """
        self.visit(self.LOGIN)
        username_field = self.find_element(*LoginPageLocators.username_field)
        password_field = self.find_element(*LoginPageLocators.password_field)
        submit_btn = self.find_element(*LoginPageLocators.submit_btn)
        username_field.send_keys(username)
        password_field.send_keys(password)
        submit_btn.click()
        # Fixed wait for the post-submit page. NOTE(review): an explicit
        # WebDriverWait on an expected element would be less flaky.
        time.sleep(2)

    def get_auth_message(self):
        """
        Returns the element holding the message shown after an
        authentication attempt.
        """
        return self.find_element(*LoginPageLocators.message)

    def click_register_link(self):
        """
        Clicks on the "Create an account" link.
        """
        self.find_element(*LoginPageLocators.create_acct_link).click()
        time.sleep(2)  # fixed wait for the registration page to load

    def get_page_header(self):
        """
        Returns the header element of the account-creation page.
        """
        return self.find_element(*CreateAccountPageLocators.header)
StarcoderdataPython
77222
import os.path as osp import random import numpy as np import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal from mmaction.core import (ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy) from mmaction.core.evaluation.ava_utils import ava_eval def gt_confusion_matrix(gt_labels, pred_labels, normalize=None): """Calculate the ground truth confusion matrix.""" max_index = max(max(gt_labels), max(pred_labels)) confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64) for gt, pred in zip(gt_labels, pred_labels): confusion_mat[gt][pred] += 1 del_index = [] for i in range(max_index): if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0: del_index.append(i) confusion_mat = np.delete(confusion_mat, del_index, axis=0) confusion_mat = np.delete(confusion_mat, del_index, axis=1) if normalize is not None: confusion_mat = np.array(confusion_mat, dtype=np.float) m, n = confusion_mat.shape if normalize == 'true': for i in range(m): s = np.sum(confusion_mat[i], dtype=float) if s == 0: continue confusion_mat[i, :] = confusion_mat[i, :] / s print(confusion_mat[i, :]) elif normalize == 'pred': for i in range(n): s = sum(confusion_mat[:, i]) if s == 0: continue confusion_mat[:, i] = confusion_mat[:, i] / s elif normalize == 'all': s = np.sum(confusion_mat) if s != 0: confusion_mat /= s return confusion_mat def test_activitynet_localization(): data_prefix = osp.normpath( osp.join(osp.dirname(__file__), '../data/eval_localization')) gt_path = osp.join(data_prefix, 'gt.json') result_path = osp.join(data_prefix, 'result.json') localization = ActivityNetLocalization(gt_path, result_path) results = localization.evaluate() mAP = np.array([ 0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222, 0.52083333, 0.52083333, 0.52083333, 0.5 ]) average_mAP = 0.6177579365079365 
assert_array_almost_equal(results[0], mAP) assert_array_almost_equal(results[1], average_mAP) def test_ava_detection(): data_prefix = osp.normpath( osp.join(osp.dirname(__file__), '../data/eval_detection')) gt_path = osp.join(data_prefix, 'gt.csv') result_path = osp.join(data_prefix, 'pred.csv') label_map = osp.join(data_prefix, 'action_list.txt') # eval bbox detection = ava_eval(result_path, 'mAP', label_map, gt_path, None) assert_array_almost_equal(detection['mAP@0.5IOU'], 0.09385522) def test_confusion_matrix(): # custom confusion_matrix gt_labels = [np.int64(random.randint(0, 9)) for _ in range(100)] pred_labels = np.random.randint(10, size=100, dtype=np.int64) for normalize in [None, 'true', 'pred', 'all']: cf_mat = confusion_matrix(pred_labels, gt_labels, normalize) gt_cf_mat = gt_confusion_matrix(gt_labels, pred_labels, normalize) assert_array_equal(cf_mat, gt_cf_mat) with pytest.raises(ValueError): # normalize must be in ['true', 'pred', 'all', None] confusion_matrix([1], [1], 'unsupport') with pytest.raises(TypeError): # y_pred must be list or np.ndarray confusion_matrix(0.5, [1]) with pytest.raises(TypeError): # y_real must be list or np.ndarray confusion_matrix([1], 0.5) with pytest.raises(TypeError): # y_pred dtype must be np.int64 confusion_matrix([0.5], [1]) with pytest.raises(TypeError): # y_real dtype must be np.int64 confusion_matrix([1], [0.5]) def test_topk(): scores = [ np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]), np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]) ] # top1 acc k = (1, ) top1_labels_0 = [3, 1, 1, 1] top1_labels_25 = [2, 0, 4, 3] top1_labels_50 = [2, 2, 3, 1] top1_labels_75 = [2, 2, 2, 3] top1_labels_100 = [2, 2, 2, 4] res = top_k_accuracy(scores, top1_labels_0, k) assert res == [0] res = top_k_accuracy(scores, top1_labels_25, k) assert res == [0.25] res = top_k_accuracy(scores, top1_labels_50, k) assert res == 
[0.5] res = top_k_accuracy(scores, top1_labels_75, k) assert res == [0.75] res = top_k_accuracy(scores, top1_labels_100, k) assert res == [1.0] # top1 acc, top2 acc k = (1, 2) top2_labels_0_100 = [3, 1, 1, 1] top2_labels_25_75 = [3, 1, 2, 3] res = top_k_accuracy(scores, top2_labels_0_100, k) assert res == [0, 1.0] res = top_k_accuracy(scores, top2_labels_25_75, k) assert res == [0.25, 0.75] # top1 acc, top3 acc, top5 acc k = (1, 3, 5) top5_labels_0_0_100 = [1, 0, 3, 2] top5_labels_0_50_100 = [1, 3, 4, 0] top5_labels_25_75_100 = [2, 3, 0, 2] res = top_k_accuracy(scores, top5_labels_0_0_100, k) assert res == [0, 0, 1.0] res = top_k_accuracy(scores, top5_labels_0_50_100, k) assert res == [0, 0.5, 1.0] res = top_k_accuracy(scores, top5_labels_25_75_100, k) assert res == [0.25, 0.75, 1.0] def test_mean_class_accuracy(): scores = [ np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]), np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]) ] # test mean class accuracy in [0, 0.25, 1/3, 0.75, 1.0] mean_cls_acc_0 = np.int64([1, 4, 0, 2]) mean_cls_acc_25 = np.int64([2, 0, 4, 3]) mean_cls_acc_33 = np.int64([2, 2, 2, 3]) mean_cls_acc_75 = np.int64([4, 2, 2, 4]) mean_cls_acc_100 = np.int64([2, 2, 2, 4]) assert mean_class_accuracy(scores, mean_cls_acc_0) == 0 assert mean_class_accuracy(scores, mean_cls_acc_25) == 0.25 assert mean_class_accuracy(scores, mean_cls_acc_33) == 1 / 3 assert mean_class_accuracy(scores, mean_cls_acc_75) == 0.75 assert mean_class_accuracy(scores, mean_cls_acc_100) == 1.0 def test_mmit_mean_average_precision(): # One sample y_true = [np.array([0, 0, 1, 1])] y_scores = [np.array([0.1, 0.4, 0.35, 0.8])] map = mmit_mean_average_precision(y_scores, y_true) precision = [2.0 / 3.0, 0.5, 1., 1.] recall = [1., 0.5, 0.5, 0.] 
target = -np.sum(np.diff(recall) * np.array(precision)[:-1]) assert target == map def test_pairwise_temporal_iou(): target_segments = np.array([]) candidate_segments = np.array([]) with pytest.raises(ValueError): pairwise_temporal_iou(target_segments, candidate_segments) # test temporal iou target_segments = np.array([[1, 2], [2, 3]]) candidate_segments = np.array([[2, 3], [2.5, 3]]) temporal_iou = pairwise_temporal_iou(candidate_segments, target_segments) assert_array_equal(temporal_iou, [[0, 0], [1, 0.5]]) # test temporal overlap_self target_segments = np.array([[1, 2], [2, 3]]) candidate_segments = np.array([[2, 3], [2.5, 3]]) temporal_iou, temporal_overlap_self = pairwise_temporal_iou( candidate_segments, target_segments, calculate_overlap_self=True) assert_array_equal(temporal_overlap_self, [[0, 0], [1, 1]]) # test temporal overlap_self when candidate_segments is 1d target_segments = np.array([[1, 2], [2, 3]]) candidate_segments = np.array([2.5, 3]) temporal_iou, temporal_overlap_self = pairwise_temporal_iou( candidate_segments, target_segments, calculate_overlap_self=True) assert_array_equal(temporal_overlap_self, [0, 1]) def test_average_recall_at_avg_proposals(): ground_truth1 = { 'v_test1': np.array([[0, 1], [1, 2]]), 'v_test2': np.array([[0, 1], [1, 2]]) } ground_truth2 = {'v_test1': np.array([[0, 1]])} proposals1 = { 'v_test1': np.array([[0, 1, 1], [1, 2, 1]]), 'v_test2': np.array([[0, 1, 1], [1, 2, 1]]) } proposals2 = { 'v_test1': np.array([[10, 11, 0.6], [11, 12, 0.4]]), 'v_test2': np.array([[10, 11, 0.6], [11, 12, 0.4]]) } proposals3 = { 'v_test1': np.array([[i, i + 1, 1 / (i + 1)] for i in range(100)]) } recall, avg_recall, proposals_per_video, auc = ( average_recall_at_avg_proposals(ground_truth1, proposals1, 4)) assert_array_equal(recall, [[0.] * 49 + [0.5] * 50 + [1.]] * 10) assert_array_equal(avg_recall, [0.] 
* 49 + [0.5] * 50 + [1.]) assert_array_almost_equal( proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10) assert auc == 25.5 recall, avg_recall, proposals_per_video, auc = ( average_recall_at_avg_proposals(ground_truth1, proposals2, 4)) assert_array_equal(recall, [[0.] * 100] * 10) assert_array_equal(avg_recall, [0.] * 100) assert_array_almost_equal( proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10) assert auc == 0 recall, avg_recall, proposals_per_video, auc = ( average_recall_at_avg_proposals(ground_truth2, proposals3, 100)) assert_array_equal(recall, [[1.] * 100] * 10) assert_array_equal(avg_recall, ([1.] * 100)) assert_array_almost_equal( proposals_per_video, np.arange(1, 101, 1), decimal=10) assert auc == 99.0 def test_get_weighted_score(): score_a = [ np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]), np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]) ] score_b = [ np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]), np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]) ] weighted_score = get_weighted_score([score_a], [1]) assert np.all(np.isclose(np.array(score_a), np.array(weighted_score))) coeff_a, coeff_b = 2., 1. 
weighted_score = get_weighted_score([score_a, score_b], [coeff_a, coeff_b]) ground_truth = [ x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b) ] assert np.all(np.isclose(np.array(ground_truth), np.array(weighted_score))) def test_mean_average_precision(): def content_for_unittest(scores, labels, result): gt = mean_average_precision(scores, labels) assert gt == result scores = [ np.array([0.1, 0.2, 0.3, 0.4]), np.array([0.2, 0.3, 0.4, 0.1]), np.array([0.3, 0.4, 0.1, 0.2]), np.array([0.4, 0.1, 0.2, 0.3]) ] label1 = np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]]) result1 = 2 / 3 label2 = np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]]) result2 = np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0]) content_for_unittest(scores, label1, result1) content_for_unittest(scores, label2, result2)
StarcoderdataPython
28425
import argparse
import subprocess
import sys
import logging

logger = logging.getLogger("helper")


def azcli(command):
    """Run *command* (a list of program arguments) and return its stdout.

    Stdout is logged at DEBUG level and returned as bytes. On a non-zero
    exit status the command's stderr is logged at ERROR level and the
    current process exits with that status code.

    :param command: argument list passed to subprocess (no shell involved)
    :return: captured stdout as bytes
    """
    # subprocess.run replaces the manual Popen/communicate dance and always
    # waits for the process to finish.
    result = subprocess.run(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # Decode defensively: tool output isn't guaranteed to be valid UTF-8.
    logger.debug(result.stdout.decode("utf-8", errors="replace"))
    # Any non-zero return code is a failure (the old `exit_code and
    # exit_code != 0` double test was redundant).
    if result.returncode != 0:
        logger.error(result.stderr.decode("utf-8", errors="replace"))
        sys.exit(result.returncode)
    return result.stdout
StarcoderdataPython
121973
import cv2
import time
# Remove Later
import numpy as np

# Line-follower prototype: plays ./img/vert2.mp4 on a loop, isolates dark
# (near-black) regions, and draws each region's bounding box plus a short
# vertical guide line at the box's horizontal center.
video = cv2.VideoCapture("./img/vert2.mp4")

# Inclusive BGR range treated as "black" line pixels by cv2.inRange below.
target_low = (0, 0, 0)
target_high = (50, 50, 50)

while True:
    ret, frame = video.read()
    if not ret:
        # End of clip (or read failure): reopen the file to loop playback.
        video = cv2.VideoCapture("./img/vert2.mp4")
        continue
    image = frame
    # Downscale to 25% and blur to suppress noise before thresholding.
    image = cv2.resize(image, (0,0), fx=0.25, fy=0.25)
    image = cv2.GaussianBlur(image, (5,5), 3)
    # Binary mask of pixels within the "black" BGR range.
    Blackline= cv2.inRange(image, target_low, target_high)
    kernel = np.ones((3,3), np.uint8)
    Blackline = cv2.erode(Blackline, kernel, iterations=1)  # Remove noise
    Blackline = cv2.dilate(Blackline, kernel, iterations=9)  # Restore box sizes
    # NOTE(review): this 2-value unpacking assumes OpenCV 4.x; OpenCV 3.x
    # findContours returns 3 values — confirm the installed version.
    contours, hierarchy = cv2.findContours(Blackline.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Outline all detected regions in green.
    cv2.drawContours(image, contours, -1, (0, 200, 0), 3)
    for c in contours:
        x,y,w,h = cv2.boundingRect(c)
        # Red bounding box around the region.
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 3)
        # Blue vertical tick at the box's horizontal center (fixed y span).
        cv2.line(image, (x+(w//2), 200), (x+(w//2), 250),(255,0,0),3)
    cv2.imshow("orginal with line", image)
    # Throttle playback to roughly 40 fps.
    time.sleep(0.025)
    key = cv2.waitKey(1)
    if key == 27:  # ESC exits the loop
        break

cv2.waitKey(0)
cv2.destroyAllWindows()
StarcoderdataPython
1715626
import csv
import unittest
from unittest import result

from rdflib import URIRef

from RQSSFramework.Availability.DereferencePossibility import \
    DerefrenceExplorer


class TestDereferency(unittest.TestCase):
    """Integration tests for DerefrenceExplorer.

    NOTE(review): these tests dereference live URLs, so their outcomes
    depend on network availability and the remote servers' behavior.
    """

    def setUp(self):
        # Q1 is listed twice on purpose (exercises de-duplication), Qabc
        # does not exist, and the last URL checks dereferencing through
        # redirects.
        self.test_data = [URIRef("https://www.wikidata.org/wiki/Q1"),
                          URIRef("https://www.wikidata.org/wiki/Q1"),
                          URIRef("https://www.wikidata.org/wiki/Qabc"),
                          URIRef("https://www2.macs.hw.ac.uk/~sh200/")]  # dereference of redirects

    def test_not_computed(self):
        """
        Test the type of a non computed class be NoneType
        """
        test_class = DerefrenceExplorer(self.test_data)
        self.assertEqual(test_class.results, None)
        self.assertEqual(test_class.score, None)

    def test_remove_duplication(self):
        """
        Test that the constructor will remove the duplicated URIs
        """
        test_class = DerefrenceExplorer(self.test_data)
        result = test_class.check_dereferencies()
        # Four inputs with one duplicate should yield three unique URIs.
        self.assertEqual(len(result), 3)
        self.assertEqual(len(test_class.results), 3)

    def test_check_dereferencies(self):
        """
        Test that it results are according to the real world data for a few set of URIs
        """
        test_class = DerefrenceExplorer(self.test_data)
        test_ret = test_class.check_dereferencies()
        # The dereferencing score is a ratio, so it must lie in [0, 1].
        self.assertGreaterEqual(test_class.score, 0)
        self.assertLessEqual(test_class.score, 1)
        # Dump the ratio and the per-URI results for manual inspection.
        with open('dereferencing_ratio.test.csv', 'w') as file_handler:
            file_handler.write(str(test_class))
        with open('dereferencing.test.csv', 'w', newline='') as f:
            w = csv.writer(f)
            # write header from NamedTuple fields
            w.writerow([field for field in test_class.results[0]._fields])
            for result in test_class.results:
                row = [result._asdict()[field] for field in result._fields]
                w.writerow(row)
StarcoderdataPython
1653801
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 服务端工具 Case Name : 指定导入操作使用的角色名,并指定具体角色用户的角色密码 Description : 1.创建数据 2.导出数据 3.删除表 4.导入数据 5.校验数据是否导入 6.清理环境 Expect : 1.创建数据成功 2.导出数据成功 3.删除表成功 4.导入数据失败 5.校验数据成功,数据未导入 5.清理环境成功 """ import os import unittest from yat.test import Node from yat.test import macro from testcase.utils.Constant import Constant from testcase.utils.Logger import Logger from testcase.utils.CommonSH import CommonSH class Tools(unittest.TestCase): def setUp(self): self.log = Logger() self.log.info("-Opengauss_Function_Tools_gs_restore_Case0083 开始执行-") self.constant = Constant() self.dbuser_node = Node('dbuser') self.root_user = Node('default') self.commonsh = CommonSH('dbuser') self.db_name = 'd_gs_restore_case0083' self.table_name = 't_gs_restore_case0083' self.user_name = 'u_gs_restore_case0083' self.role_name = 'r_gs_restore_case0083' self.tablespace_name = 'tbspc_gs_restore_case0083' self.tablespace_rename = 're_tbspc_gs_restore_case0083' self.schema_name = 's_gs_restore_case0083' self.func_name = 'f_gs_restore_case0083' def test_server_tools1(self): text1 = '-----step1.创建数据; expect:创建数据成功-----' self.log.info(text1) sql = f'create database {self.db_name};' sql_cmd = self.commonsh.execut_db_sql(sql) self.log.info(sql_cmd) self.assertIn(self.constant.CREATE_DATABASE_SUCCESS, sql_cmd, '执行失败:' + text1) sql = f'''create table {self.table_name} (id int,name char(20)); insert into {self.table_name} values(1,'xixi'),\ (2,'haha'),(3,'hehe'); create user {self.user_name} identified by \ 
'{macro.PASSWD_REPLACE}'; create role {self.role_name} identified by \ '{macro.PASSWD_REPLACE}'; create tablespace {self.tablespace_name} relative \ location 'tablespace/tablespace_1'; alter tablespace {self.tablespace_name} rename to \ {self.tablespace_rename}; create schema {self.schema_name}; create function {self.func_name}(i integer) returns integer as \$$ begin return i+1; end; \$$ language plpgsql; select * from {self.table_name}; ''' sql_cmd = self.commonsh.execut_db_sql(sql, dbname=self.db_name) self.log.info(sql_cmd) assert_1 = self.constant.TABLE_CREATE_SUCCESS in sql_cmd assert_2 = self.constant.CREATE_ROLE_SUCCESS_MSG in sql_cmd assert_3 = sql_cmd.count(self.constant.CREATE_ROLE_SUCCESS_MSG) == 2 assert_4 = self.constant.TABLESPCE_CREATE_SUCCESS in sql_cmd assert_5 = self.constant.TABLESPCE_ALTER_SUCCESS in sql_cmd assert_6 = self.constant.CREATE_SCHEMA_SUCCESS_MSG in sql_cmd assert_7 = self.constant.CREATE_FUNCTION_SUCCESS_MSG in sql_cmd assert_8 = '3 rows' in sql_cmd self.assertTrue(assert_1 and assert_2 and assert_3 and assert_4 and assert_5 and assert_6 and assert_7 and assert_8, '执行失败:' + text1) text2 = '-----step2.导出数据; expect:导出tar格式文件成功-----' self.log.info(text2) dump_cmd = f"source {macro.DB_ENV_PATH};" \ f"gs_dump -p {self.dbuser_node.db_port} " \ f"{self.db_name} -f " \ f"{os.path.join(f'{macro.DB_INSTANCE_PATH}', 'gs_restore.tar')} " \ f"-F t" self.log.info(dump_cmd) dump_msg = self.dbuser_node.sh(dump_cmd).result() self.log.info(dump_msg) self.assertIn(self.constant.GS_DUMP_SUCCESS_MSG, dump_msg, '执行失败:' + text2) text3 = '-----step3.删除表; expect:删除表成功-----' self.log.info(text3) sql_cmd = self.commonsh.execut_db_sql(f"drop table " f"{self.table_name};", dbname=self.db_name) self.log.info(sql_cmd) self.assertIn(self.constant.TABLE_DROP_SUCCESS, sql_cmd, '执行失败:' + text3) text4 = '-----step4.导入之前导出的数据; expect:校验数据成功,导入数据失败-----' self.log.info(text4) restore_cmd = f"source {macro.DB_ENV_PATH}; " \ f"gs_restore -p {self.dbuser_node.db_port} " \ 
f"-d {self.db_name} -U {self.user_name} " \ f"-W {macro.PASSWD_REPLACE} " \ f"{os.path.join(f'{macro.DB_INSTANCE_PATH}', 'gs_restore.tar')}" \ f" --role={self.role_name} --rolepassword={macro.PASSWD_REPLACE}" self.log.info(restore_cmd) restore_msg = self.dbuser_node.sh(restore_cmd).result() self.log.info(restore_msg) self.assertIn('ERROR: permission denied', restore_msg, '执行失败:' + text4) assert_1 = restore_msg.count('ERROR: permission denied') == 5 self.assertTrue(assert_1, '执行失败:' + text4) text5 = '-----step5.校验表数据是否导入; expect:表数据未导入-----' self.log.info(text5) sql_cmd = self.commonsh.execut_db_sql(f"select * from " f"{self.table_name};", dbname=self.db_name) self.log.info(sql_cmd) self.assertIn(f'ERROR: relation "{self.table_name}" ' f'does not exist', sql_cmd, '执行失败:' + text5) def tearDown(self): text6 = '-----step6.清理环境; expect:清理环境成功-----' self.log.info(text6) sql_cmd = self.commonsh.execut_db_sql(f'drop database ' f'{self.db_name};' f'drop tablespace ' f'{self.tablespace_rename};' f'drop user {self.user_name};' f'drop role {self.role_name};') self.log.info(sql_cmd) rm_cmd = f"rm -rf " \ f"{os.path.join(f'{macro.DB_INSTANCE_PATH}', 'gs_restore.tar')}" self.log.info(rm_cmd) rm_msg = self.dbuser_node.sh(rm_cmd).result() self.log.info(rm_msg) assert_1 = self.constant.DROP_DATABASE_SUCCESS in sql_cmd assert_2 = self.constant.TABLESPCE_DROP_SUCCESS in sql_cmd assert_3 = self.constant.DROP_ROLE_SUCCESS_MSG in sql_cmd assert_4 = sql_cmd.count(self.constant.DROP_ROLE_SUCCESS_MSG) == 2 assert_5 = '' in sql_cmd self.assertTrue(assert_1 and assert_2 and assert_3 and assert_4 and assert_5, '执行失败:' + text6) self.log.info("-Opengauss_Function_Tools_gs_restore_Case0083 执行结束-")
StarcoderdataPython
142987
# Read activity data (example from "Spark: The Definitive Guide"); the input
# files are JSON.
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import *
from time import sleep

# The same directory is read statically and as a stream, so name it once.
DATA_PATH = "/home/user/workarea/projects/Spark-The-Definitive-Guide/data/activity-data/"

# Create the Spark session; `SparkSession.builder` is the canonical accessor.
spark = SparkSession.builder \
    .appName("streaming-activity-data") \
    .master("local[3]") \
    .getOrCreate()
# Keep shuffle partitions small for a local example.
spark.conf.set("spark.sql.shuffle.partitions", 5)

# Static read: used to peek at the data and, more importantly, to infer the
# schema that the streaming read below requires.
static = spark.read.json(DATA_PATH)
static.show(5)
static.printSchema()

# Streaming read: one input file per trigger simulates a live stream.
streaming = spark.readStream.schema(static.schema) \
    .option("maxFilesPerTrigger", 1).json(DATA_PATH)
activityCounts = streaming.groupBy("gt").count()

# Maintain the running counts in an in-memory table named "activity_counts".
activityQuery = activityCounts.writeStream.queryName("activity_counts") \
    .format("memory").outputMode("complete").start()

# Poll the in-memory table for a while so the evolving counts are visible.
for _ in range(100):
    spark.sql("select * from activity_counts").show()
    sleep(1)

# Block until the streaming query terminates.
activityQuery.awaitTermination()
StarcoderdataPython
1750457
from ._chart_forecast import create_forecast_chart
from ._options import ForecastOptions

# Declare the package's public API explicitly. This replaces the previous
# bare tuple expression, which only existed to mark the re-exports as used.
__all__ = ["create_forecast_chart", "ForecastOptions"]
StarcoderdataPython
137849
from copy import copy

from ava.common.auditor import _Auditor
from ava.common.constant import HTTP
from ava.handlers.value import _ValueHandler

# metadata
name = __name__
description = "audits text data"


class _TextValueHandler(_ValueHandler):

    def _get_targets(self, vector):
        """
        Returns list with a single target for text data. Content-Type must be
        text/plain.
        :param vector: vector dictionary
        :return: target as list (['0'] when auditable, else empty)
        """
        # nothing to audit without body data
        if not vector['data']:
            return []

        # only audit bodies explicitly declared as text
        content_type = vector['headers'].get('Content-Type')
        if content_type and content_type.startswith(HTTP.CONTENT_TYPE.TEXT):
            return ['0']

        return []

    def _generate_variations(self, check, vector, target):
        """
        Generates variations for value checks. Variations are created by
        replacing text data with payloads.
        :param check: check object
        :param vector: vector dictionary
        :param target: target key
        :return: generator of auditable dictionaries
        """
        original = vector['data']

        # one variation per payload; each replaces the entire text body
        for payload in check.payloads(vector['url'], target, original):
            variation = copy(vector)
            variation['data'] = payload

            yield {
                'vector': variation,
                'payload': payload,
                'value': payload
            }


class TextAuditor(_Auditor):
    """
    Audits parts of text data. It audits by replacing text data with payloads
    from the given check.
    """
    key = "text"
    name = "Text Data"
    description = "Audits by replacing plain text data"
    handlers = [
        _TextValueHandler
    ]
StarcoderdataPython
1683706
import xml.dom.minidom
from xml.dom.minidom import Node

# Input/output locations for the Android resource file converted to CSV.
fileName = 'strings'
projectName = 'my-module'
filename = "path/to/project/" + projectName + "/src/main/res/values/" + fileName + ".xml"
oputputfilename = "path/to/output/output-" + projectName + "-" + fileName + ".csv"


def nodeDataToString(node):
    """Return the node's text content with real newlines escaped as literal '\\n'."""
    text = node.firstChild.data
    text = text.replace("\n", "\\n")
    return text
    # return text.encode('utf8') // If required


def readItemsArray(node):
    """Serialise all <item> children of *node* into a ';'-separated string.

    Plural items are prefixed with 'quantity=<value>;'. Items whose text
    cannot be read are recorded as 'error-script' instead of aborting.
    """
    line = ""
    for item in node.getElementsByTagName("item"):
        try:
            if item.hasAttribute("quantity"):
                line = line + "quantity=" + item.getAttribute("quantity") + ";"
            line = line + nodeDataToString(item) + ";"
        except Exception:
            # best-effort: mark the broken item and keep going
            line = line + "error-script;"
    return line


def main():
    """Parse the resource XML and write one CSV line per string resource.

    The original opened the output file at module import time and never
    closed it; the 'with' block guarantees the file is flushed and closed.
    """
    doc = xml.dom.minidom.parse(filename)
    with open(oputputfilename, "w") as fileout:
        for node in doc.getElementsByTagName("string"):
            line = "string;" + node.getAttribute("name") + ";"
            try:
                line = line + nodeDataToString(node)
            except Exception:
                # empty/odd nodes: emit a blank value instead of crashing
                line = line + " "
            fileout.write(line + "\n")

        for node in doc.getElementsByTagName("string-array"):
            fileout.write("string-array;" + node.getAttribute("name") + ";"
                          + readItemsArray(node) + "\n")

        for node in doc.getElementsByTagName("plurals"):
            fileout.write("plurals;" + node.getAttribute("name") + ";"
                          + readItemsArray(node) + "\n")


if __name__ == "__main__":
    main()
StarcoderdataPython
1788625
from abc import ABC, abstractmethod


class IRepository(ABC):
    """Abstract interface for asynchronous CRUD repositories."""

    @abstractmethod
    async def create(self):
        """Persist a new entity."""

    @abstractmethod
    async def find(self):
        """Look up an existing entity."""

    @abstractmethod
    async def update(self):
        """Modify an existing entity."""

    @abstractmethod
    async def delete(self):
        """Remove an existing entity."""
StarcoderdataPython
3362499
from ctypes import *
from ctypes.wintypes import *
from winscard import *


class SCard:
    """Thin wrapper around the WinSCard smart-card API.

    Establishes a card-resource-manager context on construction and exposes
    reader enumeration. Raises WinSCardError on any non-success return code.
    """

    def __init__(self, dwScope=SCARD_SCOPE_USER, pvReserved1=None, pvReserved2=None):
        """Establish a WinSCard resource-manager context.

        :param dwScope: scope of the resource-manager context
        :param pvReserved1: reserved by the API, must be None
        :param pvReserved2: reserved by the API, must be None
        :raises WinSCardError: if SCardEstablishContext fails
        """
        # BUGFIX: these were *class* attributes in the original, so every
        # SCard instance shared (and overwrote) the same SCARDCONTEXT /
        # SCARDHANDLE / DWORD objects. They must be per-instance state.
        self.context = SCARDCONTEXT()
        self.card = SCARDHANDLE()
        self.dwAutoAllocate = DWORD(-1)  # SCARD_AUTOALLOCATE sentinel
        result = ULONG(scard_dll.SCardEstablishContext(
            dwScope, pvReserved1, pvReserved2, pointer(self.context)))
        if result.value != SCARD_S_SUCCESS:
            raise WinSCardError(result.value)

    def list_readers(self, mszGroups=None):
        """Return the connected smart-card readers.

        :param mszGroups: optional reader-group filter passed to the API
        :return: list of Reader objects, one per reader name
        :raises WinSCardError: if SCardListReadersA fails
        """
        nd = LPSTR()
        result = ULONG(scard_dll.SCardListReadersA(
            self.context, mszGroups, pointer(nd), pointer(self.dwAutoAllocate)))
        if result.value != SCARD_S_SUCCESS:
            raise WinSCardError(result.value)
        # The API returns a multi-string of reader names; split on the
        # separator used here ('\r\n') and wrap each in a Reader object.
        self.reader_names = str(nd.value).split('\r\n')
        return [Reader(LPSTR(x), self.card, self.context) for x in self.reader_names]
StarcoderdataPython
146378
""" @ Author : <NAME>, <NAME>, <NAME> @ Date : 04/29/2018, 11/01/2018, 04/29/2019 @ Description : Youless Sensor - Monitor power consumption. """ VERSION = '2.0.1' import json import logging from datetime import timedelta from urllib.request import urlopen import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import CONF_MONITORED_VARIABLES from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle DOMAIN = 'youless' CONF_HOST = "host" SENSOR_PREFIX = 'youless_' _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_MONITORED_VARIABLES, default=['pwr', 'net']): vol.All( cv.ensure_list, vol.Length(min=1), [vol.In(['pwr', 'net', 'p1', 'p2', 'n1', 'n2', 'cs0', 'ps0', 'gas'])]) }) }, extra=vol.ALLOW_EXTRA) SENSOR_TYPES = { 'pwr': ['Current Power usage', 'current_power_usage', 'W', 'mdi:flash', 'energy.png'], 'net': ['Net Power usage', 'net_power_meter', 'kWh', 'mdi:gauge', 'electric-meter.png'], 'p1': ['Power Meter Low', 'power_meter_low', 'kWh', 'mdi:gauge', 'energy.png'], 'p2': ['Power Meter High', 'power_meter_high', 'kWh', 'mdi:gauge', 'energy.png'], 'n1': ['Power Delivery Low', 'power_delivery_low', 'kWh', 'mdi:gauge', 'energy.png'], 'n2': ['Power Delivery High', 'power_delivery_high', 'kWh', 'mdi:gauge', 'energy.png'], 'cs0': ['Power Meter Extra', 'power_meter_extra', 'kWh', 'mdi:gauge', 'energy.png'], 'ps0': ['Current Power usage Extra', 'current_power_usage_extra', 'W', 'mdi:flash', 'energy.png'], 'gas': ['Gas consumption', 'gas_meter', 'm3', 'mdi:gas-cylinder', 'electric-meter.png'] } def setup_platform(hass, config, add_devices, discovery_info=None): host = config.get(CONF_HOST) sensors = config.get(CONF_MONITORED_VARIABLES) data_bridge = YoulessDataBridge(host) devices = [] for sensor in sensors: sensor_config = SENSOR_TYPES[sensor] devices.append(YoulessSensor(data_bridge, sensor_config[0], 
sensor, sensor_config[1], sensor_config[2], sensor_config[3], sensor_config[4])) add_devices(devices) class YoulessDataBridge(object): def __init__(self, host): self._url = 'http://' + host + '/e' self._data = None def data(self): return self._data @Throttle(timedelta(seconds=1)) def update(self): raw_res = urlopen(self._url) self._data = json.loads(raw_res.read().decode('utf-8'))[0] class YoulessSensor(Entity): def __init__(self, data_bridge, name, prpt, sensor_id, uom, icon, image_uri): self._state = None self._name = name self._property = prpt self._icon = icon #self._image = image_uri self._uom = uom self._data_bridge = data_bridge self.entity_id = 'sensor.' + SENSOR_PREFIX + sensor_id self._raw = None @property def name(self): return self._name @property def icon(self): return self._icon #@property #def entity_picture(self): # return '/local/youless/' + self._image @property def unit_of_measurement(self): return self._uom @property def state(self): return self._state @property def state_attributes(self): if self._raw is not None: return { 'timestamp': self._raw['tm'] } def update(self): self._data_bridge.update() self._raw = self._data_bridge.data() if self._raw is not None: self._state = self._raw[self._property]
StarcoderdataPython
1781826
# Flask front-end for the YOLOv4-based glass detector: accepts image uploads,
# runs detection on them, and serves the labeled results.
import os
from flask import Flask, flash, make_response, request, redirect, url_for, render_template, send_from_directory
from werkzeug.utils import secure_filename
import numpy as np
import cv2
from mainDetector import glass_detector

# NOTE(review): hard-coded absolute Windows path — only works on the original
# author's machine; should come from configuration.
UPLOAD_FOLDER = r'C:/Users/rianl/Desktop/tensorflow-yolov4-tflite/uploadFolder'
# Upload extensions accepted by allowed_file().
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# NOTE(review): secret key hard-coded in source — move to an environment
# variable before deploying.
app.secret_key = "super secret key"


def allowed_file(filename):
    # True when the filename has an extension and it is in the allow-list.
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/')
def index():
    # Landing page with the upload form.
    return render_template('index.html')


@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept an image upload ('pic'), run the detector, show the labeled result."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'pic' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['pic']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # sanitise the name before writing it into the upload folder
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            path = app.config['UPLOAD_FOLDER'] + '/' + filename
            # re-read the saved file to obtain its pixel dimensions
            img = cv2.imread(path)
            dimensions = img.shape
            altura = dimensions[0]   # height
            anchura = dimensions[1]  # width
            pathTs = './uploadFolder/' + filename
            # run detection; presumably writes 'label<filename>' into the
            # upload folder — TODO confirm against mainDetector.glass_detector
            glass_detector(img, filename)
            print(altura)
            print(anchura)
            labeledTs = 'label' + filename
            #return redirect(url_for('uploaded_file', filename=filename))
            return render_template("draw.html", image_name=labeledTs,
                                   imgHeight=altura, imgWidth=anchura)
    # GET requests (or rejected uploads) fall through to a placeholder page.
    return "Hello world"


@app.route('/upload/<filename>')
def send_image(filename):
    # Serve a file from the upload folder (labeled detection output).
    return send_from_directory("uploadFolder", filename)


@app.route('/load/<imagen>', methods=['GET', 'POST'])
def load(imagen):
    """Attach an uploaded file ('archivo') to the previously processed image *imagen*."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'archivo' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['archivo']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # dimensions come from the *original* image, not the new upload
            path = app.config['UPLOAD_FOLDER'] + '/' + imagen
            img = cv2.imread(path)
            dimensions = img.shape
            altura = dimensions[0]   # height
            anchura = dimensions[1]  # width
            #return redirect(url_for('uploaded_file', filename=filename))
            return render_template("filed.html", file_name=filename, image_name=imagen,
                                   imgHeight=altura, imgWidth=anchura)
    return "Hello world algo no esta bien"


@app.route('/load/file/<filename>')
def send_file(filename):
    # Serve an attached file from the upload folder.
    return send_from_directory("uploadFolder", filename)


'''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    path = app.config['UPLOAD_FOLDER'] + '/' + filename
    img = cv2.imread(path)
    ret, jpeg = cv2.imencode('.jpg', img)
    response = make_response(jpeg.tobytes())
    response.headers['Content-Type'] = 'image/png'
    return response
'''

if __name__ == "__main__":
    #app.run(debug=True)
    # NOTE(review): binds to all interfaces — intended for LAN testing only.
    app.run(host='0.0.0.0')
StarcoderdataPython
196049
""" Helper functions for testing. """ import inspect import os import string from matplotlib.testing.compare import compare_images from pygmt.exceptions import GMTImageComparisonFailure from pygmt.src import which def check_figures_equal(*, extensions=("png",), tol=0.0, result_dir="result_images"): """ Decorator for test cases that generate and compare two figures. The decorated function must return two arguments, *fig_ref* and *fig_test*, these two figures will then be saved and compared against each other. This decorator is practically identical to matplotlib's check_figures_equal function, but adapted for PyGMT figures. See also the original code at https://matplotlib.org/3.3.1/api/testing_api.html# matplotlib.testing.decorators.check_figures_equal Parameters ---------- extensions : list The extensions to test. Default is ["png"]. tol : float The RMS threshold above which the test is considered failed. result_dir : str The directory where the figures will be stored. Examples -------- >>> import pytest >>> import shutil >>> from pygmt import Figure >>> @check_figures_equal(result_dir="tmp_result_images") ... def test_check_figures_equal(): ... fig_ref = Figure() ... fig_ref.basemap(projection="X5c", region=[0, 5, 0, 5], frame=True) ... fig_test = Figure() ... fig_test.basemap( ... projection="X5c", region=[0, 5, 0, 5], frame=["WrStZ", "af"] ... ) ... return fig_ref, fig_test >>> test_check_figures_equal() >>> assert len(os.listdir("tmp_result_images")) == 0 >>> shutil.rmtree(path="tmp_result_images") # cleanup folder if tests pass >>> @check_figures_equal(result_dir="tmp_result_images") ... def test_check_figures_unequal(): ... fig_ref = Figure() ... fig_ref.basemap(projection="X5c", region=[0, 6, 0, 6], frame=True) ... fig_test = Figure() ... fig_test.basemap(projection="X5c", region=[0, 3, 0, 3], frame=True) ... return fig_ref, fig_test >>> with pytest.raises(GMTImageComparisonFailure): ... test_check_figures_unequal() ... 
>>> for suffix in ["", "-expected", "-failed-diff"]: ... assert os.path.exists( ... os.path.join( ... "tmp_result_images", ... f"test_check_figures_unequal{suffix}.png", ... ) ... ) ... >>> shutil.rmtree(path="tmp_result_images") # cleanup folder if tests pass """ # pylint: disable=invalid-name ALLOWED_CHARS = set(string.digits + string.ascii_letters + "_-[]()") KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY def decorator(func): import pytest os.makedirs(result_dir, exist_ok=True) old_sig = inspect.signature(func) @pytest.mark.parametrize("ext", extensions) def wrapper(*args, ext="png", request=None, **kwargs): if "ext" in old_sig.parameters: kwargs["ext"] = ext if "request" in old_sig.parameters: kwargs["request"] = request try: file_name = "".join(c for c in request.node.name if c in ALLOWED_CHARS) except AttributeError: # 'NoneType' object has no attribute 'node' file_name = func.__name__ try: fig_ref, fig_test = func(*args, **kwargs) ref_image_path = os.path.join(result_dir, f"{file_name}-expected.{ext}") test_image_path = os.path.join(result_dir, f"{file_name}.{ext}") fig_ref.savefig(ref_image_path) fig_test.savefig(test_image_path) # Code below is adapted for PyGMT, and is originally based on # matplotlib.testing.decorators._raise_on_image_difference err = compare_images( expected=ref_image_path, actual=test_image_path, tol=tol, in_decorator=True, ) if err is None: # Images are the same os.remove(ref_image_path) os.remove(test_image_path) else: # Images are not the same for key in ["actual", "expected", "diff"]: err[key] = os.path.relpath(err[key]) raise GMTImageComparisonFailure( f"images not close (RMS {err['rms']:.3f}):\n" f"\t{err['actual']}\n" f"\t{err['expected']}" ) finally: del fig_ref del fig_test parameters = [ param for param in old_sig.parameters.values() if param.name not in {"fig_test", "fig_ref"} ] if "ext" not in old_sig.parameters: parameters += [inspect.Parameter("ext", KEYWORD_ONLY)] if "request" not in old_sig.parameters: parameters += 
[inspect.Parameter("request", KEYWORD_ONLY)] new_sig = old_sig.replace(parameters=parameters) wrapper.__signature__ = new_sig # reach a bit into pytest internals to hoist the marks from # our wrapped function new_marks = getattr(func, "pytestmark", []) + wrapper.pytestmark wrapper.pytestmark = new_marks return wrapper return decorator def download_test_data(): """ Convenience function to download remote data files used in PyGMT tests and docs. """ # List of datasets to download datasets = [ # Earth relief grids "@earth_relief_01d_p", "@earth_relief_01d_g", "@earth_relief_30m_p", "@earth_relief_30m_g", "@earth_relief_10m_p", "@earth_relief_05m_p", "@earth_relief_05m_g", # List of tiles of 03s srtm data. # Names like @N35E135.earth_relief_03s_g.nc is for internal use only. # The naming scheme may change. DO NOT USE IT IN YOUR SCRIPTS. "@N30W120.earth_relief_15s_p.nc", "@N35E135.earth_relief_03s_g.nc", "@N37W120.earth_relief_03s_g.nc", "@N00W090.earth_relief_03m_p.nc", # Earth seafloor age grids "@earth_age_01d_g", "S90W180.earth_age_05m_g.nc" # Specific grid for 05m test # Other cache files "@EGM96_to_36.txt", "@fractures_06.txt", "@hotspots.txt", "@ridge.txt", "@mars370d.txt", "@srtm_tiles.nc", # needed for 03s and 01s relief data "@Table_5_11.txt", "@test.dat.nc", "@tut_bathy.nc", "@tut_quakes.ngdc", "@tut_ship.xyz", "@usgs_quakes_22.txt", ] which(fname=datasets, download="a")
StarcoderdataPython
3263033
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Create separate, binarised nii files for each tissue type in fs segm."""
import os

import numpy as np
from nibabel import load, save, Nifti1Image

# set parent path
parent_path = os.path.join(str(os.environ['parent_path']), 'data',
                           'segmentation_data', 'data_mprage')

# set programme switch
switch = 'fs'

# list all subject names
app = [
    'sub-02',
    'sub-03',
    'sub-05',
    'sub-06',
    'sub-07'
    ]

# set segmentation labels
tissue = [
    'WM',
    'GM',
    # 'CSF',
    # 'vessel',
    'ventricle',
    'subcortex',
    # 'sinus'
    ]

# integer label values corresponding to each segmentation label above
label = [
    [2, 41, 250, 251, 252, 253, 254, 255],
    [3, 42],
    # [24, 122, 257, 701],
    # [30, 62],
    [4, 43, 72],
    [6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 24, 26, 27, 28,
     45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 58, 59, 60, 15, 192],
    # [262]
    ]

for subj in app:
    # deduce name for segmentation result file
    segm = os.path.join(parent_path, 'derivatives', subj, 'segmentations',
                        switch, subj + '_T1w_seg.nii.gz')

    # Load the segmentation once per subject; the original reloaded the same
    # file from disk for every tissue type.
    nii = load(segm)
    aryData = nii.get_data()

    # loop through different tissue types
    for lab, tis in zip(label, tissue):
        # determine output image
        output = os.path.join(parent_path, 'derivatives', subj,
                              'segmentations', switch,
                              subj + '_T1w_' + tis + '.nii.gz')

        # Voxels where the tissue type is True (any of its labels match)
        aryLab = np.any(
            np.stack([aryData == ind_lab for ind_lab in lab]), axis=0)

        # Convert boolean mask back to the input data type
        aryOut = aryLab.astype(nii.get_data_dtype())

        # save as nifti
        out = Nifti1Image(aryOut, header=nii.header, affine=nii.affine)
        save(out, output)
        print('Saved:')
        print(output)
StarcoderdataPython
69092
"""
This module contains specifics based on the current documentation, to make
calls and consume the data released by the BEA.

Although the BEA (at the time of this writing) supports both XML and JSON
responses; this module does not have XML response support at this time, only
JSON and Python types.

--------------------
API Calling Limits
--------------------
The API has default calling limits as shown below. These limits are meant to
protect BEA's API and webserver infrastructure from activity that may be
detrimental to that infrastructure and/or unfairly impede other API users.

• 1000 API calls per minute, and/or
• 30 errors per minute, and/or
• 50 MB (raw data) per minute.

Any user that exceeds the above calling limits will receive an explanatory
error message for each API call until the per-minute cause has expired. The
best way to avoid such errors is to design your application to call the API
within these limits, e.g., programmatically regulate the frequency/size of
API calls.
"""
import requests
import collections
from pprint import pprint


class BaseHandler(object):
    """
    This is the super class handler with attributes common to all subclassed
    handlers for BEA requests.
    """

    def __init__(self, user_key, result_format='JSON'):
        """
        :param user_key: registered BEA API user key
        :param result_format: response format; only 'JSON' is supported
        :raises ValueError: if result_format is not 'JSON'
        :raises requests.HTTPError: if the API base URL is not reachable
        """
        # The current base URL for the BEA's API
        self.base_url = 'https://www.bea.gov/api/data/'
        self.user_key = user_key

        # The current node hierarchies to be used to 'unpack'
        # responses to obtain the target data
        self.request_node_hierarchy = collections.OrderedDict(
            {
                'root_node': 'BEAAPI',
                'request_node': 'Request',
                'target_node': 'RequestParam',
            }
        )
        self.results_node_hierarchy = collections.OrderedDict(
            {
                'root_node': 'BEAAPI',
                'results_node': 'Results',
                'target_node': None
            }
        )

        # Only JSON result format, i.e. no XML support
        if result_format != 'JSON':
            raise ValueError("Only 'JSON' responses are supported at this time")
        else:
            self.result_format = result_format

        # Check base_url response to see if API service is available
        response = requests.get(self.base_url)
        if not response.ok:
            raise requests.HTTPError

    # Helper methods
    def _get_and_process_response(self, url, target_node, echo_request=False):
        """
        Takes the request URL and a target node, delegating to a few other
        helper methods to unpack and process the response.

        :param url: The target request URL
        :type url: str
        :param target_node: The response node containing the data
        :type target_node: str
        :param echo_request: Whether to echo the request (params, etc.)
            in the response.
        :type echo_request: bool
        :return: The targeted data results as JSON
        :rtype: JSON
        """
        response = requests.get(url)
        if response.ok:
            # Decode JSON response to Python type(s)
            response = response.json()
            # Unpack results
            target_results = self._unpack_results(response, target_node)
            if echo_request:
                # Unpack the request echo in the response,
                # return the echo and results as a tuple
                echo_request_params = self._unpack_request(response)
                return (echo_request_params, target_results)
            else:
                return target_results
        else:
            raise requests.HTTPError

    def _unpack_request(self, response):
        """
        Caller to unpack the request node hierarchy, i.e. NOT the results
        node hierarchy. Delegates to other helper methods.

        :param response: The decoded JSON response
        :return: the echoed request parameters from the response
        """
        node_hierarchy = self.request_node_hierarchy
        return self._traverse_nodes(response, node_hierarchy)

    def _unpack_results(self, response, target_node):
        """
        Unpack the results node hierarchy down to *target_node*.

        :param response: The decoded JSON response
        :param target_node: name of the node holding the target data
        :return: the data found under the target node
        """
        node_hierarchy = self.results_node_hierarchy
        # Target node for results are not static, add target node
        node_hierarchy['target_node'] = target_node
        return self._traverse_nodes(response, node_hierarchy)

    def _traverse_nodes(self, response, node_hierarchy):
        """
        Walk *response* down the given node hierarchy, returning the
        innermost node.

        :param response: The decoded JSON response
        :param node_hierarchy: OrderedDict of node names, outermost first
        :return: the innermost node, or None when a key is missing
        """
        try:
            for k, v in node_hierarchy.items():
                response = response[v]
            return response
        except KeyError as e:
            print("The key: {} does not exist in the response"
                  .format(e))
            pprint(response)


class MetadataHandler(BaseHandler):
    """Handler for the BEA metadata API methods."""

    def __init__(self, user_key, result_format='JSON'):
        """
        :param user_key: registered BEA API user key
        :param result_format: response format; only 'JSON' is supported
        """
        super().__init__(user_key, result_format)

    # BEA metadata equivalent request methods
    def get_dataset_list(self, target_node='Dataset', echo_request=False):
        """
        Retrieves the BEA datasets list.

        :param target_node: The dataset target node
        :type target_node: str
        :param echo_request: Whether to echo the request and params in the
            response
        :type echo_request: bool
        :return: The targeted data results as JSON
        :rtype: JSON
        """
        url = (
            '{}?&'
            'UserID={}&'
            'method=GetDatasetList&'
            'ResultFormat={}&'.format(
                self.base_url,
                self.user_key,
                self.result_format
            )
        )
        return super()._get_and_process_response(
            url, target_node, echo_request
        )

    def get_param_list(self, dataset_name, target_node='Parameter',
                       echo_request=False):
        """
        Retrieves the list of parameters accepted by a dataset.

        :param dataset_name: name of the BEA dataset
        :param target_node: The parameter target node
        :param echo_request: Whether to echo the request in the response
        :return: The targeted data results as JSON
        """
        url = (
            '{}?&'
            'UserID={}&'
            'method=GetParameterList&'
            'datasetname={}&'
            'ResultFormat={}&'.format(
                self.base_url,
                self.user_key,
                dataset_name,
                self.result_format
            )
        )
        return super()._get_and_process_response(
            url, target_node, echo_request
        )

    def get_param_values(self, dataset_name, param_name,
                         target_node='ParamValue', echo_request=False):
        """
        Retrieves the accepted values for a dataset parameter.

        :param dataset_name: name of the BEA dataset
        :param param_name: name of the parameter to look up
        :param target_node: The parameter-value target node
        :param echo_request: Whether to echo the request in the response
        :return: The targeted data results as JSON
        """
        url = (
            '{}?&'
            'UserID={}&'
            'method=GetParameterValues&'
            'datasetname={}&'
            'ParameterName={}&'
            'ResultFormat={}&'.format(
                self.base_url,
                self.user_key,
                dataset_name,
                param_name,
                self.result_format
            )
        )
        return super()._get_and_process_response(
            url, target_node, echo_request
        )

    def get_param_values_filtered(self, dataset_name, target_param,
                                  table_name, target_node='ParamValue',
                                  echo_request=False):
        """
        Retrieves the accepted parameter values filtered by table name.

        :param dataset_name: name of the BEA dataset
        :param target_param: parameter whose values are requested
        :param table_name: table used to filter the values
        :param target_node: The parameter-value target node
        :param echo_request: Whether to echo the request in the response
        :return: The targeted data results as JSON
        """
        url = (
            '{}?&'
            'UserID={}&'
            'method=GetParameterValuesFiltered&'
            'datasetname={}&'
            'TargetParameter={}&'
            # BUGFIX: the original omitted the '&' after TableName={}, which
            # fused 'ResultFormat=...' into the TableName query value.
            'TableName={}&'
            'ResultFormat={}&'.format(
                self.base_url,
                self.user_key,
                dataset_name,
                target_param,
                table_name,
                self.result_format
            )
        )
        return super()._get_and_process_response(
            url, target_node, echo_request
        )

    def create_metadata_dict(self, dataset_name):
        """
        Build a dict describing a dataset: its parameters, each parameter's
        details, and each parameter's accepted values.

        :param dataset_name: name of the BEA dataset
        :return: metadata dictionary for the dataset
        """
        metadata_dict = {}
        # Add dataset name to dict and create a params key with an empty dict
        metadata_dict['dataset_name'] = dataset_name
        metadata_dict['parameters'] = {}

        # Get param list for dataset, then get possible
        # param values for each param and add to dict
        param_list = self.get_param_list(dataset_name)
        for d in param_list:
            param_name = d['ParameterName']
            metadata_dict['parameters'][param_name] = {}
            metadata_dict['parameters'][param_name]['param_details'] = d
            param_values = self.get_param_values(dataset_name, param_name)
            metadata_dict['parameters'][param_name]['args'] = param_values
        return metadata_dict


class DataHandler(BaseHandler):
    """Handler for the BEA data-retrieval API methods."""

    def __init__(self, user_key, result_format='JSON'):
        """
        :param user_key: registered BEA API user key
        :param result_format: response format; only 'JSON' is supported
        """
        super().__init__(user_key, result_format)

    def get_data(self, dataset_name, **params):
        """
        Retrieves data for the given dataset.

        :param dataset_name: name of the BEA dataset, e.g. 'RegionalIncome'
        :param params: dataset-specific request parameters (TableName,
            LineCode, Year, ...)
        :return: The targeted data results as JSON
        """
        url = (
            '{}?&'
            'UserID={}&'
            'method=GetData&'
            'datasetname={}&'
            'ResultFormat={}&'.format(
                self.base_url,
                self.user_key,
                dataset_name,
                self.result_format
            )
        )
        # Append the caller-supplied dataset-specific parameters.
        url += ''.join('{}={}&'.format(k, v) for k, v in params.items())
        # BUGFIX: the original built the URL but never issued the request,
        # implicitly returning None. GetData responses carry their payload
        # under the 'Data' node.
        return super()._get_and_process_response(url, 'Data')


if __name__ == '__main__':
    BEA_API_USER_KEY = ''
    handler = MetadataHandler(BEA_API_USER_KEY)
    my_dict = handler.create_metadata_dict('RegionalIncome')
    pprint(my_dict['parameters']['TableName'])
StarcoderdataPython
1691471
import json

from workflow import PasswordNotFound


class AuthKeys(dict):
    """Dict of 2FA secrets persisted in the keychain via Alfred-Workflow.

    All keys/values are stored as a single JSON blob under the
    ``keychain_account`` keychain entry.
    """

    # Keychain account name under which the JSON blob is stored.
    keychain_account = '2fa_keys'

    def __init__(self, workflow, **kwargs):
        """Initialise the dict and load any previously saved keys.

        :param workflow: Alfred-Workflow ``Workflow`` instance providing
            ``get_password``/``save_password`` and a ``logger``
        :param kwargs: initial key/value pairs, as for ``dict``
        """
        super(AuthKeys, self).__init__(**kwargs)
        self._wf = workflow
        self._load()

    def _load(self):
        """Populate self from the keychain; missing data is not an error."""
        try:
            json_data = self._wf.get_password(self.keychain_account)
        except PasswordNotFound:
            self._wf.logger.info('No data found in keychain')
            return
        # BUGFIX: the original used dict.iteritems(), which is Python-2 only
        # and raises AttributeError on Python 3; update() works on both.
        self.update(json.loads(json_data))

    def save(self):
        """Serialise the current contents back into the keychain."""
        json_data = json.dumps(self)
        self._wf.save_password(self.keychain_account, json_data)
StarcoderdataPython
4805156
# ! Challenge 29 ("Desafio 29"): read the car's current speed; any speed
# ! above 80 km/h earns a fine of R$7.00 for each km/h over the limit.
speed = float(input('Qual a velocidade atual do seu carro? '))
if speed > 80:
    fine = (speed - 80) * 7
    print('Você sera multado por excesso de velocidade, pague agora o valor de R$ {:.2f}'.format(fine))
else:
    print('Muito bem, pode seguir viagem!')
StarcoderdataPython
1762613
import sys import time from scipy.special import binom as binom from timer import Timer from util import * import numpy as np class SATCounter: def __init__(self, sat_problem, verbose=True, use_regular=False): """ Each SATCounter solves a specific sat problem that must be specified at creation """ self.sat = sat_problem self.verbose = verbose self.binom = BigBinom(sat_problem.n) self.use_regular = use_regular def lower_bound(self, f, min_confidence=0.9, max_time=600, best_slack=0.1): """ Get a log lower bound for the SAT problem with specified constraint density and minimum confidence Set the best_slack if we are satisfied with a solution that is 1 / (1 + best_slack) smaller than best try, and we wish the algorithm returns early before max_time Returns a tuple of the best bound obtained, the best m to give us that bound, and the percentage of trials performed on the best m. If no results are obtained returns -1""" # Start a timer timer = Timer(max_time) # Define some shorthands ln = math.log sqrt = math.sqrt delta = 1 - min_confidence n = self.sat.n nan = float('NaN') big_two = BigFloat(2.0) big_zero = BigFloat(0.0) # Define required variables trial_count = [0] * n # The number of trials performed on m true_count = [0] * n # The number of trials m is satisfiable false_count = [0] * n # The number of trials m is unsatisfiable incentive = [0.0] * n # The incentive to perform trial on m in the next step trial_time = [0.0] * n # The amount of time spent on the investigation of a certain m total_run = 0 # The total number of of trials performed on all m while True: # Select the m with the strongest incentive to investigate if self.verbose: print("Finding the best m to explore ....."), start_time = time.time() max_exp = BigFloat(-1) max_exp_m = 0 # Compute the unnormalized incentive for all values of m for m in range(0, n): if trial_count[m] != 0: c = float(true_count[m]) / trial_count[m] # The expected # of trials we can perform if the current m is the bandit we will 
explore expected_trial = timer.time() / (trial_time[m] / trial_count[m]) / 2 + trial_count[m] if expected_trial * c > -ln(delta): # We have enough time to obtain some kind of bound kappa = -3 * ln(delta) + sqrt(ln(delta) ** 2 - 8 * c * expected_trial * ln(delta)) kappa /= 2 * (c * expected_trial + ln(delta)) if c != 0: incentive[m] = (big_two ** m) * c / (1 + kappa) else: incentive[m] = big_zero if incentive[m] < 0: incentive[m] = big_zero else: incentive[m] = big_zero # Find m with best outlook if incentive[m] > max_exp: max_exp = incentive[m] max_exp_m = m # Limit our range of exploration between [max_exp_m - 8, max_exp_m + 8) m_min = max_exp_m - 8 if m_min < 0: m_min = 0 m_max = max_exp_m + 8 if m_max > n: m_max = n # Normalize incentive to [0, 1] and add exploration term for m in range(m_min, m_max): if trial_count[m] == 0: incentive[m] = 100.0 # Very strong incentive if this has never been explored before else: if max_exp == 0: incentive[m] = 0.0 else: incentive[m] = float(incentive[m] / max_exp) incentive[m] += math.sqrt(2.0 * math.log(total_run) / trial_count[m]) # Find the m with best incentive as m_star m_star = int(np.argmax(incentive[m_min:m_max]) + m_min) incentive_star = np.max(incentive[m_min:m_max]) if self.verbose: print("Found m = %d with incentive %f. center = %d, time left %f" % (m_star, incentive_star, max_exp_m, timer.time())) # Perform a trial on that m if self.use_regular: self.sat.add_regular_constraints(m_star, f) else: self.sat.add_parity_constraints(m_star, f) (outcome, run_time) = self.sat.solve(max_time=timer.time()) if outcome is True: true_count[m_star] += 1 trial_count[m_star] += 1 elif outcome is False: false_count[m_star] += 1 trial_count[m_star] += 1 if self.verbose: status_msg = "%d/%d satisfiable @ m = %d" % (true_count[m_star], trial_count[m_star], m_star) if outcome is True: print("SAT......" + status_msg) elif outcome is False: print("UNSAT...." + status_msg) else: print("timeout.." 
+ status_msg) # Check if that m_star satisfies our termination criteria if trial_count[m_star] != 0: c = float(true_count[m_star]) / trial_count[m_star] confidence = 1 - math.exp(-best_slack * best_slack * c * trial_count[m_star] / ((1 + best_slack) * (2 + best_slack))) if confidence >= min_confidence: best_log_bound = m_star * ln(2) + ln(c) - ln(1 + best_slack) if self.verbose: print("Complete! best log bound is %f @ m_star = %d" % (best_log_bound, m_star)) print("%d/%d satisfiable, kappa = %f" % (true_count[m_star], trial_count[m_star], best_slack)) break end_time = time.time() trial_time[m_star] += end_time - start_time # If timer times out, find out the best bound we have if timer.timeout(): best_log_bound = nan m_star = -1 kappa_star = 0.0 for m in range(0, n): if trial_count[m] == 0: continue c = float(true_count[m]) / trial_count[m] T = trial_count[m] if c * T > -ln(delta): kappa = -3 * ln(delta) + sqrt(ln(delta) ** 2 - 8 * c * T * ln(delta)) kappa /= 2 * (c * T + ln(delta)) cur_bound = m * ln(2) + ln(c) - ln(1 + kappa) if math.isnan(best_log_bound) or cur_bound > best_log_bound: best_log_bound = cur_bound m_star = m kappa_star = kappa if self.verbose: print("Time up! 
best log bound is %f @ m_star = %d" % (best_log_bound, m_star)) print("%d/%d satisfiable, kappa = %f" % (true_count[m_star], trial_count[m_star], kappa_star)) break total_run += 1 if self.verbose: print("Time usage: %fs, efficiency: %f" % (max_time - timer.time(), float(trial_count[m_star]) / total_run)) return best_log_bound, m_star, max_time - timer.time(), float(trial_count[m_star]) / total_run def lower_bound_enumerate(self, f, min_confidence=0.9, max_time=600, min_m=0, max_m=-1): """ Get a lower bound for the SAT problem with specified constraint density and minimum confidence This does a brute force search on all possible m, each possible m is investigated until max_time Returns a tuple of the log of best bound obtained, the best m to give us that bound""" # Define some shorthands ln = math.log sqrt = math.sqrt delta = 1 - min_confidence n = self.sat.n nan = float('NaN') if max_m < 0: max_m = n log_bound_list = [] for i in range(0, n): log_bound_list.append(nan) for m in range(min_m, max_m): # Start a timer timer = Timer(max_time) # Define required variables true_count = 0 # The number of trials m is satisfiable false_count = 0 # The number of trials m is unsatisfiable if self.verbose: print("Performing trials on m = " + str(m)) while True: # Perform a trial on that m self.sat.add_parity_constraints(m, f) outcome = self.sat.solve(timer.time()) if outcome is True: true_count += 1 elif outcome is False: false_count += 1 if self.verbose: status_msg = "%d/%d satisfiable @ m = %d" % (true_count, true_count + false_count, m) if outcome is True: print("SAT......" + status_msg) elif outcome is False: print("UNSAT...." + status_msg) else: print("timeout.." 
+ status_msg) # If timer times out, find out the best bound we have if timer.timeout(): if true_count + false_count == 0: log_bound_list[m] = nan if self.verbose: print("m = %d failed, not a single instance can be evaluated" % m) break T = float(true_count + false_count) c = float(true_count) / T if c * T > -ln(delta): kappa = -3 * ln(delta) + sqrt(ln(delta) ** 2 - 8 * c * T * ln(delta)) kappa = kappa / 2 / (c * T + ln(delta)) log_bound_list[m] = m * ln(2) + ln(c) - ln(1 + kappa) if self.verbose: print("m = %d complete, obtained log bound = %f" % (m, log_bound_list[m])) print("%d/%d satisfiable, kappa = %f" % (true_count, true_count + false_count, kappa)) else: log_bound_list[m] = nan if self.verbose: print("m = %d failed, too few instance evaluated" % m) break # Find the best bound obtained m_star = -1 best_bound = nan for m in range(min_m, max_m): if math.isnan(best_bound) or (not math.isnan(log_bound_list[m]) and log_bound_list[m] > best_bound): best_bound = log_bound_list[m] m_star = m if self.verbose: print("Complete! best log bound is " + str(best_bound) + " @ " + str(m_star)) return best_bound, m_star def upper_bound(self, f, min_confidence=0.9, bold=False, max_time=600): """ Get a upper bound for the SAT problem with specified constraint density and minimum confidence Returns a tuple of the best bound obtained, the best m to give us that bound, time usage, percentage of computational resources used on optimal m. 
If no results are obtained returns -1""" timer = Timer(max_time) start_time = time.time() # Define some notation shorthands ln = math.log delta = 1 - min_confidence n = self.sat.n T = int(math.ceil(24 * ln(1 / delta))) if self.verbose: print("Requires %d samples to verify" % T) # If in bold mode, estimate the place where the problem becomes UNSAT and limit our search in that region max_m_global = n if bold: if self.verbose: print("Searching for the maximum reasonable m") for m in range(0, n, 5): self.sat.add_parity_constraints(m, f) outcome = self.sat.solve(timer.time()) if self.verbose: print("[m=%d, %s]" % (m, str(outcome))), if outcome is not True: print("") if outcome is None: return None if outcome is False: max_m_global = m + 5 break if timer.timeout(): print("Timeout!") return float('nan'), -1, max_time, 0 if self.verbose: print("Starting the search maximum m of %d " % max_m_global) print("Computing expected bound if trial successful") # Upper bound that would be obtained if half the bins are empty upper_bound_list = [] for m in range(0, max_m_global + 1): expected_bound = self.upper_bound_expected(m, f) upper_bound_list.append(expected_bound) if self.verbose: print("[%d:%.2f]" % (m, float(bf.log(expected_bound)))), if m == max_m_global: print("") # Define required variables trial_count = [0] * n # The number of trials performed on m true_count = [0] * n # The number of trials m is satisfiable false_count = [0] * n # The number of trials m is unsatisfiable incentive = [BigFloat(0)] * n # The incentive to perform trial on m in the next step posterior_success_prob = [0.0] * n total_run = 0 while True: if self.verbose: print("Finding the best m to explore ....."), # Select the m with the strongest incentive to investigate max_exp = BigFloat(-1) max_exp_m = 0 # Compute the un-normalized incentive for all values of m for m in range(max_m_global - 1, -1, -1): if true_count[m] < T / 2 and trial_count[m] != 0: incentive[m] = posterior_success_prob[m] / 
upper_bound_list[m] / m else: incentive[m] = BigFloat(0) if incentive[m] > max_exp: max_exp = incentive[m] max_exp_m = m # Limit our range of exploration between [max_exp_m - 5, max_exp_m + 5) m_min = max_exp_m - 5 if m_min < 0: m_min = 0 m_max = max_exp_m + 5 if m_max > max_m_global: m_max = max_m_global # Normalize incentive between [0, 1] and add exploration term for m in range(m_min, m_max): if trial_count[m] == 0: incentive[m] = BigFloat(100) # Very strong incentive if this has never been explored before else: if max_exp == BigFloat(0): incentive[m] = BigFloat(0) else: incentive[m] = incentive[m] / max_exp incentive[m] += math.sqrt(2.0 * math.log(total_run) / trial_count[m]) # Find the m_star that maximizes incentive incentive_star = BigFloat(-1) m_star = -1 for m in range(m_min, m_max): if incentive[m] > incentive_star: incentive_star = incentive[m] m_star = m if self.verbose: print("Found m = %d with incentive %f. center = %d" % (m_star, incentive_star, max_exp_m)) # Perform a trial on that m self.sat.add_parity_constraints(m_star, f) outcome = self.sat.solve(timer.time()) if timer.timeout(): print("Timeout!") return float('nan'), -1, max_time, 0 if outcome is True: true_count[m_star] += 1 trial_count[m_star] += 1 elif outcome is False: false_count[m_star] += 1 trial_count[m_star] += 1 if self.verbose: status_msg = "%d/%d satisfiable @ m = %d" % (true_count[m_star], trial_count[m_star], m_star) if outcome is True: print("SAT......" + status_msg) elif outcome is False: print("UNSAT...." + status_msg) else: print("timeout.." + status_msg) # Update posterior success probability if trial_count[m_star] > 0: posterior_success_prob[m_star] = self.posterior_success_prob(T, trial_count[m_star], true_count[m_star]) # Check if that m_star satisfies our termination criteria if trial_count[m_star] >= T and true_count[m_star] < false_count[m_star]: best_bound = upper_bound_list[m_star] if self.verbose: print("Complete! 
best log bound is %f @ m_star = %d" % (float(bf.log(best_bound)), m_star)) break total_run += 1 end_time = time.time() if self.verbose: print("Time usage: %fs, efficiency: %f" % (end_time - start_time, float(T) / total_run)) return float(bf.log(best_bound)), m_star, end_time - start_time, float(T) / total_run def upper_bound_enumerate(self, f, min_confidence=0.9, min_m=0, max_m=-1, max_time=600): """ Get a log upper bound for the SAT problem with specified constraint density and minimum confidence This does a brute force search on all possible m, each possible m is investigated for adequate number of times Returns the best bound, m to give us that bound, and time consumed on that particular m """ # Define some notation shorthands delta = 1 - min_confidence n = self.sat.n if max_m < 0: max_m = n T = int(round(24 * math.log(1 / delta))) if self.verbose: print("Requires %d samples to verify" % T) # Iterate from small m to large, smaller m gives better bounds for m in range(min_m, max_m): timer = Timer(max_time) time_out = False print("Inspecting m = %d" % m) start_time = time.time() true_count = 0 # The number of trials m is satisfiable false_count = 0 # The number of trials m is unsatisfiable for trial in range(0, T): self.sat.parityConstraints(m, f) outcome = self.sat.solve(timer.time()) if outcome is True: true_count += 1 print("T"), elif outcome is False: false_count += 1 print("F"), else: print("E"), time_out = True break if timer.timeout(): time_out = True break end_time = time.time() print("\n %d out of %d evaluated to be satisfiable" % (true_count, T)) if true_count < false_count and not time_out: actual_bound = self.upper_bound_expected(m, f) print("Solved @ m = %d with log upper bound %f" % (m, float(bf.log(actual_bound)))) return float(bf.log(actual_bound)), m, end_time - start_time return -1, -1, -1 @staticmethod def posterior_success_prob(T, k, l): """ If out of the first k samples, l are one, this computes the MLE of the probability that less than half 
of the T samples shall be 1 (Generally T should not be greater than 100 to guarantee numerical stability """ # MLE for probability a sample is 1 p = float(l) / k sum = 0.0 for i in range(0, int(T / 2 - l)): sum += binom(T - k, i) * (p ** i) * ((1 - p) ** (T - k - i)) return sum def upper_bound_expected(self, m, f, tolerance=0.001): """ Obtain the expected upper bound for set size given m, and f, accurate to given tolerance """ # Shorthand definition two_to_m = BigFloat(2.0) ** m # Use binary search to find the minimum q so that z > 3/4 q_min = BigFloat(1.0) q_max = BigFloat(2.0) ** self.sat.n for iteration in range(0, self.sat.n + 10): q_mid = bf.sqrt(q_min * q_max) # Search by geometric mean v = q_mid / two_to_m * (1 + self.compute_eps_q(m, q_mid, f) - q_mid / two_to_m) z = 1 - v / (v + (q_mid / two_to_m) ** 2) if z > 3.0 / 4: q_max = q_mid else: q_min = q_mid # If difference between q_min and q_max is less than tolerance, stop the search if q_max < q_min * (1 + tolerance): break return bf.sqrt(q_max * q_min) def compute_eps_q(self, m, q, f): """ This function computes epsilon(n, m, q, f) * (q - 1), and is optimized for multiple queries """ # Use binary search to find maximum w_star so that sum_{w = 1}^{w_star} C(n, w) <= q - 1 # The possible w_star lies in [w_min, w_max] w_min = 0 w_max = self.sat.n while w_min != w_max: w = int(math.ceil(float(w_min + w_max) / 2)) # If w_min + 1 = w_max, assign w to w_max if self.binom.binom_sum(w) < q: w_min = w elif self.binom.binom_sum(w) == q: w_min = w break else: w_max = w - 1 w_star = w_min r = q - 1 - self.binom.binom_sum(w_star) # Compute eps * (q - 1) epsq = r * (0.5 + 0.5 * (BigFloat(1.0 - 2 * f) ** (w_star + 1))) ** m for w in range(1, w_star + 1): epsq += self.binom.binom(w) * (0.5 + 0.5 * (BigFloat(1.0 - 2 * f) ** w)) ** m return epsq @staticmethod def compute_eps_q_static(n, m, q, f, binom=None): """ This function computes epsilon(n, m, q, f) * (q - 1), and is not optimized for multiple queries """ # Use binary 
search to find maximum w_star so that sum_{w = 1}^{w_star} C(n, w) <= q - 1 # The possible w_star lies in [w_min, w_max] w_min = 0 w_max = n if binom is None: binom = BigBinom(n) while w_min != w_max: w = int(math.ceil(float(w_min + w_max) / 2)) # If w_min + 1 = w_max, assign w to w_max if binom.binom_sum(w) < q: w_min = w elif binom.binom_sum(w) == q: w_min = w break else: w_max = w - 1 w_star = w_min r = q - 1 - binom.binom_sum(w_star) # Compute eps * (q - 1) epsq = r * (0.5 + 0.5 * (BigFloat(1.0 - 2 * f) ** (w_star + 1))) ** m for w in range(1, w_star + 1): epsq += binom.binom(w) * (0.5 + 0.5 * (BigFloat(1.0 - 2 * f) ** w)) ** m return epsq def compute_f_star(self, m): """Compute the minimum f to guarantee a constant factor approximation""" q = BigFloat(2.0) ** (m + 2) threshold = 31 / 5 # Find f_star by binary search f_left = 0.0 f_right = 0.5 for iteration in range(0, 10): f_mid = (f_left + f_right) / 2 # Compute eps from f eps = self.compute_eps_q(m, q, f_mid) if eps < threshold: f_right = f_mid else: f_left = f_mid return (f_left + f_right) / 2 @staticmethod def compute_f_star_static(n, m): """Compute the minimum f to guarantee a constant factor approximation""" q = BigFloat(2.0) ** (m + 2) threshold = 8 binom = BigBinom(n) # Find f_star by binary search f_left = 0.0 f_right = 0.5 for iteration in range(0, 10): f_mid = (f_left + f_right) / 2 # Compute eps from f eps = SATCounter.compute_eps_q_static(n, m, q, f_mid, binom) if eps < threshold: f_right = f_mid else: f_left = f_mid return (f_left + f_right) / 2
StarcoderdataPython
3358417
from itertools import combinations


def find_combinations(values, size=4, target=None):
    """Return the set of ``size``-element combinations of ``values`` whose
    integer sum equals ``target``.

    :param values: sequence of numeric strings (as read from comma-separated input).
    :param size: number of elements per combination (the original script fixed this at 4).
    :param target: required integer sum; defaults to the sum of all ``values``.
    :return: set of tuples of the original string elements.
    """
    if target is None:
        target = sum(map(int, values))
    # Only size-`size` combinations can ever pass the len() filter the original
    # script applied, so generate just those instead of every size from 2..n.
    # The set also dedupes identical tuples arising from duplicate input values.
    return {combo for combo in combinations(values, size)
            if sum(map(int, combo)) == target}


# Guard the interactive part so importing this module has no side effects.
if __name__ == '__main__':
    elements = input().split(',')
    print(find_combinations(elements))
StarcoderdataPython
1785610
#!/usr/bin/python # -*- coding: UTF-8 -*- # pylint: disable=C0111 import unittest import platform from pyxmpp2.etree import ElementTree import pyxmpp2.version from pyxmpp2.iq import Iq from pyxmpp2.jid import JID from pyxmpp2.stanzaprocessor import StanzaProcessor from pyxmpp2.settings import XMPPSettings from pyxmpp2.stanzapayload import XMLPayload from pyxmpp2.ext.version import VersionPayload, VersionProvider from pyxmpp2.ext.version import request_software_version IQ1 = '''<iq type="get" id="1" xmlns="jabber:client"> <query xmlns="jabber:iq:version"/> </iq>''' IQ2 = '''<iq type="response" id="1" xmlns="jabber:client"> <query xmlns="jabber:iq:version"> <name>NAME</name> <version>VERSION</version> <os>OS</os> </query> </iq>''' class TestVersionPayload(unittest.TestCase): def test_parse_empty(self): element = ElementTree.XML(IQ1) payload = VersionPayload.from_xml(element[0]) self.assertIsNone(payload.name) self.assertIsNone(payload.version) self.assertIsNone(payload.os_name) def test_parse_full(self): element = ElementTree.XML(IQ2) payload = VersionPayload.from_xml(element[0]) self.assertEqual(payload.name, 'NAME') self.assertEqual(payload.version, 'VERSION') self.assertEqual(payload.os_name, 'OS') def test_build_empty(self): payload = VersionPayload() self.assertIsNone(payload.name) self.assertIsNone(payload.version) self.assertIsNone(payload.os_name) element = payload.as_xml() self.assertEqual(element.tag, "{jabber:iq:version}query") self.assertEqual(len(element), 0) class Processor(StanzaProcessor): def __init__(self, handlers): StanzaProcessor.__init__(self) self.setup_stanza_handlers(handlers, "post-auth") self.stanzas_sent = [] def send(self, stanza): self.stanzas_sent.append(stanza) class TestVersionProvider(unittest.TestCase): def test_defaults(self): provider = VersionProvider() processor = Processor([provider]) stanza = Iq(ElementTree.XML(IQ1)) processor.uplink_receive(stanza) self.assertEqual(len(processor.stanzas_sent), 1) response = 
processor.stanzas_sent[0] self.assertIsInstance(response, Iq) self.assertEqual(response.stanza_type, "result") payload = response.get_payload(VersionPayload) self.assertIsInstance(payload, VersionPayload) self.assertEqual(payload.name, "PyXMPP2") self.assertEqual(payload.version, pyxmpp2.version.version) expected = u" ".join((platform.system(), platform.release(), platform.machine())) self.assertEqual(payload.os_name, expected) def test_custom(self): settings = XMPPSettings({ "software_name": "NAME", "software_version": "VERSION", "software_os": "OS", }) provider = VersionProvider(settings) processor = Processor([provider]) stanza = Iq(ElementTree.XML(IQ1)) processor.uplink_receive(stanza) self.assertEqual(len(processor.stanzas_sent), 1) response = processor.stanzas_sent[0] self.assertIsInstance(response, Iq) self.assertEqual(response.stanza_type, "result") payload = response.get_payload(VersionPayload) self.assertIsInstance(payload, VersionPayload) self.assertEqual(payload.name, "NAME") self.assertEqual(payload.version, "VERSION") self.assertEqual(payload.os_name, "OS") def test_bad_request(self): provider = VersionProvider() processor = Processor([provider]) stanza = Iq(ElementTree.XML(IQ2)) stanza.stanza_type = 'set' processor.uplink_receive(stanza) self.assertEqual(len(processor.stanzas_sent), 1) response = processor.stanzas_sent[0] self.assertIsInstance(response, Iq) self.assertEqual(response.stanza_type, "error") self.assertEqual(response.error.condition.tag, "{urn:ietf:params:xml:ns:xmpp-stanzas}service-unavailable") class TestVersionRequest(unittest.TestCase): def test_request(self): payload_received = [] errors_received = [] def callback(payload): payload_received.append(payload) def error_callback(stanza): errors_received.append(stanza) processor = Processor([]) request_software_version(processor, JID("<EMAIL>"), callback, error_callback) self.assertEqual(len(processor.stanzas_sent), 1) request = processor.stanzas_sent[0] self.assertIsInstance(request, 
Iq) self.assertEqual(request.stanza_type, "get") payload = request.get_payload(VersionPayload) self.assertIsNone(payload.name) self.assertIsNone(payload.version) self.assertIsNone(payload.os_name) response = request.make_result_response() payload = XMLPayload(ElementTree.XML(IQ2)[0]) response.set_payload(payload) processor.uplink_receive(response) self.assertEqual(len(processor.stanzas_sent), 1) self.assertEqual(len(payload_received), 1) self.assertEqual(len(errors_received), 0) payload = payload_received[0] self.assertEqual(payload.name, "NAME") self.assertEqual(payload.version, "VERSION") self.assertEqual(payload.os_name, "OS") def test_request_error(self): payload_received = [] errors_received = [] def callback(payload): payload_received.append(payload) def error_callback(stanza): errors_received.append(stanza) processor = Processor([]) request_software_version(processor, JID("<EMAIL>"), callback, error_callback) self.assertEqual(len(processor.stanzas_sent), 1) request = processor.stanzas_sent[0] self.assertIsInstance(request, Iq) self.assertEqual(request.stanza_type, "get") payload = request.get_payload(VersionPayload) self.assertIsNone(payload.name) self.assertIsNone(payload.version) self.assertIsNone(payload.os_name) response = request.make_error_response(u'service-unavailable') processor.uplink_receive(response) self.assertEqual(len(processor.stanzas_sent), 1) self.assertEqual(len(payload_received), 0) self.assertEqual(len(errors_received), 1) received = errors_received[0] self.assertIsInstance(received, Iq) self.assertEqual(received.stanza_type, "error") # pylint: disable=W0611 from pyxmpp2.test._support import load_tests, setup_logging def setUpModule(): setup_logging() if __name__ == "__main__": unittest.main()
StarcoderdataPython
152798
# Generated by Django 3.1.8 on 2021-04-23 08:35 import django.contrib.postgres.fields from django.db import migrations, models import django.db.models.deletion import django.utils.timezone import model_utils.fields import uuid class Migration(migrations.Migration): replaces = [('flags', '0001_initial'), ('flags', '0002_auto_20200227_1130'), ('flags', '0003_flaggingrule'), ('flags', '0004_auto_20200326_1548'), ('flags', '0005_flaggingrule_is_for_verified_goods_only'), ('flags', '0006_auto_20200407_1437'), ('flags', '0007_flag_blocks_approval'), ('flags', '0008_auto_20201223_2340'), ('flags', '0009_auto_20201229_1454'), ('flags', '0010_flaggingrule_excluded_values'), ('flags', '0011_auto_20210128_1102'), ('flags', '0012_auto_20210309_1521')] initial = True dependencies = [ ('teams', '0001_initial'), ] operations = [ migrations.CreateModel( name='Flag', fields=[ ('created_at', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created_at')), ('updated_at', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='updated_at')), ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(default='Untitled Flag', max_length=25, unique=True)), ('level', models.CharField(choices=[('Case', 'Case'), ('Organisation', 'Organisation'), ('Good', 'Good'), ('Destination', 'Destination')], max_length=20)), ('status', models.CharField(choices=[('Active', 'Active'), ('Deactivated', 'Deactivated')], default='Active', max_length=20)), ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teams.team')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='FlaggingRule', fields=[ ('created_at', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created_at')), ('updated_at', 
model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='updated_at')), ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('level', models.CharField(choices=[('Case', 'Case'), ('Organisation', 'Organisation'), ('Good', 'Good'), ('Destination', 'Destination')], max_length=20)), ('status', models.CharField(choices=[('Active', 'Active'), ('Deactivated', 'Deactivated')], default='Active', max_length=20)), ('matching_value', models.CharField(max_length=100)), ('flag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flags.flag')), ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teams.team')), ], options={ 'abstract': False, 'ordering': ['team__name', '-created_at'], }, ), migrations.AddIndex( model_name='flaggingrule', index=models.Index(fields=['created_at'], name='flags_flagg_created_0d8cd4_idx'), ), migrations.AddField( model_name='flaggingrule', name='is_for_verified_goods_only', field=models.BooleanField(blank=True, null=True), ), migrations.AlterModelOptions( name='flag', options={'ordering': ['team']}, ), migrations.RemoveIndex( model_name='flaggingrule', name='flags_flagg_created_0d8cd4_idx', ), migrations.AddField( model_name='flag', name='colour', field=models.CharField(choices=[('default', 'Default'), ('red', 'Red'), ('yellow', 'Yellow'), ('green', 'Green'), ('blue', 'Blue'), ('purple', 'Purple'), ('orange', 'Orange'), ('brown', 'Brown'), ('turquoise', 'Turquoise'), ('pink', 'Pink')], default='default', max_length=20), ), migrations.AddField( model_name='flag', name='label', field=models.CharField(blank=True, max_length=15, null=True), ), migrations.AddField( model_name='flag', name='priority', field=models.PositiveSmallIntegerField(default=0), ), migrations.AddIndex( model_name='flaggingrule', index=models.Index(fields=['created_at'], name='flagging_ru_created_7400b0_idx'), ), migrations.AlterModelTable( name='flag', 
table='flag', ), migrations.AlterModelTable( name='flaggingrule', table='flagging_rule', ), migrations.AddField( model_name='flag', name='blocks_approval', field=models.BooleanField(default=False), ), migrations.AddField( model_name='flaggingrule', name='matching_groups', field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(default=''), default=list, size=None), ), migrations.AddField( model_name='flaggingrule', name='matching_values', field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(default=''), default=list, size=None), ), migrations.RemoveField( model_name='flaggingrule', name='matching_value', ), migrations.AlterField( model_name='flaggingrule', name='flag', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='flagging_rules', to='flags.flag'), ), migrations.AddField( model_name='flaggingrule', name='excluded_values', field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(default=''), default=list, size=None), ), migrations.AlterField( model_name='flag', name='level', field=models.CharField(choices=[('Case', 'Case'), ('Organisation', 'Organisation'), ('Good', 'Good'), ('Destination', 'Destination'), ('PartyOnApplication', 'PartyOnApplication')], max_length=20), ), migrations.AlterField( model_name='flaggingrule', name='level', field=models.CharField(choices=[('Case', 'Case'), ('Organisation', 'Organisation'), ('Good', 'Good'), ('Destination', 'Destination'), ('PartyOnApplication', 'PartyOnApplication')], max_length=20), ), migrations.AddField( model_name='flag', name='removable_by', field=models.CharField(choices=[('Anyone', 'Anyone'), ('Authorised countersigner', 'Authorised countersigner'), ('Head of Licensing Unit countersigner', 'Head of Licensing Unit countersigner')], default='Anyone', max_length=50), ), migrations.AlterField( model_name='flag', name='name', field=models.CharField(default='Untitled Flag', max_length=100, unique=True), ), ]
StarcoderdataPython
1605647
import math


class coord:
    """Relative geometry between a satellite and a ground station.

    Latitude/longitude inputs are in degrees, altitudes in metres, and
    ``gx``/``gy``/``gz`` are the ground antenna's attitude readings in
    degrees.  All angle results are degrees wrapped to [-180, 180);
    distances are metres.
    """

    # Mean Earth radius (metres), used by every distance formula below.
    _EARTH_RADIUS = 6371000

    def __init__(self, Satlatitude, Satlongitude, Glatitude, Glongtitude,
                 Sataltitude, Galtitude, gx, gy, gz):
        # Point 1 is the satellite, point 2 the ground target.
        self.lat1 = math.radians(float(Satlatitude))
        self.lat2 = math.radians(float(Glatitude))
        self.lon1 = math.radians(float(Satlongitude))
        self.lon2 = math.radians(float(Glongtitude))
        self.dlon = self.lon2 - self.lon1
        self.dlat = self.lat2 - self.lat1
        self.a1 = float(Sataltitude)
        self.a2 = float(Galtitude)
        self.alt = self.a2 - self.a1
        self.gx = float(gx)
        self.gy = float(gy)
        self.gz = float(gz)

    @staticmethod
    def _wrap180(angle):
        """Normalise *angle* (degrees) into the half-open range [-180, 180)."""
        angle = angle % 360
        return angle - 360 if angle >= 180 else angle

    def arc(self):
        """Central angle between the two points (haversine formula), radians."""
        hav = math.sin(self.dlat / 2) ** 2 + (
            math.cos(self.lat1) * math.cos(self.lat2)
            * math.sin(self.dlon / 2) ** 2)
        return 2 * math.atan2(math.sqrt(hav), math.sqrt(1 - hav))

    def dist(self):
        """Great-circle ground distance in metres."""
        return self._EARTH_RADIUS * self.arc()

    def lineofsight(self):
        """Slant range from the satellite to the ground target, metres.

        Uses the law of cosines on the chord of the satellite's orbital
        shell and the altitude difference between the two points.
        """
        theta = self.arc()
        chord = 2 * (self._EARTH_RADIUS + self.a1) * math.cos((math.pi - theta) / 2)
        rise = self.alt
        squared = (chord ** 2 + rise ** 2
                   - 2 * chord * rise * math.cos((math.pi + theta) / 2))
        return math.sqrt(math.fabs(squared))

    def azimuth(self):
        """Initial bearing from the satellite toward the target, degrees."""
        y = math.sin(self.dlon) * math.cos(self.lat2)
        x = (math.cos(self.lat1) * math.sin(self.lat2)
             - math.sin(self.lat1) * math.cos(self.lat2) * math.cos(self.dlon))
        return math.degrees(math.atan2(y, x))

    def heading(self):
        """Bearing corrected by the antenna yaw reading, in [-180, 180)."""
        return self._wrap180(self.azimuth() - self.gz)

    def elevation(self):
        """Elevation angle corrected by the antenna pitch, in [-180, 180)."""
        raw = math.degrees(math.atan2(self.alt, self.dist())) - self.gx
        return self._wrap180(raw)

    def roll(self):
        """Antenna roll reading wrapped to [-180, 180)."""
        return self._wrap180(self.gy)


if __name__ == '__main__':
    test = coord(12.0, 100.0, 12.5, 100.0, 20.1, 25600.2, 1.0, -3.0, 8.0)
    print(test.dist())
    print(test.lineofsight())
StarcoderdataPython
28969
<reponame>pabarros/asgard-api<gh_stars>1-10 import json import unittest from copy import deepcopy from http import HTTPStatus from unittest.mock import call, patch from flask import Response as FlaskResponse from marathon import MarathonApp from marathon.models.group import MarathonGroup from marathon.models.task import MarathonTask from asgard.models.account import AccountDB as Account from hollowman.app import application from hollowman.http_wrappers import Response from hollowman.marathon.group import AsgardAppGroup from hollowman.marathonapp import AsgardApp from hollowman.models import User from tests.utils import with_json_fixture class ResponseTest(unittest.TestCase): def test_remove_namespace_if_exists(self): response = Response(None, None) self.assertEqual("", response._remove_namespace_if_exists("dev", "")) self.assertEqual("/", response._remove_namespace_if_exists("dev", "/")) self.assertEqual( "/", response._remove_namespace_if_exists("dev", "/dev/") ) self.assertEqual( "", response._remove_namespace_if_exists("dev", "/dev") ) self.assertEqual( "/foo", response._remove_namespace_if_exists("dev", "/dev/foo") ) self.assertEqual( "/foo/dev", response._remove_namespace_if_exists("dev", "/dev/foo/dev"), ) self.assertEqual( "/dev", response._remove_namespace_if_exists("dev", "/dev/dev") ) self.assertEqual( None, response._remove_namespace_if_exists("dev", None) ) class SplitTests(unittest.TestCase): def setUp(self): self.empty_ok_response = FlaskResponse( response=b"{}", status=HTTPStatus.OK, headers={} ) self.user = User(tx_name="User One", tx_email="<EMAIL>") self.user.current_account = Account( name="Dev", namespace="dev", owner="company" ) @with_json_fixture("single_full_app.json") def test_a_single_app_response_returns_a_single_marathonapp(self, fixture): with application.test_request_context( "/v2/apps//foo", method="GET", data=b"" ) as ctx: flask_response = FlaskResponse( response=json.dumps({"app": fixture}), status=HTTPStatus.OK, headers={}, ) 
response = Response(ctx.request, flask_response) with patch.object(response, "marathon_client") as client: client.get_app.return_value = AsgardApp.from_json(fixture) apps = list(response.split()) self.assertEqual([call("/foo")], client.get_app.call_args_list) self.assertEqual( apps, [(AsgardApp.from_json(fixture), client.get_app.return_value)], ) @with_json_fixture("single_full_app.json") def test_multiapp_response_returns_multiple_marathonapp_instances( self, fixture ): modified_app = fixture.copy() modified_app["id"] = "/xablau" apps = [fixture, modified_app] with application.test_request_context( "/v2/apps/", method="GET", data=b"" ) as ctx: response = FlaskResponse( response=json.dumps({"apps": apps}), status=HTTPStatus.OK, headers={}, ) response = Response(ctx.request, response) with patch.object(response, "marathon_client") as client: original_apps = [MarathonApp.from_json(app) for app in apps] client.get_app.side_effect = original_apps apps = list(response.split()) self.assertEqual( apps, [ (AsgardApp.from_json(fixture), original_apps[0]), (AsgardApp.from_json(modified_app), original_apps[1]), ], ) @with_json_fixture("single_full_app.json") def test_a_response_for_restart_operation_with_appid_in_url_path_does_not_split_response( self, fixture ): """ Quando o response retorna um Deployment, não fazemos split. 
""" with application.test_request_context( "/v2/apps/xablau/restart", method="PUT", data=b'{"force": true}' ) as ctx: response = FlaskResponse( response=b"{}", status=HTTPStatus.OK, headers={} ) response = Response(ctx.request, response) apps = list(response.split()) self.assertEqual(0, len(apps)) @with_json_fixture("../fixtures/group_dev_namespace_with_apps.json") def test_split_groups_read_on_root_group(self, group_dev_namespace_fixture): with application.test_request_context( "/v2/groups/", method="GET" ) as ctx: response = FlaskResponse( response=json.dumps(group_dev_namespace_fixture), status=HTTPStatus.OK, headers={}, ) ctx.request.user = self.user response = Response(ctx.request, response) groups_tuple = list(response.split()) self.assertEqual(5, len(groups_tuple)) expected_groups = [ AsgardAppGroup(g) for g in AsgardAppGroup( MarathonGroup.from_json(group_dev_namespace_fixture) ).iterate_groups() ] # Compara com os groups originais self.assertEqual(expected_groups, [g[1] for g in groups_tuple]) @with_json_fixture("../fixtures/group_dev_namespace_with_apps.json") def test_split_group_nonroot_empty_group(self, group_dev_namespace_fixture): with application.test_request_context( "/v2/groups/group-c", method="GET" ) as ctx: response = FlaskResponse( response=json.dumps(group_dev_namespace_fixture["groups"][2]), status=HTTPStatus.OK, headers={}, ) ctx.request.user = self.user response = Response(ctx.request, response) groups_tuple = list(response.split()) self.assertEqual(1, len(groups_tuple)) expected_groups = [ AsgardAppGroup(g) for g in AsgardAppGroup( MarathonGroup.from_json( group_dev_namespace_fixture["groups"][2] ) ).iterate_groups() ] # Compara com os groups originais self.assertEqual(expected_groups, [g[1] for g in groups_tuple]) @unittest.skip("A ser implementado") def test_split_groups_write_PUT_on_group(self): self.fail() @with_json_fixture("../fixtures/group_dev_namespace_with_apps.json") def test_split_groups_read_on_specific_group( self, 
group_dev_namespace_fixture ): with application.test_request_context( "/v2/groups/group-b", method="GET" ) as ctx: response = FlaskResponse( response=json.dumps(group_dev_namespace_fixture["groups"][1]), status=HTTPStatus.OK, headers={}, ) ctx.request.user = self.user response = Response(ctx.request, response) groups_tuple = list(response.split()) self.assertEqual(2, len(groups_tuple)) expected_groups = [ AsgardAppGroup(g) for g in AsgardAppGroup( MarathonGroup.from_json( group_dev_namespace_fixture["groups"][1] ) ).iterate_groups() ] # Compara com os groups originais self.assertEqual(expected_groups, [g[1] for g in groups_tuple]) @with_json_fixture("../fixtures/tasks/get.json") def test_split_tasks_GET(self, tasks_get_fixture): """ No cado de um GET, o retorno sempre é uma lista de apps. """ with application.test_request_context( "/v2/tasks/", method="GET" ) as ctx: response = FlaskResponse( response=json.dumps(tasks_get_fixture), status=HTTPStatus.OK ) ctx.request.user = self.user response = Response(ctx.request, response) tasks_tuple = list(response.split()) self.assertEqual( [ MarathonTask.from_json(task) for task in tasks_get_fixture["tasks"] ], [task[0] for task in tasks_tuple], ) @with_json_fixture("../fixtures/tasks/get.json") def test_split_staks_POST_scale_false(self, tasks_get_fixture): """ No caso do POST com `?scale=false` o retorno é: - Lista de apps que foram killed Por isso usamos a fixture de tasks/get.json aqui """ with application.test_request_context( "/v2/tasks/delete?scale=false", method="POST" ) as ctx: response = FlaskResponse( response=json.dumps(tasks_get_fixture), status=HTTPStatus.OK ) ctx.request.user = self.user response = Response(ctx.request, response) tasks_tuple = list(response.split()) self.assertEqual( [ MarathonTask.from_json(task) for task in tasks_get_fixture["tasks"] ], [task[0] for task in tasks_tuple], ) @with_json_fixture("../fixtures/tasks/post?scale=true.json") def test_split_staks_POST_scale_true(self, 
tasks_post_fixture): """ No caso do POST com `?scale=true` o retorno é: - Deployment Id Isso significa que não faremos split do response """ with application.test_request_context( "/v2/tasks/delete?scale=true", method="POST" ) as ctx: response = FlaskResponse( response=json.dumps(tasks_post_fixture), status=HTTPStatus.OK ) ctx.request.user = self.user response = Response(ctx.request, response) tasks_tuple = list(response.split()) self.assertEqual(0, len(tasks_tuple)) @with_json_fixture("../fixtures/queue/get.json") def test_split_queue_GET(self, queue_get_fixture): with application.test_request_context("/v2/queue", method="GET") as ctx: response = FlaskResponse( response=json.dumps(queue_get_fixture), status=HTTPStatus.OK ) ctx.request.user = self.user response = Response(ctx.request, response) queue_tuples = list(response.split()) self.assertEqual(2, len(queue_tuples)) class JoinTests(unittest.TestCase): def setUp(self): self.user = User(tx_name="User One", tx_email="<EMAIL>") self.user.current_account = Account( name="Dev", namespace="dev", owner="company" ) def test_join_a_uknown_response(self): """ Como o repsonse roda para qualquer requiest que retornou 200 no upstream, muitas vezes pode passar por ele um request que ele "não trata", ou seja, que ele não tem nada o que fazer. 
Esse teste certifica que o join() não quebra em casos como esse """ with application.test_request_context( "/v2/apps/myapp/restart", method="POST" ) as ctx: response = FlaskResponse( response=json.dumps({"deploymentId": "myId"}), status=HTTPStatus.OK, ) ctx.request.user = self.user response = Response(ctx.request, response) joined_response = response.join([]) joined_response_data = json.loads(joined_response.data) self.assertEqual("myId", joined_response_data["deploymentId"]) @with_json_fixture("single_full_app.json") def test_it_recreates_a_get_response_for_a_single_app(self, fixture): with application.test_request_context( "/v2/apps//foo", method="GET", data=b"" ) as ctx: response = FlaskResponse( response=json.dumps({"app": fixture}), status=HTTPStatus.OK, headers={}, ) response = Response(ctx.request, response) with patch.object(response, "marathon_client") as client: client.get_app.return_value = AsgardApp.from_json(deepcopy(fixture)) apps = list(response.split()) joined_response = response.join(apps) self.assertIsInstance(joined_response, FlaskResponse) self.assertDictEqual( json.loads(joined_response.data), {"app": fixture} ) @with_json_fixture("single_full_app.json") def test_it_recreates_a_get_response_for_multiple_apps(self, fixture): modified_app = deepcopy(fixture) modified_app["id"] = "/xablau" fixtures = [fixture, modified_app] expected_response = deepcopy(fixtures) with application.test_request_context( "/v2/apps/", method="GET", data=b"" ) as ctx: response = FlaskResponse( response=json.dumps({"apps": fixtures}), status=HTTPStatus.OK, headers={}, ) response = Response(ctx.request, response) with patch.object(response, "marathon_client") as client: original_apps = [AsgardApp.from_json(app) for app in fixtures] client.get_app.side_effect = original_apps apps = list(response.split()) joined_response = response.join(apps) self.assertIsInstance(joined_response, FlaskResponse) self.assertDictEqual( json.loads(joined_response.data), {"apps": 
expected_response} ) @with_json_fixture("single_full_app.json") def test_should_join_an_empty_list_into_an_empty_response_single_app( self, single_full_app_fixture ): with application.test_request_context( "/v2/apps//foo", method="GET", data=b"" ) as ctx: response = FlaskResponse( response=json.dumps({"app": single_full_app_fixture}), status=HTTPStatus.OK, headers={}, ) response = Response(ctx.request, response) joined_response = response.join([]) self.assertIsInstance(joined_response, FlaskResponse) self.assertDictEqual(json.loads(joined_response.data), {"app": {}}) @with_json_fixture("single_full_app.json") def test_should_join_an_empty_list_into_an_empty_response_multi_app( self, single_full_app_fixture ): modified_app = deepcopy(single_full_app_fixture) modified_app["id"] = "/other-app" fixtures = [single_full_app_fixture, modified_app] expected_response = deepcopy(fixtures) with application.test_request_context( "/v2/apps/", method="GET", data=b"" ) as ctx: response = FlaskResponse( response=json.dumps({"apps": fixtures}), status=HTTPStatus.OK, headers={}, ) response = Response(ctx.request, response) joined_response = response.join([]) self.assertIsInstance(joined_response, FlaskResponse) self.assertDictEqual(json.loads(joined_response.data), {"apps": []}) @with_json_fixture("../fixtures/group_dev_namespace_with_one_full_app.json") def test_join_groups(self, group_dev_namespace_fixture): with application.test_request_context( "/v2/groups/", method="GET" ) as ctx: response = FlaskResponse( response=json.dumps(group_dev_namespace_fixture), status=HTTPStatus.OK, headers={}, ) ctx.request.user = self.user response = Response(ctx.request, response) groups_tuple = list(response.split()) joined_response = response.join(groups_tuple) joined_response_data = json.loads(joined_response.data) self.assertEqual("/dev", joined_response_data["id"]) self.assertEqual( "/dev/group-b", joined_response_data["groups"][0]["id"] ) self.assertEqual( [], 
joined_response_data["dependencies"] ) # Groups should be reendered in full self.assertEqual(1, len(joined_response_data["groups"][0]["apps"])) self.assertEqual( [], joined_response_data["groups"][0]["apps"][0]["constraints"] ) # Apps should also be renderen in full @with_json_fixture("../fixtures/tasks/get_single_namespace.json") def test_join_tasks_GET(self, tasks_single_namespace_fixture): with application.test_request_context( "/v2/tasks/", method="GET" ) as ctx: response = FlaskResponse( response=json.dumps(tasks_single_namespace_fixture), status=HTTPStatus.OK, ) ctx.request.user = self.user response = Response(ctx.request, response) tasks_tuple = list(response.split()) joined_response = response.join(tasks_tuple) joined_response_data = json.loads(joined_response.data) self.assertEqual(3, len(joined_response_data["tasks"])) def test_join_tasks_empty_list_GET(self): """ Se o request for GET e a lista de tasks for vazia, significa que todas as tasks foram removidas do response, isso significa que temos que retornar um response vazio. 
""" with application.test_request_context( "/v2/tasks/", method="GET" ) as ctx: response = FlaskResponse( response=json.dumps({"tasks": [{"id": "some-filtered-task"}]}), status=HTTPStatus.OK, ) ctx.request.user = self.user response = Response(ctx.request, response) joined_response = response.join([]) joined_response_data = json.loads(joined_response.data) self.assertEqual(0, len(joined_response_data["tasks"])) @with_json_fixture("../fixtures/tasks/post?scale=true.json") def test_join_tasks_POST_scale_true(self, tasks_post_fixture): with application.test_request_context( "/v2/tasks/delete?scale=true", method="POST" ) as ctx: response = FlaskResponse( response=json.dumps(tasks_post_fixture), status=HTTPStatus.OK ) ctx.request.user = self.user response = Response(ctx.request, response) tasks_tuple = list(response.split()) joined_response = response.join(tasks_tuple) joined_response_data = json.loads(joined_response.data) self.assertEqual( "5ed4c0c5-9ff8-4a6f-a0cd-f57f59a34b43", joined_response_data["deploymentId"], ) @with_json_fixture("../fixtures/tasks/get.json") def test_join_tasks_POST_scale_false(self, tasks_get_fixture): with application.test_request_context( "/v2/tasks/delete?scale=false", method="POST" ) as ctx: response = FlaskResponse( response=json.dumps(tasks_get_fixture), status=HTTPStatus.OK ) ctx.request.user = self.user response = Response(ctx.request, response) tasks_tuple = list(response.split()) joined_response = response.join(tasks_tuple) joined_response_data = json.loads(joined_response.data) self.assertEqual(3, len(joined_response_data["tasks"]))
StarcoderdataPython
1635354
# -*- encoding: utf-8 -*-
"""Extract weather (mete) / pollution (envi) observation rows for the station
ids and time range configured in ``config.ini``.

The extraction shells out to ``grep``/``sed``: matching rows are appended,
prefixed with the hour timestamp, to one output text file, and a CSV header
line is finally prepended with ``sed -i``.
"""

import os
import sys
import traceback
import configparser
from datetime import datetime, timedelta


def getStationId(filename):
    """Return the station-id column (first space-separated field) of every
    data row in *filename*; the first line is treated as a header.

    Raises FileNotFoundError when the station list is missing (previously
    an ``assert``, which silently disappears under ``python -O``).
    """
    if not os.path.exists(filename):
        raise FileNotFoundError("station list not found: %s" % filename)
    with open(filename, 'r') as f:
        cont = f.readlines()
    return [x.split(' ')[0] for x in cont[1:]]


def getData(config: dict):
    """Walk the configured range hour by hour, grep matching rows into the
    output file, then prepend the CSV header."""
    start = config['start']
    while start <= config['end']:
        year = start.strftime("%Y")
        if config['type'] == 'mete':
            filename = os.path.join(
                os.path.join(config['mete_dir'], year),
                'obs_{}_'.format(config['type']) + start.strftime("%Y%m%d%H") + '.txt')
        elif config['type'] == 'envi':
            filename = os.path.join(
                os.path.join(config['envi_dir'], year),
                'obs_{}_'.format(config['type']) + start.strftime("%Y%m%d%H") + '.txt')
        else:
            print("Error type.")
            sys.exit(1)
        print(filename)
        if not os.path.exists(filename):
            # Explicit error instead of `assert` (stripped under -O).
            raise FileNotFoundError("observation file not found: %s" % filename)
        extr_cmd = extract_cmd(config, filename, start)
        try:
            os.system(extr_cmd)
        except Exception:
            # `traceback.print_exc(err)` passed the exception where the API
            # expects a `limit` int; call it with no argument instead.
            traceback.print_exc()
        start += timedelta(hours=1)

    add_header = add_cmd(config)
    try:
        os.system(add_header)
    except Exception:
        traceback.print_exc()


def extract_cmd(config: dict, filename: str, start) -> str:
    """Build the shell command that greps the configured station ids out of
    *filename* and appends them, prefixed with the hour timestamp, to the
    output file ``<savedir>/<start>_<end>_<type>.txt``."""
    return "grep -E '{0}' {1} | sed 's/^/{2},/g' >> {3}.txt".format(
        '|'.join(config['ids']),
        filename,
        start.strftime("%Y%m%d%H"),
        '{}/{}_{}_{}'.format(config['savedir'],
                             config['start'].strftime("%Y%m%d%H"),
                             config['end'].strftime("%Y%m%d%H"),
                             config['type']))


def add_cmd(config: dict) -> str:
    """Build the ``sed -i`` command that prepends the column-header line
    matching the extraction type to the output file."""
    if config['type'] == 'mete':
        return r'''sed -i "1i\时间,站点编号,2分钟平均风向,2分钟平均风速,气温,本站气压,海平面气压,露点温度,相对湿度,小时降水量,能见度,总云量" {} '''.format(
            '{}/{}_{}_{}.txt'.format(config['savedir'],
                                     config['start'].strftime("%Y%m%d%H"),
                                     config['end'].strftime("%Y%m%d%H"),
                                     config['type']))
    elif config['type'] == 'envi':
        return r'''sed -i "1i\时间,站点编号,PM2.5浓度,PM10浓度,CO浓度,NO2浓度,SO2浓度,O3浓度,O3 8小时浓度,AQI,空气质量等级,首要污染物" {} '''.format(
            '{}/{}_{}_{}.txt'.format(config['savedir'],
                                     config['start'].strftime("%Y%m%d%H"),
                                     config['end'].strftime("%Y%m%d%H"),
                                     config['type']))
    else:
        print('Error type')
        sys.exit(1)


def parseConfig(path: str) -> dict:
    """Read ``config.ini`` into a plain dict (paths, time range, type)."""
    cfg = configparser.ConfigParser()
    cfg.read(path)
    config = {}
    config['mete_info'] = cfg.get("StationInfo", "mete_info")
    config['envi_info'] = cfg.get("StationInfo", "envi_info")
    config['mete_dir'] = cfg.get("DataDir", "mete_dir")
    config['envi_dir'] = cfg.get("DataDir", "envi_dir")
    config['start'] = datetime.strptime(
        cfg.get("Duration", "starttime"), "%Y%m%d%H")
    config['end'] = datetime.strptime(
        cfg.get("Duration", "endtime"), "%Y%m%d%H")
    config['savedir'] = cfg.get("SaveDir", "savedir")
    config['type'] = cfg.get("Type", "type")
    return config


def main():
    workdir = os.path.dirname(__file__)
    cfgpath = os.path.join(workdir, 'config.ini')
    configs = parseConfig(cfgpath)
    if configs['type'] == 'mete':
        configs['ids'] = getStationId(configs['mete_info'])
    elif configs['type'] == 'envi':
        configs['ids'] = getStationId(configs['envi_info'])
    else:
        # Previously fell through with no 'ids' key and later crashed with
        # an opaque KeyError inside getData; fail fast with a clear message.
        raise ValueError("unknown extraction type: %r" % configs['type'])
    getData(configs)


if __name__ == "__main__":
    main()
StarcoderdataPython
3207326
def seconds_to_str(t):
    """Format a duration *t* (in seconds) as ``H:MM:SS.mmm``."""
    millis_total = t * 1000
    seconds_total, millis = divmod(millis_total, 1000)
    minutes_total, seconds = divmod(seconds_total, 60)
    hours, minutes = divmod(minutes_total, 60)
    return "%d:%02d:%02d.%03d" % (hours, minutes, seconds, millis)
StarcoderdataPython
170023
import numpy as np
from typing import Callable

from lanro.tasks.core import Task
from lanro.simulation import PyBulletSimulation
from lanro.tasks.scene import basic_scene
from lanro.utils import RGBCOLORS


class Reach(Task):
    """Reaching task: move the robot end-effector to a randomly sampled
    target point, visualised as a translucent red sphere."""

    def __init__(self,
                 sim: PyBulletSimulation,
                 get_ee_position: Callable[[], np.ndarray],
                 reward_type: str = "sparse",
                 distance_threshold: float = 0.025,
                 goal_range: float = 0.3):
        # `get_ee_position` returns the current end-effector position; it is
        # queried on every `get_achieved_goal` call.
        self.sim = sim
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.get_ee_position = get_ee_position
        # Goals are sampled in a box: x/y in [-goal_range/2, goal_range/2],
        # z in [0, goal_range].
        self.goal_range_low = np.array([-goal_range / 2, -goal_range / 2, 0])
        self.goal_range_high = np.array([goal_range / 2, goal_range / 2, goal_range])
        # Build the scene with rendering suspended, then position the camera.
        with self.sim.no_rendering():
            self._create_scene()
            self.sim.place_visualizer()

    def _create_scene(self) -> None:
        """Build the basic scene plus a massless ghost (non-colliding) sphere
        marking the goal; its radius equals the success threshold."""
        basic_scene(self.sim)
        self.sim.create_sphere(
            body_name="target",
            radius=self.distance_threshold,
            mass=0.0,
            ghost=True,
            position=[0.0, 0.0, 0.0],
            rgba_color=RGBCOLORS.RED.value + [0.3],
        )

    def get_goal(self) -> np.ndarray:
        """Return a copy of the current goal position."""
        return self.goal.copy()

    def get_obs(self) -> np.ndarray:
        """Reach has no task-specific observation; returns an empty array."""
        return np.array([])

    def get_achieved_goal(self) -> np.ndarray:
        """Current end-effector position (what the agent has 'achieved')."""
        return self.get_ee_position()

    def reset(self) -> None:
        """Sample a new goal and move the target marker to it."""
        self.goal = self._sample_goal()
        self.sim.set_base_pose("target", self.goal.tolist(), [0, 0, 0, 1])

    def _sample_goal(self) -> np.ndarray:
        # NOTE(review): `self.np_random` is presumably provided by the Task
        # base class (lanro.tasks.core) -- confirm there.
        return self.np_random.uniform(self.goal_range_low, self.goal_range_high)
StarcoderdataPython
1646710
'''
    Speed Test for Internet - v1.2
    Apache Lisance 2.0
    Created by <NAME>
'''

import time  # for the next update
import colorama  # for the next update
import requests as requests
import speedtest

colorama.init()


def durum(ping):
    """Classify a ping (ms) into a Turkish quality label.

    Bands: <=10 very good, (10, 50] good, (50, 100] bad, >100 very bad.
    """
    label = "Hata - Ölçülemedi"
    if ping <= 10:
        label = "Çok İyi!"
    elif 10 < ping <= 50:
        label = "İyi!"
    elif 50 < ping <= 100:
        # Was `ping > 40 and ping <= 100`, which overlapped the previous band;
        # the overlap was dead (the earlier branch always won), so the real
        # boundary has always been 50 -- make it explicit.
        label = "Kötü!"
    elif ping > 100:
        label = "Çok Kötü!"
    return label


def oneri(downloadspeed, uploadspeed, situation):
    """Suggest an action from the measured speeds (Mbps) and the ping label."""
    # The original elif condition `downloadspeed > 10 and uploadspeed <= 5 or
    # uploadspeed > 5` was, by operator precedence, exactly the complement of
    # the first branch -- i.e. this chain is a plain if/else.
    if downloadspeed <= 10 and uploadspeed <= 5:
        return "İnternet Tarifenizi Veya İnternet Sağlayıcınızı Değiştirmeyi Deneyin."
    if situation in ("Kötü!", "Çok Kötü!"):
        return "Fiziksel Konumunuzu Değiştirmeyi Deneyin."
    return "Sorun Yok :)"


def internetbaglantısınıkontrolet():
    """Probe connectivity by fetching google.com; returns True on success."""
    try:
        requests.get('https://www.google.com')
    except requests.RequestException:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt.
        print('[-] İnternet Bağlantısı Bulunamadı')
        return False
    else:
        print('[+] İnternet Bağlantısı Kontrol Ediliyor...')
        time.sleep(3)
        print('[+] Bağlantı Başarılı :)')
        return True


if internetbaglantısınıkontrolet() == False:
    input()
else:
    s = speedtest.Speedtest()
    print("[+] Test Yapılıyor...")
    print(" ")
    indirmehızı = s.download() / 1048576
    yuklemehızı = s.upload() / 1048576
    ping = round(s.results.ping)
    internetdurumu = durum(ping=ping)
    internetonerisi = oneri(downloadspeed=indirmehızı, uploadspeed=yuklemehızı,
                            situation=internetdurumu)
    print(f"[+] İndirme Hızı: {indirmehızı:.2f} Mbps")
    print(f"[+] Yükleme Hızı: {yuklemehızı:.2f} Mbps")
    print(f"[+] Ping: {ping:.2f} ms")
    print(" ")
    time.sleep(0.7)
    print(f"[+] İnternet Durumu: {internetdurumu}")
    print("[+] Not: İnternet durumu pinge göre hesaplanır.")
    print(" ")
    time.sleep(0.7)
    print(f"[+] Önerimiz(Beta): {internetonerisi}")
    input()
StarcoderdataPython
3281638
"""Scrape price and address from the first page of Mayo property listings on
myhome.ie and write them to a tab-separated CSV file."""

from bs4 import BeautifulSoup
import requests
import csv

# A timeout stops the script from hanging forever if the site never answers.
page = requests.get(
    "https://www.myhome.ie/residential/mayo/property-for-sale?page=1",
    timeout=30)
soup = BeautifulSoup(page.content, 'html.parser')

# `newline=''` is required by the csv module (otherwise extra blank rows
# appear on Windows) and `with` guarantees the file is closed even if one
# of the listings below fails to parse (the original leaked the handle).
with open('week03MyHome.csv', mode='w', newline='', encoding='utf-8') as home_file:
    home_writer = csv.writer(home_file, delimiter='\t', quotechar='"',
                             quoting=csv.QUOTE_MINIMAL)
    for listing in soup.findAll("div", class_="PropertyListingCard"):
        price = listing.find(class_="PropertyListingCard__Price").text
        address = listing.find(class_="PropertyListingCard__Address").text
        home_writer.writerow([price, address])
StarcoderdataPython
1608440
"""Tests for the moto Glacier mock: initiating, describing, listing jobs and
fetching job output against an in-memory Glacier backend."""

import boto3
import sure  # noqa # pylint: disable=unused-import
import time

from moto import mock_glacier
from moto.core import ACCOUNT_ID


@mock_glacier
def test_initiate_job():
    """Initiating an archive-retrieval job returns 202 plus job headers."""
    client = boto3.client("glacier", region_name="us-west-2")
    client.create_vault(vaultName="myname")

    archive = client.upload_archive(vaultName="myname", body=b"body of archive")

    job = client.initiate_job(
        vaultName="myname",
        jobParameters={"ArchiveId": archive["archiveId"], "Type": "archive-retrieval"},
    )
    job["ResponseMetadata"]["HTTPStatusCode"].should.equal(202)

    headers = job["ResponseMetadata"]["HTTPHeaders"]
    headers.should.have.key("x-amz-job-id")
    # Should be an exact match, but Flask adds 'http' to the start of the Location-header
    headers.should.have.key("location").match(
        "//vaults/myname/jobs/" + headers["x-amz-job-id"]
    )

    # Don't think this is correct - the spec says no body is returned, only headers
    # https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html
    job.should.have.key("jobId")
    job.should.have.key("location")


@mock_glacier
def test_describe_job_boto3():
    """describe_job reports the expected fields for a fresh in-progress job."""
    client = boto3.client("glacier", region_name="us-west-2")
    client.create_vault(vaultName="myname")
    archive = client.upload_archive(vaultName="myname", body=b"body of archive")
    job = client.initiate_job(
        vaultName="myname",
        jobParameters={"ArchiveId": archive["archiveId"], "Type": "archive-retrieval"},
    )
    job_id = job["jobId"]

    describe = client.describe_job(vaultName="myname", jobId=job_id)
    describe.should.have.key("JobId").equal(job_id)
    describe.should.have.key("Action").equal("ArchiveRetrieval")
    describe.should.have.key("ArchiveId").equal(archive["archiveId"])
    describe.should.have.key("VaultARN").equal(
        f"arn:aws:glacier:us-west-2:{ACCOUNT_ID}:vaults/myname"
    )
    describe.should.have.key("CreationDate")
    describe.should.have.key("Completed").equal(False)
    describe.should.have.key("StatusCode").equal("InProgress")
    describe.should.have.key("ArchiveSizeInBytes").equal(0)
    describe.should.have.key("InventorySizeInBytes").equal(0)
    describe.should.have.key("Tier").equal("Standard")


@mock_glacier
def test_list_jobs():
    """list_jobs returns every initiated job with the full field set."""
    client = boto3.client("glacier", region_name="us-west-2")
    client.create_vault(vaultName="myname")
    archive1 = client.upload_archive(vaultName="myname", body=b"first archive")
    archive2 = client.upload_archive(vaultName="myname", body=b"second archive")
    job1 = client.initiate_job(
        vaultName="myname",
        jobParameters={"ArchiveId": archive1["archiveId"], "Type": "archive-retrieval"},
    )
    job2 = client.initiate_job(
        vaultName="myname",
        jobParameters={"ArchiveId": archive2["archiveId"], "Type": "archive-retrieval"},
    )

    jobs = client.list_jobs(vaultName="myname")["JobList"]

    # Verify the created jobs are in this list
    found_jobs = [j["JobId"] for j in jobs]
    found_jobs.should.contain(job1["jobId"])
    found_jobs.should.contain(job2["jobId"])

    found_job1 = [j for j in jobs if j["JobId"] == job1["jobId"]][0]
    found_job1.should.have.key("ArchiveId").equal(archive1["archiveId"])
    found_job2 = [j for j in jobs if j["JobId"] == job2["jobId"]][0]
    found_job2.should.have.key("ArchiveId").equal(archive2["archiveId"])

    # Verify all jobs follow the correct format
    for job in jobs:
        job.should.have.key("JobId")
        job.should.have.key("Action")
        job.should.have.key("ArchiveId")
        job.should.have.key("VaultARN")
        job.should.have.key("CreationDate")
        job.should.have.key("ArchiveSizeInBytes")
        job.should.have.key("Completed")
        job.should.have.key("StatusCode")
        job.should.have.key("InventorySizeInBytes")
        job.should.have.key("Tier")


@mock_glacier
def test_get_job_output_boto3():
    """Polls up to ~10s for the mock job to complete, then checks the body."""
    client = boto3.client("glacier", region_name="us-west-2")
    client.create_vault(vaultName="myname")
    archive = client.upload_archive(vaultName="myname", body=b"contents of archive")
    job = client.initiate_job(
        vaultName="myname",
        jobParameters={"ArchiveId": archive["archiveId"], "Type": "archive-retrieval"},
    )

    # The mock completes jobs asynchronously, so retry until it is ready.
    output = None
    start = time.time()
    while (time.time() - start) < 10:
        try:
            output = client.get_job_output(vaultName="myname", jobId=job["jobId"])
            break
        except Exception:
            time.sleep(1)

    output.should.have.key("status").equal(200)
    output.should.have.key("contentType").equal("application/octet-stream")
    output.should.have.key("body")
    body = output["body"].read().decode("utf-8")
    body.should.equal("contents of archive")
StarcoderdataPython
1608564
#pythran export run(int, int, int)
#runas run(10,10,10)
#from https://raw.githubusercontent.com/cphhpc/numpy/victim_cache/benchmark/Python/shallow_water.py
import numpy as np


def model(height, width, dtype):
    """Initial water-height field: flat at 1.0 with a 6.0 spike at
    (height//4, width//4)."""
    m = np.ones((height, width), dtype=dtype)
    # `//` (was `/`): float indices raise TypeError on Python 3.
    m[height // 4, width // 4] = 6.0
    return m


def step(H, U, V, dt=0.02, dx=1.0, dy=1.0):
    """One two-half-step (Lax-Wendroff style) update of the shallow-water
    equations.

    H is water height, U/V are x/y momentum. The arrays are updated in
    place and also returned; the outermost rows/columns act as reflecting
    walls (heights copied, normal momentum negated).
    """
    g = 9.80665  # gravitational acceleration

    # Reflecting boundary conditions
    H[:, 0] = H[:, 1]; U[:, 0] = U[:, 1]; V[:, 0] = -V[:, 1]
    H[:, -1] = H[:, -2]; U[:, -1] = U[:, -2]; V[:, -1] = -V[:, -2]
    H[0, :] = H[1, :]; U[0, :] = -U[1, :]; V[0, :] = V[1, :]
    H[-1, :] = H[-2, :]; U[-1, :] = -U[-2, :]; V[-1, :] = V[-2, :]

    # First half step
    # height
    Hx = (H[1:, 1:-1] + H[:-1, 1:-1]) / 2 - dt / (2 * dx) * (U[1:, 1:-1] - U[:-1, 1:-1])
    # x momentum
    Ux = (U[1:, 1:-1] + U[:-1, 1:-1]) / 2 - \
        dt / (2 * dx) * ((U[1:, 1:-1] ** 2 / H[1:, 1:-1] + g / 2 * H[1:, 1:-1] ** 2) -
                         (U[:-1, 1:-1] ** 2 / H[:-1, 1:-1] + g / 2 * H[:-1, 1:-1] ** 2))
    # y momentum
    Vx = (V[1:, 1:-1] + V[:-1, 1:-1]) / 2 - \
        dt / (2 * dx) * ((U[1:, 1:-1] * V[1:, 1:-1] / H[1:, 1:-1]) -
                         (U[:-1, 1:-1] * V[:-1, 1:-1] / H[:-1, 1:-1]))
    # height
    Hy = (H[1:-1, 1:] + H[1:-1, :-1]) / 2 - dt / (2 * dy) * (V[1:-1, 1:] - V[1:-1, :-1])
    # x momentum
    Uy = (U[1:-1, 1:] + U[1:-1, :-1]) / 2 - \
        dt / (2 * dy) * ((V[1:-1, 1:] * U[1:-1, 1:] / H[1:-1, 1:]) -
                         (V[1:-1, :-1] * U[1:-1, :-1] / H[1:-1, :-1]))
    # y momentum
    Vy = (V[1:-1, 1:] + V[1:-1, :-1]) / 2 - \
        dt / (2 * dy) * ((V[1:-1, 1:] ** 2 / H[1:-1, 1:] + g / 2 * H[1:-1, 1:] ** 2) -
                         (V[1:-1, :-1] ** 2 / H[1:-1, :-1] + g / 2 * H[1:-1, :-1] ** 2))

    # Second half step
    # height
    H[1:-1, 1:-1] -= (dt / dx) * (Ux[1:, :] - Ux[:-1, :]) + (dt / dy) * (Vy[:, 1:] - Vy[:, :-1])
    # x momentum
    U[1:-1, 1:-1] -= (dt / dx) * ((Ux[1:, :] ** 2 / Hx[1:, :] + g / 2 * Hx[1:, :] ** 2) -
                                  (Ux[:-1, :] ** 2 / Hx[:-1, :] + g / 2 * Hx[:-1, :] ** 2)) + \
        (dt / dy) * ((Vy[:, 1:] * Uy[:, 1:] / Hy[:, 1:]) -
                     (Vy[:, :-1] * Uy[:, :-1] / Hy[:, :-1]))
    # y momentum
    V[1:-1, 1:-1] -= (dt / dx) * ((Ux[1:, :] * Vx[1:, :] / Hx[1:, :]) -
                                  (Ux[:-1, :] * Vx[:-1, :] / Hx[:-1, :])) + \
        (dt / dy) * ((Vy[:, 1:] ** 2 / Hy[:, 1:] + g / 2 * Hy[:, 1:] ** 2) -
                     (Vy[:, :-1] ** 2 / Hy[:, :-1] + g / 2 * Hy[:, :-1] ** 2))
    return (H, U, V)


def simulate(H, timesteps):
    """Run *timesteps* updates from height field H; returns the final H."""
    U = np.zeros_like(H)
    V = np.zeros_like(H)
    # `range` (was Python-2 `xrange`, a NameError on Python 3).
    for _ in range(timesteps):
        (H, U, V) = step(H, U, V)
    return H


def run(H, W, I):
    """Benchmark entry point: H x W grid, I timesteps; returns final heights."""
    m = model(H, W, dtype=np.float64)
    m = simulate(m, I)
    return m
StarcoderdataPython
1698574
import os from dotenv import load_dotenv from os.path import join, dirname dotenv_path = join(dirname(__file__), '.env') load_dotenv(dotenv_path) DELAY = 0.01 NUM_PARTITIONS = 3 OUTLIERS_GENERATION_PROBABILITY = 0.2 KAFKA_BROKER = "localhost:9092" TRANSACTIONS_TOPIC = "transactions" TRANSACTIONS_CONSUMER_GROUP = "transactions" ANOMALIES_TOPIC = "anomalies" ANOMALIES_CONSUMER_GROUP = "anomalies" SLACK_BOT_TOKEN = os.environ.get("SLACK_BOT_TOKEN") SLACK_CHANNEL = "data-consumption"
StarcoderdataPython
3301274
# -*- coding: utf-8 -*- # Define here the models for your spider middleware # # See documentation in: # http://doc.scrapy.org/en/latest/topics/spider-middleware.html from scrapy.exceptions import NotConfigured from tools.proxies import get_proxy, del_proxy class HttpProxyMiddleware(object): """ 代理中间件 """ def __init__(self, settings): if not settings.getbool('RETRY_ENABLED'): raise NotConfigured self.max_retry_times = settings.getint('RETRY_TIMES') self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES')) self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST') or 1 @classmethod def from_crawler(cls, crawler): return cls(crawler.settings) def process_request(self, request, spider): # request.meta['proxy'] = "http://YOUR_PROXY_IP:PORT" # 当前请求代理(保证重试过程,代理一致) request_proxy = request.meta.get('proxy') or get_proxy(spider.name) request.meta['proxy'] = request_proxy spider.log(request.meta) def process_exception(self, request, exception, spider): error_proxy = request.meta.get('proxy') if not error_proxy: return None # 重试失败(默认重试2次,共请求3次),删除代理 if request.meta.get('retry_times', 0) >= self.max_retry_times: del_proxy(spider.name, error_proxy) spider.log('%s del proxy: %s, error reason: %s' % (spider.name, error_proxy, exception)) return None
StarcoderdataPython
3398570
<gh_stars>1-10 """ Copyright BOOSTRY Co., Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. SPDX-License-Identifier: Apache-2.0 """ from datetime import datetime import json import base64 from Crypto.PublicKey import RSA from Crypto.Cipher import PKCS1_OAEP import pytest from eth_utils import to_checksum_address from config import Config from app.models import ( Token, Transfer ) from .conftest import TestBase from .utils import contract_utils_common from .utils.account_config import eth_account from .utils.contract_utils_common import ( processor_issue_event, index_transfer_event, clean_issue_event ) from .utils.contract_utils_coupon import apply_for_offering from .utils.contract_utils_personal_info import register_personal_info class TestCoupon(TestBase): ############################################################################# # テスト対象URL ############################################################################# url_list = 'coupon/list' # 発行済一覧 url_issue = 'coupon/issue' # 新規発行 url_setting = 'coupon/setting/' # 詳細設定 url_valid = 'coupon/valid' # 有効化(取扱開始) url_invalid = 'coupon/invalid' # 無効化(取扱中止) url_add_supply = 'coupon/add_supply/' # 追加発行 url_start_initial_offering = 'coupon/start_initial_offering' # 募集申込開始 url_stop_initial_offering = 'coupon/stop_initial_offering' # 募集申込停止 url_applications = 'coupon/applications/' # 募集申込一覧 url_get_applications = 'coupon/get_applications/' # 募集申込一覧 url_applications_csv_download = 'coupon/applications_csv_download' # 申込者リストCSVダウンロード url_allocate = 
'coupon/allocate' # 割当(募集申込) url_transfer = 'coupon/transfer' # 割当 url_transfer_ownership = 'coupon/transfer_ownership/' # 所有者移転 url_holders = 'coupon/holders/' # 保有者一覧 url_get_holders = 'coupon/get_holders/' # 保有者一覧(API) url_holders_csv_download = 'coupon/holders_csv_download' # 保有者一覧CSVダウンロード url_get_token_name = 'coupon/get_token_name/' # トークン名取得(API) url_holder = 'coupon/holder/' # 保有者詳細 url_positions = 'coupon/positions' # 売出管理 url_sell = 'coupon/sell/' # 新規売出 url_cancel_order = 'coupon/cancel_order/' # 売出中止 url_release = 'coupon/release' # 公開 url_usage_history = 'coupon/usage_history/' # 利用履歴 url_get_usage_history_coupon = 'coupon/get_usage_history_coupon/' # 利用履歴 url_used_csv_download = 'coupon/used_csv_download' # 利用履歴CSVダウンロード url_token_tracker = 'coupon/token/track/' # トークン追跡 ############################################################################# # PersonalInfo情報の暗号化 ############################################################################# issuer_personal_info_json = { "name": "株式会社1", "postal_code": "1234567", "address": "東京都中央区 日本橋11-1 東京マンション101", "email": "<EMAIL>", "birth": "20190902", "phone": "0399999999" } # \uff0d: 「-」FULLWIDTH HYPHEN-MINUS。半角ハイフン変換対象。 # \u30fc: 「ー」KATAKANA-HIRAGANA PROLONGED SOUND MARK。半角ハイフン変換対象外。 trader_personal_info_json = { "key_manager": "", "name": "タンタイテスト", "postal_code": "1040053", "address": "東京都中央区 勝どき1丁目1\uff0d2\u30fc3", "email": "<EMAIL>", "birth": "20191102", "phone": "0399999999" } key = RSA.importKey(open('data/rsa/public.pem').read()) cipher = PKCS1_OAEP.new(key) issuer_encrypted_info = \ base64.encodebytes( cipher.encrypt(json.dumps(issuer_personal_info_json).encode('utf-8'))) trader_encrypted_info = \ base64.encodebytes( cipher.encrypt(json.dumps(trader_personal_info_json).encode('utf-8'))) ############################################################################# # テスト(正常系) ############################################################################# # <前処理> def test_normal_0(self, 
shared_contract, db): # personalinfo登録 register_personal_info( db=db, invoker=eth_account['issuer'], contract_address=shared_contract['PersonalInfo']['address'], info=self.issuer_personal_info_json, encrypted_info=self.issuer_encrypted_info ) register_personal_info( db=db, invoker=eth_account["trader"], contract_address=shared_contract["PersonalInfo"]["address"], info=self.trader_personal_info_json, encrypted_info=self.trader_encrypted_info ) # <正常系1_1> # 発行済一覧画面の参照(0件) def test_normal_1_1(self, app): client = self.client_with_admin_login(app) # 発行済一覧の参照 response = client.get(self.url_list) assert response.status_code == 200 assert '<title>発行済一覧'.encode('utf-8') in response.data assert 'データが存在しません'.encode('utf-8') in response.data # <正常系1_2> # <0件確認> # 売出管理画面の参照(0件) def test_normal_1_2(self, app): client = self.client_with_admin_login(app) # 売出管理画面の参照 response = client.get(self.url_positions) assert response.status_code == 200 assert '<title>売出管理'.encode('utf-8') in response.data assert 'データが存在しません'.encode('utf-8') in response.data # <正常系2_1> # <新規発行> # 新規発行 def test_normal_2_1(self, app, shared_contract): client = self.client_with_admin_login(app) # 新規発行 response = client.post( self.url_issue, data={ 'name': 'テストクーポン', 'symbol': 'COUPON', 'totalSupply': 2000000, 'expirationDate': '20191231', 'transferable': 'True', 'details': 'details詳細', 'return_details': 'return詳細', 'memo': 'memoメモ', 'image_1': 'https://test.com/image_1.jpg', 'image_2': 'https://test.com/image_2.jpg', 'image_3': 'https://test.com/image_3.jpg', 'tradableExchange': shared_contract['IbetCouponExchange']['address'] } ) assert response.status_code == 302 # <正常系2_2> # <新規発行> # DB取込前確認 def test_normal_2_2(self, app): # 発行済一覧画面の参照 client = self.client_with_admin_login(app) response = client.get(self.url_list) assert response.status_code == 200 assert '<title>発行済一覧'.encode('utf-8') in response.data # <正常系2_3> # <新規発行> # 新規発行(DB取込) → 詳細設定画面の参照 def test_normal_2_3(self, app, db, shared_contract): client = 
self.client_with_admin_login(app) # DB登録処理 processor_issue_event(db) # 詳細設定画面の参照 tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() response = client.get(self.url_setting + tokens[0].token_address) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert 'COUPON'.encode('utf-8') in response.data assert '2000000'.encode('utf-8') in response.data assert '20191231'.encode('utf-8') in response.data assert '<option selected value="True">なし</option>'.encode('utf-8') in response.data assert 'details詳細'.encode('utf-8') in response.data assert 'return詳細'.encode('utf-8') in response.data assert 'memoメモ'.encode('utf-8') in response.data assert 'https://test.com/image_1.jpg'.encode('utf-8') in response.data assert 'https://test.com/image_2.jpg'.encode('utf-8') in response.data assert 'https://test.com/image_3.jpg'.encode('utf-8') in response.data assert shared_contract['IbetCouponExchange']['address'].encode('utf-8') in response.data # <正常系2_4> # <新規発行> # 新規発行:譲渡制限あり def test_normal_2_4(self, app, db, shared_contract): client = self.client_with_admin_login(app) # 新規発行 response = client.post( self.url_issue, data={ 'name': 'テストクーポン', 'symbol': 'COUPON', 'totalSupply': 2000000, 'expirationDate': '20191231', 'transferable': 'False', 'details': 'details詳細', 'return_details': 'return詳細', 'memo': 'memoメモ', 'image_1': 'https://test.com/image_1.jpg', 'image_2': 'https://test.com/image_2.jpg', 'image_3': 'https://test.com/image_3.jpg', 'tradableExchange': shared_contract['IbetCouponExchange']['address'] } ) assert response.status_code == 302 # DB登録処理 processor_issue_event(db) # 詳細設定画面の参照 tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() response = client.get(self.url_setting + tokens[1].token_address) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert 
'テストクーポン'.encode('utf-8') in response.data assert 'COUPON'.encode('utf-8') in response.data assert '2000000'.encode('utf-8') in response.data assert '20191231'.encode('utf-8') in response.data assert '<option selected value="False">あり</option>'.encode('utf-8') in response.data assert 'details詳細'.encode('utf-8') in response.data assert 'return詳細'.encode('utf-8') in response.data assert 'memoメモ'.encode('utf-8') in response.data assert 'https://test.com/image_1.jpg'.encode('utf-8') in response.data assert 'https://test.com/image_2.jpg'.encode('utf-8') in response.data assert 'https://test.com/image_3.jpg'.encode('utf-8') in response.data assert shared_contract['IbetCouponExchange']['address'].encode('utf-8') in response.data # <正常系2_5> # <発行画面表示> def test_normal_2_5(self, app): client = self.client_with_admin_login(app) # 新規発行画面の表示 response = client.get(self.url_issue, ) assert response.status_code == 200 assert '<title>新規発行'.encode('utf-8') in response.data assert '名称'.encode('utf-8') in response.data # <正常系3_1> # <1件確認> # 発行済一覧画面の参照(1件) def test_normal_3_1(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] client = self.client_with_admin_login(app) response = client.get(self.url_list) assert response.status_code == 200 assert '<title>発行済一覧'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert 'COUPON'.encode('utf-8') in response.data assert '取扱中'.encode('utf-8') in response.data assert token.token_address.encode('utf-8') in response.data # <正常系3_2> # <1件確認> # 売出管理画面の参照(1件) def test_normal_3_2(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] client = self.client_with_admin_login(app) response = client.get(self.url_positions) assert response.status_code == 200 assert '<title>売出管理'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert 
token.token_address.encode('utf-8') in response.data assert '<td>2,000,000</td>\n <td>2,000,000</td>\n <td>0</td>'. \ encode('utf-8') in response.data # <正常系4> # <設定変更> # 設定変更 → 詳細設定画面で確認 def test_normal_4(self, app, shared_contract): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() url_setting = self.url_setting + tokens[0].token_address client = self.client_with_admin_login(app) # 設定変更 response = client.post( url_setting, data={ 'details': 'details詳細2', 'return_details': 'return詳細2', 'memo': 'memoメモ2', 'expirationDate': '20200101', 'transferable': 'False', 'tradableExchange': shared_contract['IbetCouponExchange']['address'], 'image_1': 'https://test.com/image_12.jpg', 'image_2': 'https://test.com/image_22.jpg', 'image_3': 'https://test.com/image_32.jpg', } ) assert response.status_code == 302 response = client.get(url_setting) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert 'COUPON'.encode('utf-8') in response.data assert '2000000'.encode('utf-8') in response.data assert '20200101'.encode('utf-8') in response.data assert '<option selected value="False">あり</option>'.encode('utf-8') in response.data assert 'details詳細2'.encode('utf-8') in response.data assert 'return詳細2'.encode('utf-8') in response.data assert 'memoメモ2'.encode('utf-8') in response.data assert 'https://test.com/image_12.jpg'.encode('utf-8') in response.data assert 'https://test.com/image_22.jpg'.encode('utf-8') in response.data assert 'https://test.com/image_32.jpg'.encode('utf-8') in response.data assert shared_contract['IbetCouponExchange']['address'].encode('utf-8') in response.data # データ戻し response = client.post( url_setting, data={ 'details': 'details詳細', 'return_details': 'return詳細', 'memo': 'memoメモ', 'expirationDate': '20191231', 'transferable': 'True', 'tradableExchange': shared_contract['IbetCouponExchange']['address'], 'image_1': 
'https://test.com/image_1.jpg', 'image_2': 'https://test.com/image_2.jpg', 'image_3': 'https://test.com/image_3.jpg', } ) assert response.status_code == 302 # <正常系5_1> # <有効化・無効化> # 無効化 → 発行済一覧で確認 def test_normal_5_1(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() client = self.client_with_admin_login(app) # 無効化 response = client.post( self.url_invalid, data={ 'token_address': tokens[0].token_address } ) assert response.status_code == 302 # 一覧で確認 response = client.get(self.url_list) assert response.status_code == 200 assert '<title>発行済一覧'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert 'COUPON'.encode('utf-8') in response.data assert '停止中'.encode('utf-8') in response.data # <正常系5_2> # <有効化・無効化> # 有効化 → 発行済一覧で確認 def test_normal_5_2(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() client = self.client_with_admin_login(app) # 有効化 response = client.post( self.url_valid, data={ 'token_address': tokens[0].token_address } ) assert response.status_code == 302 # 一覧で確認 response = client.get(self.url_list) assert response.status_code == 200 assert '<title>発行済一覧'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert 'COUPON'.encode('utf-8') in response.data assert '取扱中'.encode('utf-8') in response.data # <正常系6> # <追加発行> # 追加発行 → 詳細背定画面で確認 def test_normal_6(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() url_add_supply = self.url_add_supply + tokens[0].token_address url_setting = self.url_setting + tokens[0].token_address client = self.client_with_admin_login(app) # 追加発行画面(GET) response = client.get(url_add_supply) assert response.status_code == 200 assert '<title>追加発行'.encode('utf-8') in response.data assert tokens[0].token_address.encode('utf-8') in response.data assert '2000000'.encode('utf-8') in response.data 
# 追加発行 response = client.post( url_add_supply, data={ 'addSupply': 100 } ) assert response.status_code == 302 # 詳細設定画面で確認 response = client.get(url_setting) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert '2000100'.encode('utf-8') in response.data # <正常系7_1> # <割当> # 割当 → 保有者一覧で確認 def test_normal_7_1(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() client = self.client_with_admin_login(app) # 割当処理 response = client.post( self.url_transfer, data={ 'token_address': tokens[0].token_address, 'to_address': eth_account['trader']['account_address'], 'amount': 100, } ) assert response.status_code == 200 # 保有者一覧画面の参照 response = client.get(self.url_holders + tokens[0].token_address) assert response.status_code == 200 assert '<title>保有者一覧'.encode('utf-8') in response.data # 保有者一覧APIの参照 response = client.get(self.url_get_holders + tokens[0].token_address) response_data_list = json.loads(response.data) assert response.status_code == 200 for response_data in response_data_list: if eth_account['issuer']['account_address'] == response_data['account_address']: # issuer assert '発行体1' == response_data['name'] assert '--' == response_data['postal_code'] assert '--' == response_data['address'] assert '--' == response_data['email'] assert '--' == response_data['birth_date'] assert '--' == response_data['phone'] assert 2000000 == response_data['balance'] assert 0 == response_data['used'] elif eth_account['trader']['account_address'] == response_data['account_address']: # trader assert 'タンタイテスト' == response_data['name'] assert '1040053' == response_data['postal_code'] assert '東京都中央区 勝どき1丁目1-2−3' == response_data['address'] assert '<EMAIL>' == response_data['email'] assert '20191102' == response_data['birth_date'] assert '0399999999' == response_data['phone'] assert 100 == response_data['balance'] assert 0 == response_data['used'] 
else: pytest.raises(AssertionError) # トークン名APIの参照 response = client.get(self.url_get_token_name + tokens[0].token_address) response_data = json.loads(response.data) assert response.status_code == 200 assert 'テストクーポン' == response_data # <正常系7_2> # <割当> # 割当画面表示 def test_normal_7_2(self, app): client = self.client_with_admin_login(app) # 割当処理 response = client.get(self.url_transfer) assert response.status_code == 200 assert '<title>トークン割当'.encode('utf-8') in response.data # <正常系8> # <保有者詳細> # 保有者詳細 def test_normal_8(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() client = self.client_with_admin_login(app) # 保有者詳細画面の参照 response = client.get( self.url_holder + tokens[0].token_address + '/' + eth_account['issuer']['account_address'] ) assert response.status_code == 200 assert '<title>保有者詳細'.encode('utf-8') in response.data assert eth_account['issuer']['account_address'].encode('utf-8') in response.data assert '株式会社1'.encode('utf-8') in response.data assert '1234567'.encode('utf-8') in response.data assert '東京都'.encode('utf-8') in response.data assert '中央区'.encode('utf-8') in response.data assert '日本橋11-1'.encode('utf-8') in response.data assert '東京マンション101'.encode('utf-8') in response.data # <正常系9_1> # <売出> # 新規売出画面の参照 def test_normal_9_1(self, app, shared_contract): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 売出画面の参照 client = self.client_with_admin_login(app) response = client.get(self.url_sell + token.token_address) assert response.status_code == 200 assert '<title>新規売出'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert "{:,}".format(2000100).encode('utf-8') in response.data assert 'details詳細'.encode('utf-8') in response.data assert '20191231'.encode('utf-8') in response.data assert 'なし'.encode('utf-8') in response.data assert shared_contract['IbetCouponExchange']['address'].encode('utf-8') 
in response.data # <正常系9_2> # <売出> # 売出 → 売出管理画面で確認 def test_normal_9_2(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] url_sell = self.url_sell + token.token_address # 売出処理 response = client.post( url_sell, data={ 'sellPrice': 100, } ) assert response.status_code == 302 # 売出管理画面の参照 response = client.get(self.url_positions) assert response.status_code == 200 assert '<title>売出管理'.encode('utf-8') in response.data assert '新規売出を受け付けました。売出開始までに数分程かかることがあります。'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data # 売出中の数量が存在する assert '<td>2,000,100</td>\n <td>0</td>\n <td>2,000,000</td>'. \ encode('utf-8') in response.data # <正常系9_3> # <売出> # 売出停止 → 売出管理画面で確認 def test_normal_9_3(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 売出停止処理 response = client.post( self.url_cancel_order + token.token_address + "/1", ) assert response.status_code == 302 # 売出管理画面の参照 response = client.get(self.url_positions) assert response.status_code == 200 assert '<title>売出管理'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data # 売出中の数量が0 assert '<td>2,000,100</td>\n <td>2,000,000</td>\n <td>0</td>'. 
\ encode('utf-8') in response.data # <正常系10_1> # <所有者移転> # 所有者移転画面の参照 def test_normal_10_1(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) # 所有者移転画面の参照 response = client.get( self.url_transfer_ownership + token.token_address + '/' + issuer_address) assert response.status_code == 200 assert '<title>所有者移転'.encode('utf-8') in response.data assert ('value="' + str(issuer_address)).encode('utf-8') in response.data # <正常系10_2> # <所有者移転> # 所有者移転処理 → 保有者一覧の参照 def test_normal_10_2(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) trader_address = \ to_checksum_address(eth_account['trader']['account_address']) # 所有者移転 response = client.post( self.url_transfer_ownership + token.token_address + '/' + issuer_address, data={ 'to_address': trader_address, 'amount': 10 } ) assert response.status_code == 302 # 保有者一覧の参照 response = client.get(self.url_holders + token.token_address) assert response.status_code == 200 assert '<title>保有者一覧'.encode('utf-8') in response.data # 保有者一覧APIの参照 response = client.get(self.url_get_holders + token.token_address) response_data_list = json.loads(response.data) assert response.status_code == 200 for response_data in response_data_list: if eth_account['issuer']['account_address'] == response_data['account_address']: # issuer assert '発行体1' == response_data['name'] assert '--' == response_data['postal_code'] assert '--' == response_data['address'] assert '--' == response_data['email'] assert '--' == response_data['birth_date'] assert '--' == response_data['phone'] assert 1999990 == response_data['balance'] assert 0 == response_data['used'] elif 
eth_account['trader']['account_address'] == response_data['account_address']: # trader assert 'タンタイテスト' == response_data['name'] assert '1040053' == response_data['postal_code'] assert '東京都中央区 勝どき1丁目1-2ー3' == response_data['address'] assert '<EMAIL>' == response_data['email'] assert '20191102' == response_data['birth_date'] assert '0399999999' == response_data['phone'] assert 110 == response_data['balance'] assert 0 == response_data['used'] else: pytest.raises(AssertionError) # トークン名APIの参照 response = client.get(self.url_get_token_name + token.token_address) response_data = json.loads(response.data) assert response.status_code == 200 assert 'テストクーポン' == response_data # <正常系11> # <公開> # 公開処理 → 公開済状態になること def test_normal_11(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 公開処理 response = client.post( self.url_release, data={ 'token_address': token.token_address } ) assert response.status_code == 302 # 詳細設定画面の参照 url_setting = self.url_setting + token.token_address response = client.get(url_setting) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert '公開済'.encode('utf-8') in response.data # <正常系12_1> # <募集申込開始・停止> # 初期状態:募集申込停止中(詳細設定画面で確認) # ※Token_1が対象 def test_normal_12_1(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 詳細設定画面の参照 url_setting = self.url_setting + token.token_address response = client.get(url_setting) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert '募集申込開始'.encode('utf-8') in response.data # <正常系12_2> # <募集申込開始・停止> # 募集申込開始 → 詳細設定画面で確認 # ※Token_1が対象 def test_normal_12_2(self, app): client = self.client_with_admin_login(app) tokens = 
Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 募集申込開始 response = client.post( self.url_start_initial_offering, data={ 'token_address': token.token_address } ) assert response.status_code == 302 # 詳細設定画面の参照 url_setting = self.url_setting + token.token_address response = client.get(url_setting) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert '募集申込停止'.encode('utf-8') in response.data # <正常系12_3> # <募集申込開始・停止> # ※12_2の続き # 募集申込停止 → 詳細設定画面で確認 # ※Token_1が対象 def test_normal_12_3(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 募集申込停止 response = client.post( self.url_stop_initial_offering, data={ 'token_address': token.token_address } ) assert response.status_code == 302 # 詳細設定画面の参照 url_setting = self.url_setting + token.token_address response = client.get(url_setting) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert '募集申込開始'.encode('utf-8') in response.data # 募集申込状態に戻す response = client.post( self.url_start_initial_offering, data={ 'token_address': token.token_address } ) assert response.status_code == 302 # <正常系13_1> # <募集申込一覧参照> # 0件:募集申込一覧 # ※Token_1が対象 def test_normal_13_1(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 募集申込一覧参照 response = client.get(self.url_applications + str(token.token_address)) assert response.status_code == 200 assert '<title>募集申込一覧'.encode('utf-8') in response.data assert 'データが存在しません'.encode('utf-8') in response.data # <正常系13_2> # <募集申込一覧参照> # 1件:募集申込一覧 # ※Token_1が対象 def test_normal_13_2(self, db, app): client = self.client_with_admin_login(app) tokens = 
Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] token_address = str(token.token_address) trader_address = eth_account['trader']['account_address'] # 募集申込データの作成:投資家 apply_for_offering( db, eth_account['trader'], token_address ) # 募集申込一覧参照 response = client.get(self.url_applications + token_address) assert response.status_code == 200 assert '<title>募集申込一覧'.encode('utf-8') in response.data applications = client.get(self.url_get_applications + token_address) assert trader_address.encode('utf-8') in applications.data # <正常系14_1> # <割当(募集申込)> # ※12_2の続き # 割当(募集申込)画面参照:GET # ※Token_1が対象 def test_normal_14_1(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] token_address = str(token.token_address) trader_address = eth_account['trader']['account_address'] # 割当(募集申込) url = self.url_allocate + '/' + token_address + '/' + trader_address response = client.get(url) assert response.status_code == 200 assert 'トークン割当'.encode('utf-8') in response.data assert token_address.encode('utf-8') in response.data assert trader_address.encode('utf-8') in response.data # <正常系14_2> # <割当(募集申込)> # ※10_2, 12_2の後に実施 # 割当(募集申込)処理 → 保有者一覧参照 # ※Token_1が対象 def test_normal_14_2(self, db, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] token_address = str(token.token_address) issuer_address = eth_account['issuer']['account_address'] trader_address = eth_account['trader']['account_address'] # 割当(募集申込) url = self.url_allocate + '/' + token_address + '/' + trader_address response = client.post(url, data={'amount': 10}) assert response.status_code == 302 # Transferイベント登録 index_transfer_event( db, '0xac22f75bae96f8e9f840f980dfefc1d497979341d3106aeb25e014483c3f414a', # 仮のトランザクションハッシュ token.token_address, 
issuer_address, trader_address, 10, block_timestamp=datetime.utcnow() ) # 保有者一覧の参照 response = client.get(self.url_holders + token_address) assert response.status_code == 200 assert '<title>保有者一覧'.encode('utf-8') in response.data # 保有者一覧APIの参照 response = client.get(self.url_get_holders + token_address) response_data = json.loads(response.data) assert response.status_code == 200 # issuer assert issuer_address == response_data[0]['account_address'] assert '発行体1' == response_data[0]['name'] assert '--' == response_data[0]['postal_code'] assert '--' == response_data[0]['address'] assert '--' == response_data[0]['email'] assert '--' == response_data[0]['birth_date'] assert '--' == response_data[0]['phone'] assert 1999980 == response_data[0]['balance'] assert 0 == response_data[0]['used'] # trader assert trader_address == response_data[1]['account_address'] assert 'タンタイテスト' == response_data[1]['name'] assert '1040053' == response_data[1]['postal_code'] assert '東京都中央区 勝どき1丁目1-2ー3' == response_data[1]['address'] assert '<EMAIL>' == response_data[1]['email'] assert '20191102' == response_data[1]['birth_date'] assert '0399999999' == response_data[1]['phone'] assert 120 == response_data[1]['balance'] assert 0 == response_data[1]['used'] # トークン名APIの参照 response = client.get(self.url_get_token_name + token.token_address) response_data = json.loads(response.data) assert response.status_code == 200 assert 'テストクーポン' == response_data # <正常系15> # <保有者一覧CSVダウンロード> # 保有者一覧CSVが取得できること def test_normal_15(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] token_address = str(token.token_address) # csvダウンロード url = self.url_holders_csv_download response = client.post(url, data={'token_address': token_address}) assert response.status_code == 200 # <正常系16> # トークン追跡 def test_normal_16(self, app): tokens = 
Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] client = self.client_with_admin_login(app) # 登録済みのトランザクションハッシュを取得 transfer_event = Transfer.query.filter_by(token_address=token.token_address).first() tx_hash = transfer_event.transaction_hash # トークン追跡の参照 response = client.get(self.url_token_tracker + token.token_address) assert response.status_code == 200 assert '<title>トークン追跡'.encode('utf-8') in response.data assert tx_hash.encode('utf-8') in response.data # <正常系17_1> # <利用履歴画面> # 利用履歴画面が表示できること def test_normal_17_1(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] token_address = str(token.token_address) url = self.url_usage_history + token_address response = client.get(url) assert response.status_code == 200 # <正常系17_2> # <利用履歴(API)> # 利用履歴が取得できること(0件) def test_normal_17_2(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] token_address = str(token.token_address) url = self.url_get_usage_history_coupon + token_address response = client.get(url) response_data = json.loads(response.data) assert response.status_code == 200 assert len(response_data) == 0 # <正常系17_3> # <利用履歴(API)> # 利用履歴が取得できること(1件) def test_normal_17_3(self, app, db): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] trader_account = eth_account['trader']['account_address'] balance = 7 total_used_amont = 2 used_amount = 1 event = contract_utils_common.index_consume_event( db, '0xac22f75bae96f8e9f840f980dfefc1d497979341d3106aeb25e014483c3f414a', # 仮のトランザクションハッシュ token.token_address, trader_account, balance, total_used_amont, used_amount, datetime(2020, 5, 31, 0, 59, 59, 123) ) token_address = 
str(token.token_address) url = self.url_get_usage_history_coupon + token_address response = client.get(url) response_data = json.loads(response.data) assert response.status_code == 200 assert len(response_data) == 1 assert response_data[0]['block_timestamp'] == '2020/05/31 09:59:59 +0900' assert response_data[0]['consumer'] == trader_account assert response_data[0]['value'] == used_amount # 後処理 db.session.delete(event) # <正常系17_4> # <利用履歴CSVダウンロード> # 利用履歴CSVが取得できること def test_normal_17_4(self, app, db): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] trader_account = eth_account['trader']['account_address'] balance = 7 total_used_amount = 2 used_amount = 1 event = contract_utils_common.index_consume_event( db, '0xac22f75bae96f8e9f840f980dfefc1d497979341d3106aeb25e014483c3f414a', # 仮のトランザクションハッシュ token.token_address, trader_account, balance, total_used_amount, used_amount, datetime(2020, 5, 31, 0, 59, 59, 123) ) token_address = str(token.token_address) response = client.post( self.url_used_csv_download, data={ 'token_address': token_address } ) assumed_csv = \ 'token_name,token_address,timestamp,account_address,amount\n' + \ f'テストクーポン,{token.token_address},2020/05/31 09:59:59 +0900,{trader_account},{used_amount}\n' assert response.status_code == 200 assert assumed_csv == response.data.decode('utf-8') # 後処理 db.session.delete(event) ############################################################################# # テスト(エラー系) ############################################################################# # <エラー系1> # 新規発行(必須エラー) def test_error_1_1(self, app): client = self.client_with_admin_login(app) # 新規発行 response = client.post( self.url_issue, data={ } ) assert response.status_code == 200 assert '<title>新規発行'.encode('utf-8') in response.data assert '名称は必須です。'.encode('utf-8') in response.data assert '略称は必須です。'.encode('utf-8') in response.data assert 
'総発行量は必須です。'.encode('utf-8') in response.data assert 'DEXアドレスは必須です。'.encode('utf-8') in response.data # <エラー系2> # 追加発行(必須エラー) def test_error_1_2(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() url_add_supply = self.url_add_supply + tokens[0].token_address client = self.client_with_admin_login(app) # 新規発行 response = client.post( url_add_supply, data={} ) assert response.status_code == 200 assert '<title>追加発行'.encode('utf-8') in response.data assert '追加発行量は必須です。'.encode('utf-8') in response.data # <エラー系3> # 割当(必須エラー) def test_error_1_3(self, app): client = self.client_with_admin_login(app) response = client.post( self.url_transfer, data={} ) assert response.status_code == 200 assert '<title>トークン割当'.encode('utf-8') in response.data assert 'トークンアドレスは必須です。'.encode('utf-8') in response.data assert '割当先アドレスは必須です。'.encode('utf-8') in response.data assert '割当数量は必須です。'.encode('utf-8') in response.data # <エラー系1_4> # <入力値チェック> # 売出(必須エラー) def test_error_1_4(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] # 売出 client = self.client_with_admin_login(app) response = client.post( self.url_sell + token.token_address, data={ } ) assert response.status_code == 302 # 新規売出でエラーを確認 response = client.get(self.url_sell + token.token_address) assert response.status_code == 200 assert '<title>新規売出'.encode('utf-8') in response.data assert '売出価格は必須です。'.encode('utf-8') in response.data # <エラー系2_1> # <入力値チェック> # 新規発行(DEXアドレス形式エラー) def test_error_2_1(self, app): error_address = '0xc94b0d702422587e361dd6cd08b55dfe1961181f1' client = self.client_with_admin_login(app) # 新規発行 response = client.post( self.url_issue, data={ 'name': 'テストクーポン', 'symbol': 'COUPON', 'totalSupply': 2000000, 'expirationDate': '20191231', 'transferable': 'True', 'details': 'details詳細', 'memo': 'memoメモ', 'image_1': 'https://test.com/image_1.jpg', 'image_2': 
'https://test.com/image_2.jpg', 'image_3': 'https://test.com/image_3.jpg', 'tradableExchange': error_address } ) assert response.status_code == 200 assert '<title>新規発行'.encode('utf-8') in response.data assert 'DEXアドレスは有効なアドレスではありません。'.encode('utf-8') in response.data # <エラー系2_2> # <入力値チェック> # 設定画面(DEXアドレス形式エラー) def test_error_2_2(self, app): error_address = '0xc94b0d702422587e361dd6cd08b55dfe1961181f1' tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() url_setting = self.url_setting + tokens[0].token_address client = self.client_with_admin_login(app) response = client.post( url_setting, data={ 'details': 'details詳細2', 'memo': 'memoメモ2', 'tradableExchange': error_address, 'image_1': 'https://test.com/image_12.jpg', 'image_2': 'https://test.com/image_22.jpg', 'image_3': 'https://test.com/image_32.jpg', } ) assert response.status_code == 200 assert 'DEXアドレスは有効なアドレスではありません。'.encode('utf-8') in response.data # <エラー系2_3> # 追加発行(上限エラー) def test_error_2_3(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() url_add_supply = self.url_add_supply + tokens[0].token_address url_setting = self.url_setting + tokens[0].token_address client = self.client_with_admin_login(app) # 追加発行画面(GET) response = client.get(url_add_supply) assert response.status_code == 200 assert '<title>追加発行'.encode('utf-8') in response.data assert tokens[0].token_address.encode('utf-8') in response.data assert '2000100'.encode('utf-8') in response.data # 追加発行 response = client.post( url_add_supply, data={ 'addSupply': 100000000, 'totalSupply': 2000100, } ) assert response.status_code == 302 response = client.get(url_add_supply) assert '総発行量と追加発行量の合計は、100,000,000が上限です。'.encode('utf-8') in response.data # 詳細設定画面で確認 response = client.get(url_setting) assert response.status_code == 200 assert '<title>詳細設定'.encode('utf-8') in response.data assert 'テストクーポン'.encode('utf-8') in response.data assert 
'2000100'.encode('utf-8') in response.data # <エラー系3_1> # <所有者移転> # URLパラメータチェック:token_addressが無効 def test_error_3_1(self, app): error_address = '0xc94b0d702422587e361dd6cd08b55dfe1961181f1' client = self.client_with_admin_login(app) issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) trader_address = \ to_checksum_address(eth_account['trader']['account_address']) # 所有者移転 response = client.post( self.url_transfer_ownership + error_address + '/' + issuer_address, data={ 'to_address': trader_address, 'amount': 10 } ) assert response.status_code == 404 # <エラー系3_2> # <所有者移転> # URLパラメータチェック:account_addressが無効 def test_error_3_2(self, app): error_address = '0xc94b0d702422587e361dd6cd08b55dfe1961181f1' client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] trader_address = \ to_checksum_address(eth_account['trader']['account_address']) # 所有者移転 response = client.post( self.url_transfer_ownership + token.token_address + '/' + error_address, data={ 'to_address': trader_address, 'amount': 10 } ) assert response.status_code == 404 # <エラー系3_3> # <所有者移転> # 入力値チェック:必須チェック def test_error_3_3(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) # 所有者移転 response = client.post( self.url_transfer_ownership + token.token_address + '/' + issuer_address, data={ } ) assert response.status_code == 200 assert '移転先は必須です。'.encode('utf-8') in response.data assert '移転数量は必須です。'.encode('utf-8') in response.data # <エラー系3_4> # <所有者移転> # 入力値チェック:to_addressが無効 def test_error_3_4(self, app): error_address = '0xc94b0d702422587e361dd6cd08b55dfe1961181f1' client = self.client_with_admin_login(app) tokens = 
Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) # 所有者移転 response = client.post( self.url_transfer_ownership + token.token_address + '/' + issuer_address, data={ 'to_address': error_address, 'amount': 10 } ) assert response.status_code == 200 assert '移転先は無効なアドレスです。'.encode('utf-8') in response.data # <エラー系3_5> # <所有者移転> # 入力値チェック:amountが上限超過 def test_error_3_5(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) trader_address = \ to_checksum_address(eth_account['trader']['account_address']) # 所有者移転 response = client.post( self.url_transfer_ownership + token.token_address + '/' + issuer_address, data={ 'to_address': trader_address, 'amount': 100000001 } ) assert response.status_code == 200 assert '移転数量は100,000,000が上限です。'.encode('utf-8') in response.data # <エラー系3_6> # <所有者移転> # 入力値チェック:amountが残高超 def test_error_3_6(self, app): client = self.client_with_admin_login(app) tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[0] issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) trader_address = \ to_checksum_address(eth_account['trader']['account_address']) # 所有者移転 response = client.post( self.url_transfer_ownership + token.token_address + '/' + issuer_address, data={ 'to_address': trader_address, 'amount': 2000001 } ) assert response.status_code == 200 assert '移転数量が残高を超えています。'.encode('utf-8') in response.data # <エラー系4_1> # <追加発行> # 追加発行 → 何らかの影響で指定したトークンが存在しない def test_error_4_1(self, app): url_add_supply = self.url_add_supply + "0x1111" # 不正なアドレス client = self.client_with_admin_login(app) # 追加発行画面(GET) response = client.get(url_add_supply) 
assert response.status_code == 404 # abortされる # <エラー系4_2> # <追加発行> # 詳細設定 → 何らかの影響で指定したトークンが存在しない def test_error_4_2(self, app): url_setting = self.url_setting + "0x2222" # 不正なアドレス client = self.client_with_admin_login(app) # 詳細設定画面(GET) response = client.get(url_setting) assert response.status_code == 404 # abortされる # <エラー系4_3> # <売出> # 新規売出 → 何らかの影響で指定したトークンが存在しない def test_error4_3(self, app): # 売出画面の参照 client = self.client_with_admin_login(app) response = client.get(self.url_sell + "0x3333") # 不正なアドレス assert response.status_code == 404 # abortされる # <エラー系4-4> # <売出停止画面の表示> def test_error_4_4(self, app): client = self.client_with_admin_login(app) # 売出停止処理 response = client.get(self.url_cancel_order + "0x4444") # 不正なアドレス assert response.status_code == 404 # abortされる # <エラー系4_5> # <所有者移転> # 所有者移転画面の参照:GET → 何らかの影響で指定したトークンが存在しない def test_error_4_5(self, app): client = self.client_with_admin_login(app) issuer_address = \ to_checksum_address(eth_account['issuer']['account_address']) # 所有者移転画面の参照 response = client.get( self.url_transfer_ownership + "0x5555" + '/' + issuer_address) # 不正なアドレス assert response.status_code == 404 # abortされる # <エラー系4-6> # <割当(募集申込)> # 割当(募集申込)画面参照:GET → 何らかの影響で指定したトークンが存在しない def test_error_4_6(self, app): client = self.client_with_admin_login(app) trader_address = eth_account['trader']['account_address'] # 割当(募集申込) url = self.url_allocate + '/' + "0x6666" + '/' + trader_address # 不正なアドレス response = client.get(url) assert response.status_code == 404 # abortされる # <エラー系5_1> # 割当(トークンアドレス形式エラー) def test_error_5_1(self, app): client = self.client_with_admin_login(app) response = client.post( self.url_transfer, data={ 'token_address': "0x1111", # 不正なアドレス 'to_address': eth_account['trader']['account_address'], 'amount': 1 } ) assert response.status_code == 200 assert '<title>トークン割当'.encode('utf-8') in response.data assert 'トークンアドレスは有効なアドレスではありません。'.encode('utf-8') in response.data # <エラー系5_2> # 割当(割当先アドレス形式エラー) def test_error_5_2(self, app): token 
= Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).first() client = self.client_with_admin_login(app) response = client.post( self.url_transfer, data={ 'token_address': token.token_address, 'to_address': "0x2222", # 不正なアドレス 'amount': 1 } ) assert response.status_code == 200 assert '<title>トークン割当'.encode('utf-8') in response.data assert '割当先アドレスは有効なアドレスではありません。'.encode('utf-8') in response.data # <エラー系5_3> # 割当(割当数量上限エラー) def test_error_5_3(self, app): token = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).first() client = self.client_with_admin_login(app) response = client.post( self.url_transfer, data={ 'token_address': token.token_address, 'to_address': eth_account['trader']['account_address'], 'amount': 100_000_001 } ) assert response.status_code == 200 assert '<title>トークン割当'.encode('utf-8') in response.data assert '割当数量は100,000,000が上限です。'.encode('utf-8') in response.data # <エラー系5_4> # 割当(残高エラー) def test_error_5_4(self, app): tokens = Token.query.filter_by(template_id=Config.TEMPLATE_ID_COUPON).order_by(Token.created).all() token = tokens[1] total_supply = 2000000 client = self.client_with_admin_login(app) response = client.post( self.url_transfer, data={ 'token_address': token.token_address, 'to_address': eth_account['trader']['account_address'], 'amount': total_supply + 1 } ) assert response.status_code == 200 assert '<title>トークン割当'.encode('utf-8') in response.data assert '割当数量が残高を超えています。'.encode('utf-8') in response.data # <エラー系5_5> # 割当(トークンアドレスが無効) def test_error_5_5(self, app): client = self.client_with_admin_login(app) response = client.post( self.url_transfer, data={ 'token_address': '0xd05029ed7f520ddaf0851f55d72ac8f28ec31823', # コントラクトが登録されていないアドレス 'to_address': eth_account['trader']['account_address'], 'amount': 1 } ) assert response.status_code == 200 assert '<title>トークン割当'.encode('utf-8') in response.data assert '無効なトークンアドレスです。'.encode('utf-8') in response.data # <エラー系6_1> # <発行体相違> # トークン追跡: 異なる発行体管理化のトークンアドレス def 
test_error_6_1(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_token_tracker + token.token_address) assert response.status_code == 404 # <エラー系6_2> # <発行体相違> # 申込者リストCSVダウンロード: 異なる発行体管理化のトークンアドレス def test_error_6_2(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_applications_csv_download, data={ 'token_address': token.token_address } ) assert response.status_code == 404 # <エラー系6_3> # <発行体相違> # 募集申込一覧取得(API): 異なる発行体管理化のトークンアドレス def test_error_6_3(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_get_applications + token.token_address) assert response.status_code == 404 # <エラー系6_4> # <発行体相違> # 公開: 異なる発行体管理化のトークンアドレス def test_error_6_4(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_release, data={ 'token_address': token.token_address } ) assert response.status_code == 404 # <エラー系6_5> # <発行体相違> # 追加発行: 異なる発行体管理化のトークンアドレス def test_error_6_5(self, app): # 発行体1管理下のトークンアドレス tokens = 
Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_add_supply + token.token_address) assert response.status_code == 404 response = client.post( self.url_add_supply + token.token_address, data={ 'addSupply': 1 } ) assert response.status_code == 404 # <エラー系6_6> # <発行体相違> # 設定内容修正: 異なる発行体管理化のトークンアドレス def test_error_6_6(self, app, shared_contract): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_setting + token.token_address) assert response.status_code == 404 response = client.post( self.url_setting + token.token_address, data={ 'details': 'details詳細2', 'return_details': 'return詳細2', 'memo': 'memoメモ2', 'expirationDate': '20200101', 'transferable': 'False', 'tradableExchange': shared_contract['IbetCouponExchange']['address'], 'image_1': 'https://test.com/image_12.jpg', 'image_2': 'https://test.com/image_22.jpg', 'image_3': 'https://test.com/image_32.jpg', } ) assert response.status_code == 404 # <エラー系6_7> # <発行体相違> # 売出: 異なる発行体管理化のトークンアドレス def test_error_6_7(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_sell + token.token_address) assert response.status_code == 404 response = client.post( self.url_sell + token.token_address, data={ 'sellPrice': 1000 } ) assert response.status_code == 404 # <エラー系6_8> # <発行体相違> # 売出停止: 
異なる発行体管理化のトークンアドレス def test_error_6_8(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_cancel_order + token.token_address + '/1') assert response.status_code == 404 response = client.post( self.url_cancel_order + token.token_address + '/1', data={ } ) assert response.status_code == 404 # <エラー系6_9> # <発行体相違> # 割当: 異なる発行体管理化のトークンアドレス def test_error_6_9(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_transfer, data={ 'token_address': token.token_address, 'to_address': eth_account['issuer2']['account_address'], 'amount': 1 } ) assert response.status_code == 200 assert '無効なトークンアドレスです。'.encode('utf-8') in response.data # <エラー系6_10> # <発行体相違> # 割当(募集申込): 異なる発行体管理化のトークンアドレス def test_error_6_10(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') account_address = eth_account['trader']['account_address'] response = client.get(self.url_allocate + token.token_address + '/' + account_address) assert response.status_code == 404 response = client.post( self.url_allocate + token.token_address + '/' + account_address, data={ 'amount': 1 } ) assert response.status_code == 404 # <エラー系6_11> # <発行体相違> # 保有者移転: 異なる発行体管理化のトークンアドレス def test_error_6_11(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( 
template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') account_address = eth_account['trader']['account_address'] response = client.get(self.url_transfer_ownership + token.token_address + '/' + account_address) assert response.status_code == 404 response = client.post( self.url_transfer_ownership + token.token_address + '/' + account_address, data={ 'amount': 1 } ) assert response.status_code == 404 # <エラー系6_12> # <発行体相違> # トークン利用履歴取得(API): 異なる発行体管理化のトークンアドレス def test_error_6_12(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_get_usage_history_coupon + token.token_address) assert response.status_code == 404 # <エラー系6_13> # <発行体相違> # トークン利用履歴リストCSVダウンロード: 異なる発行体管理化のトークンアドレス def test_error_6_13(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_used_csv_download, data={ 'token_address': token.token_address } ) assert response.status_code == 404 # <エラー系6_14> # <発行体相違> # 保有者一覧取得(CSV): 異なる発行体管理化のトークンアドレス def test_error_6_14(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_holders_csv_download, data={ 
'token_address': token.token_address } ) assert response.status_code == 404 # <エラー系6_15> # <発行体相違> # 保有者一覧取得(API): 異なる発行体管理化のトークンアドレス def test_error_6_15(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_get_holders + token.token_address) assert response.status_code == 404 # <エラー系6_16> # <発行体相違> # トークン名称取得(API): 異なる発行体管理化のトークンアドレス def test_error_6_16(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.get(self.url_get_token_name + token.token_address) assert response.status_code == 404 # <エラー系6_17> # <発行体相違> # 保有者詳細: 異なる発行体管理化のトークンアドレス def test_error_6_17(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') account_address = eth_account['trader']['account_address'] response = client.get(self.url_holder + token.token_address + '/' + account_address) assert response.status_code == 404 response = client.post( self.url_holder + token.token_address + '/' + account_address, data={ } ) assert response.status_code == 404 # <エラー系6_18> # <発行体相違> # 有効化: 異なる発行体管理化のトークンアドレス def test_error_6_18(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = 
self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_valid, data={ 'token_address': token.token_address } ) assert response.status_code == 404 # <エラー系6_19> # <発行体相違> # 無効化: 異なる発行体管理化のトークンアドレス def test_error_6_19(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_invalid, data={ 'token_address': token.token_address } ) assert response.status_code == 404 # <エラー系6_20> # <発行体相違> # 募集申込開始: 異なる発行体管理化のトークンアドレス def test_error_6_20(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_start_initial_offering, data={ 'token_address': token.token_address } ) assert response.status_code == 404 # <エラー系6_21> # <発行体相違> # 募集申込停止: 異なる発行体管理化のトークンアドレス def test_error_6_21(self, app): # 発行体1管理下のトークンアドレス tokens = Token.query.filter_by( template_id=Config.TEMPLATE_ID_COUPON, admin_address=eth_account['issuer']['account_address'].lower() ).order_by(Token.created).all() token = tokens[0] # 発行体2でログイン client = self.client_with_admin_login(app, login_id='admin2') response = client.post( self.url_stop_initial_offering, data={ 'token_address': token.token_address } ) assert response.status_code == 404 ############################################################################# # 後処理 ############################################################################# def test_end(self, db): clean_issue_event(db)
StarcoderdataPython
3273508
from slugify import slugify

from .models import Models


class Controllers():
    """Thin authorisation layer over the ``Models`` store.

    Records are namespaced by *kind* and keyed by a slugified *id*.
    Admin users can see and modify everything; non-admin users are
    restricted to records they created themselves.
    """

    def __init__(self, connection_string):
        self.models = Models(connection_string=connection_string)

    def _key(self, kind, id):
        # Composite storage key; truncated to the store's 128-char limit.
        return f'{kind}::{id}'[:128]

    def create_or_edit(self, kind, id, value, user, admin):
        """Create or update a record, stamping creator/updater metadata."""
        id = slugify(id)
        value['id'] = id

        # Admins may edit anything; everyone else only their own records.
        def owner_only(record):
            return record.created_by == user

        return self.models.create_or_edit(
            self._key(kind, id),
            value,
            create_kw=dict(created_by=user),
            update_kw=dict(updated_by=user),
            edit_allowed=None if admin else owner_only,
        )

    def query_one(self, kind, id, admin=False, user=None):
        """Fetch a single record, hiding it from non-owners unless admin."""
        found = self.models.query_one(self._key(kind, id))
        if not found['success']:
            return dict(success=False)
        if admin or found['result']['created_by'] == user:
            return found
        return dict(success=False)

    def query(self, kind, admin=False, user=None):
        """List records of *kind*, filtered to the caller's own unless admin."""
        prefix = kind + '::'
        results = self.models.query()
        matching = [item for item in results['result']
                    if item['key'].startswith(prefix)]
        if not admin:
            matching = [item for item in matching
                        if item['created_by'] == user]
        results['result'] = matching
        return results

    def delete(self, kind, id, admin=False, user=None):
        """Delete a record; non-admins may only delete records they created."""
        key = self._key(kind, id)
        if admin:
            return self.models.delete(key)
        return self.models.delete(
            key, delete_allowed=lambda record: record.created_by == user)
StarcoderdataPython
1688918
'''
This generates the legendre polynomials, using the recursive definition
found in wikipedia (and checked using other sources)

Created on 30 Jan 2013

@author: chris
'''
import math

import numpy as np


def Legendre_polys(l, m, x):
    """Return the associated Legendre polynomial P_l^m evaluated at x.

    Uses the standard upward recurrences:

    * diagonal climb:   P_{m+1}^{m+1} = -(2m+1) sqrt(1 - x^2) P_m^m
    * first degree step: P_{m+1}^m    = (2m+1) x P_m^m
    * degree recurrence: (l - m + 1) P_{l+1}^m = (2l+1) x P_l^m - (l+m) P_{l-1}^m

    Negative orders are mapped onto positive ones with
    P_l^{-m} = (-1)^m (l-m)!/(l+m)! P_l^m.

    Parameters
    ----------
    l : int
        Degree, l >= 0.
    m : int
        Order, -l <= m <= l.
    x : float or numpy.ndarray
        Evaluation point(s); the recurrences assume |x| <= 1.

    Returns
    -------
    float or numpy.ndarray
        P_l^m(x), elementwise if x is an array.
    """
    l_i = 0
    m_j = 0
    P_l_m = 1

    # Map a negative order onto the equivalent positive-order polynomial.
    # math.factorial gives exact integer arithmetic here; the previous
    # scipy.misc.factorial was deprecated and has been removed from SciPy.
    if m < 0:
        m = abs(m)
        P_l_m = (-1) ** m * math.factorial(l - m) / math.factorial(l + m) * P_l_m

    P_l_l = P_l_m
    P_lm1_m = 0

    # Climb the diagonal P_0^0 -> P_1^1 -> ... -> P_m^m.
    while m_j < m:
        P_lp1_lp1 = -(2 * l_i + 1) * np.sqrt(1 - x * x) * P_l_l
        l_i = l_i + 1
        m_j = l_i
        P_l_l = P_lp1_lp1
        P_l_m = P_l_l

    # First step off the diagonal: P_{m+1}^m from P_m^m.
    if l_i < l:
        P_lp1_l = x * (2 * l_i + 1) * P_l_l
        l_i = l_i + 1
        P_lm1_m = P_l_l
        P_l_m = P_lp1_l

    # Raise the degree up to l with the three-term recurrence.
    while l_i < l:
        P_lp1_m = ((2 * l_i + 1) * x * P_l_m - (l_i + m) * P_lm1_m) / (l_i - m + 1)
        l_i = l_i + 1
        P_lm1_m = P_l_m
        P_l_m = P_lp1_m

    return P_l_m
StarcoderdataPython
138081
# Copyright 2021- imbus AG # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import Dict, Optional, Union from zipfile import ZipFile from abc import ABC, abstractmethod from os import path from xml.etree import ElementTree as ET import sys import base64 from TestBenchCliReporter import questions from TestBenchCliReporter import testbench from TestBenchCliReporter.util import ( close_program, get_project_keys, XmlExportConfig, ImportConfig, login, pretty_print, ) def Action(class_name: str, parameters: dict[str, str]) -> AbstractAction: try: return globals()[class_name](parameters) except AttributeError: print(f"Failed to create class {class_name}") close_program() class AbstractAction(ABC): def __init__(self, parameters: Optional[Dict] = None): self.parameters = parameters or {} self.report_tmp_name = "" self.job_id = "" def prepare(self, connection_log: testbench.ConnectionLog) -> bool: return True @abstractmethod def trigger(self, connection_log: testbench.ConnectionLog) -> bool: raise NotImplementedError def wait(self, connection_log: testbench.ConnectionLog) -> bool: return True def poll(self, connection_log: testbench.ConnectionLog) -> bool: return True def finish(self, connection_log: testbench.ConnectionLog) -> bool: return True def export(self): return {"type": type(self).__name__, "parameters": self.parameters} class UnloggedAction(AbstractAction): def export(self): return None class ExportXMLReport(AbstractAction): def 
prepare(self, connection_log: testbench.ConnectionLog) -> bool: all_projects = connection_log.active_connection.get_all_projects() selected_project = questions.ask_to_select_project(all_projects) selected_tov = questions.ask_to_select_tov(selected_project) self.parameters["tovKey"] = selected_tov["key"]["serial"] self.parameters["projectPath"] = [ selected_project["name"], selected_tov["name"], ] selected_cycle = questions.ask_to_select_cycle(selected_tov, export=True) print(" Selection:") pretty_print( { "value": f"{' ' * 4 + selected_project['name']: <50}", "style": "#06c8ff bold italic", "end": None, }, {"value": f" projectKey: ", "end": None}, { "value": f"{selected_project['key']['serial']: >15}", "style": "#06c8ff bold italic", }, { "value": f"{' ' * 6 + selected_tov['name']: <50}", "style": "#06c8ff bold italic", "end": None, }, {"value": f" tovKey: ", "end": None}, { "value": f"{selected_tov['key']['serial']: >15}", "style": "#06c8ff bold italic", }, ) if selected_cycle == "NO_EXEC": self.parameters["cycleKey"] = None tttree_structure = connection_log.active_connection.get_tov_structure( self.parameters["tovKey"] ) else: pretty_print( { "value": f"{' ' * 8 + selected_cycle['name']: <50}", "style": "#06c8ff bold italic", "end": None, }, {"value": f" cycleKey: ", "end": None}, { "value": f"{selected_cycle['key']['serial']: >15}", "style": "#06c8ff bold italic", }, ) self.parameters["cycleKey"] = selected_cycle["key"]["serial"] self.parameters["projectPath"].append(selected_cycle["name"]) tttree_structure = ( connection_log.active_connection.get_test_cycle_structure( self.parameters["cycleKey"] ) ) self.parameters["reportRootUID"] = questions.ask_to_select_report_root_uid( tttree_structure ) all_filters = connection_log.active_connection.get_all_filters() self.parameters["filters"] = questions.ask_to_select_filters(all_filters) self.parameters["report_config"] = questions.ask_to_config_report() self.parameters["outputPath"] = questions.ask_for_output_path() 
return True def trigger(self, connection_log: testbench.ConnectionLog) -> Union[bool, str]: if not self.parameters.get("cycleKey"): if ( not self.parameters.get("tovKey") and len(self.parameters["projectPath"]) >= 2 ): all_projects = connection_log.active_connection.get_all_projects() ( project_key, self.parameters["tovKey"], self.parameters["cycleKey"], ) = get_project_keys(all_projects, *self.parameters["projectPath"]) self.job_id = connection_log.active_connection.trigger_xml_report_generation( self.parameters.get("tovKey"), self.parameters.get("cycleKey"), self.parameters.get("reportRootUID", "ROOT"), self.parameters.get("filters", []), self.parameters.get("report_config", XmlExportConfig["Itep Export"]), ) return self.job_id def wait(self, connection_log: testbench.ConnectionLog) -> Union[bool, str]: try: self.report_tmp_name = ( connection_log.active_connection.wait_for_tmp_xml_report_name( self.job_id ) ) return self.report_tmp_name except KeyError as e: print(f"{str(e)}") return False def poll(self, connection_log: testbench.ConnectionLog) -> bool: result = connection_log.active_connection.get_exp_job_result(self.job_id) if result is not None: self.report_tmp_name = result return result def finish(self, connection_log: testbench.ConnectionLog) -> bool: report = connection_log.active_connection.get_xml_report_data( self.report_tmp_name ) with open(self.parameters["outputPath"], "wb") as output_file: output_file.write(report) pretty_print( {"value": f"Report ", "end": None}, { "value": f'{path.abspath(self.parameters["outputPath"])}', "style": "#06c8ff bold italic", "end": None, }, {"value": f" was generated"}, ) return True class ImportExecutionResults(AbstractAction): def prepare(self, connection_log: testbench.ConnectionLog) -> bool: self.parameters["inputPath"] = questions.ask_for_input_path() project = version = cycle = None try: project, version, cycle = self.get_project_path_from_report() except: pass all_projects = 
connection_log.active_connection.get_all_projects() selected_project = questions.ask_to_select_project( all_projects, default=project ) selected_tov = questions.ask_to_select_tov(selected_project, default=version) self.parameters["cycleKey"] = questions.ask_to_select_cycle( selected_tov, default=cycle )["key"]["serial"] cycle_structure = connection_log.active_connection.get_test_cycle_structure( self.parameters["cycleKey"] ) self.parameters["reportRootUID"] = questions.ask_to_select_report_root_uid( cycle_structure ) available_testers = connection_log.active_connection.get_all_testers_of_project( selected_project["key"]["serial"] ) self.parameters["defaultTester"] = questions.ask_to_select_default_tester( available_testers ) all_filters = connection_log.active_connection.get_all_filters() self.parameters["filters"] = questions.ask_to_select_filters(all_filters) self.parameters["importConfig"] = questions.ask_to_config_import() return True def get_project_path_from_report(self): zip_file = ZipFile(self.parameters["inputPath"]) xml = ET.fromstring(zip_file.read("report.xml")) project = xml.find("./header/project").get("name") version = xml.find("./header/version").get("name") cycle = xml.find("./header/cycle").get("name") return project, version, cycle def trigger(self, connection_log: testbench.ConnectionLog) -> bool: if not self.parameters.get("cycleKey"): if len(self.parameters.get("projectPath", [])) != 3: self.parameters["projectPath"] = self.get_project_path_from_report() self.set_cycle_key_from_path(connection_log) with open(self.parameters["inputPath"], "rb") as execution_report: execution_report_base64 = base64.b64encode(execution_report.read()).decode() serverside_file_name = ( connection_log.active_connection.upload_execution_results( execution_report_base64 ) ) if serverside_file_name: self.job_id = ( connection_log.active_connection.trigger_execution_results_import( self.parameters["cycleKey"], self.parameters["reportRootUID"], serverside_file_name, 
self.parameters["defaultTester"], self.parameters["filters"], self.parameters.get("importConfig", ImportConfig["Typical"]), ) ) return True def set_cycle_key_from_path(self, connection_log): all_projects = connection_log.active_connection.get_all_projects() ( project_key, tov_key, self.parameters["cycleKey"], ) = get_project_keys(all_projects, *self.parameters["projectPath"]) if not self.parameters["cycleKey"]: raise ValueError("Invalid Config! 'cycleKey' missing.") def wait(self, connection_log: testbench.ConnectionLog) -> bool: self.report_tmp_name = connection_log.active_connection.wait_for_execution_results_import_to_finish( self.job_id ) return self.report_tmp_name def poll(self, connection_log: testbench.ConnectionLog) -> bool: result = connection_log.active_connection.get_imp_job_result(self.job_id) if result is not None: self.report_tmp_name = result return result def finish(self, connection_log: testbench.ConnectionLog) -> bool: if self.report_tmp_name: pretty_print( {"value": f"Report ", "end": None}, { "value": f'{path.abspath(self.parameters["inputPath"])}', "style": "#06c8ff bold italic", "end": None, }, {"value": f" was imported"}, ) return True class ExportActionLog(UnloggedAction): def prepare(self, connection_log: testbench.ConnectionLog): self.parameters["outputPath"] = questions.ask_for_output_path("config.json") return True def trigger(self, connection_log: testbench.ConnectionLog) -> bool: try: connection_log.export_as_json(self.parameters["outputPath"]) pretty_print( {"value": f"Config ", "end": None}, { "value": f'{path.abspath(self.parameters["outputPath"])}', "style": "#06c8ff bold italic", "end": None, }, {"value": f" was generated"}, ) return True except KeyError as e: print(f"{str(e)}") return False class ChangeConnection(UnloggedAction): def prepare(self, connection_log: testbench.ConnectionLog): self.parameters["newConnection"] = login() return True def trigger(self, connection_log: testbench.ConnectionLog) -> bool: 
connection_log.active_connection.close() connection_log.add_connection(self.parameters["newConnection"]) return True class Quit(UnloggedAction): def trigger(self, connection_log: testbench.ConnectionLog = None): print("Closing program.") sys.exit(0)
StarcoderdataPython
1728057
from datetime import date
from datetime import timedelta
from uuid import uuid1

import pytest
from fastapi.testclient import TestClient

from todo.backend import schema
from todo.backend.endpoints import router
from todo.backend.schema import ToDo
from todo.backend.schema import Uuid


@pytest.fixture(scope="module", name="client")
def make_client():
    """Build one shared HTTP test client bound to the backend router."""
    return TestClient(router)


def _as_todos(payload):
    # Deserialize a JSON list of task dicts back into ToDo models for comparison.
    return [schema.ToDo(**entry) for entry in payload]


def _fetch(client, endpoint):
    # GET an endpoint, assert it succeeded, and return its body as ToDo models.
    response = client.get(endpoint)
    assert response.status_code == 200
    return _as_todos(response.json())


def test_all_(tasks, client):
    assert _fetch(client, "/all/") == tasks


def test_overdue(tasks, client):
    assert _fetch(client, "/overdue/") == tasks[3:4]


def test_today(tasks, client):
    assert _fetch(client, "/today/") == tasks[0:1]


def test_pending(tasks, client):
    assert _fetch(client, "/pending/") == tasks[5:6]


def test_toggle_task(tasks, client):
    res = client.patch("/toggle/", data=Uuid(uuid=tasks[0].uuid).json())
    assert res.status_code == 200
    # Task 0 was due today; toggling it done empties the "today" view.
    assert _fetch(client, "/today/") == []


def test_add_task(tasks, client):
    new = ToDo.from_list(uuid1(), "Fix test", date.today() + timedelta(days=1), False)
    res = client.post("/add/", data=new.json())
    assert res.status_code == 200
    assert _fetch(client, "/pending/") == tasks[5:6] + [new]
StarcoderdataPython
3351899
<reponame>captholley/ootp-pt-analysis from output_utils.progress.progress_bar import ProgressBar from util.number_utils import add_ip, ip_to_num import statsmodels.api as sm def get_woba_factors(ovr_data, vl_data, vr_data): ovr_factors = _calc_woba_factors(ovr_data) vl_factors = _calc_woba_factors(vl_data) vr_factors = _calc_woba_factors(vr_data) # Printing so we can check our r^2 values. Should be high print("Ovr wOBA r^2 factor:", ovr_factors["r_2"], "vL wOBA r^2 factor:", vl_factors["r_2"], "vR wOBA r^2 factor:", vr_factors["r_2"], "\n") return (ovr_factors, vl_factors, vr_factors) def _calc_woba_factors(player_data): X = [] y = [] X_wsb = [] y_wsb = [] total_ip = 0 total_runs = 0 pa = 0 walks = 0 hbp = 0 singles = 0 doubles = 0 triples = 0 homeruns = 0 ibb = 0 progress_bar = ProgressBar(len(player_data.keys()), "Reading wOBA values") for player in player_data.values(): pa += player["pa"] walks += player["walks"] hbp += player["timeshitbypitch"] singles += player["hits"] - (player["homeruns"] + player["doubles"] + player["triples"]) doubles += player["doubles"] triples += player["triples"] homeruns += player["homeruns"] ibb += player["intentionallywalked"] total_ip = add_ip(add_ip(total_ip, player["sp_ip"]), player["rp_ip"]) total_runs += player["runsscored"] progress_bar.increment() if player["pa"] < 20: continue X.append([ 1, player["walks"], player["timeshitbypitch"], player["hits"] - (player["homeruns"] + player["doubles"] + player["triples"]), player["doubles"], player["triples"], player["homeruns"] ]) y.append(player["woba"] * (player["pa"] - player["intentionallywalked"])) X_wsb.append([ player["stolenbases"], player["caughtstealing"], ((player["hits"] - (player["homeruns"] + player["doubles"] + player["triples"])) + player["walks"] + player["timeshitbypitch"] - player["intentionallywalked"]) ]) y_wsb.append(player["wsb"]) progress_bar.finish("\n") results = sm.OLS(y, X).fit() wsb_results = sm.OLS(y_wsb, X_wsb).fit() avg_woba = (results.params[0] + 
results.params[1] * walks + results.params[2] * hbp + results.params[3] * singles + results.params[4] * doubles + results.params[5] * triples + results.params[6] * homeruns) / (pa - ibb) return { "lg_woba": avg_woba, "woba_intcpt": results.params[0], "walks_factor": results.params[1], "hbp_factor": results.params[2], "singles_factor": results.params[3], "doubles_factor": results.params[4], "triples_factor": results.params[5], "homeruns_factor": results.params[6], "r_2": results.rsquared, "runSB": wsb_results.params[0], "runCS": wsb_results.params[1], "lgwSB": wsb_results.params[2], "wsb_r_2": wsb_results.rsquared, "outs_per_run": (wsb_results.params[1] - 0.075) / 2, "runs_per_win": 9 * (total_runs / ip_to_num(total_ip)) * 1.5 + 3, }
StarcoderdataPython
1661030
#!/usr/bin/python __author__ = 'ejk' ''' The bootstrap-salt.sh script here is a direct copy of github.bom/saltstack/salt-bootstrap you can find the authors of that script here https://github.com/saltstack/salt-bootstrap/blob/develop/AUTHORS.rst all credit to them for that fine piece of work''' import subprocess from os import path from time import sleep import glob salt_master = '10.0.0.1' salt_name = 'virl' salt_append_domain = 'virl.info' while_exit = 0 cwd = path.realpath('./') proxy = 'None' hostname = 'virl' domain = 'virl.info' while not while_exit: print (30 * '-') print (" V I R L - I N S T A L L - M E N U") print (30 * '-') print ("1. Change salt master from {0} ".format(salt_master)) print ("2. Change salt id from {0} or salt domain from {1}".format(salt_name, salt_append_domain)) print ("3. Change hostname from {0} or domain name {1}".format(hostname, domain)) print ("4. Write out extra.conf") print ("5. Change http proxy from {0}".format(proxy)) print ("6. install salt without preseed keys") print ("7. install salt with preseed keys in {0}".format(cwd + '/preseed_keys')) print ("8. Test if you are connected to controller") print ("9. Install virl installer and settings") print ("10. Edit /etc/virl.ini") print ("11. 
Exit") print (30 * '-') choice = raw_input('Which step are you on [1-11] : ') choice = int(choice) if choice == 1: salt_master = raw_input('Salt master [%s] ' % salt_master) or 'salt-master.cisco.com' if choice == 2: salt_name = raw_input('Salt id [%s] ' % salt_name) or 'compute1' salt_append_domain = raw_input('Salt domain name [%s] ' % salt_append_domain) or 'virl.info' if choice == 3: hostname = raw_input('System hostname [%s] ' % hostname) or 'compute1' domain = raw_input('System Domain name [%s] ' % domain) or 'virl.info' if choice == 4: subprocess.check_output(['mkdir', '-p', '/etc/salt/virl']) if not path.exists('/etc/salt/virl'): subprocess.check_output(['mkdir', '-p', '/etc/salt/virl']) if not path.exists('/etc/salt/minion.d'): subprocess.check_output(['mkdir', '-p', '/etc/salt/minion.d']) with open(("/etc/salt/minion.d/extra.conf"), "w") as extra: extra.write("""master: [{salt_master}]\n""".format(salt_master=salt_master)) extra.write("""id: {salt_name}\n""".format(salt_name=salt_name)) extra.write("""append_domain: {salt_append_domain}\n""".format(salt_append_domain=salt_append_domain)) if salt_master == 'masterless': #subprocess.check_output(['git', 'clone', '--depth', '1', 'https://github.com/snergster/virl-salt.git', '/srv/salt']) extra.write("""file_client: local fileserver_backend: - git - roots gitfs_provider: Dulwich gitfs_remotes: - https://github.com/Snergster/virl-salt.git\n""") if choice == 5: proxy = raw_input('Http proxy [%s] ' % proxy) or 'None' if not proxy == 'None': if not path.exists('/etc/salt'): subprocess.check_output(['mkdir', '-p', '/etc/salt']) with open(("/etc/salt/grains"), "w") as grains: grains.write("""proxy: True\n""") grains.write("""http_proxy: {proxy}\n""".format(proxy=proxy)) else: with open(("/etc/salt/grains"), "w") as grains: grains.write("""proxy: False\n""") if choice == 6: subprocess.call(['mkdir', '-p','/etc/salt/pki/minion']) subprocess.call(['cp', './master_sign.pub', '/etc/salt/pki/minion']) if salt_master == 
'masterless': subprocess.call(['git', 'clone', '--depth', '1', 'https://github.com/Snergster/virl-salt.git', '/srv/salt']) if not proxy == 'None': subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', '-H', '{proxy}'.format(proxy=proxy), '-X', '-P', 'stable']) else: subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-X', '-P', 'stable']) else: if not proxy == 'None': subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', '-H', '{proxy}'.format(proxy=proxy), '-X', '-P', 'stable']) else: subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', 'stable']) if choice == 8: if salt_master == 'masterless': print "Running in masterless mode skipping ping." else: subprocess.call(['salt-call', 'test.ping']) if choice == 9: subprocess.call(['salt-call', '--local', 'grains.setval', 'kilo', 'true']) if salt_master == 'masterless': subprocess.call(['salt-call', '--local', 'state.sls', 'zero']) else: subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'zero']) if choice == 11: if path.isfile('/etc/salt/grains'): subprocess.call(['rm', '/etc/salt/grains']) subprocess.call(['/usr/local/bin/vinstall', 'salt']) sleep(5) subprocess.call(['salt-call', '-l', 'info', 'state.sls', 'common.users']) subprocess.call(['salt-call', '-l', 'info', 'state.highstate']) subprocess.call(['salt-call', '-l', 'info', 'state.sls', 'common.bridge']) subprocess.call(['salt-call', '-l', 'info', 'grains.setval', 'virl.basics']) subprocess.call(['salt-call', '-l', 'info', 'state.sls', 'common.virl']) subprocess.call(['salt-call', '-l', 'info', 'state.sls', 'openstack.compute']) subprocess.call(['salt-call', '-l', 'info', 'state.sls', 'openstack.setup']) print 'Please validate the contents of /etc/network/interfaces before rebooting!' while_exit = 1
StarcoderdataPython
3201126
<filename>stylization/stylization.py """This module contains various stylization functions of text appearance.""" import sys _style_dict = { "reset": "\033[0m", "bold": "\033[01m", "disable": '\033[02m', "underline": '\033[04m', "reverse": '\033[07m', "strikethrough": '\033[09m', "invisible": '\033[08m' } _fg_dict = { "black": "\033[30m", "red": "\033[31m", "green": "\033[32m", "orange": "\033[33m", "blue": "\033[34m", "purple": "\033[35m", "cyan": "\033[36m", "lightgrey": "\033[37m", "darkgrey": "\033[90m", "lightred": "\033[91m", "lightgreen": "\033[92m", "yellow": "\033[93m", "lightblue": "\033[94m", "pink": "\033[95m", "lightcyan": "\033[96m" } _bg_dict = { "black": "\033[40m", "red": "\033[41m", "green": "\033[42m", "orange": "\033[43m", "blue": "\033[44m", "purple": "\033[45m", "cyan": "\033[46m", "lightgrey": "\033[47m" } def _names2ascii(fg=None, stylename=None, bg=None) -> str: """Convert names of foreground, styles and background to ASCII symbols string""" fg_string = _fg_dict[fg] if fg is not None else "" bg_string = _bg_dict[bg] if bg is not None else "" st_string = "" if stylename is not None: style_list = stylename.split(" ") for style_item in style_list: st_string = "".join((st_string, _style_dict[style_item])) st_bg_fg_str = "".join(( st_string, fg_string, bg_string)) return st_bg_fg_str def style_string(string: str, fg=None, stylename=None, bg=None) -> str: """Apply styles to text. It is able to change style (like bold, underline etc), foreground and background colors of text string.""" ascii_str = _names2ascii(fg, stylename, bg) return "".join(( ascii_str, string, _style_dict["reset"])) def style_func_stream(stream=sys.stdout, fg=None, stylename=None, bg=None): """Apply styles to stream and call the . It is able to change style (like bold, underline etc), foreground and background colors of text string. 
Example usage: style_stream(_stream, fg=fg, stylename=stylename,bg=bg)\ (sys.print_exception)\ (e, _stream) Also you may use it as decorator function.""" def decorator(func): def wrapper(*args, **kwds): ascii_str = _names2ascii(fg, stylename, bg) stream.write(ascii_str) func(*args, **kwds) stream.write(_style_dict["reset"]) return wrapper return decorator def _chunks(l: bytearray, n: int): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n] def hexdump(bytebuffer: bytearray, offset: int = 0): """Print hexdump of bytearray from offset""" for i, chunk in enumerate(_chunks(bytebuffer, 16)): print("%08X: " % (i * 16 + offset), end="") for byte in chunk[:8]: print('%02X ' % byte, end="") print(' ', end="") for byte in chunk[8:]: print('%02X ' % byte, end="") for k in range(16 - len(chunk)): print('%2s ' % " ", end="") print(' | ', end="") for byte in chunk: if 0x20 <= byte <= 0x7F: print("%c" % chr(byte), end="") else: print(".", end="") print()
StarcoderdataPython
3374078
<gh_stars>0 from __future__ import unicode_literals from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from django.template.defaultfilters import truncatechars from mezzanine.weibopub import get_auth_settings FORMFIELD_HTML = """ <div class='send_weibo_container'> <input id='id_send_weibo' name='send_weibo' type='checkbox'> <label class='vCheckboxLabel' for='id_send_weibo'>%s</label> </div> """ class WeiboAdminMixin(object): """ Admin mixin that adds a "Send to Weibo" checkbox to the add/change views, which when checked, will send a weibo with the title、pic and link to the object being saved. """ def formfield_for_dbfield(self, db_field, **kwargs): """ Adds the "Send to Weibo" checkbox after the "status" field, provided by any ``Displayable`` models. The approach here is quite a hack, however the sane approach of using a custom form with a boolean field defined, and then adding it to the formssets attribute of the admin class fell apart quite horrifically. """ formfield = super(WeiboAdminMixin, self).formfield_for_dbfield(db_field, **kwargs) if db_field.name == "status" and get_auth_settings(): def wrapper(render): def wrapped(*args, **kwargs): rendered = render(*args, **kwargs) label = _("Pub to Weibo") return mark_safe(rendered + FORMFIELD_HTML % label) return wrapped formfield.widget.render = wrapper(formfield.widget.render) return formfield def save_model(self, request, obj, form, change): """ Sends a weibo with the title/pic/short_url if applicable. """ super(WeiboAdminMixin, self).save_model(request, obj, form, change) if request.POST.get("send_weibo", False): auth_settings = get_auth_settings() obj.set_short_url() message = truncatechars(obj, 140 - len(obj.short_url) - 1) api = Api(*auth_settings) api.update.post(u'%s。[阅读全文:%s]'%(message,obj.short_url),pic=open('/Users/test.png'))
StarcoderdataPython
3339683
from cms.extensions.toolbar import ExtensionToolbar
from cms.api import get_page_draft
from cms.toolbar_pool import toolbar_pool
from cms.toolbar_base import CMSToolbar
from cms.utils.page_permissions import user_can_change_page
from django.urls import reverse, NoReverseMatch
from django.utils.translation import ugettext_lazy as _

from .models import PageDetailExtension


@toolbar_pool.register
class PageDetailExtensionToolbar(CMSToolbar):
    """Adds a "Page Extention" modal entry to the CMS page toolbar menu.

    The entry opens the admin change form for this page's
    PageDetailExtension (or the add form if none exists yet).
    """

    # Model this toolbar is associated with.
    model = PageDetailExtension

    def populate(self):
        """Populate the toolbar for the current request.

        Only adds the menu item when there is a current page, the user may
        change it, and the admin URLs for the extension are registered.
        """
        # Always work on the draft version of the current page.
        self.page = get_page_draft(self.request.current_page)

        if not self.page:
            # Not on a CMS page: nothing to add.
            return

        if user_can_change_page(user=self.request.user, page=self.page):
            # EAFP: fetch the existing extension, if any, for this page.
            try:
                page_extension = PageDetailExtension.objects.get(extended_object_id=self.page.id)
            except PageDetailExtension.DoesNotExist:
                page_extension = None
            try:
                if page_extension:
                    url = reverse('admin:api_pagedetailextension_change', args=(page_extension.pk,))
                else:
                    url = reverse('admin:api_pagedetailextension_add') + '?extended_object=%s' % self.page.pk
            except NoReverseMatch:
                # Extension admin is not registered in the URLconf; skip the item.
                pass
            else:
                # Item is shown but disabled unless the toolbar is in edit mode.
                not_edit_mode = not self.toolbar.edit_mode_active
                current_page_menu = self.toolbar.get_or_create_menu('page')
                # NOTE(review): label misspells "Extension"; left unchanged because
                # translation catalogs may be keyed on the exact string.
                current_page_menu.add_modal_item(_('Page Extention'), url=url, disabled=not_edit_mode)
113871
import logging from qbot.core import registry from qbot.db import plugin_storage from qbot.message import Image, OutgoingMessage, Text, send_message from qbot.plugins.cmc import comic PLUGIN_NAME = "xkcd" LATEST_COMIC_KEY = "latest_comic" logger = logging.getLogger(__name__) @comic async def xkcd(): last_seen_comic = await registry.database.fetch_val( plugin_storage.select().where( (plugin_storage.c.plugin == PLUGIN_NAME) & (plugin_storage.c.key == LATEST_COMIC_KEY) ), column=plugin_storage.c.data, ) try: resp = await registry.http_client.get("https://xkcd.com/info.0.json") except Exception: logger.exception("Failed to retrieve latest XKCD comic.") return if not 200 <= resp.status_code < 400: logger.error(f"Incorrect response from XKCD. Status: {resp.status_code}") return data = resp.json() if data["num"] != last_seen_comic: thread_ts = await send_message( OutgoingMessage( channel=registry.CHANNEL_COMICS, thread_ts=None, blocks=[ Text(f"https://xkcd.com - {data['safe_title']}"), Image(image_url=data["img"], alt_text="XKCD"), ], ) ) await send_message( OutgoingMessage( channel=registry.CHANNEL_COMICS, thread_ts=thread_ts, blocks=[Text(data["alt"])], ) ) if last_seen_comic is None: await registry.database.execute( plugin_storage.insert(), values={ "plugin": PLUGIN_NAME, "key": LATEST_COMIC_KEY, "data": data["num"], }, ) else: await registry.database.execute( plugin_storage.update().where( (plugin_storage.c.plugin == PLUGIN_NAME) & (plugin_storage.c.key == LATEST_COMIC_KEY) ), values={"data": data["num"]}, )
StarcoderdataPython
3241732
# Copyright 2018 Propel http://propel.site/. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. { 'variables': { 'tensorflow_include_dir': '<(module_root_dir)/deps/libtensorflow/include', 'tensorflow_headers': [ '<@(tensorflow_include_dir)/tensorflow/c/c_api.h', '<@(tensorflow_include_dir)/tensorflow/c/eager/c_api.h', ] }, 'targets': [ { 'target_name': 'tensorflow-binding', 'sources': [ 'src/tf_binding.cc' ], 'include_dirs': [ '<(tensorflow_include_dir)', '<(module_root_dir)', ], 'conditions': [ ['OS=="win"', { 'defines': [ 'COMPILER_MSVC' ], 'libraries': [ 'tensorflow' ], 'library_dirs': [ '<(INTERMEDIATE_DIR)' ], 'msvs_disabled_warnings': [ # Warning C4190: 'TF_NewWhile' has C-linkage specified, but returns # UDT 'TF_WhileParams' which is incompatible with C. # (in include/tensorflow/c/c_api.h) # This is a tensorflow bug but it doesn't affect propel. 
4190 ], 'actions': [ { 'action_name': 'generate_def', 'inputs': [ '<(module_root_dir)/tools/generate_def.js', '<@(tensorflow_headers)' ], 'outputs': [ '<(INTERMEDIATE_DIR)/tensorflow.def' ], 'action': [ 'cmd', '/c node <@(_inputs) > <@(_outputs)' ] }, { 'action_name': 'build-tensorflow-lib', 'inputs': [ '<(INTERMEDIATE_DIR)/tensorflow.def' ], 'outputs': [ '<(INTERMEDIATE_DIR)/tensorflow.lib' ], 'action': [ 'lib', '/def:<@(_inputs)', '/out:<@(_outputs)', '/machine:<@(target_arch)' ] }, { 'action_name': 'extract_dll', 'inputs': [ '<(module_root_dir)/tools/extract_dll.js' ], 'outputs': [ '<(PRODUCT_DIR)/tensorflow.dll' ], 'action': [ 'node', '<@(_inputs)', '<(PRODUCT_DIR)' ] } ], }, { # Linux or Mac 'actions': [ { 'action_name': 'extract_so', 'inputs': [ '<(module_root_dir)/tools/extract_so.js' ], 'outputs': [ '<(PRODUCT_DIR)/libtensorflow.so', # unlisted to avoid spurious rebuilds: # '<(PRODUCT_DIR)/libtensorflow_framework.so' ], 'action': [ 'node', '<@(_inputs)', '<(PRODUCT_DIR)' ] } ] }], ['OS=="linux"', { 'libraries': [ '-Wl,-rpath,\$$ORIGIN', '-ltensorflow' ], 'library_dirs': [ '<(PRODUCT_DIR)' ], }], ['OS=="mac"', { 'libraries': [ '-Wl,-rpath,@loader_path', '-ltensorflow', ], }], ] } ] }
StarcoderdataPython
3336552
import time


class progressbar(object):
    """Console progress bar that tracks elapsed time and predicts time left."""

    # Class-level default; each instance accumulates its own copy via `+=`.
    progressTime = 0

    def __init__(self, stepNum, frontStr='', backStr=''):
        """stepNum: total number of steps; frontStr/backStr: text printed around the bar."""
        self.stepNum = stepNum
        self.fullRate = 100  # percentage scale of a finished run
        self.lastOutputLen = 0  # width of the last printed line, used by clear()
        self.frontStr = frontStr
        self.backStr = backStr
        self.lastTime = time.perf_counter()

    def clear(self):
        """Blank out the previously printed line (carriage return + spaces)."""
        spaceStr = ' ' * self.lastOutputLen
        print('\r%s' % (spaceStr), end='')

    def output(self, nowStep):
        """Redraw the bar for *nowStep* completed steps (1..stepNum)."""
        # Accumulate wall-clock time elapsed since the previous call.
        self.progressTime += time.perf_counter() - self.lastTime
        self.lastTime = time.perf_counter()
        rate = nowStep / self.stepNum
        if rate == 0:
            predictRestTime = -1  # no progress yet: sentinel for "unknown"
        else:
            # Linear extrapolation: remaining time proportional to remaining work.
            predictRestTime = self.progressTime * (1 - rate) / rate
        rate *= self.fullRate
        barLen = int(rate) // 2  # one '#' per 2 percent
        barStr = '#' * barLen
        outputStr = '\r%s%s%.2f%% [%.2fs/%.2fs]%s' % \
            (self.frontStr, barStr, rate, self.progressTime, predictRestTime, self.backStr)
        self.lastOutputLen = len(outputStr)
        print(outputStr, end='')

    def getProgressTime(self):
        """Return the accumulated progress time in seconds.

        BUG FIX: the original definition omitted ``self`` while reading
        ``self.progressTime``, so every call raised an exception.
        """
        return self.progressTime


if __name__ == '__main__':
    bar = progressbar(100)
    for i in range(100):
        bar.output(i + 1)
        time.sleep(0.1)
126997
import re
import requests
import xbmc
from ..scraper import Scraper
from ..common import random_agent, clean_title, googletag, filter_host, clean_search


# Kodi/XBMC video-source scraper for gostream.is.
# NOTE(review): this is Python 2 source (see the `print p` statement below).
class Gostream(Scraper):
    domains = ['gostream.is']
    name = "gostream"
    # NOTE(review): class-level list — scrape results are shared by every
    # Gostream instance; confirm whether per-instance state was intended.
    sources = []

    def __init__(self):
        self.base_link = 'https://gostream.is'
        # self.scrape_movie('sleight', '2016', '')

    def scrape_movie(self, title, year, imdb, debrid=False):
        # Search the site for `title`, follow matching result pages, and
        # collect playable stream URLs into self.sources.
        try:
            start_url = self.base_link+'/movie/search/'+title.replace(' ','+')
            html = requests.get(start_url).text
            # Each search result yields (detail-page URL, movie title).
            match = re.compile('<div data-movie-id=.+?href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
            for url,name in match:
                # Fuzzy title match: compare lowercased titles with spaces and colons stripped.
                if title.lower().replace(' ','').replace(':','') in name.lower().replace(' ','').replace(':',''):
                    if title.lower()[0] == name.lower()[0]:
                        html2 = requests.get(url).text
                        match2 = re.compile('<div id="mv-info">.+?<a.+?href="(.+?)".+?title',re.DOTALL).findall(html2)
                        for url2 in match2:
                            html2 = requests.get(url2).text
                            movie_qual = re.compile('<span class="quality">(.+?)</').findall(html2)
                            qual = movie_qual[0]
                            match_year = re.compile('<p><strong>Release:</strong>(.+?)</').findall(html2)
                            # Keep only detail pages whose release year matches.
                            if year in (str(match_year)):
                                match3 = re.compile('<a onclick="favorite\((.+?),',re.DOTALL).findall(html2)
                                for i in match3:
                                    html3 = requests.get(self.base_link+'/ajax/movie_episodes/'+i).json()
                                    data = re.findall('data-id="(.+?)"',html3['html'])
                                    for u in data:
                                        # Episode ids appear to be 6 characters long — TODO confirm.
                                        if len(u) == 6:
                                            s = self.base_link+'/ajax/movie_token?eid='+u+'&mid='+i
                                            html3 = requests.get(s).content
                                            # The token endpoint embeds _x/_y values required by movie_sources.
                                            x,y = re.findall("_x='(.+?)', _y='(.+?)'",html3)[0]
                                            fin_url = self.base_link+'/ajax/movie_sources/'+u+'?x='+x+'&y='+y
                                            h = requests.get(fin_url).content
                                            source = re.findall('"sources":\[(.+?)\]',h)
                                            single = re.findall('{(.+?)}',str(source))
                                            for l in single:
                                                playlink = re.findall('"file":"(.+?)"',str(l))
                                                for p in playlink:
                                                    if 'http' in p:
                                                        p = p.replace('\\','')
                                                        # Append Kodi-style request headers (UA + Referer) to the stream URL.
                                                        p = p+'|User-Agent=Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36&Referer='+url2+'?ep='+u
                                                        print p
                                                        self.sources.append({'source': 'Gvideo', 'quality': qual, 'scraper': self.name, 'url': p,'direct': True})
            return self.sources
        except:
            # Best-effort scraper: any parsing or network failure yields no sources.
            pass
StarcoderdataPython
1738325
#!/usr/bin/env python3
# Disassembler for SM64 music sequence (.m64) files.
#
# Usage:
#   seq_decoder.py --emit-asm-macros      # print the seq_macros.inc file
#   seq_decoder.py input.m64              # print an assembly listing
#
# NOTE(review): this file was recovered from a whitespace-collapsed dump;
# block indentation has been reconstructed and is hedged where ambiguous.
import sys

# Command tables: opcode -> [mnemonic, argument kinds...].
# Argument kinds: 'u8'/'s8'/'hex8'/'u16'/'hex16' fixed-size operands,
# 'var' variable-length operand, 'addr' sequence-relative 16-bit address,
# 'arg'/'ign-arg' an operand packed into the opcode's low bits.
commands = {}
commands['seq'] = {
    # non-arg commands
    0xff: ['end'],
    0xfe: ['delay1'],
    0xfd: ['delay', 'var'],
    0xfc: ['call', 'addr'],
    0xfb: ['jump', 'addr'],
    0xfa: ['beqz', 'addr'],
    0xf9: ['bltz', 'addr'],
    0xf8: ['loop', 'u8'],
    0xf7: ['loopend'],
    0xf5: ['bgez', 'addr'],
    0xf2: ['reservenotes', 'u8'],
    0xf1: ['unreservenotes'],
    0xdf: ['transpose', 's8'],
    0xde: ['transposerel', 's8'],
    0xdd: ['settempo', 'u8'],
    0xdc: ['addtempo', 's8'],
    0xdb: ['setvol', 'u8'],
    0xda: ['changevol', 's8'],
    0xd7: ['initchannels', 'hex16'],
    0xd6: ['disablechannels', 'hex16'],
    0xd5: ['setmutescale', 's8'],
    0xd4: ['mute'],
    0xd3: ['setmutebhv', 'hex8'],
    0xd2: ['setshortnotevelocitytable', 'addr'],
    0xd1: ['setshortnotedurationtable', 'addr'],
    0xd0: ['setnoteallocationpolicy', 'u8'],
    0xcc: ['setval', 'u8'],
    0xc9: ['bitand', 'u8'],
    0xc8: ['subtract', 'u8'],
    # arg commands
    0x00: ['testchdisabled', 'arg'],
    0x50: ['subvariation', 'ign-arg'],
    0x70: ['setvariation', 'ign-arg'],
    0x80: ['getvariation', 'ign-arg'],
    0x90: ['startchannel', 'arg', 'addr'],
}
commands['chan'] = {
    # non-arg commands
    0xff: ['end'],
    0xfe: ['delay1'],
    0xfd: ['delay', 'var'],
    0xfc: ['call', 'addr'],
    0xfb: ['jump', 'addr'],
    0xfa: ['beqz', 'addr'],
    0xf9: ['bltz', 'addr'],
    0xf8: ['loop', 'u8'],
    0xf7: ['loopend'],
    0xf6: ['break'],
    0xf5: ['bgez', 'addr'],
    0xf3: ['hang'],
    0xf2: ['reservenotes', 'u8'],
    0xf1: ['unreservenotes'],
    0xe4: ['dyncall'],
    0xe3: ['setvibratodelay', 'u8'],
    0xe2: ['setvibratoextentlinear', 'u8', 'u8', 'u8'],
    0xe1: ['setvibratoratelinear', 'u8', 'u8', 'u8'],
    0xe0: ['setvolscale', 'u8'],
    0xdf: ['setvol', 'u8'],
    0xde: ['freqscale', 'u16'],
    0xdd: ['setpan', 'u8'],
    0xdc: ['setpanmix', 'u8'],
    0xdb: ['transpose', 's8'],
    0xda: ['setenvelope', 'addr'],
    0xd9: ['setdecayrelease', 'u8'],
    0xd8: ['setvibratoextent', 'u8'],
    0xd7: ['setvibratorate', 'u8'],
    0xd6: ['setupdatesperframe_unimplemented', 'u8'],
    0xd4: ['setreverb', 'u8'],
    0xd3: ['pitchbend', 's8'],
    0xd2: ['setsustain', 'u8'],
    0xd1: ['setnoteallocationpolicy', 'u8'],
    0xd0: ['stereoheadseteffects', 'u8'],
    0xcc: ['setval', 'u8'],
    0xcb: ['readseq', 'addr'],
    0xca: ['setmutebhv', 'hex8'],
    0xc9: ['bitand', 'u8'],
    0xc8: ['subtract', 'u8'],
    0xc7: ['writeseq', 'u8', 'addr'],
    0xc6: ['setbank', 'u8'],
    0xc5: ['dynsetdyntable'],
    0xc4: ['largenoteson'],
    0xc3: ['largenotesoff'],
    0xc2: ['setdyntable', 'addr'],
    0xc1: ['setinstr', 'u8'],
    # arg commands
    0x00: ['testlayerfinished', 'arg'],
    0x10: ['startchannel', 'arg', 'addr'],
    0x20: ['disablechannel', 'arg'],
    0x30: ['iowriteval2', 'arg', 'u8'],
    0x40: ['ioreadval2', 'arg', 'u8'],
    0x50: ['ioreadvalsub', 'arg'],
    0x60: ['setnotepriority', 'arg'],
    0x70: ['iowriteval', 'arg'],
    0x80: ['ioreadval', 'arg'],
    0x90: ['setlayer', 'arg', 'addr'],
    0xa0: ['freelayer', 'arg'],
    0xb0: ['dynsetlayer', 'arg'],
}
# Commands shared by both note-layer variants (large/small note encodings).
commands_layer_base = {
    # non-arg commands
    0xc0: ['delay', 'var'],
    0xc1: ['setshortnotevelocity', 'u8'],
    0xc2: ['transpose', 'u8'],
    0xc3: ['setshortnotedefaultplaypercentage', 'var'],
    0xc4: ['somethingon'],  # ?? (something to do with decay behavior)
    0xc5: ['somethingoff'],  # ??
    0xc6: ['setinstr', 'u8'],
    0xc7: ['portamento', 'hex8', 'u8', 'u8'],
    0xc8: ['disableportamento'],
    0xc9: ['setshortnoteduration', 'u8'],
    0xca: ['setpan', 'u8'],
    0xf7: ['loopend'],
    0xf8: ['loop', 'u8'],
    0xfb: ['jump', 'addr'],
    0xfc: ['call', 'addr'],
    0xff: ['end'],
    # arg commands
    0xd0: ['setshortnotevelocityfromtable', 'arg'],
    0xe0: ['setshortnotedurationfromtable', 'arg'],
}
commands['layer_large'] = dict(list(commands_layer_base.items()) + list({
    0x00: ['note0', 'arg', 'var', 'u8', 'u8'],
    0x40: ['note1', 'arg', 'var', 'u8'],
    0x80: ['note2', 'arg', 'u8', 'u8'],
}.items()))
commands['layer_small'] = dict(list(commands_layer_base.items()) + list({
    0x00: ['smallnote0', 'arg', 'var'],
    0x40: ['smallnote1', 'arg'],
    0x80: ['smallnote2', 'arg'],
}.items()))

# Optional flag: print only the number of trailing padding bytes and exit.
print_end_padding = False
if "--print-end-padding" in sys.argv:
    print_end_padding = True
    sys.argv.remove("--print-end-padding")

if len(sys.argv) != 2:
    # NOTE(review): usage error exits with status 0 — arguably should be nonzero.
    print(f"Usage: {sys.argv[0]} (--emit-asm-macros | input.m64)")
    sys.exit(0)

if sys.argv[1] == "--emit-asm-macros":
    # Macro-emission mode: print the assembler macro definitions that the
    # disassembled sequences reference, then exit.
    print("# Macros for disassembled sequence files. This file was automatically generated by seq_decoder.py.")
    print("# To regenerate it, run: ./tools/seq_decoder.py --emit-asm-macros >seq_macros.inc")
    print()

    def print_hword(x):
        # Emit a big-endian 16-bit value as two .byte directives.
        print(f" .byte {x} >> 8, {x} & 0xff")

    def emit_cmd(key, op, cmd):
        # Emit one .macro definition for command `cmd` (opcode `op`) of
        # script type `key` ('seq'/'chan'/'layer').
        mn = cmd[0]
        args = cmd[1:]
        param_names = []
        param_list = []
        nibble_param_name = None
        for i, arg in enumerate(args):
            param_name = chr(97 + i)  # parameters are named a, b, c, ...
            param_names.append(param_name)
            # 'ign-arg' parameters default to 0 so they can be omitted.
            param_list.append(param_name + ("=0" if arg == "ign-arg" else ""))
            if arg == "ign-arg" or arg == "arg":
                nibble_param_name = param_name
        print(f".macro {key}_{mn} {', '.join(param_list)}".rstrip())
        if nibble_param_name is not None:
            # The low bits of the opcode byte carry the argument.
            print(f" .byte {hex(op)} + \\{nibble_param_name}")
        else:
            print(f" .byte {hex(op)}")
        for arg, param_name in zip(args, param_names):
            if arg in ['arg', 'ign-arg']:
                pass  # already merged into the opcode byte above
            elif arg in ['s8', 'u8', 'hex8']:
                print(f" .byte \\{param_name}")
            elif arg in ['u16', 'hex16']:
                print_hword("\\" + param_name)
            elif arg == 'addr':
                # Addresses are stored relative to sequence_start.
                print_hword(f"(\\{param_name} - sequence_start)")
            elif arg == 'var_long':
                print(f" var_long \\{param_name}")
            elif arg == 'var':
                print(f" var \\{param_name}")
            else:
                raise Exception("Unknown argument type " + arg)
        print(".endm")
        print()

    def emit_env_cmd(op, cmd):
        # Emit one envelope macro; `op` is a 16-bit sentinel value or None
        # (None means the macro takes the value as its first parameter).
        mn = cmd[0]
        param_list = []
        for i, arg in enumerate(cmd[1:]):
            param_list.append(chr(97 + i))
        print(f".macro envelope_{mn} {', '.join(param_list)}".rstrip())
        if op is not None:
            print(f" .byte {hex(op >> 8)}, {hex(op & 0xff)}")
        for param in param_list:
            print_hword("\\" + param)
        print(".endm\n")

    for key in ['seq', 'chan', 'layer']:
        print(f"# {key} commands\n")
        if key == 'layer':
            cmds = commands['layer_large']
            # Also emit small-note-only opcodes under the shared 'layer_' prefix.
            for op in sorted(commands['layer_small'].keys()):
                if op not in cmds:
                    emit_cmd(key, op, commands['layer_small'][op])
        else:
            cmds = commands[key]
        # eu/non_eu collect the opcodes that differ between the EU and
        # JP/US ROM versions; they are emitted inside .ifdef VERSION_EU.
        eu = []
        non_eu = []
        for op in sorted(cmds.keys()):
            mn = cmds[op][0]
            if mn == 'setnotepriority':
                eu.append((0xe9, ['setnotepriority', 'u8']))
                non_eu.append((op, cmds[op]))
            elif mn in ['reservenotes', 'unreservenotes']:
                eu.append((op - 1, cmds[op]))
                non_eu.append((op, cmds[op]))
            elif mn not in ['portamento', 'writeseq']:
                # portamento and writeseq need hand-written macros (below).
                emit_cmd(key, op, cmds[op])
        if key == 'chan':
            print(".macro chan_writeseq val, pos, offset")
            print(" .byte 0xc7, \\val")
            print_hword("(\\pos - sequence_start + \\offset)")
            print(".endm\n")
            print(".macro chan_writeseq_nextinstr val, offset")
            print(" .byte 0xc7, \\val")
            print_hword("(writeseq\\@ - sequence_start + \\offset)")
            print(" writeseq\\@:")
            print(".endm\n")
            # NOTE(review): original indentation lost in the dump — in the
            # recovered stream the layer_portamento macro is emitted inside
            # this 'chan' branch (it is only needed once); confirm upstream.
            print(".macro layer_portamento a, b, c")
            print(" .byte 0xc7, \\a, \\b")
            print(" .if ((\\a & 0x80) == 0)")
            print(" var \\c")
            print(" .else")
            print(" .byte \\c")
            print(" .endif")
            print(".endm\n")
        # Long-form variants of the variable-length-operand commands.
        emit_cmd(key, 0xfd, ['delay_long', 'var_long'])
        if key == 'layer':
            # NOTE(review): as reconstructed this emits layer_delay_long for
            # both 0xfd and 0xc0 — confirm against the upstream file.
            emit_cmd(key, 0xc0, ['delay_long', 'var_long'])
            emit_cmd(key, 0x40, ['note1_long', 'arg', 'var_long', 'u8'])
        if eu:
            print(".ifdef VERSION_EU\n")
            for (op, cmd) in eu:
                emit_cmd(key, op, cmd)
            print(".else\n")
            for (op, cmd) in non_eu:
                emit_cmd(key, op, cmd)
            print(".endif\n")

    print("# envelope commands\n")
    emit_env_cmd(0, ['disable', 'u16'])
    emit_env_cmd(2**16-1, ['hang', 'u16'])
    emit_env_cmd(2**16-2, ['goto', 'u16'])
    emit_env_cmd(2**16-3, ['restart', 'u16'])
    emit_env_cmd(None, ['line', 'u16', 'u16'])

    print("# other commands\n")
    # var/var_long implement the 1-or-2-byte variable-length encoding.
    print(".macro var_long x")
    print(" .byte (0x80 | (\\x & 0x7f00) >> 8), (\\x & 0xff)")
    print(".endm\n")
    print(".macro var x")
    print(" .if (\\x >= 0x80)")
    print(" var_long \\x")
    print(" .else")
    print(" .byte \\x")
    print(" .endif")
    print(".endm\n")
    print(".macro sound_ref a")
    print_hword("(\\a - sequence_start)")
    print(".endm")
    sys.exit(0)

# --- Disassembly mode -------------------------------------------------------
filename = sys.argv[1]
try:
    # Expect paths like <...>/us/00_sound_player.m64.
    lang = filename.split('/')[-2]
    assert lang in ['us', 'jp', 'eu']
    seq_num = int(filename.split('/')[-1].split('_')[0], 16)
except Exception:
    lang = ''
    seq_num = -1

try:
    with open(filename, 'rb') as f:
        data = f.read()
except Exception:
    print("Error: could not open file (unknown) for reading.", file=sys.stderr)
    sys.exit(1)

# Per-byte disassembly state:
#   output[i]        -> text for the instruction starting at i ('' = interior byte)
#   output_instate[i]-> decoder state that produced output[i]
#   label_name[i]    -> label placed at i, if any
#   script_start[i]  -> True if a blank line should precede i in the listing
output = [None] * len(data)
output_instate = [None] * len(data)
label_name = [None] * len(data)
script_start = [False] * len(data)
hit_eof = False
errors = []
seq_writes = []  # (writeseq instruction position, target address) fixups

# Our analysis of large notes mode doesn't persist through multiple channel activations
# For simplicity, we force large notes always instead, which is valid for SM64.
force_large_notes = True

if lang == 'eu':
    # unreservenotes moved to 0xf0 in EU, and reservenotes took its place
    commands['chan'][0xf0] = commands['chan'][0xf1]
    commands['chan'][0xf1] = commands['chan'][0xf2]
    del commands['chan'][0xf2]
    # total guess: the same is true for the 'seq'-type command
    commands['seq'][0xf0] = commands['seq'][0xf1]
    commands['seq'][0xf1] = commands['seq'][0xf2]
    del commands['seq'][0xf2]
    # setnotepriority moved to 0xe9, becoming a non-arg command
    commands['chan'][0xe9] = ['setnotepriority', 'u8']
    del commands['chan'][0x60]

def is_arg_command(cmd_args):
    # True if the command packs an operand into the opcode's low bits.
    return 'arg' in cmd_args or 'ign-arg' in cmd_args

def gen_label(ind, tp):
    # Create (or return the existing) label for offset `ind` of type `tp`.
    nice_tp = tp.replace('_small', '').replace('_large', '')
    addr = hex(ind)[2:].upper()
    ret = f".{nice_tp}_{addr}"
    if ind >= len(data):
        errors.append(f"reference to oob label {ret}")
        return ret
    if label_name[ind] is not None:
        return label_name[ind]
    label_name[ind] = ret
    return ret

def gen_mnemonic(tp, b):
    # Macro name for opcode `b` of script type `tp`, e.g. 'chan_setvol'.
    nice_tp = tp.split('_')[0]
    mn = commands[tp][b][0]
    if not mn:
        mn = f"{b:02X}"
    return f"{nice_tp}_{mn}"

# Worklist of decode states: (position, script type, loop nesting, large-notes flag).
decode_list = []

def decode_one(state):
    # Decode a single instruction at `state` and queue its successors.
    pos, tp, nesting, large = state
    orig_pos = pos
    if pos >= len(data):
        global hit_eof
        hit_eof = True
        return
    if output[pos] is not None:
        # Already decoded; flag if we reached it under a conflicting state.
        if output_instate[pos] != state:
            errors.append(f"got to {gen_label(orig_pos, tp)} with both state {state} and {output_instate[pos]}")
        return

    def u8():
        nonlocal pos
        global hit_eof
        if pos == len(data):
            hit_eof = True
            return 0
        ret = data[pos]
        pos += 1
        return ret

    def u16():
        hi = u8()
        lo = u8()
        return (hi << 8) | lo

    def var():
        # Variable-length operand: returns (value, needs_long_encoding).
        ret = u8()
        if ret & 0x80:
            ret = (ret << 8) & 0x7f00; ret |= u8()
            return (ret, ret < 0x80)
        return (ret, False)

    if tp == 'soundref':
        # A sound-bank entry: a 16-bit pointer to a channel script.
        sound = u16()
        decode_list.append((sound, 'chan', 0, True))
        if sound < len(data):
            script_start[sound] = True
        for p in range(orig_pos, pos):
            output[p] = ''
            output_instate[p] = state
        output[orig_pos] = 'sound_ref ' + gen_label(sound, 'chan')
        return

    if tp == 'envelope':
        # Envelope entries are (value, target) hword pairs; large values
        # are sentinels for disable/hang/goto/restart.
        a = u16()
        b = u16()
        for p in range(orig_pos, pos):
            output[p] = ''
            output_instate[p] = state
        if a >= 2**16 - 3:
            a -= 2**16
        if a <= 0:
            mn = ['disable', 'hang', 'goto', 'restart'][-a]
            output[orig_pos] = f'envelope_{mn} {b}'
            # assume any goto is backwards and stop decoding
        else:
            output[orig_pos] = f'envelope_line {a} {b}'
            decode_list.append((pos, tp, nesting, large))
        return

    ins_byte = u8()
    cmds = commands[tp]
    # Match the opcode: exact, high-nibble (4-bit arg), or for layers
    # high-2-bits (6-bit arg, used by note commands).
    if ins_byte in cmds and not is_arg_command(cmds[ins_byte]):
        used_b = ins_byte
        arg = None
    elif ins_byte & 0xf0 in cmds and is_arg_command(cmds[ins_byte & 0xf0]):
        used_b = ins_byte & 0xf0
        arg = ins_byte & 0xf
    elif ins_byte & 0xc0 in cmds and is_arg_command(cmds[ins_byte & 0xc0]) and tp.startswith('layer'):
        used_b = ins_byte & 0xc0
        arg = ins_byte & 0x3f
    else:
        errors.append(f"unrecognized instruction {hex(ins_byte)} for type {tp} at label {gen_label(orig_pos, tp)}")
        return

    out_mn = gen_mnemonic(tp, used_b)
    out_args = []
    cmd_mn = cmds[used_b][0]
    cmd_args = cmds[used_b][1:]
    long_var = False
    for a in cmd_args:
        # portamento's third operand is variable-length unless bit 7 of the
        # first operand is set.
        if cmd_mn == 'portamento' and len(out_args) == 2 and (int(out_args[0], 0) & 0x80) == 0:
            a = 'var'
        if a == 'arg':
            out_args.append(str(arg))
        elif a == 'ign-arg' and arg != 0:
            out_args.append(str(arg))
        elif a == 'u8':
            out_args.append(str(u8()))
        elif a == 'hex8':
            out_args.append(hex(u8()))
        elif a == 's8':
            v = u8()
            out_args.append(str(v if v < 128 else v - 256))
        elif a == 'u16':
            out_args.append(str(u16()))
        elif a == 'hex16':
            out_args.append(hex(u16()))
        elif a == 'var':
            val, bad = var()
            out_args.append(hex(val))
            if bad:
                long_var = True
        elif a == 'addr':
            v = u16()
            # Choose the label kind from the command that references it.
            kind = 'addr'
            if cmd_mn == 'call':
                kind = tp + '_fn'
            elif cmd_mn in ['jump', 'beqz', 'bltz', 'bgez']:
                kind = tp
            elif cmd_mn == 'startchannel':
                kind = 'chan'
            elif cmd_mn == 'setlayer':
                kind = 'layer'
            elif cmd_mn == 'setdyntable':
                kind = 'table'
            elif cmd_mn == 'setenvelope':
                kind = 'envelope'
            if v >= len(data):
                label = gen_label(v, kind)
                out_args.append(label)
                errors.append(f"reference to oob label {label}")
            elif cmd_mn == 'writeseq':
                # Target may land mid-instruction; fixed up in main().
                out_args.append('<fixup>')
                seq_writes.append((orig_pos, v))
            else:
                out_args.append(gen_label(v, kind))
                # Queue the referenced script for decoding.
                if cmd_mn == 'call':
                    decode_list.append((v, tp, 0, large))
                    script_start[v] = True
                elif cmd_mn in ['jump', 'beqz', 'bltz', 'bgez']:
                    decode_list.append((v, tp, nesting, large))
                elif cmd_mn == 'startchannel':
                    decode_list.append((v, 'chan', 0, force_large_notes))
                    script_start[v] = True
                elif cmd_mn == 'setlayer':
                    if large:
                        decode_list.append((v, 'layer_large', 0, True))
                    else:
                        decode_list.append((v, 'layer_small', 0, True))
                    script_start[v] = True
                elif cmd_mn == 'setenvelope':
                    decode_list.append((v, 'envelope', 0, True))
                    script_start[v] = True
                else:
                    script_start[v] = True

    out_all = out_mn
    if long_var:
        out_all += "_long"
    if out_args:
        out_all += ' '
        out_all += ', '.join(out_args)
    for p in range(orig_pos, pos):
        output[p] = ''
        output_instate[p] = state
    output[orig_pos] = out_all

    # Control-flow bookkeeping for linear fall-through decoding.
    if cmd_mn in ['hang', 'jump']:
        return
    if cmd_mn in ['loop']:
        nesting += 1
    if cmd_mn == 'end':
        nesting -= 1
    if cmd_mn in ['break', 'loopend']:
        nesting -= 1
        if nesting < 0:
            # This is iffy, and actually happens in sequence 0. It will make us
            # return to the caller's caller at function end.
            nesting = 0
    if cmd_mn == 'largenoteson':
        large = True
    if cmd_mn == 'largenotesoff':
        large = False
    if nesting >= 0:
        decode_list.append((pos, tp, nesting, large))

def decode_rec(state, initial):
    # Decode everything reachable from `state`; `initial` suppresses the
    # label/blank-line bookkeeping for the very first entry point.
    if not initial:
        v = state[0]
        gen_label(v, state[1])
        script_start[v] = True
    decode_list.append(state)
    while decode_list:
        decode_one(decode_list.pop())

def main():
    decode_rec((0, 'seq', 0, False), initial=True)
    if seq_num == 0:
        # Sequence 0 is the sound player: it embeds per-version tables of
        # sound-effect pointers plus known unreferenced scripts.
        if lang == 'jp':
            sound_banks = [
                (0x14C, 0x70),
                (0x8A8, 0x38),  # stated as 0x30
                (0xB66, 0x38),  # stated as 0x30
                (0xE09, 0x80),
                (0x194B, 0x28),  # stated as 0x20
                (0x1CA6, 0x80),
                (0x27C9, 0x20),
                (0x2975, 0x30),
                # same script as bank 3
                # same script as bank 5
            ]
            unused = [
                (0x1FC4, 'layer_large'),
                (0x2149, 'layer_large'),
                (0x2223, 'layer_large'),
                (0x28C5, 'chan'),
                (0x3110, 'envelope'),
                (0x31EC, 'envelope'),
            ]
        elif lang == 'us':
            sound_banks = [
                (0x14C, 0x70),
                (0x8F6, 0x38),  # stated as 0x30
                (0xBB4, 0x40),
                (0xF8E, 0x80),
                (0x1AF3, 0x28),  # stated as 0x20
                (0x1E4E, 0x80),
                (0x2971, 0x20),
                (0x2B1D, 0x40),
                # same script as bank 3
                # same script as bank 5
            ]
            unused = [
                (0x216C, 'layer_large'),
                (0x22F1, 'layer_large'),
                (0x23CB, 'layer_large'),
                (0x2A6D, 'chan'),
                (0x339C, 'envelope'),
                (0x3478, 'envelope'),
            ]
        elif lang == 'eu':
            sound_banks = [
                (0x154, 0x70),
                (0x8FE, 0x38),  # stated as 0x30?
                (0xBBC, 0x40),
                (0xFA5, 0x80),
                (0x1B0C, 0x28),  # stated as 0x20?
                (0x1E67, 0x80),
                (0x298A, 0x20),
                (0x2B36, 0x40),
                # same script as bank 3
                # same script as bank 5
            ]
            unused = [
                (0xF9A, 'chan'),
                (0x2185, 'layer_large'),
                (0x230A, 'layer_large'),
                (0x23E4, 'layer_large'),
                (0x2A86, 'chan'),
                (0x33CC, 'envelope'),
                (0x34A8, 'envelope'),
            ]
        for (addr, count) in sound_banks:
            for i in range(count):
                decode_rec((addr + 2*i, 'soundref', 0, False), initial=True)
        for (addr, tp) in unused:
            gen_label(addr, tp + '_unused')
            decode_rec((addr, tp, 0, force_large_notes), initial=False)

    # Resolve writeseq targets now that every byte is classified.
    for (pos, write_to) in seq_writes:
        assert '<fixup>' in output[pos]
        delta = 0
        # Walk back to the start of the instruction the write lands inside.
        while output[write_to] == '':
            write_to -= 1
            delta += 1
        if write_to > pos and all(output[i] == '' for i in range(pos+1, write_to)):
            # The target is the instruction immediately after this one.
            nice_target = str(delta)
            output[pos] = output[pos].replace('writeseq', 'writeseq_nextinstr')
        else:
            tp = output_instate[write_to][1] if output_instate[write_to] is not None else 'addr'
            nice_target = gen_label(write_to, tp) + ", " + str(delta)
        output[pos] = output[pos].replace('<fixup>', nice_target)

    # Add unreachable 'end' markers
    for i in range(1, len(data)):
        if (data[i] == 0xff and output[i] is None and output[i - 1] is not None
                and label_name[i] is None):
            tp = output_instate[i - 1][1]
            if tp in ["seq", "chan", "layer_small", "layer_large"]:
                output[i] = gen_mnemonic(tp, 0xff)

    # Add envelope padding
    for i in range(1, len(data) - 1):
        if (data[i] == 0 and output[i] is None and output[i - 1] is not None
                and output[i + 1] is not None and label_name[i] is None
                and output[i + 1].startswith('envelope')):
            script_start[i] = True
            output[i] = "# padding\n.byte 0"

    # Add 'unused' marker labels
    for i in range(1, len(data)):
        if (output[i] is None and output[i - 1] is not None
                and label_name[i] is None):
            script_start[i] = True
            gen_label(i, 'unused')

    # Remove up to 15 bytes of padding at the end
    end_padding = 0
    for i in range(len(data)-1, -1, -1):
        if output[i] is not None:
            break
        end_padding += 1
    if end_padding > 15:
        end_padding = 0

    if print_end_padding:
        print(end_padding)
        sys.exit(0)

    # Emit the final assembly listing.
    print(".include \"seq_macros.inc\"")
    print(".section .rodata")
    print(".align 0")
    print("sequence_start:")
    print()
    for i in range(len(data) - end_padding):
        if script_start[i] and i > 0:
            print()
        if label_name[i] is not None:
            print(f"{label_name[i]}:")
        o = output[i]
        if o is None:
            print(f".byte {hex(data[i])}")  # undecoded byte: emit raw data
        elif o:
            print(o)
        elif label_name[i] is not None:
            print("<mid-instruction>")
            errors.append(f"mid-instruction label {label_name[i]}")

    if hit_eof:
        errors.append("hit eof!?")
    if errors:
        print(f"[(unknown)] errors:", file=sys.stderr)
        for w in errors:
            print(w, file=sys.stderr)

main()
StarcoderdataPython
3386008
# Generated by Django 2.0.3 on 2018-04-02 14:31 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('proxies', '0001_initial'), ] operations = [ migrations.AddField( model_name='proxy', name='letsencrypt', field=models.BooleanField(default=False, verbose_name='LetsEncrypt?'), ), migrations.AddField( model_name='proxy', name='proxypass', field=models.CharField(blank=True, max_length=255, verbose_name='Proxy Address'), ), migrations.AddField( model_name='proxy', name='rewriteHTTPS', field=models.BooleanField(default=True, verbose_name='Rewrite to HTTPS?'), ), migrations.AddField( model_name='proxy', name='ssl', field=models.BooleanField(default=False, verbose_name='SSL?'), ), ]
StarcoderdataPython
1760675
#!/usr/bin/python
import sys
import csv


def reducer(stdin=None, stdout=None):
    """Sum tab-separated (tag, count) pairs and emit the ten most frequent.

    Intended as the reduce step of a Hadoop-streaming job: reads
    ``tag<TAB>count`` lines, aggregates counts per tag, and writes the top
    ten tags as ``tag<TAB>total`` rows in descending order of total.

    Args:
        stdin: readable text stream of input rows (defaults to sys.stdin).
        stdout: writable text stream for output rows (defaults to sys.stdout).
    """
    stdin = sys.stdin if stdin is None else stdin
    stdout = sys.stdout if stdout is None else stdout
    reader = csv.reader(stdin, delimiter='\t')
    writer = csv.writer(stdout, delimiter='\t')

    tag_frequency = {}
    for row in reader:
        if not row:
            continue  # tolerate blank lines (e.g. a trailing newline)
        tag_frequency[row[0]] = tag_frequency.get(row[0], 0) + int(row[1])

    # Stable sort: ties keep their first-seen order, matching the original
    # key=lambda x: -x[1] implementation.
    top_ten = sorted(tag_frequency.items(), key=lambda kv: kv[1], reverse=True)[:10]
    for tag, total in top_ten:
        writer.writerow([tag, total])


def main():
    reducer()


if __name__ == "__main__":
    main()
StarcoderdataPython
3206579
import click
from src.analyse import ParseTranslationData

# SPECs are downloaded from https://src.fedoraproject.org/lookaside/
# https://src.fedoraproject.org/lookaside/rpm-specs-latest.tar.xz


@click.command()
@click.argument('keyword')
@click.option(
    '--type',
    help="Parse format, example, SPEC"
)
@click.option(
    '--path',
    help="Directory path",
    # NOTE(review): user-specific absolute path as a default — consider
    # making this required or reading it from configuration.
    default='/home/suanand/Downloads/rpm-specs-latest/rpm-specs'
)
@click.pass_obj
def find(app_context, keyword, type, path):
    """Find in SPEC file."""
    # `type` shadows the builtin of that name; kept to match the CLI option.
    parse_dict = {"parse": "SPEC"}
    # Normalize to upper case; only SPEC parsing is supported currently.
    type = 'SPEC' if not type else type.upper()
    # Call find_in api from src module
    some_obj = ParseTranslationData(type, path)
    some_obj.find_in_spec_file(keyword)
    # app_context.print_r(parse_dict)
StarcoderdataPython
3393135
from setuptools import setup, find_packages

# Minimal setuptools packaging script for the Keywordtool Scraper project.
setup(
    name="Keywordtool Scraper",
    version="0.0.4",
    packages=find_packages(),
)
StarcoderdataPython
3231381
from backend.database import db


class AgentTypeVersion(db.Model):
    """ORM model for a named version of an agent type.

    Belongs to an AgentType row and owns the payloads built for it.
    """

    __tablename__ = "AgentTypeVersion"

    Id = db.Column(db.Integer, primary_key=True)
    Name = db.Column(db.String)
    AgentTypeId = db.Column(db.Integer, db.ForeignKey('AgentType.Id'), nullable=False)
    Payloads = db.relationship('Payload', backref='AgentTypeVersion', lazy=True)

    def __repr__(self):
        # Prefer the human-readable name; fall back to the numeric id.
        label = self.Name if self.Name else str(self.Id)
        return '<AgentTypeVersion: %s>' % label
StarcoderdataPython
84279
# -*- coding: utf-8 -*- { 'name': 'Slides', 'version': '1.0', 'sequence': 145, 'summary': 'Share and Publish Videos, Presentations and Documents', 'category': 'Website', 'description': """ Share and Publish Videos, Presentations and Documents' ====================================================== * Website Application * Channel Management * Filters and Tagging * Statistics of Presentation * Channel Subscription * Supported document types : PDF, images, YouTube videos and Google Drive documents) """, 'depends': ['website', 'website_mail'], 'data': [ 'view/res_config.xml', 'view/website_slides.xml', 'view/website_slides_embed.xml', 'view/website_slides_backend.xml', 'data/website_slides_data.xml', 'security/ir.model.access.csv', 'security/website_slides_security.xml' ], 'demo': [ 'data/website_slides_demo.xml' ], 'installable': True, 'application': True, }
StarcoderdataPython
105038
import os

# Saves the initial "relative image" (a cropped region of a canvas
# screenshot) to disk so it can be located later with OpenCV.
# NOTE(review): the original imported PIL.Image without ever using it (the
# image object is supplied by the caller), so the unused import was dropped.


def relimg(imgobj, relx1, rely1, relx2, rely2, canvwidth, canvheight, keynum, tempdir):
    """Crop the canvas-relative rectangle out of *imgobj* and save it as BMP.

    The rectangle (relx1, rely1)-(relx2, rely2) is given in canvas
    coordinates and is rescaled to the image's own pixel size before
    cropping.

    Args:
        imgobj: PIL-style image with a ``size`` tuple and ``crop``/``save``.
        relx1, rely1, relx2, rely2: rectangle corners in canvas coordinates.
        canvwidth, canvheight: canvas dimensions used for rescaling.
        keynum: numeric suffix used in the output file name.
        tempdir: directory to write the file into.

    Returns:
        The path of the saved BMP file (``<tempdir>/relimg<keynum>.bmp``).
    """
    iniwidth, iniheight = imgobj.size
    # Rescale canvas coordinates to image-pixel coordinates.
    box = (relx1 * iniwidth / canvwidth,
           rely1 * iniheight / canvheight,
           relx2 * iniwidth / canvwidth,
           rely2 * iniheight / canvheight)
    cropped = imgobj.crop(box)
    # os.path.join replaces the original hard-coded "\\" separator, which
    # produced broken file names on non-Windows systems; the context
    # manager guarantees the handle is closed even if save() raises.
    out_path = os.path.join(tempdir, "relimg%s.bmp" % keynum)
    with open(out_path, "w+b") as fh:
        cropped.save(fh)
    return out_path
StarcoderdataPython
3395748
# -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from typing import TYPE_CHECKING, Union, Optional
from urllib.parse import urlparse

from .constants import COLUMN, PREDEFINED_URLS

if TYPE_CHECKING:
    from urllib.parse import ParseResult


def hex_to_bytes(tx_hash: str) -> bytes:
    """Convert a '0x'-prefixed hex string (e.g. a tx hash) to raw bytes."""
    without_prefix = tx_hash[2:]
    return bytes.fromhex(without_prefix)


def print_title(title: str, column: int = COLUMN, sep: str = "="):
    """Print *title* in brackets, padded with *sep* up to *column* chars."""
    padding = sep * max(0, column - len(title) - 3)
    print(f"[{title}] {padding}")


def print_dict(data: dict):
    """Pretty-print *data* as JSON, rendering bytes values as 0x-hex."""
    converted = {
        key: (f"0x{value.hex()}" if isinstance(value, bytes) else value)
        for key, value in data.items()
    }
    print(json.dumps(converted, indent=4))


def print_response(content: Union[str, dict]):
    """Print an RPC response under a "Response" title banner."""
    print_title("Response", COLUMN)
    if isinstance(content, dict):
        print_dict(content)
    else:
        print(content)
    print("")


def print_tx_result(tx_result: dict):
    """Print a transaction result under a "Transaction Result" banner."""
    print_title("Transaction Result")
    print_dict(tx_result)


def is_url_valid(url: str) -> bool:
    """Return True for http(s) URLs that have both a host and a path."""
    parts: 'ParseResult' = urlparse(url)
    return (parts.scheme in ("http", "https")
            and bool(parts.netloc)
            and bool(parts.path))


def get_predefined_url(name: str) -> Optional[str]:
    """Look up a well-known network alias; None if *name* is not defined."""
    return PREDEFINED_URLS.get(name)


def get_url(url: str) -> str:
    """Resolve *url*: a predefined alias wins, otherwise validate as-is.

    Raises:
        ValueError: if *url* is neither a predefined alias nor a valid URL.
    """
    predefined = get_predefined_url(url)
    if isinstance(predefined, str):
        return predefined
    if not is_url_valid(url):
        raise ValueError(f"Invalid url: {url}")
    return url
StarcoderdataPython
3378835
from orders.models import Orders
import factory
from faker import Faker
import faker.providers.address

# Seed so generated data is reproducible across test runs.
Faker.seed(0)
faker = Faker(['pt_BR'])


class OrdersFactory(factory.django.DjangoModelFactory):
    """Factory producing Orders populated with Brazilian-localized fake data.

    Each field is wrapped in factory.LazyFunction so the faker method is
    invoked once per built instance. The original assigned e.g.
    ``cpf = faker.cpf()`` as a plain class attribute, which evaluated the
    faker exactly once at class-definition time and made every Orders
    object share the same values.
    """

    cpf = factory.LazyFunction(faker.cpf)
    name = factory.LazyFunction(faker.name)
    email = factory.LazyFunction(faker.email)
    postal_code = factory.LazyFunction(faker.postcode)
    address = factory.LazyFunction(faker.street_name)
    number = factory.LazyFunction(faker.building_number)
    district = factory.LazyFunction(faker.bairro)
    state = factory.LazyFunction(faker.estado_sigla)
    city = factory.LazyFunction(faker.city)

    class Meta:
        model = Orders
StarcoderdataPython
3221160
# Dataset/scraper artifact retained as a comment (it was fused before the
# module docstring and made the module syntactically invalid): <gh_stars>0
'''
To use this extension, follow these instructions:
https://www.sphinx-doc.org/en/master/development/tutorials/todo.html#using-the-extension
'''
from docutils import nodes
from docutils.parsers.rst import Directive, directives

from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective

'''
Node classes usually don’t have to do anything except inherit from the
standard docutils classes defined in docutils.nodes. todo inherits from
Admonition because it should be handled like a note or warning, todolist
is just a “general” node.

docutils nodes: https://docutils.sourceforge.io/docs/ref/doctree.html
Sphinx nodes: https://www.sphinx-doc.org/en/master/extdev/nodes.html#nodes
'''


class todo(nodes.Admonition, nodes.Element):
    """Doctree node holding a single todo admonition."""
    pass


class todolist(nodes.General, nodes.Element):
    """Placeholder node, replaced with the collected todos at resolve time."""
    pass


def visit_todo_node(self, node):
    # Render a todo exactly like a standard admonition (note/warning).
    self.visit_admonition(node)


def depart_todo_node(self, node):
    self.depart_admonition(node)


'''
A directive class is a class deriving usually from
docutils.parsers.rst.Directive. The directive interface is also covered in
detail in the docutils documentation; the important thing is that the class
should have attributes that configure the allowed markup, and a run method
that returns a list of nodes.

Sphinx docs about docutils...Directive:
https://www.sphinx-doc.org/en/master/extdev/markupapi.html#docutils.parsers.rst.Directive
docutils.parsers.rst.Directive:
http://docutils.sourceforge.net/docs/ref/rst/directives.html
'''

'''
It’s very simple, creating and returning an instance of our todolist node
class. The TodolistDirective directive itself has neither content nor
arguments that need to be handled.
'''


class TodolistDirective(Directive):
    """``.. todolist::`` — emits a placeholder node, filled in at resolve time."""

    def run(self):
        return [todolist('')]


'''
Several important things are covered here. First, as you can see, we’re now
subclassing the SphinxDirective helper class instead of the usual Directive
class. This gives us access to the build environment instance using the
self.env property. Without this, we’d have to use the rather convoluted
self.state.document.settings.env.

Then, to act as a link target (from TodolistDirective), the TodoDirective
directive needs to return a target node in addition to the todo node. The
target ID (in HTML, this will be the anchor name) is generated by using
env.new_serialno which returns a new unique integer on each call and
therefore leads to unique target names. The target node is instantiated
without any text (the first two arguments).

On creating admonition node, the content body of the directive are parsed
using self.state.nested_parse. The first argument gives the content body,
and the second one gives content offset. The third argument gives the parent
node of parsed result, in our case the todo node.

Following this, the todo node is added to the environment. This is needed to
be able to create a list of all todo entries throughout the documentation,
in the place where the author puts a todolist directive. For this case, the
environment attribute todo_all_todos is used (again, the name should be
unique, so it is prefixed by the extension name). It does not exist when a
new environment is created, so the directive must check and create it if
necessary. Various information about the todo entry’s location are stored
along with a copy of the node.

In the last line, the nodes that should be put into the doctree are
returned: the target node and the admonition node.
'''


class TodoDirective(SphinxDirective):
    """``.. todo::`` — records a todo entry and renders it as an admonition."""

    # this enables content in the directive
    has_content = True

    option_spec = {
        'woof': directives.unchanged_required,
        'wing': directives.unchanged_required,
        'foo': directives.unchanged_required
    }

    def run(self):
        # Unique anchor per todo so todolist entries can link back here.
        targetid = 'todo-%d' % self.env.new_serialno('todo')
        targetnode = nodes.target('', '', ids=[targetid])

        todo_node = todo('\n'.join(self.content))
        todo_node += nodes.title(_('Todo'), _('Todo'))
        self.state.nested_parse(self.content, self.content_offset, todo_node)

        if not hasattr(self.env, 'todo_all_todos'):
            self.env.todo_all_todos = []

        self.env.todo_all_todos.append({
            'docname': self.env.docname,
            'lineno': self.lineno,
            # deepcopy: the stored node must survive later doctree mutation.
            'todo': todo_node.deepcopy(),
            'target': targetnode,
            'options': self.options
        })

        return [targetnode, todo_node]


'''
The node structure that the directive returns looks like this:

    +--------------------+
    | target node        |
    +--------------------+
    +--------------------+
    | todo node          |
    +--------------------+
     \__+--------------------+
        | admonition title   |
        +--------------------+
        | paragraph          |
        +--------------------+
        | ...                |
        +--------------------+
'''

'''
Event handlers are one of Sphinx’s most powerful features, providing a way
to do hook into any part of the documentation process. There are many events
provided by Sphinx itself, as detailed in the API guide, and we’re going to
use a subset of them here.

Sphinx event API: https://www.sphinx-doc.org/en/master/extdev/appapi.html#events
'''

'''
Let’s look at the event handlers used in the above example. First, the one
for the env-purge-doc event. Since we store information from source files in
the environment, which is persistent, it may become out of date when the
source file changes. Therefore, before each source file is read, the
environment’s records of it are cleared, and the env-purge-doc event gives
extensions a chance to do the same. Here we clear out all todos whose
docname matches the given one from the todo_all_todos list. If there are
todos left in the document, they will be added again during parsing.

env-purge-doc:
https://www.sphinx-doc.org/en/master/extdev/appapi.html#event-env-purge-doc
'''


def purge_todos(app, env, docname):
    if not hasattr(env, 'todo_all_todos'):
        return
    env.todo_all_todos = [todo for todo in env.todo_all_todos
                          if todo['docname'] != docname]


'''
The next handler, for the env-merge-info event, is used during parallel
builds. As during parallel builds all threads have their own env, there’s
multiple todo_all_todos lists that need to be merged.

env-merge-info:
https://www.sphinx-doc.org/en/master/extdev/appapi.html#event-env-merge-info
'''


def merge_todos(app, env, docnames, other):
    if not hasattr(env, 'todo_all_todos'):
        env.todo_all_todos = []
    if hasattr(other, 'todo_all_todos'):
        env.todo_all_todos.extend(other.todo_all_todos)


'''
The doctree-resolved event is emitted at the end of phase 3 (resolving) and
allows custom resolving to be done. The handler we have written for this
event is a bit more involved. If the todo_include_todos config value (which
we’ll describe shortly) is false, all todo and todolist nodes are removed
from the documents. If not, todo nodes just stay where and how they are.
todolist nodes are replaced by a list of todo entries, complete with
backlinks to the location where they come from. The list items are composed
of the nodes from the todo entry and docutils nodes created on the fly: a
paragraph for each entry, containing text that gives the location, and a
link (reference node containing an italic node) with the backreference. The
reference URI is built by sphinx.builders.Builder.get_relative_uri() which
creates a suitable URI depending on the used builder, and appending the todo
node’s (the target’s) ID as the anchor name.

doctree-resolved:
https://www.sphinx-doc.org/en/master/extdev/appapi.html#event-doctree-resolved
phase 3 (resolving):
https://www.sphinx-doc.org/en/master/extdev/index.html#build-phases
get_relative_uri():
https://www.sphinx-doc.org/en/master/extdev/builderapi.html#sphinx.builders.Builder.get_relative_uri
'''


def process_todo_nodes(app, doctree, fromdocname):
    # if extention turned off, remove all todo nodes from doctree
    if not app.config.todo_include_todos:
        for node in doctree.traverse(todo):
            node.parent.remove(node)

    # Replace all todolist nodes with a list of the collected todos.
    # Augment each todo with a backlink to the original location.
    env = app.builder.env

    if not hasattr(env, 'todo_all_todos'):
        env.todo_all_todos = []

    for node in doctree.traverse(todolist):
        if not app.config.todo_include_todos:
            node.replace_self([])
            continue

        content = []

        for todo_info in env.todo_all_todos:
            # if todo_info['options']:
            #     print('OPTIONS FOUND IN {0}'.format(todo_info['docname']))
            #     print(todo_info['options'])
            # for todo in app.env.todo_all_todos:
            #     for key in todo['options']:
            #         print('key: {0}\tvalue: {1}'.format(key, todo['options'][key]))

            para = nodes.paragraph()
            filename = env.doc2path(todo_info['docname'], base=None)
            description = (
                _('(The original entry is located in %s, line %d and can be found ') %
                (filename, todo_info['lineno']))
            para += nodes.Text(description, description)

            # Create a reference
            newnode = nodes.reference('', '')
            innernode = nodes.emphasis(_('here'), _('here'))
            newnode['refdocname'] = todo_info['docname']
            newnode['refuri'] = app.builder.get_relative_uri(
                fromdocname, todo_info['docname'])
            newnode['refuri'] += '#' + todo_info['target']['refid']
            newnode.append(innernode)
            para += newnode
            para += nodes.Text('.)', '.)')

            # Insert into the todolist
            content.append(todo_info['todo'])
            content.append(para)

        node.replace_self(content)


def process_build_finished(app, exception):
    # NOTE(review): debug-only handler — prints instead of logging; confirm
    # whether it should stay wired up in setup().
    if exception:
        print('Exception hit!')
        print(dir(exception))


'''
The setup function is a requirement and is used to plug directives into
Sphinx. However, we also use it to hook up the other parts of our extension.
Let’s look at our setup function:

- add_config_value() lets Sphinx know that it should recognize the new
  config value todo_include_todos, whose default value should be False (this
  also tells Sphinx that it is a boolean value). If the third argument was
  'html', HTML documents would be full rebuild if the config value changed
  its value. This is needed for config values that influence reading (build
  phase 1 (reading)).
  - add_config_value():
    https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_config_value
  - phase 1 (reading):
    https://www.sphinx-doc.org/en/master/extdev/index.html#build-phases

- add_node() adds a new node class to the build system. It also can specify
  visitor functions for each supported output format. These visitor
  functions are needed when the new nodes stay until phase 4 (writing).
  Since the todolist node is always replaced in phase 3 (resolving), it
  doesn’t need any.
  - add_node():
    https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_node
  - phase 3 (resolving):
    https://www.sphinx-doc.org/en/master/extdev/index.html#build-phases
  - phase 4 (writing):
    https://www.sphinx-doc.org/en/master/extdev/index.html#build-phases

- add_directive() adds a new directive, given by name and class.
  - add_directive():
    https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_directive

- Finally, connect() adds an event handler to the event whose name is given
  by the first argument. The event handler function is called with several
  arguments which are documented with the event.
  - connect():
    https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.connect
'''


def setup(app):
    app.add_config_value('todo_include_todos', False, 'html')

    app.add_node(todolist)
    app.add_node(todo,
                 html=(visit_todo_node, depart_todo_node),
                 latex=(visit_todo_node, depart_todo_node),
                 text=(visit_todo_node, depart_todo_node))

    app.add_directive('todo', TodoDirective)
    app.add_directive('todolist', TodolistDirective)
    app.connect('doctree-resolved', process_todo_nodes)
    app.connect('env-purge-doc', purge_todos)
    app.connect('env-merge-info', merge_todos)
    app.connect('build-finished', process_build_finished)

    return {
        'version': '0.1',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
StarcoderdataPython
1697245
# Dataset/scraper artifacts retained as comments (they were fused before the
# module docstring): <reponame>mitodl/ocw-studio <filename>videos/signals_test.py
"""videos.signals tests"""
import pytest

from videos.constants import DESTINATION_YOUTUBE
from videos.factories import VideoFileFactory


@pytest.mark.django_db
def test_delete_video_file_signal(mocker):
    """Deleting a youtube VideoFile should trigger the Youtube API delete function"""
    # Patch the functions invoked via .delay so nothing external runs.
    mock_remove = mocker.patch("videos.signals.remove_youtube_video")
    mock_delete_s3_objects = mocker.patch("videos.signals.delete_s3_objects")

    video_file = VideoFileFactory.create(destination=DESTINATION_YOUTUBE)
    video_file.delete()

    # Each cleanup hook must be queued exactly once with the file's own ids.
    mock_remove.delay.assert_called_once_with(video_file.destination_id)
    mock_delete_s3_objects.delay.assert_called_once_with(video_file.s3_key)
StarcoderdataPython
1675238
# Dataset/scraper artifact retained as a comment (it was fused before the
# module docstring): <gh_stars>1-10
"""Fixture module to skip the unsupervised_learning.rst doctest for versions
of SciPy earlier than 0.12.0.
"""
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version


def setup_module(module):
    """Module-level fixture: raise SkipTest when SciPy is older than 0.12.0.

    scipy.misc.face(), required by the doctests, is only available from
    SciPy 0.12.0 onward.
    """
    if sp_version < (0, 12):
        raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                       "thus does not include the scipy.misc.face() image.")
StarcoderdataPython
158185
# Re-export the package's public entry point so callers can write
# ``from <package> import USBaggingClassifier``.
from .classifier import USBaggingClassifier

# Single-source package version string.
__version__ = '0.1.1'
StarcoderdataPython
1732362
import subprocess
import logging
import jinja2
from random import randint
from pathlib import Path
from .errors import ProcessingError

# When on, debug() mirrors log messages to stdout as well.
DEBUG = True


def run_subprocess(args):
    """Run *args* as a child process and return its (stdout, stderr) bytes.

    Raises ProcessingError (carrying the raw stderr bytes) when the child
    exits with a non-zero status.
    """
    process = subprocess.Popen(
        args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, stderr = process.communicate()

    if process.returncode == 0:
        return stdout, stderr

    logging.error(
        f"Subprocess failed with code: {process.returncode} msg: {stderr}"
    )
    raise ProcessingError(stderr)


def get_rendered_template(template_path, args):
    """renders and returns the template in-memory, only use with small files."""
    with open(template_path, "r") as template_file:
        source = template_file.read()
    return jinja2.Template(source).render(args)


def get_tmp_filepath_in_dir(directory, suffix=""):
    """Return a path ``<directory>/tmp_<6 digits><suffix>`` that does not
    currently exist.

    NOTE(review): check-then-use is inherently racy (another process could
    create the same path between the exists() check and first use) — confirm
    callers tolerate that.
    """
    while True:
        uid = f"{randint(0, 999999):06d}"
        candidate = f"{directory}/tmp_{uid}{suffix}"
        if not Path(candidate).exists():
            return candidate


def debug(msg):
    """Log *msg* at DEBUG level and echo it to stdout when DEBUG is enabled."""
    logging.debug(msg)
    if DEBUG:
        print(msg)
StarcoderdataPython
163326
#! /usr/bin/env python

# Copyright 2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL

import os
import sys

import yt

# checksumAPI lives in the WarpX regression-test tree added to sys.path here.
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI

import numpy as np
import scipy.constants as scc

## This script performs various checks for the proton boron nuclear fusion module. The simulation
## that we check is made of 5 different tests, each with different proton, boron and alpha species.
##
## The first test is performed in the proton-boron center of mass frame. It could correspond to the
## physical case of a proton beam colliding with a boron beam. The kinetic energy of the colliding
## particles depends on the cell number in the z direction and varies in the few keV to few MeV
## range. All the particles within a cell have the exact same momentum, which allows detailed
## checks of the energy of produced alpha particles. The proton and boron species have the same
## density and number of particles in this test. The number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The second test is performed in the boron rest frame. It corresponds to the physical case of a
## low density proton beam colliding with a high-density proton+boron target. The energy of the
## proton beam is varied in the few keV to few MeV range, depending on the cell number in the z
## direction. As in the previous case, all the particles within a cell have the exact same
## momentum, which allows detailed checks of the energy of produced alpha particles. In this test,
## there are 100 immobile boron and 100 immobile proton macroparticles per cell, as well as 900
## beam proton macroparticles per cell. The density of the immobile particles is 6 orders of
## magnitude higher than the number of beam particles, which means that they have a much higher
## weight. This test is similar to the example given in section 3 of Higginson et al.,
## Journal of Computation Physics, 388 439–453 (2019), which was found to be sensitive to the way
## unsampled pairs are accounted for. As before, the number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The third test corresponds to a Maxwellian plasma with a 44 keV temperature. The alpha yield is
## directly compared to the analytical fits of <NAME> and <NAME>, Nuclear Fusion, 40, 865
## (2000) for a thermal plasma.
##
## The fourth test corresponds to a plasma with an extremely small boron density, so that all boron
## macroparticles should have disappeared by the end of the simulation, which we verify.
##
## The fifth test is exactly the same as the fourth test, except that the
## fusion_probability_threshold parameter is increased to an excessive value. Because of that, we
## severely underestimate the fusion yield and boron macroparticles remain at the end of the
## simulation, which we verify.
##
## In all simulations, we check particle number, charge, momentum and energy conservation and
## perform basic checks regarding the produced particles. When possible, we also compare the number
## of produced macroparticles, fusion yield and energy of the produced particles to theoretical
## values.
##
## Please be aware that the relative tolerances are often set empirically in this analysis script,
## so it would not be surprising that some tolerances need to be increased in the future.

default_tol = 1.e-12 # Default relative tolerance

## Some physical parameters
keV_to_Joule = scc.e*1e3
MeV_to_Joule = scc.e*1e6
barn_to_square_meter = 1.e-28
m_p = scc.m_p # Proton mass
m_b = 10.9298*m_p # Boron 11 mass
m_reduced = m_p*m_b/(m_p+m_b)
m_a = 3.97369*m_p # Alpha mass
m_be = 7.94748*m_p # Beryllium 8 mass
Z_boron = 5.
Z_proton = 1.
E_Gamow = (Z_boron*Z_proton*np.pi*scc.fine_structure)**2*2.*m_reduced*scc.c**2
E_Gamow_MeV = E_Gamow/MeV_to_Joule
E_Gamow_keV = E_Gamow/keV_to_Joule
E_fusion = 8.59009*MeV_to_Joule # Energy released during p + B -> alpha + Be
E_decay = 0.0918984*MeV_to_Joule # Energy released during Be -> 2*alpha
E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha

## Some numerical parameters for this test
size_x = 8
size_y = 8
size_z = 16
dV_total = size_x*size_y*size_z # Total simulation volume
# Volume of a slice corresponding to a single cell in the z direction. In tests 1 and 2, all the
# particles of a given species in the same slice have the exact same momentum
dV_slice = size_x*size_y
dt = 1./(scc.c*np.sqrt(3.))
# In test 1 and 2, the energy in cells number i (in z direction) is typically Energy_step * i**2
Energy_step = 22.*keV_to_Joule

def is_close(val1, val2, rtol=default_tol, atol=0.):
    ## Wrapper around numpy.isclose, used to override the default tolerances.
    return np.isclose(val1, val2, rtol=rtol, atol=atol)

def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
    # Copies momentum, weight, id/cpu and z position arrays of one species
    # out of a yt dataset into data_dict under "<prefix>_<field>_<suffix>" keys.
    data_dict[prefix+"_px_"+suffix]  = yt_ad[species_name, "particle_momentum_x"].v
    data_dict[prefix+"_py_"+suffix]  = yt_ad[species_name, "particle_momentum_y"].v
    data_dict[prefix+"_pz_"+suffix]  = yt_ad[species_name, "particle_momentum_z"].v
    data_dict[prefix+"_w_"+suffix]   = yt_ad[species_name, "particle_weight"].v
    data_dict[prefix+"_id_"+suffix]  = yt_ad[species_name, "particle_id"].v
    data_dict[prefix+"_cpu_"+suffix] = yt_ad[species_name, "particle_cpu"].v
    data_dict[prefix+"_z_"+suffix]   = yt_ad[species_name, "particle_position_z"].v

def add_empty_species_to_dict(data_dict, species_name, prefix, suffix):
    # Same keys as add_existing_species_to_dict, but with empty arrays.
    data_dict[prefix+"_px_"+suffix]  = np.empty(0)
    data_dict[prefix+"_py_"+suffix]  = np.empty(0)
    data_dict[prefix+"_pz_"+suffix]  = np.empty(0)
    data_dict[prefix+"_w_"+suffix]   = np.empty(0)
    data_dict[prefix+"_id_"+suffix]  = np.empty(0)
    data_dict[prefix+"_cpu_"+suffix] = np.empty(0)
    data_dict[prefix+"_z_"+suffix]   = np.empty(0)

def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
    try:
        ## If species exist, we add its data to the dictionary
        add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix)
    except yt.utilities.exceptions.YTFieldNotFound:
        ## If species does not exist, we avoid python crash and add empty arrays to the
        ## dictionnary. Currently, this happens for the boron species in test number 4, which
        ## entirely fuses into alphas.
        add_empty_species_to_dict(data_dict, species_name, prefix, suffix)

def check_particle_number_conservation(data):
    total_w_proton_start = np.sum(data["proton_w_start"])
    total_w_proton_end   = np.sum(data["proton_w_end"])
    total_w_boron_start  = np.sum(data["boron_w_start"])
    total_w_boron_end    = np.sum(data["boron_w_end"])
    consumed_proton = total_w_proton_start - total_w_proton_end
    consumed_boron  = total_w_boron_start - total_w_boron_end
    created_alpha   = np.sum(data["alpha_w_end"])
    assert(consumed_proton >= 0.)
    assert(consumed_boron >= 0.)
    assert(created_alpha >= 0.)
    ## Check that number of consumed proton and consumed boron are equal
    assert_scale = max(total_w_proton_start, total_w_boron_start)
    assert(is_close(consumed_proton, consumed_boron, rtol = 0.,
                    atol = default_tol*assert_scale))
    ## Check that number of consumed particles corresponds to number of produced alpha
    ## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
    assert(is_close(total_w_proton_start, total_w_proton_end + created_alpha/3.))
    assert(is_close(total_w_boron_start, total_w_boron_end + created_alpha/3.))

def compute_energy_array(data, species_name, suffix, m):
    ## Relativistic computation of kinetic energy for a given species
    psq_array = data[species_name+'_px_'+suffix]**2 + data[species_name+'_py_'+suffix]**2 + \
                data[species_name+'_pz_'+suffix]**2
    rest_energy = m*scc.c**2
    return np.sqrt(psq_array*scc.c**2 + rest_energy**2) - rest_energy

def check_energy_conservation(data):
    proton_energy_start = compute_energy_array(data, "proton", "start", m_p)
    proton_energy_end   = compute_energy_array(data, "proton", "end", m_p)
    boron_energy_start  = compute_energy_array(data, "boron", "start", m_b)
    boron_energy_end    = compute_energy_array(data, "boron", "end", m_b)
    alpha_energy_end    = compute_energy_array(data, "alpha", "end", m_a)
    total_energy_start = np.sum(proton_energy_start*data["proton_w_start"]) + \
                         np.sum(boron_energy_start*data["boron_w_start"])
    total_energy_end   = np.sum(proton_energy_end*data["proton_w_end"]) + \
                         np.sum(boron_energy_end*data["boron_w_end"]) + \
                         np.sum(alpha_energy_end*data["alpha_w_end"])
    ## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
    n_fusion_reaction = np.sum(data["alpha_w_end"])/3.
    assert(is_close(total_energy_end,
                    total_energy_start + n_fusion_reaction*E_fusion_total,
                    rtol = 1.e-8))

def check_momentum_conservation(data):
    proton_total_px_start = np.sum(data["proton_px_start"]*data["proton_w_start"])
    proton_total_py_start = np.sum(data["proton_py_start"]*data["proton_w_start"])
    proton_total_pz_start = np.sum(data["proton_pz_start"]*data["proton_w_start"])
    proton_total_px_end = np.sum(data["proton_px_end"]*data["proton_w_end"])
    proton_total_py_end = np.sum(data["proton_py_end"]*data["proton_w_end"])
    proton_total_pz_end = np.sum(data["proton_pz_end"]*data["proton_w_end"])
    boron_total_px_start = np.sum(data["boron_px_start"]*data["boron_w_start"])
    boron_total_py_start = np.sum(data["boron_py_start"]*data["boron_w_start"])
    boron_total_pz_start = np.sum(data["boron_pz_start"]*data["boron_w_start"])
    boron_total_px_end = np.sum(data["boron_px_end"]*data["boron_w_end"])
    boron_total_py_end = np.sum(data["boron_py_end"]*data["boron_w_end"])
    boron_total_pz_end = np.sum(data["boron_pz_end"]*data["boron_w_end"])
    alpha_total_px_end = np.sum(data["alpha_px_end"]*data["alpha_w_end"])
    alpha_total_py_end = np.sum(data["alpha_py_end"]*data["alpha_w_end"])
    alpha_total_pz_end = np.sum(data["alpha_pz_end"]*data["alpha_w_end"])
    total_px_start = proton_total_px_start + boron_total_px_start
    total_py_start = proton_total_py_start + boron_total_py_start
    total_pz_start = proton_total_pz_start + boron_total_pz_start
    total_px_end = proton_total_px_end + boron_total_px_end + alpha_total_px_end
    total_py_end = proton_total_py_end + boron_total_py_end + alpha_total_py_end
    total_pz_end = proton_total_pz_end + boron_total_pz_end + alpha_total_pz_end
    ## Absolute tolerance is needed because sometimes the initial momentum is exactly 0
    assert(is_close(total_px_start, total_px_end, atol=1.e-15))
    assert(is_close(total_py_start, total_py_end, atol=1.e-15))
    assert(is_close(total_pz_start, total_pz_end, atol=1.e-15))

def check_id(data):
    ## Check that all created particles have unique id + cpu identifier (two particles with
    ## different cpu can have the same id)
    complex_id = data["alpha_id_end"] + 1j*data["alpha_cpu_end"]
    assert(complex_id.shape == np.unique(complex_id).shape)

def basic_product_particles_check(data):
    ## For each nuclear fusion reaction in the code, we create 6 alpha macroparticles. So the
    ## total number of alpha macroparticles must be a multiple of 6.
    num_alpha = data["alpha_w_end"].shape[0]
    assert(num_alpha%6 == 0)
    ## The weight of the 6 macroparticles coming from a single fusion event should be the same.
    ## We verify this here.
    assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][1::6]))
    assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][2::6]))
    assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][3::6]))
    assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][4::6]))
    assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][5::6]))
    ## When we create 6 macroparticles, the first has the exact same momentum as the second, the
    ## third has the same as the fourth and the fifth has the same as the sixth. We verify this
    ## here
    assert(np.array_equal(data["alpha_px_end"][::6], data["alpha_px_end"][1::6]))
    assert(np.array_equal(data["alpha_py_end"][::6], data["alpha_py_end"][1::6]))
    assert(np.array_equal(data["alpha_pz_end"][::6], data["alpha_pz_end"][1::6]))
    assert(np.array_equal(data["alpha_px_end"][2::6], data["alpha_px_end"][3::6]))
    assert(np.array_equal(data["alpha_py_end"][2::6], data["alpha_py_end"][3::6]))
    assert(np.array_equal(data["alpha_pz_end"][2::6], data["alpha_pz_end"][3::6]))
    assert(np.array_equal(data["alpha_px_end"][4::6], data["alpha_px_end"][5::6]))
    assert(np.array_equal(data["alpha_py_end"][4::6], data["alpha_py_end"][5::6]))
    assert(np.array_equal(data["alpha_pz_end"][4::6], data["alpha_pz_end"][5::6]))

def generic_check(data):
    # Conservation and sanity checks shared by all five tests.
    check_particle_number_conservation(data)
    check_energy_conservation(data)
    check_momentum_conservation(data)
    check_id(data)
    basic_product_particles_check(data)

def check_isotropy(data, relative_tolerance):
    ## Checks that the alpha particles are emitted isotropically
    average_px_sq = np.average(data["alpha_px_end"]*data["alpha_px_end"])
    average_py_sq = np.average(data["alpha_py_end"]*data["alpha_py_end"])
    average_pz_sq = np.average(data["alpha_pz_end"]*data["alpha_pz_end"])
    assert(is_close(average_px_sq, average_py_sq, rtol = relative_tolerance))
    assert(is_close(average_px_sq, average_pz_sq, rtol = relative_tolerance))

def astrophysical_factor_lowE(E):
    ## E is in keV
    ## Returns astrophysical factor in MeV b using the low energy fit in the range E < 400 keV
    ## described in equation (2) of <NAME> and <NAME>, Nuclear Fusion, 40, 865 (2000)
    C0 = 197.
    C1 = 0.24
    C2 = 2.31e-4
    AL = 1.82e4
    EL = 148.
    dEL = 2.35
    return C0 + C1*E + C2*E**2 + AL/((E-EL)**2 + dEL**2)

def astrophysical_factor_midE(E):
    ## E is in keV
    ## Returns astrophysical factor in MeV b using the mid energy fit in the range
    ## 400 keV < E < 642 keV described in equation (3) of <NAME> and <NAME>,
    ## Nuclear Fusion, 40, 865 (2000)
    D0 = 330.
    D1 = 66.1
    D2 = -20.3
    D5 = -1.58
    E_400 = 400.
    E_100 = 100.
    E_norm = (E - E_400)/E_100
    return D0 + D1*E_norm + D2*E_norm**2 + D5*E_norm**5

def astrophysical_factor_highE(E):
    ## E is in keV
    ## Returns astrophysical factor in MeV b using the high energy fit in the range
    ## 642 keV < E < 3500 keV described in equation (4) of <NAME> and <NAME>,
    ## Nuclear Fusion, 40, 865 (2000)
    A0 = 2.57e6
    A1 = 5.67e5
    A2 = 1.34e5
    A3 = 5.68e5
    E0 = 581.3
    E1 = 1083.
    E2 = 2405.
    E3 = 3344.
    dE0 = 85.7
    dE1 = 234.
    dE2 = 138.
    dE3 = 309.
    B = 4.38
    return A0/((E-E0)**2 + dE0**2) + A1/((E-E1)**2 + dE1**2) + \
           A2/((E-E2)**2 + dE2**2) + A3/((E-E3)**2 + dE3**2) + B

def astrophysical_factor(E):
    ## E is in keV
    ## Returns astrophysical factor in MeV b using the fits described in <NAME>
    ## and <NAME>, Nuclear Fusion, 40, 865 (2000)
    conditions = [E <= 400, E <= 642, E > 642]
    choices = [astrophysical_factor_lowE(E),
               astrophysical_factor_midE(E),
               astrophysical_factor_highE(E)]
    return np.select(conditions, choices)

def pb_cross_section_buck_fit(E):
    ## E is in MeV
    ## Returns cross section in b using a power law fit of the data presented in Buck et al.,
    ## Nuclear Physics A, 398(2), 189-202 (1983) in the range E > 3.5 MeV.
    E_start_fit = 3.5
    ## Cross section at E = E_start_fit = 3.5 MeV
    cross_section_start_fit = 0.2168440845211521
    slope_fit = -2.661840717596765
    return cross_section_start_fit*(E/E_start_fit)**slope_fit

def pb_cross_section(E):
    ## E is in keV
    ## Returns cross section in b using the fits described in <NAME> and <NAME>,
    ## Nuclear Fusion, 40, 865 (2000) for E < 3.5 MeV and a power law fit of the data presented in
    ## Buck et al., Nuclear Physics A, 398(2), 189-202 (1983) for E > 3.5 MeV.
    E_MeV = E/1.e3
    conditions = [E <= 3500, E > 3500]
    choices = [astrophysical_factor(E)/E_MeV * np.exp(-np.sqrt(E_Gamow_MeV / E_MeV)),
               pb_cross_section_buck_fit(E_MeV)]
    return np.select(conditions, choices)

def E_com_to_p_sq_com(m1, m2, E):
    ## E is the total (kinetic+mass) energy of a two particle (with mass m1 and m2) system in
    ## its center of mass frame, in J.
    ## Returns the square norm of the momentum of each particle in that frame.
    return E**2/(4.*scc.c**2) - (m1**2 + m2**2)*scc.c**2/2. + \
           scc.c**6/(4.*E**2)*((m1**2 - m2**2)**2)

def compute_relative_v_com(E):
    ## E is the kinetic energy of proton+boron in the center of mass frame, in keV
    ## Returns the relative velocity between proton and boron in this frame, in m/s
    E_J = E*keV_to_Joule + (m_p + m_b)*scc.c**2
    p_sq = E_com_to_p_sq_com(m_p, m_b, E_J)
    p = np.sqrt(p_sq)
    gamma_p = np.sqrt(1. + p_sq / (m_p*scc.c)**2)
    gamma_b = np.sqrt(1. + p_sq / (m_b*scc.c)**2)
    v_p = p/(gamma_p*m_p)
    v_b = p/(gamma_b*m_b)
    return v_p+v_b

def expected_alpha_weight_com(E_com, proton_density, boron_density, dV, dt):
    ## Computes expected number of produced alpha particles as a function of energy E_com in the
    ## center of mass frame. E_com is in keV.
    # NOTE(review): the parameters dV and dt intentionally shadow the
    # module-level dV_* and dt constants; callers pass those in explicitly.
    assert(np.all(E_com>=0))
    ## Case E_com == 0 is handled manually to avoid division by zero
    conditions = [E_com == 0, E_com > 0]
    ## Necessary to avoid division by 0 warning when pb_cross_section is evaluated
    E_com_never_zero = np.clip(E_com, 1.e-15, None)
    choices = [0., pb_cross_section(E_com_never_zero)*compute_relative_v_com(E_com_never_zero)]
    sigma_times_vrel = np.select(conditions, choices)
    ## Factor 3 is here because each fusion reaction produces 3 alphas
    return 3.*proton_density*boron_density*sigma_times_vrel*barn_to_square_meter*dV*dt

def check_macroparticle_number(data, fusion_probability_target_value, num_pair_per_cell):
    ## Checks that the number of macroparticles is as expected for the first and second tests
    ## The first slice 0 < z < 1 does not contribute to alpha creation
    numcells = dV_total - dV_slice
    ## In these tests, the fusion_multiplier is so high that the fusion probability per pair is
    ## equal to the parameter fusion_probability_target_value
    fusion_probability_per_pair = fusion_probability_target_value
    expected_fusion_number = numcells*num_pair_per_cell*fusion_probability_per_pair
    ## Each fusion event produces 6 alpha macroparticles
    expected_macroparticle_number = 6.*expected_fusion_number
    std_macroparticle_number = 6.*np.sqrt(expected_fusion_number)
    actual_macroparticle_number = data["alpha_w_end"].shape[0]
    # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions
    assert(is_close(actual_macroparticle_number, expected_macroparticle_number,
                    rtol = 0., atol = 5.*std_macroparticle_number))
    ## used in subsequent function
    return expected_fusion_number

def p_sq_boron_frame_to_E_COM_frame(p_proton_sq):
    # Takes the proton square norm of the momentum in the boron rest frame and returns the total
    # kinetic energy in the center of mass frame. Everything is in SI units.
    # Total (kinetic + mass) energy in lab frame
    E_lab = np.sqrt(p_proton_sq*scc.c**2 + (m_p*scc.c**2)**2) + m_b*scc.c**2
    # Use invariant E**2 - p**2c**2 of 4-momentum norm to compute energy in center of mass frame
    E_com = np.sqrt(E_lab**2 - p_proton_sq*scc.c**2)
    # Corresponding kinetic energy
    # NOTE(review): scc.m_p below is the same value as the module-level m_p.
    E_com_kin = E_com - (m_b+scc.m_p)*scc.c**2
    return E_com_kin

def p_sq_to_kinetic_energy(p_sq, m):
    ## Returns the kinetic energy of a particle as a function of its squared momentum.
    ## Everything is in SI units.
    return np.sqrt(p_sq*scc.c**2 + (m*scc.c**2)**2) - (m*scc.c**2)

def compute_E_com1(data):
    ## Computes kinetic energy (in Joule) in the center of frame for the first test
    ## Square norm of the momentum of proton/boron as a function of cell number in z direction
    p_sq = 2.*m_reduced*(Energy_step*np.arange(size_z)**2)
    return p_sq_to_kinetic_energy(p_sq, m_b) + p_sq_to_kinetic_energy(p_sq, m_p)

def compute_E_com2(data):
    ## Computes kinetic energy (in Joule) in the center of frame for the second test
    ## Square norm of the momentum of the proton as a function of cell number in z direction
    p_proton_sq = 2.*m_p*(Energy_step*np.arange(size_z)**2)
    return p_sq_boron_frame_to_E_COM_frame(p_proton_sq)

def check_alpha_yield(data, expected_fusion_number, E_com, proton_density, boron_density):
    ## Checks that the fusion yield is as expected for the first and second tests.
    ## Proton and boron densities are in m^-3.
    alpha_weight_theory = expected_alpha_weight_com(E_com/keV_to_Joule, proton_density,
                                                    boron_density, dV_slice, dt)
    alpha_weight_simulation = np.histogram(data["alpha_z_end"], bins=size_z, range=(0, size_z),
                                           weights = data["alpha_w_end"])[0]
    ## -1 is here because the first slice 0 < z < 1 does not contribute to alpha creation
    expected_fusion_number_per_slice = expected_fusion_number/(size_z-1)
    relative_std_alpha_weight = 1./np.sqrt(expected_fusion_number_per_slice)
    # 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions
    assert(np.all(is_close(alpha_weight_theory, alpha_weight_simulation,
                           rtol = 5.*relative_std_alpha_weight)))

def check_initial_energy1(data, E_com):
    ## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process
    ## takes place in two steps:
    ## (1): proton + boron 11 -> alpha + beryllium 8
    ## (2): beryllium 8 -> alpha + alpha
    ## The alpha generated in the first step (labeled alpha1) generally has a different initial
    ## energy distribution than the alphas generated in the second step (labeled alpha2 and
    ## alpha3).
    ## In the first test, we are in the center of mass frame. Therefore, the momentum of alpha1 is
    ## entirely determined by the energy in the center of mass frame, so we check in this function
    ## that the energy of the alpha1 macroparticles is as expected. On the other hand, the energy
    ## of alpha2 and alpha3 follows a continuous distribution within a given range. In this test,
    ## we check that this range is as expected by comparing the maximum and minimum energy of the
    ## obtained macroparticles to the theoretical maximum and minimum.
    ## Note that in the simulations, 6 macroparticles are generated during for each fusion event.
    ## The first and second macroparticles are alpha1 particles. The third and fourth are alpha2.
    ## The fifth and sixth are alpha3.
    energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a)
    z_alpha = data["alpha_z_end"]
    # Loop over all slices (i.e.
cells in the z direction) for slice_number in range(1, size_z): ## Kinetic energy in the lab frame before fusion E_kinetic_com_before = E_com[slice_number] ## Total (kinetic + mass) energy in the lab frame after ## proton + boron 11 -> alpha + beryllium 8 E_total_com_after = E_kinetic_com_before + E_fusion + (m_a + m_be)*scc.c**2 ## Corresponding momentum norm squared of alpha1/beryllium p_sq_after = E_com_to_p_sq_com(m_a, m_be, E_total_com_after) ## Corresponding kinetic energy for alpha1 energy_alpha1_theory = p_sq_to_kinetic_energy(p_sq_after, m_a) ## Corresponding kinetic energy for beryllium energy_beryllium_theory = p_sq_to_kinetic_energy(p_sq_after, m_be) ## Corresponding kinetic energy for alpha2 + alpha3 after beryllium decay energy_alpha2_plus_3_theory = energy_beryllium_theory + E_decay ## Compute the theoretical maximum and minimum energy of alpha2 and alpha3. This ## calculation is done nonrelativistically, by noting that the maximum (minimum) energy ## corresponds to an alpha emitted exactly in the (opposite) direction of the beryllium ## in the center of mass frame. This calculation involves solving a polynomial equation of ## order 2 in p_alpha23. 
max_p_alpha23 = 0.5*(np.sqrt(p_sq_after) + \ np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after)) min_p_alpha23 = 0.5*(np.sqrt(p_sq_after) - \ np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after)) max_energy_alpha23 = max_p_alpha23**2/(2.*m_a) min_energy_alpha23 = min_p_alpha23**2/(2.*m_a) ## Get the energy of all alphas in the slice energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \ (z_alpha < (slice_number + 1))] ## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice energy_alpha1_simulation = energy_alpha_slice[::6] ## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice energy_alpha2_simulation = energy_alpha_slice[2::6] ## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice energy_alpha3_simulation = energy_alpha_slice[4::6] assert(np.all(is_close(energy_alpha1_simulation, energy_alpha1_theory, rtol=5.e-8))) assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=1.e-2)) assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=1.e-2)) assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=1.e-2)) assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=1.e-2)) def check_initial_energy2(data): ## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process ## takes place in two steps: ## (1): proton + boron 11 -> alpha + beryllium 8 ## (2): beryllium 8 -> alpha + alpha ## The alpha generated in the first step (labeled alpha1) generally has a different initial ## energy distribution than the alphas generated in the second step (labeled alpha2 and ## alpha3). ## In the second test, we are in the boron rest frame. In this case, the momentum of each alpha ## follows a continuous distribution within a given range. 
In this function, we verify that ## this range is as expected by comparing the maximum and minimum energy of the obtained ## macroparticles to the theoretical maximum and minimum. Be aware that the range for alpha1 ## is not the same as the range for alpha2 and alpha3 (typically alpha1 particles will carry ## more energy). ## Note that in the simulations, 6 macroparticles are generated during for each fusion event. ## The first and second macroparticles are alpha1 particles. The third and fourth are alpha2. ## The fifth and sixth are alpha3. energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a) z_alpha = data["alpha_z_end"] # Loop over all slices (i.e. cells in the z direction) for slice_number in range(1, size_z): ## For simplicity, all the calculations in this functino are done nonrelativistically ## Proton kinetic energy in the lab frame before fusion E_proton_nonrelativistic = Energy_step*slice_number**2 ## Corresponding square norm of proton momentum p_proton_sq = 2.*scc.m_p*E_proton_nonrelativistic ## Kinetic energy in the lab frame after ## proton + boron 11 -> alpha + beryllium 8 E_after_fusion = E_proton_nonrelativistic + E_fusion ## Compute the theoretical maximum and minimum energy of alpha1 in the lab frame. This ## calculation is done by noting that the maximum (minimum) energy corresponds to an alpha ## emitted exactly in the (opposite) direction of the proton in the lab frame. This ## calculation involves solving a polynomial equation of order 2 in p_alpha1. max_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) + \ np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \ (m_a/m_be + 1.) min_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) - \ np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \ (m_a/m_be + 1.) 
max_energy_alpha1 = max_p_alpha1**2/(2*m_a) min_energy_alpha1 = min_p_alpha1**2/(2*m_a) ## Corresponding max/min kinetic energy of Beryllium in the lab frame max_E_beryllium = E_after_fusion - min_energy_alpha1 min_E_beryllium = E_after_fusion - max_energy_alpha1 ## Corresponding max/min momentum square of Beryllium in the lab frame max_p_sq_beryllium = 2.*m_be*max_E_beryllium min_p_sq_beryllium = 2.*m_be*min_E_beryllium ## Corresponding max/min kinetic energy in the lab frame for alpha2 + alpha3 after ## Beryllium decay max_energy_alpha2_plus_3 = max_E_beryllium + E_decay min_energy_alpha2_plus_3 = min_E_beryllium + E_decay ## Compute the theoretical maximum and minimum energy of alpha2 and alpha3 in the lab ## frame. This calculation is done by noting that the maximum (minimum) energy corresponds ## to an alpha emitted exactly in the (opposite) direction of a beryllium with energy ## max_E_beryllium (min_E_beryllium). This calculation involves solving a polynomial ## equation of order 2 in p_alpha23. 
max_p_alpha23 = 0.5*(np.sqrt(max_p_sq_beryllium) + \ np.sqrt(4*m_a*max_energy_alpha2_plus_3 - max_p_sq_beryllium)) min_p_alpha23 = 0.5*(np.sqrt(min_p_sq_beryllium) - \ np.sqrt(4*m_a*min_energy_alpha2_plus_3 - min_p_sq_beryllium)) max_energy_alpha23 = max_p_alpha23**2/(2*m_a) min_energy_alpha23 = min_p_alpha23**2/(2*m_a) ## Get the energy of all alphas in the slice energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \ (z_alpha < (slice_number + 1))] ## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice energy_alpha1_simulation = energy_alpha_slice[::6] ## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice energy_alpha2_simulation = energy_alpha_slice[2::6] ## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice energy_alpha3_simulation = energy_alpha_slice[4::6] assert(is_close(np.amax(energy_alpha1_simulation), max_energy_alpha1, rtol=1.e-2)) assert(is_close(np.amin(energy_alpha1_simulation), min_energy_alpha1, rtol=1.e-2)) ## Tolerance is quite high below because we don't have a lot of alphas to produce good ## statistics and an event like alpha1 emitted exactly in direction of proton & alpha2 ## emitted exactly in direction opposite to Beryllium is somewhat rare. 
assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=2.5e-1)) assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=2.5e-1)) assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=2.5e-1)) assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=2.5e-1)) def check_xy_isotropy(data): ## Checks that the alpha particles are emitted isotropically in x and y average_px_sq = np.average(data["alpha_px_end"]*data["alpha_px_end"]) average_py_sq = np.average(data["alpha_py_end"]*data["alpha_py_end"]) average_pz_sq = np.average(data["alpha_pz_end"]*data["alpha_pz_end"]) assert(is_close(average_px_sq, average_py_sq, rtol = 5.e-2)) assert(average_pz_sq > average_px_sq) assert(average_pz_sq > average_py_sq) def sigmav_thermal_fit_lowE_nonresonant(T): ## Temperature T is in keV ## Returns the nonresonant average of cross section multiplied by relative velocity in m^3/s, ## in the range T <= 70 keV, as described by equation 9 of <NAME> and <NAME>, ## Nuclear Fusion, 40, 865 (2000). E0 = (E_Gamow_keV/4.)**(1./3.) * T**(2./3.) DE0 = 4.*np.sqrt(T*E0/3.) C0 = 197.*1.e3 C1 = 0.24*1.e3 C2 = 2.31e-4*1.e3 tau = 3.*E0/T Seff = C0*(1.+5./(12.*tau)) + C1*(E0+35./36.*T) + C2*(E0**2 + 89./36.*E0*T) ## nonresonant sigma times vrel, in barn meter per second sigmav_nr_bmps = np.sqrt(2*T*keV_to_Joule/m_reduced) * DE0*Seff/T**2 * np.exp(-tau) ## Return result in cubic meter per second return sigmav_nr_bmps*barn_to_square_meter def sigmav_thermal_fit_lowE_resonant(T): ## Temperature T is in keV ## Returns the resonant average of cross section multiplied by relative velocity in m^3/s, ## in the range T <= 70 keV, as described by equation 11 of <NAME> and <NAME>, ## Nuclear Fusion, 40, 865 (2000). return 5.41e-21 * np.exp(-148./T) / T**(3./2.) 
def sigmav_thermal_fit_lowE(T):
    ## Temperature T is in keV
    ## Returns the average of cross section multiplied by relative velocity in m^3/s, using the
    ## fits described in section 3.1 of Nevins and Swain, Nuclear Fusion, 40, 865 (2000).
    ## The fits are valid for T <= 70 keV.
    return sigmav_thermal_fit_lowE_nonresonant(T) + sigmav_thermal_fit_lowE_resonant(T)

def expected_alpha_thermal(T, proton_density, boron_density, dV, dt):
    ## Computes the expected number of produced alpha particles when the protons and borons follow
    ## a Maxwellian distribution with a temperature T, in keV. This uses the thermal fits described
    ## in Nevins and Swain, Nuclear Fusion, 40, 865 (2000).

    ## The fit used here is only valid in the range T <= 70 keV.
    assert((T >= 0) and (T <= 70))

    sigma_times_vrel = sigmav_thermal_fit_lowE(T)

    ## Factor 3 is here because each fusion event produces 3 alphas.
    return 3.*proton_density*boron_density*sigma_times_vrel*dV*dt

def check_thermal_alpha_yield(data):
    ## Checks that the total alpha yield in test3 matches the thermal-fit prediction
    ## to within 20%.
    Temperature = 44.        # keV
    proton_density = 1.e28   # m^-3
    boron_density = 5.e28    # m^-3

    alpha_weight_theory = expected_alpha_thermal(Temperature, proton_density,
                                                 boron_density, dV_total, dt)
    alpha_weight_simulation = np.sum(data["alpha_w_end"])

    assert(is_close(alpha_weight_theory, alpha_weight_simulation, rtol = 2.e-1))

def boron_remains(data):
    ## Returns True if any boron macroparticles remain at the end of the test.
    n_boron_left = data["boron_w_end"].shape[0]
    return (n_boron_left > 0)

def specific_check1(data):
    ## Checks specific to test 1 (collision in the center-of-mass frame).
    check_isotropy(data, relative_tolerance = 3.e-2)
    expected_fusion_number = check_macroparticle_number(data,
                                                       fusion_probability_target_value = 0.002,
                                                       num_pair_per_cell = 10000)
    E_com = compute_E_com1(data)
    check_alpha_yield(data, expected_fusion_number, E_com, proton_density = 1.,
                      boron_density = 1.)
    check_initial_energy1(data, E_com)

def specific_check2(data):
    ## Checks specific to test 2 (protons impinging on borons at rest).
    check_xy_isotropy(data)
    ## Only 900 particles pairs per cell here because we ignore the 10% of protons that are at rest
    expected_fusion_number = check_macroparticle_number(data,
                                                       fusion_probability_target_value = 0.02,
                                                       num_pair_per_cell = 900)
    E_com = compute_E_com2(data)
    check_alpha_yield(data, expected_fusion_number, E_com, proton_density = 1.e20,
                      boron_density = 1.e26)
    check_initial_energy2(data)

def specific_check3(data):
    ## Checks specific to test 3 (thermal Maxwellian populations).
    check_isotropy(data, relative_tolerance = 1.e-1)
    check_thermal_alpha_yield(data)

def specific_check4(data):
    ## In test 4, the boron initial density is so small that all borons should have fused within a
    ## timestep dt. We thus assert that no boron remains at the end of the simulation.
    assert(not boron_remains(data))

def specific_check5(data):
    ## Test 5 is similar to test 4, except that the parameter fusion_probability_threshold is
    ## increased to the point that we should severely underestimate the fusion yield. Consequently,
    ## there should still be borons at the end of the test, which we verify here.
    assert(boron_remains(data))

def check_charge_conservation(rho_start, rho_end):
    ## The fusion module only creates/removes particles; the charge density field must be
    ## unchanged to numerical precision.
    assert(np.all(is_close(rho_start, rho_end, rtol=2.e-11)))

def main():
    ## Entry point: load the initial and final plotfiles given on the command line, then run
    ## the generic and test-specific checks for each of the 5 fusion tests, and finally verify
    ## charge conservation and the regression checksum.
    filename_end = sys.argv[1]
    filename_start = filename_end[:-4] + '0000'
    ds_end = yt.load(filename_end)
    ds_start = yt.load(filename_start)
    ad_end = ds_end.all_data()
    ad_start = ds_start.all_data()
    field_data_end = ds_end.covering_grid(level=0, left_edge=ds_end.domain_left_edge,
                                          dims=ds_end.domain_dimensions)
    field_data_start = ds_start.covering_grid(level=0, left_edge=ds_start.domain_left_edge,
                                              dims=ds_start.domain_dimensions)

    ## Explicit dispatch table instead of eval() on a dynamically built string: clearer,
    ## statically checkable, and avoids the eval anti-pattern.
    specific_checks = (specific_check1, specific_check2, specific_check3,
                       specific_check4, specific_check5)

    for i, specific_check in enumerate(specific_checks, start=1):
        proton_species = "proton" + str(i)
        boron_species = "boron" + str(i)
        alpha_species = "alpha" + str(i)
        data = {}
        add_species_to_dict(ad_start, data, proton_species, "proton", "start")
        add_species_to_dict(ad_start, data, boron_species, "boron", "start")
        add_species_to_dict(ad_end, data, proton_species, "proton", "end")
        add_species_to_dict(ad_end, data, boron_species, "boron", "end")
        add_species_to_dict(ad_end, data, alpha_species, "alpha", "end")

        # General checks that are performed for all tests
        generic_check(data)

        # Checks that are specific to test number i
        specific_check(data)

    rho_start = field_data_start["rho"].to_ndarray()
    rho_end = field_data_end["rho"].to_ndarray()
    check_charge_conservation(rho_start, rho_end)

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, filename_end)

if __name__ == "__main__":
    main()
StarcoderdataPython
1663492
import os

# Resolve all paths relative to this file so the package works from any working directory.
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(CURRENT_DIR, 'data')
MODELS_DIR = os.path.join(CURRENT_DIR, 'models')


def _environment(size, k):
    """Build the configuration entry for one square gridworld.

    Parameters
    ----------
    size : int
        Edge length of the square gridworld (8, 16 or 28).
    k : int
        Number of value-iteration recurrences used for this grid size.

    Returns
    -------
    dict
        Paths to the .mat grid file, pickled train/test data, the pretrained
        VIN network, plus the input image shape (2, size, size), the image
        size (size, size) and k.
    """
    return dict(
        mat_file=os.path.join(DATA_DIR, 'gridworld_%d.mat' % size),
        train_data_file=os.path.join(DATA_DIR, 'gridworld-%d-train.pickle' % size),
        test_data_file=os.path.join(DATA_DIR, 'gridworld-%d-test.pickle' % size),
        pretrained_network_file=os.path.join(MODELS_DIR, 'pretrained-VIN-%d.pickle' % size),
        input_image_shape=(2, size, size),
        image_size=(size, size),
        k=k,
    )


# Keyed by grid size. The three entries previously differed only in the grid
# size and k, so they are now generated from a single template to avoid
# copy/paste drift.
environments = {size: _environment(size, k)
                for size, k in ((8, 10), (16, 20), (28, 36))}
StarcoderdataPython
1742567
#------------------------------------------------------------------------------
# Copyright (c) 2013-2020, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
""" A collection of common compiler functionality.

"""
import ast
from types import CodeType

import bytecode as bc
from atom.api import Str, Typed

# NOTE(review): PY38, Str, Typed and ASTVisitor are not referenced in the portion of this
# module visible here -- presumably used further down; do not remove without checking.
from ..compat import PY38
from .code_generator import CodeGenerator
from .enaml_ast import (
    AliasExpr, ASTVisitor, Binding, ChildDef, EnamlDef, StorageExpr, Template,
    TemplateInst, PythonExpression, PythonModule, FuncDef
)

#: The name of the compiler helpers in the global scope.
COMPILER_HELPERS = '__compiler_helpers'

#: The name of the compiler helpers in the fast locals.
C_HELPERS = '_[helpers]'

#: The name of the scope key in the fast locals.
SCOPE_KEY = '_[scope_key]'

#: The name of the node list in the fast locals.
NODE_LIST = '_[node_list]'

#: The name of the globals map in the fast locals.
F_GLOBALS = '_[f_globals]'

#: The name of the template parameter tuple.
T_PARAMS = '_[t_params]'

#: The name of the stored template const values.
T_CONSTS = '_[t_consts]'

#: The global name of the template map in a module.
TEMPLATE_MAP = '_[template_map]'

#: The name of the unpack mapping for the template instance.
UNPACK_MAP = '_[unpack_map]'

#: A mapping of enaml ast node to compile(...) mode string.
COMPILE_MODE = {
    PythonExpression: 'eval',
    PythonModule: 'exec',
}

#: Ast nodes associated with comprehensions which uses a function call that we
#: will have to call in proper scope
_FUNC_DEF_NODES = (ast.Lambda, ast.ListComp, ast.DictComp, ast.SetComp)

#: Opcode used to create a function
_MAKE_FUNC = ("MAKE_FUNCTION",)


def unhandled_pragma(name, filename, lineno):
    """ Emit a warning for an unhandled pragma.

    Parameters
    ----------
    name : str
        The name of the unhandled pragma.

    filename : str
        The name of the file with the unhandled pragma.

    lineno : int
        The line number of the unhandled pragma.

    """
    import warnings
    msg = "unhandled pragma '%s'" % name
    warnings.warn_explicit(msg, SyntaxWarning, filename, lineno)


def warn_pragmas(node, filename):
    """ Emit a warning if there are any pragmas defined on the node.

    Parameters
    ----------
    node : ASTNode
        An enaml ast node which supports pragmas

    filename : str
        The filename for the node.

    """
    for pragma in node.pragmas:
        unhandled_pragma(pragma.command, filename, pragma.lineno)


def should_store_locals(node):
    """ Get whether or not a node should store its locals.

    A node must store its local scope if it has alias exprs,
    attribute bindings, or storage exprs with default bindings.

    Parameters
    ----------
    node : EnamlDef or ChildDef
        The ast node of interest.

    Returns
    -------
    result : bool
        True if instances of the enamldef should store their local
        scopes, False otherwise.

    """
    types = (AliasExpr, Binding, FuncDef)
    for item in node.body:
        if isinstance(item, types):
            return True
        if isinstance(item, StorageExpr) and item.expr is not None:
            return True
    return False


def count_nodes(node):
    """ Count the number of compiler nodes needed for the template.

    Parameters
    ----------
    node : Template
        The template node of interest.

    Returns
    -------
    result : int
        The number of compiler nodes needed for the template.

    """
    node_count = 0
    stack = [node]
    types = (EnamlDef, Template, ChildDef, TemplateInst)
    # Depth-first walk: only node-producing types contribute to the count
    # (and only they are guaranteed to carry a 'body' to recurse into).
    while stack:
        node = stack.pop()
        if isinstance(node, types):
            node_count += 1
            stack.extend(node.body)
    return node_count


def has_list_comp(pyast):
    """ Determine whether a Python expression has a list comprehension.

    This function is only used under Python 2.

    NOTE(review): the "Python 2" remark above looks vestigial in a
    Python-3-only codebase -- confirm whether this helper still has callers.

    Parameters
    ----------
    pyast : Expression
        The Python Expression ast of interest.

    Returns
    -------
    result : bool
        True if the ast includes a list comprehension, False otherwise.

    """
    for item in ast.walk(pyast):
        if isinstance(item, ast.ListComp):
            return True
    return False


def fetch_helpers(cg):
    """ Fetch the compiler helpers and store in fast locals.

    This function should be called once on a code generator before
    using the 'load_helper' function.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    """
    cg.load_global(COMPILER_HELPERS)
    cg.store_fast(C_HELPERS)


def fetch_globals(cg):
    """ Fetch the globals and store in fast locals.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    """
    cg.load_global('globals')
    cg.call_function()
    cg.store_fast(F_GLOBALS)


def load_helper(cg, name, from_globals=False):
    """ Load a compiler helper onto the TOS.

    The caller should have already invoked the 'fetch_locals' function
    for the code generator before using this function, unless the
    'from_globals' keyword is set to True.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    name : str
        The name of the compiler helper to load onto the TOS.

    from_globals : bool, optional
        If True, the helpers will be loaded from the globals instead of
        the fast locals. The default is False.

    """
    if from_globals:
        cg.load_global(COMPILER_HELPERS)
    else:
        cg.load_fast(C_HELPERS)
    cg.load_const(name)
    cg.binary_subscr()


def load_name(cg, name, local_names):
    """ Load a name onto the TOS.

    If the name exists in the local names set, it is loaded from
    the fast locals. Otherwise, it is loaded from the globals.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    name : str
        The name of the value to load onto the TOS.

    local_names : set
        The set of fast local names available to the code object.

    """
    if name in local_names:
        cg.load_fast(name)
    else:
        cg.load_global(name)


def load_typename(cg, node, local_names):
    """ Load a dotted name onto the TOS.

    If the name exists in the local names set, it is loaded from
    the fast locals. Otherwise, it is loaded from the globals.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : ast.Name or ast.Attribute
        The name or attribute node to load.

    local_names : set
        The set of fast local names available to the code object.

    """
    if isinstance(node, ast.Name):
        load_name(cg, node.id, local_names)
    elif isinstance(node, ast.Attribute):
        # Recurse on the value, then fetch the trailing attribute.
        load_typename(cg, node.value, local_names)
        cg.load_attr(node.attr)
    else:
        raise TypeError('Unsupported node %s' % type(node))


def make_node_list(cg, count):
    """ Create the node list and store in fast locals.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    count : int
        The required size of the node list.

    """
    # Emits the equivalent of: NODE_LIST = [None] * count
    cg.load_const(None)
    cg.build_list(1)
    cg.load_const(count)
    cg.binary_multiply()
    cg.store_fast(NODE_LIST)


def store_node(cg, index):
    """ Store the node on TOS into the node list.

    The caller should ensure that NODE_LIST exists in fast locals.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    index : int
        The index at which to store the node in the node list.

    """
    cg.load_fast(NODE_LIST)
    cg.load_const(index)
    cg.store_subscr()


def load_node(cg, index):
    """ Load the node at the given index in the node list.

    The caller should ensure that NODE_LIST exists in fast locals.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    index : int
        The index of the parent node in the node list.

    """
    cg.load_fast(NODE_LIST)
    cg.load_const(index)
    cg.binary_subscr()


def append_node(cg, parent, index):
    """ Append the node on the TOS as a child of the specified node.

    The caller should ensure that NODE_LIST exists in fast locals.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    parent : int
        The index of the parent node in the node list.

    index : int
        The index of the target node in the node list.

    """
    # Emits the equivalent of: NODE_LIST[parent].children.append(NODE_LIST[index])
    load_node(cg, parent)
    cg.load_attr('children')
    cg.load_attr('append')
    load_node(cg, index)
    cg.call_function(1)
    cg.pop_top()


def safe_eval_ast(cg, node, name, lineno, local_names):
    """ Safe eval a Python ast node.

    This method will eval the python code represented by the ast
    in the local namespace. If the code would have the side effect
    of storing a value in the namespace, then the expression will
    be evaluated in it's own namespace.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : ast.Expression
        The Python expression ast node of interest.

    name : str
        The name to use any internal code object.

    lineno : int
        The line number to use for any internal code object.

    local_names : set
        The set of fast local names available to the code object.

    """
    expr_cg = CodeGenerator()
    expr_cg.insert_python_expr(node)
    expr_cg.rewrite_to_fast_locals(local_names)
    cg.code_ops.extend(expr_cg.code_ops)


def analyse_globals_and_func_defs(pyast):
    """Collect the explicit 'global' variable names and check for function
    definitions

    Functions definition can exist if a comprehension (list, dict, set) is
    present or if a lambda function exists.

    """
    global_vars = set()
    has_def = False
    for node in ast.walk(pyast):
        if isinstance(node, ast.Global):
            global_vars.update(node.names)
        elif isinstance(node, _FUNC_DEF_NODES):
            has_def = True

    return global_vars, has_def


def rewrite_globals_access(code, global_vars):
    """Update the function code global loads

    This will rewrite the function to convert each LOAD_GLOBAL opcode
    into a LOAD_NAME opcode, unless the associated name is known to have
    been made global via the 'global' keyword.

    """
    for idx, instr in enumerate(code):
        # getattr with a default skips non-Instr items (labels, etc.) safely.
        if (getattr(instr, "name", None) == "LOAD_GLOBAL" and
                instr.arg not in global_vars):
            instr.name = "LOAD_NAME"


def run_in_dynamic_scope(code, global_vars):
    """Wrap functions defined in operators/decl func to run them in the proper
    dynamic scope.

    This will also rewrite the function to convert each LOAD_GLOBAL opcode
    into a LOAD_NAME opcode, unless the associated name is known to have
    been made global via the 'global' keyword.

    Parameters
    ----------
    code : bytecode.Bytecode
        Code object in which functions are defined.

    """
    # Code generator used to modify the bytecode
    cg = CodeGenerator()
    fetch_helpers(cg)

    # Scan all ops to detect function call after GET_ITER
    for instr in code:
        if not isinstance(instr, bc.Instr):
            cg.code_ops.append(instr)
            continue
        i_name, i_arg = instr.name, instr.arg
        if isinstance(i_arg, CodeType):
            # Allow to pass the dynamic scope as locals. There is no need
            # to copy it as internal variables are stored in fast locals
            # and hence does not affect the scope content.
            inner = bc.Bytecode.from_code(i_arg)
            inner.flags ^= (inner.flags & bc.CompilerFlags.NEWLOCALS)
            # Set the NESTED flag since even though we may have obtained the
            # outer code from an expr it will run as a function.
            inner.flags |= bc.CompilerFlags.NESTED
            # Recurse so nested code objects (e.g. nested comprehensions)
            # are rewritten as well.
            run_in_dynamic_scope(inner, global_vars)
            inner.update_flags()
            i_arg = inner.to_code()
        elif any(i_name == make_fun_op for make_fun_op in _MAKE_FUNC):
            cg.code_ops.append(bc.Instr(i_name, i_arg))  # func
            load_helper(cg, 'wrap_func')                 # func -> wrap
            cg.rot_two()                                 # wrap -> func
            cg.load_global('__scope__')                  # wrap -> func -> scope
            cg.call_function(2)                          # wrapped
            continue
        cg.code_ops.append(bc.Instr(i_name, i_arg))

    # Replace the bytecode in place with the rewritten ops.
    del code[:]
    code.extend(cg.code_ops)
    rewrite_globals_access(code, global_vars)
    code.update_flags()


def gen_child_def_node(cg, node, local_names):
    """ Generate the code to create the child def compiler node.

    The caller should ensure that SCOPE_KEY is present in the fast
    locals of the code object.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : ChildDef
        The enaml ast node of interest.

    local_names : set
        The set of local names available to the code object.

    """
    # Validate the type of the child
    load_name(cg, node.typename, local_names)
    with cg.try_squash_raise():
        cg.dup_top()
        load_helper(cg, 'validate_declarative')
        cg.rot_two()                            # base -> helper -> base
        cg.call_function(1)                     # base -> retval
        cg.pop_top()                            # base

    # Subclass the child class if needed
    store_types = (StorageExpr, AliasExpr, FuncDef)
    if any(isinstance(item, store_types) for item in node.body):
        # Create the class code
        cg.load_build_class()
        cg.rot_two()                    # builtins.__build_class_ -> base
        class_cg = CodeGenerator()
        class_cg.filename = cg.filename
        class_cg.name = node.typename
        class_cg.firstlineno = node.lineno
        class_cg.set_lineno(node.lineno)
        class_cg.load_name('__name__')
        class_cg.store_name('__module__')
        class_cg.load_const(node.typename)
        class_cg.store_name('__qualname__')
        class_cg.load_const(None)
        class_cg.return_value()
        class_code = class_cg.to_code()
        cg.load_const(class_code)
        cg.load_const(None)  # XXX better qualified name
        cg.make_function()
        cg.rot_two()        # builtins.__build_class_ -> class_func -> base
        cg.load_const(node.typename)
        cg.rot_two()    # builtins.__build_class_ -> class_func -> class_name -> base
        cg.call_function(3)             # class

    # Build the declarative compiler node
    store_locals = should_store_locals(node)
    load_helper(cg, 'declarative_node')
    cg.rot_two()
    cg.load_const(node.identifier)
    cg.load_fast(SCOPE_KEY)
    cg.load_const(store_locals)  # helper -> class -> identifier -> key -> bool
    cg.call_function(4)                 # node


def gen_template_inst_node(cg, node, local_names):
    """ Generate the code to create a template inst compiler node.

    The caller should ensure that SCOPE_KEY is present in the fast
    locals of the code object.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : TemplateInst
        The enaml ast node of interest.

    local_names : set
        The set of local names available to the code object.

    """
    # Validate the type of the template.
    load_name(cg, node.name, local_names)
    with cg.try_squash_raise():
        cg.dup_top()
        load_helper(cg, 'validate_template')
        cg.rot_two()
        cg.call_function(1)
        cg.pop_top()

    # Load the arguments for the instantiation call.
    arguments = node.arguments
    for arg in arguments.args:
        safe_eval_ast(cg, arg.ast, node.name, arg.lineno, local_names)
    if arguments.stararg:
        arg = arguments.stararg
        safe_eval_ast(cg, arg.ast, node.name, arg.lineno, local_names)

    # Instantiate the template.
    argcount = len(arguments.args)
    varargs = bool(arguments.stararg)
    if varargs:
        cg.call_function_var(argcount)
    else:
        cg.call_function(argcount)

    # Validate the instantiation size, if needed.
    names = ()
    starname = ''
    identifiers = node.identifiers
    if identifiers is not None:
        names = tuple(identifiers.names)
        starname = identifiers.starname
        with cg.try_squash_raise():
            cg.dup_top()
            load_helper(cg, 'validate_unpack_size')
            cg.rot_two()
            cg.load_const(len(names))
            cg.load_const(bool(starname))
            cg.call_function(3)
            cg.pop_top()

    # Load and call the helper to create the compiler node
    load_helper(cg, 'template_inst_node')
    cg.rot_two()
    cg.load_const(names)
    cg.load_const(starname)
    cg.load_fast(SCOPE_KEY)
    cg.load_const(bool(node.body))
    cg.call_function(5)


def sanitize_operator_code(filename, mode, ast):
    """ Take special care of handling globals and function definitions.

    If an operator contains global access using the global keyword, those
    should keep using LOAD_GLOBAL while others should use LOAD_NAME to look-up
    the dynamic namespace. If the operator contains function definitions (i.e.
    comprehensions) those functions need to be called with access to the
    dynamic namespace.

    Parameters
    ----------
    filename : str
        Filename from which the ast originates from.

    mode : {"eval", "exec"}
        The mode to use to compile the AST.

    ast :
        Ast node to compile.

    """
    global_vars, has_defs = analyse_globals_and_func_defs(ast)

    # In mode exec, the body of the operator has been wrapped in a function def
    # after the compilation we extract the function code
    code = compile(ast, filename, mode=mode)
    if mode == 'exec':
        # The first code-object constant encountered is the wrapper function body.
        for instr in bc.Bytecode.from_code(code):
            i_arg = instr.arg
            if isinstance(i_arg, CodeType):
                code = i_arg
                break

    b_code = bc.Bytecode.from_code(code)
    if has_defs:
        run_in_dynamic_scope(b_code, global_vars)
    else:
        rewrite_globals_access(b_code, global_vars)

    return b_code.to_code()


def gen_template_inst_binding(cg, node, index):
    """ Generate the code for a template inst binding.

    The caller should ensure that UNPACK_MAP and F_GLOBALS are present
    in the fast locals of the code object.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : TemplateInstBinding
        The enaml ast node of interest.

    index : int
        The index of the template inst node in the node list.

    """
    op_node = node.expr
    mode = COMPILE_MODE[type(op_node.value)]
    # Make sure the operator handles properly globals and function definitions
    # (from comprehensions)
    code = sanitize_operator_code(cg.filename, mode, op_node.value.ast)
    with cg.try_squash_raise():
        cg.set_lineno(node.lineno)
        load_helper(cg, 'run_operator')
        load_node(cg, index)
        cg.load_fast(UNPACK_MAP)
        cg.load_const(node.name)
        cg.binary_subscr()
        cg.load_const(node.chain)
        cg.load_const(op_node.operator)
        cg.load_const(code)
        cg.load_fast(F_GLOBALS)
        cg.call_function(6)
        cg.pop_top()


def gen_operator_binding(cg, node, index, name):
    """ Generate the code for an operator binding.

    The caller should ensure that F_GLOBALS and NODE_LIST are present
    in the fast locals of the code object.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : OperatorExpr
        The enaml ast node of interest.

    index : int
        The index of the target node in the node list.

    name : str
        The attribute name to be bound.

    """
    mode = COMPILE_MODE[type(node.value)]
    # Make sure the operator handles properly globals and function definitions
    # (from comprehensions)
    code = sanitize_operator_code(cg.filename, mode, node.value.ast)
    with cg.try_squash_raise():
        cg.set_lineno(node.lineno)
        load_helper(cg, 'run_operator')
        load_node(cg, index)
        # For operators not in a template instance the scope_node and the node
        # are one and the same hence the dup_top.
        cg.dup_top()
        cg.load_const(name)
        cg.load_const(node.operator)
        cg.load_const(code)
        cg.load_fast(F_GLOBALS)
        cg.call_function(6)
        cg.pop_top()


def gen_alias_expr(cg, node, index):
    """ Generate the code for an alias expression.

    The caller should ensure that NODE_LIST is present in the fast
    locals of the code object.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : AliasExpr
        The enaml ast node of interest.

    index : int
        The index of the target node in the node list.

    """
    with cg.try_squash_raise():
        cg.set_lineno(node.lineno)
        load_helper(cg, 'add_alias')
        load_node(cg, index)
        cg.load_const(node.name)
        cg.load_const(node.target)
        cg.load_const(node.chain)
        cg.call_function(4)
        cg.pop_top()


def gen_storage_expr(cg, node, index, local_names):
    """ Generate the code for a storage expression.

    The caller should ensure that NODE_LIST is present in the fast
    locals of the code object.

    Parameters
    ----------
    cg : CodeGenerator
        The code generator with which to write the code.

    node : StorageExpr
        The enaml ast node of interest.

    index : int
        The index of the target node in the node list.

    local_names : set
        The set of fast local names available to the code object.
""" with cg.try_squash_raise(): cg.set_lineno(node.lineno) load_helper(cg, 'add_storage') load_node(cg, index) cg.load_const(node.name) if node.typename: load_typename(cg, node.typename, local_names) else: cg.load_const(None) cg.load_const(node.kind) cg.call_function(4) cg.pop_top() def _insert_decl_function(cg, funcdef): """ Create and place a declarative function on the TOS. This will rewrite the function to convert each LOAD_GLOBAL opcode into a LOAD_NAME opcode, unless the associated name was explicitly made global via the 'global' keyword. Parameters ---------- funcdef : ast node The python FunctionDef ast node. """ # collect the explicit 'global' variable names and check for the presence # of comprehensions (list, dict, set). global_vars, has_defs = analyse_globals_and_func_defs(funcdef) # generate the code object which will create the function if PY38: mod = ast.Module(body=[funcdef], type_ignores=[]) else: mod = ast.Module(body=[funcdef]) code = compile(mod, cg.filename, mode='exec') # convert to a bytecode object and remove the leading and # trailing ops: STORE_NAME LOAD_CONST RETURN_VALUE outer_ops = bc.Bytecode.from_code(code)[0:-3] # the stack now looks like the following: # ... # ... # LOAD_CONST (<code object>) # LOAD_CONST (qualified name) # MAKE_FUCTION (num defaults) // TOS # extract the inner code object which represents the actual # function code and update its flags inner = bc.Bytecode.from_code(outer_ops[-3].arg) inner.flags ^= (inner.flags & bc.CompilerFlags.NEWLOCALS) # On Python 3 all comprehensions use a function call. To avoid scoping # issues the function call is run in the dynamic scope. if has_defs: run_in_dynamic_scope(inner, global_vars) else: rewrite_globals_access(inner, global_vars) outer_ops[-3].arg = inner.to_code() # inline the modified code ops into the code generator cg.code_ops.extend(outer_ops) def gen_decl_funcdef(cg, node, index): """ Generate the code for a declarative function definition. 
The caller should ensure that NODE_LIST is present in the fast locals of the code object. Parameters ---------- cg : CodeGenerator The code generator with which to write the code. node : FuncDef The enaml ast node of interest. index : int The index of the target node in the node list. """ with cg.try_squash_raise(): cg.set_lineno(node.lineno) load_helper(cg, 'add_decl_function') load_node(cg, index) _insert_decl_function(cg, node.funcdef) cg.load_const(node.is_override) cg.call_function(3) cg.pop_top() class CompilerBase(ASTVisitor): """ A base class for defining compilers. """ #: The filename for the code being generated. filename = Str() #: The code generator to use for this compiler. code_generator = Typed(CodeGenerator) def _default_code_generator(self): """ Create the default code generator instance. """ return CodeGenerator(filename=self.filename)
StarcoderdataPython
3245373
import numpy as np
from sklearn import metrics


def auc(t, p, **kwargs):
    """Return {'auc': ...} for a single binary task.

    Two-dimensional inputs carry the score of interest in column 0.
    """
    if p.ndim == 2 and t.ndim == 2:
        p, t = p[:, 0], t[:, 0]
    return {'auc': metrics.roc_auc_score(t, p)}


def multi_auc(t, p, **kwargs):
    """Return per-column ROC-AUC scores keyed 'auc0', 'auc1', ...

    Both t and p have shape (N, C).
    """
    scores = {}
    for col in range(p.shape[-1]):
        try:
            value = metrics.roc_auc_score(t[:, col], p[:, col])
        except ValueError:
            # A column containing a single class cannot be scored; report 0.
            value = 0
        scores[f'auc{col}'] = value
    return scores


def multi_loss(t, p, **kwargs):
    """Return per-column binary log-loss keyed 'loss0', 'loss1', ..."""
    return {
        f'loss{col}': metrics.log_loss(t[:, col], p[:, col], labels=[0, 1], eps=1e-6)
        for col in range(p.shape[-1])
    }


def accuracy(t, p, **kwargs):
    """Return top-1 accuracy of class probabilities p against labels t."""
    predicted = np.argmax(p, axis=1)
    return {'accuracy': np.mean(predicted == t)}


def pos_accuracy(t, p, **kwargs):
    """Return accuracy over positive samples only.

    p has shape (N, 4); t is 0-3 (0=no PE, 1=right, 2=left, 3=central).
    Class 0 is excluded from the argmax, so predictions are in 1..3.
    """
    mask = t > 0
    predicted = np.argmax(p[mask][:, 1:], axis=1) + 1
    return {'pos_accuracy': np.mean(predicted == t[mask])}
StarcoderdataPython
3351949
import argparse
import glob
import logging
import os
import shutil

logging.basicConfig(format='[%(asctime)s] %(levelname)-8s | %(message)s',
                    datefmt='%d-%b-%Y %H:%M:%S',
                    level=logging.INFO)
log = logging.getLogger()

# Command line interface: directory to scan, optional extension filter, and
# the two line-aligned string-list files.
args_parser = argparse.ArgumentParser(description='Converts strings in files.')
args_parser.add_argument('-p', dest='path', default='files',
                         help='The path to the directory that contains the files to scan (default is \'files\')')
args_parser.add_argument('-e', dest='file_extension', default='',
                         help='The extension of the files to scan for string replacement (scans everything by default)')
args_parser.add_argument('-b', dest='string_list_before', default='before.txt',
                         help='The file that contains strings to convert (each string in its own line)')
args_parser.add_argument('-a', dest='string_list_after', default='after.txt',
                         help='The file that contains strings to convert to (each string in its corresponding line)')


class StringReplacer:
    """
    Replaces strings in files.

    Expects 2 files with the same amount of lines. Each line from the first
    file will be replaced with the corresponding line from the second file.

    Args:
        path: The path to the directory that contains the files to scan.
        file_extension: The extension of the files to scan for string replacement
        string_list_before: The file that contains strings to convert (each string in its own line).
        string_list_after: The file that contains strings to convert to (each string in its corresponding line).
    """

    def __init__(self, path, file_extension, string_list_before, string_list_after):
        # Plain data holder; all work happens in replace().
        self.path = path
        self.file_extension = file_extension
        self.string_list_before = string_list_before
        self.string_list_after = string_list_after

    def replace(self):
        """Scan the configured directory and rewrite every matching file.

        Each file is first moved into <path>/backup/...<name>.old, then a new
        file with all replacements applied is written in its place.

        Raises:
            SystemExit: if the two string-list files differ in length or the
                backup directory cannot be created.
        """
        # Load the "before" strings, one per line, whitespace-stripped.
        with open(self.string_list_before, 'r') as f:
            strings = f.readlines()
        strings_before = [s.strip() for s in strings]
        log.info('Strings to replace loaded from: {}'.format(self.string_list_before))
        # Load the "after" strings the same way.
        with open(self.string_list_after, 'r') as f:
            strings = f.readlines()
        strings_after = [s.strip() for s in strings]
        log.info('Strings to replace to loaded from: {}'.format(self.string_list_after))
        # string_list_before and string_list_after must be have the same amount of strings:
        if len(strings_before) != len(strings_after):
            log.fatal(
                '\'{}\' and \'{}\' files must have the same amount of lines. Exiting...'.format(self.string_list_before,
                                                                                                self.string_list_after))
            raise SystemExit(1)
        # Pair each "before" string with its replacement by line position.
        strings = dict(zip(strings_before, strings_after))
        # No need to keep these in RAM:
        del strings_before
        del strings_after
        if not self.file_extension:
            file_pattern = '**/*'
            log.info('All files in \'{}\' will be scanned.'.format(self.path))
        else:
            file_pattern = '**/*.{}'.format(self.file_extension)
            log.info('\'{}\' files in \'{}\' will be scanned.'.format(self.file_extension, self.path))
        try:
            os.mkdir(os.path.join(self.path, 'backup'))
        except OSError:
            # NOTE(review): the .format(self.path) below is a no-op (the
            # message has no placeholder), and a leftover 'backup' directory
            # from a previous run also lands here, aborting reruns — confirm
            # whether that is intended.
            log.fatal('Creation of backup directory failed. Exiting...'.format(self.path))
            raise SystemExit(1)
        for file in glob.glob(os.path.join(self.path, file_pattern), recursive=True):
            if os.path.isdir(file):
                continue
            log.info('Replacing strings in: {}'.format(file))
            # Mirror the file's relative path inside the backup directory.
            old_file = '{}.old'.format(os.path.join(self.path, 'backup', os.path.relpath(file, self.path)))
            # NOTE(review): relpath here is relative to the CWD, not self.path
            # — verify behavior when an absolute -p path is given.
            os.makedirs(os.path.split(os.path.relpath(old_file))[0], exist_ok=True)
            shutil.move(file, old_file)
            # Read the backup copy, apply every replacement, write the result
            # back to the original location. NOTE(review): files are opened in
            # text mode with the platform default encoding; binary files in
            # the tree will raise.
            with open(old_file, 'r') as f:
                file_data = f.read()
            for before, after in strings.items():
                file_data = file_data.replace(before, after)
            with open(file, 'w') as f:
                f.write(file_data)


if __name__ == '__main__':
    args = args_parser.parse_args()
    replacer = StringReplacer(args.path, args.file_extension, args.string_list_before, args.string_list_after)
    log.info('Starting string replacement...')
    replacer.replace()
    log.info('Done')
StarcoderdataPython
34863
from django.contrib import admin

# Register your models here.
from .models import Milk
from .models import Brand
from .models import Company

# Register each model with the default admin site, in the same order as the
# original explicit register() calls.
for _model in (Milk, Brand, Company):
    admin.site.register(_model)
StarcoderdataPython
3340977
#!/usr/bin/env python3
"""Numerically evaluate the double integral of 5*x**3*cos(y**3) over the
region y in [0, 2], x in [0, 2*sqrt(y)] using a left Riemann sum."""
import numpy as np
from math import cos

# define x limits, y limits and step size
YL, YH, XL = 0, 2, 0
DX = DY = 0.001


def xh(y):
    """Upper x limit as a function of y: x runs from 0 to 2*sqrt(y)."""
    return 2 * pow(y, 0.5)


def func(x, y):
    """Integrand f(x, y) = 5*x**3*cos(y**3)."""
    return 5 * x ** 3 * cos(y ** 3)


def compute_integral(dx=DX, dy=DY):
    """Return the left Riemann-sum approximation of the double integral.

    Parameters
    ----------
    dx, dy : float
        Grid step sizes; smaller values give a more accurate result.
    """
    # Accumulate f(x, y) over the grid; 'total' avoids shadowing the
    # builtin sum() that the original script clobbered.
    total = 0.0
    for y in np.arange(YL, YH, dy):
        for x in np.arange(XL, xh(y), dx):
            total += func(x, y)
    # Multiply the accumulated samples by the cell area dx*dy.
    return total * dx * dy


if __name__ == '__main__':
    print("value of double integral is ", "%.3f" % compute_integral())
StarcoderdataPython
3398505
""" kullanıcıdan aldıgımız boy ve kg değerlerine göre beden kitle hesabı kilo/boy*boy """ boy:float=float(input("boy= ")) kilo:float=float(input("kilo= ")) print("sonuc= ") bdi=float((kilo/(boy*boy))) print(bdi) if bdi >25: print("şişman reyis")
StarcoderdataPython
159076
import im3components as cmp


class BuildDocs:
    """Generates the component-registry RST page from the live registry."""

    #: Output file for the generated documentation.
    TargetFile = 'components.rst'

    def __init__(self):
        # RST page title.
        self.heading = """
==================
Component Registry
==================
"""
        # Per-component RST template; header_bar must match the name length.
        self.body_element = """
{header_bar}
{name}
{header_bar}

**Parent**:  {parent}

**Child**:  {child}

**Description**:  {description}

**Language**:  {language}

"""

    def update_docs(self):
        """Write the heading plus one formatted section per registered component."""
        registry = cmp.registry()
        with open(BuildDocs.TargetFile, 'w') as out:
            out.write(self.heading)
            for component in registry.components:
                section = self.body_element.format(
                    header_bar='-' * (len(component.name) + 1),
                    name=component.name,
                    parent=component.parent,
                    child=component.child,
                    description=component.description,
                    language=component.language,
                )
                out.write(section)


def update_component_docs():
    """Update the components.rst file with currently implemented components."""
    BuildDocs().update_docs()
StarcoderdataPython
3386039
import pytest
import requests

from contact.helpers import (
    extract_other_offices_details,
    extract_regional_office_details,
    format_office_details,
    retrieve_regional_office,
    retrieve_regional_office_email,
)
from directory_api_client.exporting import url_lookup_by_postcode


# Fixture data: the literal dicts below mirror the directory API payloads and
# the exact shapes the helpers are expected to produce; the *_formatted
# fixtures are the expected outputs for the *_unformatted / all_offices inputs.

@pytest.fixture()
def other_offices_formatted():
    # Expected output for the non-matching (is_match=False) office after
    # formatting: street/city/postcode are joined into a newline 'address'.
    return [
        {
            'address': 'The International Trade Centre\n10 New Street\nMidlands Business Park\nBirmingham\nB20 1RJ',
            'is_match': False,
            'region_id': 'west_midlands',
            'name': '<NAME>',
            'address_street': 'The International Trade Centre, 10 New Street, Midlands Business Park',
            'address_city': 'Birmingham',
            'address_postcode': 'B20 1RJ',
            'email': '<EMAIL>',
            'phone': '0208 555 4001',
            'phone_other': '',
            'phone_other_comment': '',
            'website': None,
        }
    ]


@pytest.fixture()
def all_offices():
    # Raw API-style payload: one matching office and one non-matching office.
    return [
        {
            'is_match': True,
            'region_id': 'east_midlands',
            'name': '<NAME>',
            'address_street': ('The International Trade Centre, '
                               '5 Merus Court, '
                               'Meridian Business Park'),
            'address_city': 'Leicester',
            'address_postcode': 'LE19 1RJ',
            'email': '<EMAIL>',
            'phone': '0345 052 4001',
            'phone_other': '',
            'phone_other_comment': '',
            'website': None,
        },
        {
            'is_match': False,
            'region_id': 'west_midlands',
            'name': '<NAME>',
            'address_street': 'The International Trade Centre, 10 New Street, Midlands Business Park',
            'address_city': 'Birmingham',
            'address_postcode': 'B20 1RJ',
            'email': '<EMAIL>',
            'phone': '0208 555 4001',
            'phone_other': '',
            'phone_other_comment': '',
            'website': None,
        },
    ]


@pytest.fixture()
def office_formatted():
    # Expected formatted form of office_unformatted (single matching office).
    return [
        {
            'address': 'The International Trade Centre\n5 Merus Court\nMeridian Business Park\nLeicester\nLE19 1RJ',
            'is_match': True,
            'region_id': 'east_midlands',
            'name': '<NAME>',
            'address_street': 'The International Trade Centre, 5 Merus Court, Meridian Business Park',
            'address_city': 'Leicester',
            'address_postcode': 'LE19 1RJ',
            'email': '<EMAIL>',
            'phone': '0345 052 4001',
            'phone_other': '',
            'phone_other_comment': '',
            'website': None,
        }
    ]


@pytest.fixture()
def office_unformatted():
    # Raw single-office payload (no 'address' key yet).
    return [
        {
            'is_match': True,
            'region_id': 'east_midlands',
            'name': '<NAME>',
            'address_street': 'The International Trade Centre, 5 Merus Court, Meridian Business Park',
            'address_city': 'Leicester',
            'address_postcode': 'LE19 1RJ',
            'email': '<EMAIL>',
            'phone': '0345 052 4001',
            'phone_other': '',
            'phone_other_comment': '',
            'website': None,
        }
    ]


def test_format_office_details(
    office_formatted,
    office_unformatted,
):
    # Formatting adds the multi-line 'address' field.
    office = format_office_details(office_unformatted)
    assert office == office_formatted


def test_format_office_details_empty():
    # An empty payload yields None rather than an empty list.
    office = format_office_details([])
    assert office is None


def test_extract_other_offices_details(all_offices, other_offices_formatted):
    # Only the is_match=False offices are returned, formatted.
    display_offices = extract_other_offices_details(all_offices)
    assert display_offices == other_offices_formatted


def test_extract_other_offices_details_empty():
    display_offices = extract_other_offices_details([])
    assert display_offices is None


def test_extract_regional_office_details(all_offices, office_formatted):
    # The single is_match=True office is returned, formatted.
    regional_office = extract_regional_office_details(all_offices)
    assert regional_office == office_formatted[0]


def test_extract_regional_office_details_empty():
    regional_office = extract_regional_office_details([])
    assert regional_office is None


def test_retrieve_regional_office(requests_mock):
    # The first is_match=True entry wins.
    mock_data = [
        {'is_match': True, 'email': '<EMAIL>'},
        {'is_match': False, 'email': '<EMAIL>'},
        {'is_match': True, 'email': '<EMAIL>'},
    ]
    requests_mock.get(
        url_lookup_by_postcode.format(postcode='ABC123'),
        status_code=200,
        json=mock_data,
    )
    assert retrieve_regional_office('ABC123') == {'is_match': True, 'email': '<EMAIL>'}


def test_retrieve_regional_office__no_match(requests_mock):
    mock_data = [
        {'is_match': False, 'email': '<EMAIL>'},
        {'is_match': False, 'email': '<EMAIL>'},
        {'is_match': False, 'email': '<EMAIL>'},
    ]
    requests_mock.get(
        url_lookup_by_postcode.format(postcode='ABC123'),
        status_code=200,
        json=mock_data,
    )
    assert retrieve_regional_office('ABC123') is None


def test_retrieve_regional_office_email_exception(settings, requests_mock):
    # Network failures are swallowed and reported as None.
    requests_mock.get(
        url_lookup_by_postcode.format(postcode='ABC123'),
        exc=requests.exceptions.ConnectTimeout,
    )
    email = retrieve_regional_office_email('ABC123')
    assert email is None


def test_retrieve_regional_office_email_not_ok(settings, requests_mock):
    # Non-2xx responses also yield None.
    requests_mock.get(
        url_lookup_by_postcode.format(postcode='ABC123'),
        status_code=404,
    )
    email = retrieve_regional_office_email('ABC123')
    assert email is None


def test_retrieve_regional_office_email_success(requests_mock):
    match_office = [{'is_match': True, 'email': '<EMAIL>'}]
    requests_mock.get(
        url_lookup_by_postcode.format(postcode='ABC123'),
        status_code=200,
        json=match_office,
    )
    email = retrieve_regional_office_email('ABC123')
    assert email == '<EMAIL>'
StarcoderdataPython
25519
# -*- coding: utf-8 -*-
from amplify.agent.common.util.math import median

from unittest import TestCase
from hamcrest import *

__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"


class MathTestCase(TestCase):
    def test_median(self):
        # (input, expected) pairs covering even length, unsorted input,
        # odd length, and the empty sequence.
        cases = [
            ([1, 3, 5, 7], 4.0),
            ([1, 5, 7, 3], 4.0),
            ([1, 2, 3, 4, 5, 6, 7], 4.0),
            ([], None),
        ]
        for values, expected in cases:
            assert_that(median(values), equal_to(expected))
StarcoderdataPython
3208409
<reponame>zju-vipa/KamalEngine<filename>kamal/vision/models/classification/__init__.py<gh_stars>10-100 from .darknet import * from .mobilenetv2 import * from .resnet import * from .vgg import * from . import cifar from .alexnet import alexnet
StarcoderdataPython
1689155
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from edward.criticisms.evaluate import *

# Metric groups under test; each group shares the same input regime.
# Binary / sparse-categorical metrics over 0/1 labels.
all_classification_metrics = [
    binary_accuracy,
    sparse_categorical_accuracy,
]

# Metrics taking integer labels vs. real-valued predictions.
all_real_classification_metrics = [
    binary_crossentropy,
    categorical_crossentropy,
    hinge,
    squared_hinge,
]

# Metrics over real-valued targets and predictions.
all_regression_metrics = [
    mean_squared_error,
    mean_absolute_error,
    mean_absolute_percentage_error,
    mean_squared_logarithmic_error,
    poisson,
    cosine_proximity,
]

# Metrics whose input shapes differ from the groups above and need
# per-metric handling in the test.
all_specialized_input_output_metrics = [
    categorical_accuracy,
    sparse_categorical_crossentropy,
]


class test_metrics_class(tf.test.TestCase):
    # Every test only checks that each metric evaluates to a scalar
    # (shape ()), not the numeric value itself; inputs are random.

    def test_classification_metrics(self):
        with self.test_session():
            # NOTE(review): randint(0, 1, ...) always yields zeros (the high
            # bound is exclusive) — confirm whether 0/1 labels were intended.
            y_true = tf.convert_to_tensor(np.random.randint(0, 1, (2, 3)))
            y_pred = tf.convert_to_tensor(np.random.randint(0, 1, (2, 3)))
            for metric in all_classification_metrics:
                self.assertEqual(metric(y_true, y_pred).eval().shape, ())

    def test_real_classification_metrics(self):
        with self.test_session():
            y_true = tf.convert_to_tensor(np.random.randint(0, 5, (6, 7)))
            y_pred = tf.random_normal([6, 7])
            for metric in all_real_classification_metrics:
                self.assertEqual(metric(y_true, y_pred).eval().shape, ())

    def test_regression_metrics(self):
        with self.test_session():
            y_true = tf.random_normal([6, 7])
            y_pred = tf.random_normal([6, 7])
            for metric in all_regression_metrics:
                self.assertEqual(metric(y_true, y_pred).eval().shape, ())

    def test_specialized_input_output_metrics(self):
        with self.test_session():
            for metric in all_specialized_input_output_metrics:
                if metric == categorical_accuracy:
                    # One-hot-style truth vs. integer class predictions.
                    y_true = tf.convert_to_tensor(np.random.randint(0, 1, (6, 7)))
                    y_pred = tf.convert_to_tensor(np.random.randint(0, 7, (6,)))
                    self.assertEqual(metric(y_true, y_pred).eval().shape, ())
                elif metric == sparse_categorical_crossentropy:
                    # Integer labels vs. per-class real scores.
                    y_true = tf.convert_to_tensor(np.random.randint(0, 5, (6)))
                    y_pred = tf.random_normal([6, 7])
                    self.assertEqual(metric(y_true, y_pred).eval().shape, ())
                else:
                    # Guard: fail loudly if a new metric is added to the list
                    # without a matching input regime here.
                    raise NotImplementedError()


if __name__ == '__main__':
    tf.test.main()
StarcoderdataPython
1696086
from flask import Blueprint, request

from concepts.fields import fields_dict
from concepts.schemas import MessageSchema
from core.filters_view import shared_filter_view
from core.schemas import FiltersWrapperSchema
from core.shared_view import shared_view
from core.utils import is_cached
from extensions import cache
from settings import CONCEPTS_INDEX

blueprint = Blueprint("concepts", __name__)

# Cache lifetime for the list endpoint: one day, in seconds.
ONE_DAY = 24 * 60 * 60


@blueprint.route("/concepts")
@cache.cached(
    timeout=ONE_DAY, query_string=True, unless=lambda: not is_cached(request)
)
def concepts():
    """List concepts, sorted by works_count (desc) then id."""
    result = shared_view(
        request, fields_dict, CONCEPTS_INDEX, ["-works_count", "id"]
    )
    return MessageSchema().dump(result)


@blueprint.route("/concepts/filters/<path:params>")
def concepts_filters(params):
    """Return filter metadata for the given filter params path."""
    results = shared_filter_view(request, params, fields_dict, CONCEPTS_INDEX)
    return FiltersWrapperSchema().dump(results)
StarcoderdataPython
1786488
names = ['Jones', '<NAME>', '<NAME>']


def make_message(name):
    """Return the greeting sentence for a single bicycle name."""
    return f"My first bicycle was a {name}."


# Print one message per name instead of repeating the same statement
# for names[0], names[1] and names[2] by hand.
for name in names:
    print(make_message(name))
StarcoderdataPython
3337478
import PIL
from PIL import Image

import src.pos as pos


class ImageMaker:
    """
    This is a class for making Binary PFPs.

    Each 1 in the 8x8 grid from src/pos becomes an 8x8 colored tile,
    each 0 a white tile, on a 64x64 background.
    """

    def __init__(self):
        """
        Initializes the ImageMaker class.

        No state is needed; the class only groups the maker functions.
        """

    def basic_pfp_maker(self, color):
        """
        Makes a 64x64 image in a single color and saves it as profile.png.

        Parameters:
            color (str): Base name of a color asset in assets/imgs
                (case-insensitive).

        Returns:
            None, just saves an image.
        """
        color = color.lower()
        bg = Image.open('./assets/imgs/64-64.png')
        clr_asset = Image.open('./assets/imgs/' + color + '.png')
        white = Image.open('./assets/imgs/white.png')
        positions = pos.create_pos_lists()
        for i in range(len(positions)):
            for j in range(len(positions[i])):
                # Grid cell (i, j) maps to the 8x8 tile at pixel (j*8, i*8).
                if positions[i][j] == 1:
                    bg.paste(clr_asset, (j * 8, i * 8))
                else:
                    bg.paste(white, (j * 8, i * 8))
        bg.save('profile.png')

    def alternating_pfp_maker(self, color_1, color_2):
        """
        Makes a 64x64 image with alternating rows of colors; saves profile2.png.

        Even rows paint 1-cells with color_1, odd rows with color_2;
        0-cells are always white.

        Parameters:
            color_1 (str): Color for even rows (case-insensitive).
            color_2 (str): Color for odd rows (case-insensitive).

        Returns:
            None, just saves an image.
        """
        # Normalize the color names, consistent with basic_pfp_maker.
        color_1 = color_1.lower()
        color_2 = color_2.lower()
        bg = Image.open('./assets/imgs/64-64.png')
        clr_asset_1 = Image.open('./assets/imgs/' + color_1 + '.png')
        clr_asset_2 = Image.open('./assets/imgs/' + color_2 + '.png')
        # BUG FIX: the white asset path was garbled
        # ('./assets/im.0000000000gs/white.png').
        white = Image.open('./assets/imgs/white.png')
        positions = pos.create_pos_lists()
        for i in range(len(positions)):
            for j in range(len(positions[i])):
                if positions[i][j] != 1:
                    # 0-cells are white regardless of row parity.
                    bg.paste(white, (j * 8, i * 8))
                elif i % 2 == 0:
                    bg.paste(clr_asset_1, (j * 8, i * 8))
                else:
                    # BUG FIX: odd rows previously painted 1-cells white and
                    # 0-cells in color_2, inverting the pattern described in
                    # the docstring.
                    bg.paste(clr_asset_2, (j * 8, i * 8))
        bg.save('profile2.png')
StarcoderdataPython
135993
from ignite.metrics import Metric, Precision, Recall
from typing import Sequence, Callable, Optional, Union
import torch
from ignite.metrics.metric import reinit__is_reduced

__all__ = ["FbetaScore"]


class FbetaScore(Metric):
    """F-beta score assembled from ignite's Precision and Recall metrics.

    With average="macro" the per-class scores are averaged into a float;
    otherwise a per-class tensor is returned.
    """

    def __init__(
        self,
        beta: int = 1,
        output_transform: Callable = lambda x: x,
        average: str = "macro",
        is_multilabel: bool = False,
        device: Optional[Union[str, torch.device]] = None,
    ):
        self._beta = beta
        self._average = average
        # Precision/Recall average internally only when we do NOT macro-average
        # here in compute().
        elementwise = self._average != "macro"
        common_kwargs = dict(
            output_transform=output_transform,
            average=elementwise,
            is_multilabel=is_multilabel,
            device=device,
        )
        self._precision = Precision(**common_kwargs)
        self._recall = Recall(**common_kwargs)
        super(FbetaScore, self).__init__(
            output_transform=output_transform, device=device
        )

    @reinit__is_reduced
    def reset(self) -> None:
        # Reset both underlying metrics.
        self._precision.reset()
        self._recall.reset()

    def compute(self) -> torch.Tensor:
        p = self._precision.compute()
        r = self._recall.compute()
        # F-beta = (1 + b^2) * P * R / (b^2 * P + R); epsilon avoids 0/0.
        score = (
            (1.0 + self._beta ** 2)
            * p
            * r
            / (self._beta ** 2 * p + r + 1e-15)
        )
        if self._average == "macro":
            score = torch.mean(score).item()
        return score

    @reinit__is_reduced
    def update(self, output: Sequence[torch.Tensor]) -> None:
        # Feed the same (y_pred, y) pair to both underlying metrics.
        self._precision.update(output)
        self._recall.update(output)
StarcoderdataPython
1627354
from optuna.distributions import BaseDistribution
from optuna import type_checking

if type_checking.TYPE_CHECKING:
    from typing import Dict  # NOQA


class UnsupportedDistribution(BaseDistribution):
    """A distribution placeholder that accepts every value.

    It never collapses to a single value and treats any internal
    representation as contained.
    """

    def single(self):
        # type: () -> bool
        # Never a single fixed value.
        return False

    def _contains(self, param_value_in_internal_repr):
        # type: (float) -> bool
        # Every value is considered inside this distribution.
        return True

    def _asdict(self):
        # type: () -> Dict
        # No parameters to serialize.
        return {}
StarcoderdataPython
3373964
"""Layer-to-backend-function dispatch: each LayerFunc subclass turns one
layer description into a callable that builds the corresponding backend op."""
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Callable, Dict, Type, Union

from ..backend import graph as G
from ..layers import (
    ConvolutionLayer,
    FlattenLayer,
    FullyConnectedLayer,
    InputLayer,
    Layer,
    MaxPoolLayer,
)

# Extra args passed alongside a layer: plain ints or backend tensors.
ArgsType = Union[int, G.Tensor]


# pylint:disable=unused-argument
class LayerFunc(ABC):
    """Abstract callable wrapping one layer; subclasses self-register."""

    # Registry mapping a Layer subclass NAME (string) to its LayerFunc.
    # NOTE: this works because `from __future__ import annotations` makes the
    # 'layer' annotation below a string, matching type(layer).__name__ keys.
    _subclasses: Dict[str, Type[LayerFunc]] = {}

    def __init__(self, layer: Layer, *args: ArgsType, **kwargs: ArgsType) -> None:
        ...

    @classmethod
    def register_subclass(cls) -> Callable[[Type[LayerFunc]], Type[LayerFunc]]:
        """Class decorator: register a subclass under its 'layer' annotation."""

        def decorator(subclass: Type[LayerFunc]) -> Type[LayerFunc]:
            try:
                # The registry key is the string annotation of the 'layer'
                # parameter of the subclass __init__ (e.g. "ConvolutionLayer").
                subclass_type = subclass.__init__.__annotations__["layer"]
            except (KeyError, AttributeError) as exp:
                raise TypeError("Incorrect class for decorating") from exp
            cls._subclasses[subclass_type] = subclass
            return subclass

        return decorator

    @classmethod
    def get_layer_func(
        cls, layer: Layer, *args: ArgsType, **kwargs: ArgsType
    ) -> Union[LayerFunc, G.PlaceholderType]:
        """Return the registered LayerFunc for *layer*.

        InputLayer is special-cased: it yields a backend placeholder directly
        (requires kwargs["batch_size"]). Raises ValueError for unregistered
        layer types.
        """
        if isinstance(layer, InputLayer):
            return G.placeholder(
                name=layer.name, shape=(kwargs["batch_size"], *layer.output_shape)
            )
        layer_func = cls._subclasses.get(type(layer).__name__, None)
        if layer_func:
            return layer_func(layer, *args, **kwargs)
        raise ValueError(f"Unsupported type: {type(layer)}")

    @abstractmethod
    def __call__(self, layer_input: G.Tensor) -> G.Tensor:
        ...


# pylint:disable=super-init-not-called
@LayerFunc.register_subclass()
class ConvolutionFunc(LayerFunc):
    """Builds a 2D convolution + bias + activation for a ConvolutionLayer."""

    def __init__(
        self, layer: ConvolutionLayer, weights: G.Tensor, biases: G.Tensor,
        *args: ArgsType, **kwargs: ArgsType
    ) -> None:
        self.layer = layer
        self.weights = weights
        self.biases = biases

    def __call__(self, layer_input: G.Tensor) -> G.Tensor:
        # NOTE(review): 'pad' is unpacked with * below, so layer.pad is
        # presumably array-like per-axis padding halved here — confirm.
        pad = self.layer.pad / 2
        return self.layer.activation.value(
            G.conv2d(  # TODO: check layer dimension, and choice the right convolution
                layer_input,
                self.weights,
                (1, *self.layer.stride, 1),  # TODO: feature request
                padding=(0, 0, *pad, 0, 0),  # TODO: feature request
            )
            + self.biases
        )


@LayerFunc.register_subclass()
class MaxPoolFunc(LayerFunc):
    """Builds a max-pooling op for a MaxPoolLayer."""

    def __init__(self, layer: MaxPoolLayer, *args: ArgsType, **kwargs: ArgsType) -> None:
        self.layer = layer

    def __call__(self, layer_input: G.Tensor) -> G.Tensor:
        return G.max_pool(
            layer_input,
            (1, *self.layer.kernel, 1),
            (1, *self.layer.stride, 1),
            padding="SAME",  # TODO: need fix??? What about backend???
        )


@LayerFunc.register_subclass()
class FlattenFunc(LayerFunc):
    """Reshapes the input to (batch_size, *layer.output_shape)."""

    def __init__(self, layer: FlattenLayer, batch_size: int, *args: ArgsType, **kwargs: ArgsType) -> None:
        self.layer = layer
        self.batch_size = batch_size

    def __call__(self, layer_input: G.Tensor) -> G.Tensor:
        return G.reshape(
            layer_input,
            (self.batch_size, *self.layer.output_shape),
        )


@LayerFunc.register_subclass()
class FullyConnectedFunc(LayerFunc):
    """Builds matmul + bias + activation for a FullyConnectedLayer."""

    def __init__(
        self, layer: FullyConnectedLayer, weights: G.Tensor, biases: G.Tensor,
        *args: ArgsType, **kwargs: ArgsType
    ) -> None:
        self.layer = layer
        self.weights = weights
        self.biases = biases

    def __call__(self, layer_input: G.Tensor) -> G.Tensor:
        return self.layer.activation.value(
            G.matmul(
                layer_input,
                self.weights,
            )
            + self.biases
        )
# pylint:enable=super-init-not-called
# pylint:enable=unused-argument
StarcoderdataPython
1673503
#!/usr/bin/env python
# https://gist.github.com/fbattello/c617e4928c37f3f934d14b90f9cc22fb
# Fix IPython notebook files generated by vscode
# Failed validating 'additionalProperties' in markdown_cell : https://github.com/microsoft/vscode-python/issues/8772
"""
usage:
python nbfix.py index.ipynb
python nbfix.py "chapter*.ipynb"
python nbfix.py "intro*.ipynb" "chapter*.ipynb" "appendix*.ipynb"
"""
import sys
import nbformat
from pathlib import Path


def fix_notebooks(filenames):
    """Strip code-cell-only fields from markdown cells of each notebook.

    vscode emits 'outputs' and 'execution_count' on markdown cells, which
    violates the nbformat schema; remove them and rewrite the file in place
    (a .bak backup is written first).
    """
    for fname in filenames:
        with open(fname, 'r', encoding='utf-8') as f:
            nb = nbformat.read(f, as_version=4)
        nbformat.write(nb, fname + ".bak")  # comment this line for no backup
        # Explicit loop instead of a side-effect-only list comprehension.
        for cell in nb.cells:
            if cell['cell_type'] == 'markdown':
                for key in ('outputs', 'execution_count'):
                    cell.pop(key, None)
        nbformat.write(nb, fname)  # override origin notebook


if __name__ == '__main__':
    notebooks_glob = sys.argv[1:]  # notebook filenames with wildcards
    if not notebooks_glob:
        print(__doc__, file=sys.stderr)
        sys.exit(1)
    # Expand every glob pattern against the current directory.
    notebooks = []
    for notebook_glob in notebooks_glob:
        notebooks.extend(f.name for f in Path().glob(notebook_glob))
    fix_notebooks(notebooks)
StarcoderdataPython
1666515
from firstclasspostcodes.client import Client
StarcoderdataPython
1633297
<reponame>Khamaldeeen/Property-Scraper import requests import pandas as pd from bs4 import BeautifulSoup import time url = "https://nigeriapropertycentre.com/for-rent/flats-apartments/lagos?bedrooms=3&minprice=300000&maxprice=40000000&q=for-rent+flats-apartments+lagos+3+bedrooms+minprice+300000+maxprice+40000000" url0 = "https://nigeriapropertycentre.com/for-rent/flats-apartments/lagos/showtype?bedrooms=3&minprice=300000&maxprice=40000000&page=" url1 = "https://nigeriapropertycentre.com/for-rent/flats-apartments/lagos/showtype?bedrooms=3&minprice=300000&maxprice=40000000&page=2" def scraper(x): webpage = requests.get(x) refined = BeautifulSoup(webpage.text, 'lxml') cards = refined.select('.wp-block') Location = [] for item in cards: loc = item.select('.wp-block-content > address > strong') for elem in loc: splts = elem.get_text().split() fin = splts[-2] fin = fin.replace(',', '') fin = fin.replace('(', '') fin = fin.replace(')', '') Location.append(fin) Prices = [] for item in cards: amt = item.select('.wp-block-content > .pull-sm-left > .price') for i, elem in enumerate(amt): if i % 2 != 0: pri = elem.get('content') Prices.append(pri) Features = [] for item in cards: all_ = item.select('.wp-block-footer > .aux-info >li > span') items = [] for elem in all_: yuh = elem.get_text() items.append(yuh) Features.append(' '.join(items)) Features = Features[2:-2] return Location, Features, Prices urls = [url, url1] for i in range(3, 50): ext = url0 + str(i) urls.append(ext) ML, MF, MP = [], [], [] for item in urls: Ml, Mf, Mp = scraper(item) ML += Ml MF += Mf MP += Mp time.sleep(4) data = pd.DataFrame({'Location':ML, 'Extra Features': MF, 'Price': MP}) data.to_csv('3 Bedroom Apart.csv', index=False)
StarcoderdataPython
1647988
<filename>tests/test_spar_benchmark_api.py import unittest from fds.analyticsapi.engines.api.benchmarks_api import BenchmarksApi from fds.analyticsapi.engines.model.spar_benchmark_root import SPARBenchmarkRoot import common_parameters from common_functions import CommonFunctions class TestSparBenchmarkApi(unittest.TestCase): def setUp(self): self.spar_benchmark_api = BenchmarksApi( CommonFunctions.build_api_client()) def test_get_spar_benchmark_by_id(self): response = self.spar_benchmark_api.get_spar_benchmark_by_id(common_parameters.spar_benchmark_r1000, _return_http_data_only=False ) self.assertEqual(response[1], 200, "Response should be 200 - Success") self.assertEqual( type(response[0]), SPARBenchmarkRoot, "Response should be of SPARBenchmark type") if __name__ == '__main__': unittest.main()
StarcoderdataPython
3375205
# -*- coding: utf-8 -*-
"""
Exceptions and Warnings used in the AltamISA library.
"""

__author__ = "<NAME> <<EMAIL>>"


class IsaException(Exception):
    """Root of the exception hierarchy for Altamisa errors."""


class ParseIsatabException(IsaException):
    """Raised when an ISA-TAB document cannot be parsed."""


class WriteIsatabException(IsaException):
    """Raised when an ISA-TAB document cannot be written."""


class IsaWarning(Warning):
    """Root of the warning hierarchy for Altamisa diagnostics."""


class ParseIsatabWarning(IsaWarning):
    """Issued for recoverable problems while parsing ISA-TAB."""


class WriteIsatabWarning(IsaWarning):
    """Issued for recoverable problems while writing ISA-TAB."""


class IsaValidationWarning(IsaWarning):
    """Issued for problems found while validating ISA models or objects."""


class AdvisoryIsaValidationWarning(IsaValidationWarning):
    """Validation finding of low severity (advisory only)."""


class ModerateIsaValidationWarning(IsaValidationWarning):
    """Validation finding of moderate severity."""


class CriticalIsaValidationWarning(IsaValidationWarning):
    """Validation finding of critical severity."""
StarcoderdataPython
115103
import os
import pdb
import pandas as pd
import pickle as pkl
import torch
from torch.utils.data import Dataset, DataLoader


class DetectionDataset(Dataset):
    """Detection dataset backed by preprocessed tensors saved on disk.

    Each slice directory holds three tensors: conditioning inputs (`cond`),
    ground-truth labels (`true`), and model outputs (`out`).
    """

    def __init__(self, dslice):
        """Load the tensors for one data slice.

        Args:
            dslice: data slice (train, test, or val)
        """
        base = os.path.join('/scratch/users/georgech/data/preprocessed_binary/', dslice)

        def _load(name):
            return torch.load(os.path.join(base, name))

        self.true = _load('true.pth')
        self.cond = _load('cond.pth')
        # An earlier variant reshaped outputs to a column vector: .view([-1,1])
        self.out = _load('out.pth')

    def __len__(self):
        # Number of samples = leading dimension of the output tensor.
        return self.out.size()[0]

    def __getitem__(self, idx):
        """Return the (cond, true, out) triple at position ``idx``."""
        return self.cond[idx], self.true[idx], self.out[idx]
StarcoderdataPython