diff --git "a/4915.jsonl" "b/4915.jsonl" new file mode 100644--- /dev/null +++ "b/4915.jsonl" @@ -0,0 +1,712 @@ +{"seq_id":"557942624","text":"\n# coding: utf-8\n\n# In[ ]:\n\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\ndef flowVisualUseColor(flow):\n hsv = np.zeros([flow.shape[0],flow.shape[1],3], dtype=np.uint8)\n hsv[:, :, 0] = 255\n hsv[:, :, 1] = 255\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n plt.figure()\n # plt.axis('off')\n plt.imshow(rgb)\n plt.show()\n\n","sub_path":"flow_visualization.py","file_name":"flow_visualization.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"167135149","text":"import numpy as np\nimport random\nimport os\nimport pickle\n\n\nclass BreedGeneration:\n \n \n \"\"\"\n \n \n A class just used too create the first generation of fighters fr the tournament\n \n \n \"\"\"\n \n def __init__(self,dead_generation,mutation_rate = 0.1,number_of_fighters = 16, last_name_list_file = 'last-names.txt',first_name_list_file = 'first-names.txt'):\n \n \"\"\"\n \n Initialize some values mostly with nothings\n \n \"\"\"\n \n self.dead_generation = dead_generation\n self.generation_number = dead_generation['generation_number']+1\n self.cwd = os.getcwd()\n self.matrix_dimensions = dead_generation['fighters'][list(dead_generation['fighters'].keys())[1]]['attributes'].shape\n\n \n self.ids = None\n self.generation_dict = dict()\n self.number_of_fighters = number_of_fighters\n self.file_last_name_list = last_name_list_file\n self.file_first_name_list = first_name_list_file\n self.mutation_rate = mutation_rate\n self.gen_storage_location = os.path.abspath(os.path.join(self.cwd,\n '..', 'storage',\n 'generation_'+str(self.generation_number)+'_pool',\n 'gen'+str(self.generation_number)+'begin.pickle'))\n def createIDList(self):\n self.ids = [\"g\"+str(self.generation_number)+\"_c\" + str(num) for num in range(self.number_of_fighters)]\n def pullNameList(self):\n last_name_list_path = os.path.abspath(os.path.join(self.cwd, '..', 'storage','resources',self.file_last_name_list))\n text_file = open(last_name_list_path, \"r\")\n self.last_name_list = text_file.read().split('\\n')\n text_file.close()\n \n first_name_list_path = os.path.abspath(os.path.join(self.cwd, '..', 'storage','resources',self.file_first_name_list))\n text_file = open(first_name_list_path, \"r\")\n self.first_name_list = text_file.read().split('\\n')\n text_file.close()\n def populateProbabilities(self):\n self.indexlist = list(self.dead_generation['fighters'].keys())\n wins = []\n for key in self.indexlist:\n wins.append(self.dead_generation['fighters'][key]['wins'])\n self.fighter_probabilities = np.array(wins)/sum(wins)\n def get_matrix_mask_2(self):\n mask_test = np.full(self.matrix_dimensions[0]**2,True)\n\n mask_test[0:int(self.matrix_dimensions[0]**2/2)] = False\n np.random.shuffle(mask_test)\n return mask_test.reshape(self.matrix_dimensions)\n def breedFighters(self):\n fighter_1 = self.dead_generation['fighters'][np.random.choice(self.indexlist, p=self.fighter_probabilities)]\n fighter_2 = self.dead_generation['fighters'][np.random.choice(self.indexlist, p=self.fighter_probabilities)]\n #get name\n new_first_name = random.choice(self.first_name_list)\n new_last_name = 
random.choice([fighter_1['last_name'],fighter_2['last_name']])\n name = new_first_name+' '+new_last_name\n #get masks for equation\n tf1_mask = self.get_matrix_mask_2()\n tf2_mask = np.invert(tf1_mask)\n #create base stats\n base_fighter_attributes = (fighter_1['attributes']*tf1_mask+fighter_2['attributes']*tf2_mask)\n base_fighter_attributes = np.nan_to_num(base_fighter_attributes)\n #mutate base stats\n mutation = np.random.rand(self.matrix_dimensions[0],self.matrix_dimensions[0])-0.5\n new_fighter_attributes = base_fighter_attributes + mutation*self.mutation_rate\n return name, new_last_name, new_fighter_attributes\n def populateDictionary(self):\n self.generation_dict['generation_number']=self.generation_number\n self.generation_dict['mutation_rate']=self.mutation_rate\n self.generation_dict['fighters']=dict()\n for num,i in enumerate(self.ids):\n self.generation_dict['fighters'][i] = dict()\n #create name\n name,last_name, attributes = self.breedFighters()\n #other attributes\n self.generation_dict['fighters'][i]['name'] = name\n self.generation_dict['fighters'][i]['last_name'] = last_name\n self.generation_dict['fighters'][i]['alive'] = 1\n self.generation_dict['fighters'][i]['attributes'] = attributes\n self.generation_dict['fighters'][i]['wins'] = 0\n self.generation_dict['fighters'][i]['tournament_count'] = 0\n def saveDictionary(self):\n with open(self.gen_storage_location, 'wb') as handle:\n pickle.dump(self.generation_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def pipeline(self):\n self.createIDList()\n self.pullNameList()\n self.populateProbabilities()\n self.populateDictionary()\n self.saveDictionary()\n","sub_path":"notebooks/.ipynb_checkpoints/breed_generation-checkpoint.py","file_name":"breed_generation-checkpoint.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"87383759","text":"#! 
/usr/bin/python3\nfrom NI import *\nimport os, sys\nfrom Crypto.Hash import SHA256\nfrom Crypto.Signature import PKCS1_PSS\nfrom Crypto.PublicKey import RSA\n\ndef parse_argv(argv):\n opts = {}\n argv = argv[1:]\n key = None\n for arg in argv:\n if arg[0] == '-':\n key = arg\n else:\n if key != None:\n opts[key] = arg\n\n return opts\n\ndef get_struct_size(structure):\n total_size = 0\n for field in structure:\n total_size += structure[field]\n return total_size\n\ndef get_field_offset_size(struct, field):\n offset = 0\n for f in struct:\n if f == field:\n return (offset, struct[f])\n offset += struct[f]\n raise NiException(\"no such field : {0}\".format(field))\n\n\ndef field_to_bytes(value, size):\n if isinstance(value, bytearray):\n return value\n elif isinstance(value, bytes):\n return value\n elif isinstance(value, int):\n return value.to_bytes(size, 'little')\n else:\n raise NiException(\"unknown data type\")\n\ndef fill_struct_field(blob, struct, field, value):\n offset, size = get_field_offset_size(struct, field)\n data = field_to_bytes(value, size)\n blob[offset:offset+size] = data\n\n\ndef sign_fw(myargs):\n bin_tbs=\"\"\n padding_size = 0\n\n if '--sign-upc-fw' in myargs:\n padding_size = 512 - get_struct_size(PackageHeader)\n bin_tbs = myargs.get('--sign-upc-fw', 'app.bin')\n else:\n bin_tbs = myargs.get('--sign-fw', 'app.bin')\n\n asset_version = myargs.get('--version', '01.00.0000')\n key_file = myargs.get('--key', 'G7DEV_SKFW.PEM')\n\n try:\n file_size = os.path.getsize(bin_tbs)\n except:\n print(\"fail to check file {0}\".format(bin_tbs))\n return False\n\n\n try: \n bin_fd = os.open(bin_tbs, os.O_RDONLY) \n bin_data = os.read(bin_fd, file_size)\n os.close(bin_fd)\n except:\n print(\"fail to open & read file {0}\".format(bin_tbs))\n return False\n\n header_len = get_struct_size(PackageHeader)\n\n # PackageHeader = {\n # 'packageVersion' : 4, # should always be 1 for G7\n # 'headerLength' : 4,\n # 'payloadLength' : 4,\n # 'assetType' : 4, # one of ASSET_TYPE_ENUM\n # 'assetVersion' : 40, # format XX.XX.XXXX\n # 'signatureAlgorithm' : 4,\n # 'obfuscationAlgorithm' : 4,\n # 'keyFingerprint' : 4\n #};\n\n header = bytearray(header_len)\n fill_struct_field(header, PackageHeader, 'packageVersion', 1)\n fill_struct_field(header, PackageHeader, 'headerLength', header_len)\n fill_struct_field(header, PackageHeader, 'payloadLength', file_size + padding_size)\n fill_struct_field(header, PackageHeader, 'assetType', ASSET_TYPE_ENUM['UPC_APPLICATIONS'])\n fill_struct_field(header, PackageHeader, 'assetVersion', asset_version.encode())\n # obfuscationAlgorithm and keyFingerprint left as all zero\n\n # read key file\n try:\n key_file_size = os.path.getsize(key_file)\n key_fd = open(key_file, 'br')\n key_data = key_fd.read(key_file_size)\n key_fd.close()\n except:\n print(\"fail to read key file {0}\".format(key_file))\n return False\n\n pkg_path = bin_tbs + \".pkg\"\n try:\n pkg_fd = os.open(pkg_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)\n except:\n print(\"fail to create pkg file {0}\".format(pkg_path))\n return False\n\n headerAndpayload = header + bytearray(padding_size) + bytearray(bin_data)\n data_hash = SHA256.new(headerAndpayload)\n private_key = RSA.importKey(key_data)\n signer = PKCS1_PSS.new(private_key)\n signature = signer.sign(data_hash)\n\n os.write(pkg_fd, headerAndpayload)\n os.write(pkg_fd, signature)\n os.close(pkg_fd)\n print(\"fw signed in {0}\".format(pkg_path))\n\n\nmyargs = parse_argv(sys.argv)\nif '--sign-upc-fw' in myargs:\n 
sign_fw(myargs)\n\n","sub_path":"fw-upc/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"239707043","text":"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport numpy as np\nimport torch\n\n\ndef power_method_opnorm(normal_op, x, n_iter=10):\n def _normalize(x):\n size = x.size()\n x = x.view(size[0], -1)\n norm = torch.norm(x, dim=1)\n x /= norm.view(-1, 1)\n return x.view(*size), torch.max(norm).item()\n \n with torch.no_grad():\n x, _ = _normalize(x)\n\n for i in range(n_iter):\n next_x = normal_op(x)\n x, v = _normalize(next_x)\n\n return v**0.5\n\n\ndef real2complex(x):\n return torch.stack([x, torch.zeros_like(x)], dim=4)\n\n\ndef complex2real(x):\n return x[..., 0]\n\n\ndef complex2channel(x):\n N, C, H, W, _ = x.shape\n # N C H W 2 -> N 2C H W\n temp = x\n x = x.permute(0, 1, 4, 2, 3).contiguous()\n x = x.view(N, C*2, H, W)\n return x\n\n\ndef to_tensor(data):\n \"\"\"\n Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts\n are stacked along the last dimension.\n\n Args:\n data (np.array): Input numpy array\n\n Returns:\n torch.Tensor: PyTorch version of data\n \"\"\"\n if np.iscomplexobj(data):\n data = np.stack((data.real, data.imag), axis=-1)\n return torch.from_numpy(data)\n\n\ndef apply_mask(data, mask_func, seed=None):\n \"\"\"\n Subsample given k-space by multiplying with a mask.\n\n Args:\n data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where\n dimensions -3 and -2 are the spatial dimensions, and the final dimension has size\n 2 (for complex values).\n mask_func (callable): A function that takes a shape (tuple of ints) and a random\n number seed and returns a mask.\n seed (int or 1-d array_like, optional): Seed for the random number generator.\n\n Returns:\n (tuple): tuple containing:\n masked data (torch.Tensor): Subsampled k-space data\n mask (torch.Tensor): The generated mask\n \"\"\"\n shape = np.array(data.shape)\n shape[:-3] = 1\n mask = mask_func(shape, seed)\n return torch.where(mask == 0, torch.Tensor([0]), data), mask\n\n\ndef fft2(data):\n \"\"\"\n Apply centered 2 dimensional Fast Fourier Transform.\n\n Args:\n data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions\n -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are\n assumed to be batch dimensions.\n\n Returns:\n torch.Tensor: The FFT of the input.\n \"\"\"\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.fft(data, 2, normalized=True)\n data = fftshift(data, dim=(-3, -2))\n return data\n\n\ndef ifft2(data):\n \"\"\"\n Apply centered 2-dimensional Inverse Fast Fourier Transform.\n\n Args:\n data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions\n -3 & -2 are spatial dimensions and dimension -1 has size 2. 
All other dimensions are\n assumed to be batch dimensions.\n\n Returns:\n torch.Tensor: The IFFT of the input.\n \"\"\"\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.ifft(data, 2, normalized=True)\n data = fftshift(data, dim=(-3, -2))\n return data\n\n\ndef complex_abs(data):\n \"\"\"\n Compute the absolute value of a complex valued input tensor.\n\n Args:\n data (torch.Tensor): A complex valued tensor, where the size of the final dimension\n should be 2.\n\n Returns:\n torch.Tensor: Absolute value of data\n \"\"\"\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()\n\n\ndef root_sum_of_squares(data, dim=0):\n \"\"\"\n Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.\n\n Args:\n data (torch.Tensor): The input tensor\n dim (int): The dimensions along which to apply the RSS transform\n\n Returns:\n torch.Tensor: The RSS value\n \"\"\"\n return torch.sqrt((data ** 2).sum(dim))\n\n\ndef center_crop(data, shape):\n \"\"\"\n Apply a center crop to the input real image or batch of real images.\n\n Args:\n data (torch.Tensor): The input tensor to be center cropped. It should have at\n least 2 dimensions and the cropping is applied along the last two dimensions.\n shape (int, int): The output shape. The shape should be smaller than the\n corresponding dimensions of data.\n\n Returns:\n torch.Tensor: The center cropped image\n \"\"\"\n assert 0 < shape[0] <= data.shape[-2]\n assert 0 < shape[1] <= data.shape[-1]\n w_from = (data.shape[-2] - shape[0]) // 2\n h_from = (data.shape[-1] - shape[1]) // 2\n w_to = w_from + shape[0]\n h_to = h_from + shape[1]\n return data[..., w_from:w_to, h_from:h_to]\n\n\ndef complex_center_crop(data, shape):\n \"\"\"\n Apply a center crop to the input image or batch of complex images.\n\n Args:\n data (torch.Tensor): The complex input tensor to be center cropped. It should\n have at least 3 dimensions and the cropping is applied along dimensions\n -3 and -2 and the last dimensions should have a size of 2.\n shape (int, int): The output shape. 
The shape should be smaller than the\n corresponding dimensions of data.\n\n Returns:\n torch.Tensor: The center cropped image\n \"\"\"\n assert 0 < shape[0] <= data.shape[-3]\n assert 0 < shape[1] <= data.shape[-2]\n w_from = (data.shape[-3] - shape[0]) // 2\n h_from = (data.shape[-2] - shape[1]) // 2\n w_to = w_from + shape[0]\n h_to = h_from + shape[1]\n return data[..., w_from:w_to, h_from:h_to, :]\n\n\ndef normalize(data, mean, stddev, eps=0.):\n \"\"\"\n Normalize the given tensor using:\n (data - mean) / (stddev + eps)\n\n Args:\n data (torch.Tensor): Input data to be normalized\n mean (float): Mean value\n stddev (float): Standard deviation\n eps (float): Added to stddev to prevent dividing by zero\n\n Returns:\n torch.Tensor: Normalized tensor\n \"\"\"\n return (data - mean) / (stddev + eps)\n\n\ndef normalize_instance(data, eps=0.):\n \"\"\"\n Normalize the given tensor using:\n (data - mean) / (stddev + eps)\n where mean and stddev are computed from the data itself.\n\n Args:\n data (torch.Tensor): Input data to be normalized\n eps (float): Added to stddev to prevent dividing by zero\n\n Returns:\n torch.Tensor: Normalized tensor\n \"\"\"\n mean = data.mean()\n std = data.std()\n return normalize(data, mean, std, eps), mean, std\n\n\n# Helper functions\n\ndef roll(x, shift, dim):\n \"\"\"\n Similar to np.roll but applies to PyTorch Tensors\n \"\"\"\n if isinstance(shift, (tuple, list)):\n assert len(shift) == len(dim)\n for s, d in zip(shift, dim):\n x = roll(x, s, d)\n return x\n shift = shift % x.size(dim)\n if shift == 0:\n return x\n left = x.narrow(dim, 0, x.size(dim) - shift)\n right = x.narrow(dim, x.size(dim) - shift, shift)\n return torch.cat((right, left), dim=dim)\n\n\ndef fftshift(x, dim=None):\n \"\"\"\n Similar to np.fft.fftshift but applies to PyTorch Tensors\n \"\"\"\n if dim is None:\n dim = tuple(range(x.dim()))\n shift = [dim // 2 for dim in x.shape]\n elif isinstance(dim, int):\n shift = x.shape[dim] // 2\n else:\n shift = [x.shape[i] // 2 for i in dim]\n return roll(x, shift, dim)\n\n\ndef ifftshift(x, dim=None):\n \"\"\"\n Similar to np.fft.ifftshift but applies to PyTorch Tensors\n \"\"\"\n if dim is None:\n dim = tuple(range(x.dim()))\n shift = [(dim + 1) // 2 for dim in x.shape]\n elif isinstance(dim, int):\n shift = (x.shape[dim] + 1) // 2\n else:\n shift = [(x.shape[i] + 1) // 2 for i in dim]\n return roll(x, shift, dim)\n\n\ndef complex_mul(x1, x2):\n \"\"\"\n Compute multiplication of two complex numbers\n \"\"\"\n assert x1.size(-1) == 2 and x2.size(-1) == 2\n\n res = torch.stack(\n (x1[..., 0]*x2[..., 0]-x1[..., 1]*x2[..., 1],\n x1[..., 0]*x2[..., 1] + x1[..., 1]*x2[..., 0]), -1)\n\n return res\n\n\ndef conjugate(x):\n return torch.stack([x[..., 0], -x[..., 1]], -1)\n\n\ndef cdp_forward(data, mask):\n \"\"\"\n Compute the forward model of cdp.\n\n Args:\n data (torch.Tensor): Image_data (batch_size*1*hight*weight*2).\n mask (torch.Tensor): mask (batch_size*sampling_rate*hight*weight*2), where the size of the final dimension\n should be 2 (complex value).\n\n Returns:\n forward_data (torch.Tensor): the complex field of forward data (batch_size*sampling_rate*hight*weight*2)\n \"\"\"\n assert mask.size(-1) == 2\n if data.ndimension() == 4:\n data = torch.stack([data, torch.zeros_like(data)], -1)\n sampling_rate = mask.shape[1]\n x = data.repeat(1, sampling_rate, 1, 1, 1)\n masked_data = complex_mul(x, mask)\n forward_data = torch.fft(masked_data, 2, normalized=True)\n return forward_data\n\n\ndef cdp_backward(data, mask):\n \"\"\"\n Compute the 
backward model of cdp (the inverse operator of forward model).\n\n Args:\n data (torch.Tensor): Field_data (batch_size*sampling_rate*hight*weight*2).\n mask (torch.Tensor): mask (batch_size*sampling_rate*hight*weight*2), where the size of the final dimension\n should be 2 (complex value).\n\n Returns:\n backward_data (torch.Tensor): the complex field of backward data (batch_size*1*hight*weight*2)\n \"\"\"\n assert mask.size(-1) == 2\n sampling_rate = mask.shape[1]\n Ifft_data = torch.ifft(data, 2, normalized=True)\n backward_data = complex_mul(Ifft_data, conjugate(mask))\n return backward_data.mean(1, keepdim=True)\n\n\nif __name__ == '__main__':\n from scipy.io import loadmat\n from os.path import join\n\n maskdir = '/media/kaixuan/DATA/Papers/Code/Data/MRI/masks'\n sampling_masks = ['radial_128_2', 'radial_128_4',\n 'radial_128_8'] # different masks\n obs_masks = [loadmat(join(maskdir, '{}.mat'.format(sampling_mask))).get(\n 'mask') for sampling_mask in sampling_masks]\n mask = torch.from_numpy(obs_masks[2])[None].bool()\n\n def csmri_normal_op(x):\n y0 = fft2(x)\n y0[:, ~mask, :] = 0\n ATy0 = ifft2(y0)\n return ATy0\n\n # device = torch.device(\"cpu\")\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n x = torch.randn((1, 1, 128, 128, 2), device=device)\n\n opnorm = power_method_opnorm(csmri_normal_op, x, 10)\n print(opnorm) # nearly equal to 1\n","sub_path":"tfpnp/utils/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":10677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"517785514","text":"from elasticsearch import Elasticsearch\nfrom kafka import KafkaProducer, KafkaClient\n\n\nclass Singleton(object):\n def __new__(cls, *args, **kw):\n if not hasattr(cls, '_instance'):\n orig = super(Singleton, cls)\n cls._instance = orig.__new__(cls, *args, **kw)\n return cls._instance\n\n\nclass KafkaData(Singleton):\n def __init__(self, data=None):\n self.data = data\n self.producer = KafkaProducer(bootstrap_servers='emr2-header-1.ipa.aidigger.com:6667', retries=4)\n self.client = KafkaClient(hosts='emr2-header-1.ipa.aidigger.com:6667')\n\n def get_topics(self):\n return self.client.topics\n\n def push_data(self, topic, key, value, partition):\n self.producer.send(topic=topic, key=key, value=value, partition=partition)\n\n\nclass EsUtil(Singleton):\n def __init__(self):\n self.es = Elasticsearch(['10.10.11.182:9200'])\n\n def run_with_delete(self, ariticleid):\n q_body = {\n \"query\": {\n \"bool\": {\n \"must\": {\n \"query_string\": {\n \"query\": ariticleid,\n \"fields\": [\"articleid\"],\n \"use_dis_max\": \"True\"\n }\n }\n }\n },\n \"size\": 1\n }\n result = self.es.search('machine_mind_prod', body=q_body)\n if len(result['hits']['hits']) == 1:\n res = self.es.delete_by_query(index='machine_mind_prod', body=q_body, doc_type='articles')\n return res\n else:\n return 0","sub_path":"Api/eigen_util.py","file_name":"eigen_util.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"73030744","text":"# coding= utf8\n\n\nd = {'a':2,'b':3,'c':4}\n\nfor k,v in d.items():\n print (v)\n\nfor m,n in [('a',1),('b',2)]:\n print (m)\n\n\nfor x in range(2):\n for y in range(3):\n print (x,y)\n\n\n\n\n","sub_path":"code/untitled1/2-pytho函数高级特性/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"20256890","text":"# REGULAR EXPRESSION\n\n# Q.1- Write a python code to find a valid email address from a text.\n\nemail = input(\"Enter the email: \")\nmatcher = r.finditer('^[a-z][a-zA-Z0-9]*[@](gmail.com|yahoo.com)', email)\ncount = 0\nfor i in matcher:\n count += 1\nif count == 1:\n print('Email verified')\nelse:\n print('Not valid')\n\n\n# Q.2- Write a python program to find a valid Indian phone number from a text.\n#(Valid Indian numbers will start with \"+91-\" and after that [6-9] followed by\n# 9 digits. )\n\nnum = str(input('Enter the phone number'))\nmatcher = r.finditer('^[+][9][1][-][6-9][\\d]{9}', num)\ncount = 0\nfor i in matcher:\n count += 1\nif count == 1:\n print('phone number is valid')\nelse:\n print('phone number is not valid')\n","sub_path":"Assignment11.py","file_name":"Assignment11.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"457811109","text":"\"\"\"\nA* grid based planning\n\nauthor: Atsushi Sakai(@Atsushi_twi)\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport math\nimport csv\nimport numpy as np\nimport collections\n\nshow_animation = True\n\nSTEP = 1.0\nMAX_RANGE = 360\nLEN_TO_GATE = 15\nway_point =[]\nTURNING_MAX_RAD = 0.5\nWEIGHTS = 0.1\nLEN_THRE = 25.0\n\n# start and goal position\nsx = 180.0 # [m]\nsy = 180.0 # [m]\ngx = 120.0 # [m]\ngy = 20.0 # [m]\ngrid_size = 10.0 # [m]\nrobot_size = 19.0 # [m]\n\n\n\nclass Node:\n\n def __init__(self, x, y, cost, pind):\n self.x = x\n self.y = y\n self.cost = cost\n self.pind = pind\n\n def __str__(self):\n return str(self.x) + \",\" + str(self.y) + \",\" + str(self.cost) + \",\" + str(self.pind)\n\n\ndef calc_fianl_path(ngoal, closedset, reso):\n # generate final course\n rx, ry = [ngoal.x * reso], [ngoal.y * reso]\n pind = ngoal.pind\n while pind != -1:\n n = closedset[pind]\n rx.append(n.x * reso)\n ry.append(n.y * reso)\n pind = n.pind\n\n return rx, ry\n\n\ndef a_star_planning(sx, sy, gx, gy, ox, oy, reso, rr):\n \"\"\"\n gx: goal x position [m]\n gx: goal x position [m]\n ox: x position list of Obstacles [m]\n oy: y position list of Obstacles [m]\n reso: grid resolution [m]\n rr: robot radius[m]\n \"\"\"\n\n nstart = Node(round(sx / reso), round(sy / reso), 0.0, -1)\n ngoal = Node(round(gx / reso), round(gy / reso), 0.0, -1)\n ox = [iox / reso for iox in ox]\n oy = [ioy / reso for ioy in oy]\n\n obmap, minx, miny, maxx, maxy, xw, yw = calc_obstacle_map(ox, oy, reso, rr)\n\n motion = get_motion_model()\n\n openset, closedset = dict(), dict()\n openset[calc_index(nstart, xw, minx, miny)] = nstart\n\n while 1:\n c_id = min(\n openset, key=lambda o: openset[o].cost + calc_h(ngoal, openset[o].x, openset[o].y))\n current = openset[c_id]\n # print(\"current\", current)\n\n # show graph\n # if show_animation:\n # plt.plot(current.x * reso, current.y * reso, \"xc\")\n # if len(closedset.keys()) % 10 == 0:\n # plt.pause(0.001)\n\n if current.x == ngoal.x and current.y == ngoal.y:\n print(\"Find goal\")\n ngoal.pind = current.pind\n ngoal.cost = current.cost\n break\n\n # Remove the item from the open set\n del openset[c_id]\n # Add it to the closed set\n closedset[c_id] = current\n\n # expand search grid based on motion model\n for i in range(len(motion)):\n node = Node(current.x + motion[i][0], current.y + motion[i][1],\n current.cost + motion[i][2], c_id)\n n_id = calc_index(node, xw, minx, miny)\n\n if not verify_node(node, obmap, minx, miny, maxx, maxy):\n continue\n\n if n_id in closedset:\n continue\n # 
Otherwise if it is already in the open set\n if n_id in openset:\n if openset[n_id].cost > node.cost:\n openset[n_id].cost = node.cost\n openset[n_id].pind = c_id\n else:\n openset[n_id] = node\n\n rx, ry = calc_fianl_path(ngoal, closedset, reso)\n\n return rx, ry\n\n\ndef calc_h(ngoal, x, y):\n # w = 0.1 # weight of heuristic\n d = WEIGHTS * math.sqrt((ngoal.x - x)**2 + (ngoal.y - y)**2)\n return d\n\n\ndef verify_node(node, obmap, minx, miny, maxx, maxy):\n\n if node.x < minx:\n return False\n elif node.y < miny:\n return False\n elif node.x >= maxx:\n return False\n elif node.y >= maxy:\n return False\n\n if obmap[node.x][node.y]:\n return False\n\n return True\n\n\ndef calc_obstacle_map(ox, oy, reso, vr):\n\n minx = round(min(ox))\n miny = round(min(oy))\n maxx = round(max(ox))\n maxy = round(max(oy))\n # print(\"minx:\", minx)\n # print(\"miny:\", miny)\n # print(\"maxx:\", maxx)\n # print(\"maxy:\", maxy)\n\n xwidth = round(maxx - minx)\n ywidth = round(maxy - miny)\n # print(\"xwidth:\", xwidth)\n # print(\"ywidth:\", ywidth)\n\n # obstacle map generation\n obmap = [[False for i in range(xwidth)] for i in range(ywidth)]\n for ix in range(xwidth):\n x = ix + minx\n for iy in range(ywidth):\n y = iy + miny\n # print(x, y)\n for iox, ioy in zip(ox, oy):\n d = math.sqrt((iox - x)**2 + (ioy - y)**2)\n if d <= vr / reso:\n obmap[ix][iy] = True\n break\n\n return obmap, minx, miny, maxx, maxy, xwidth, ywidth\n\n\ndef calc_index(node, xwidth, xmin, ymin):\n return (node.y - ymin) * xwidth + (node.x - xmin)\n\n\ndef get_motion_model():\n # dx, dy, cost\n motion = [[1, 0, 1],\n [0, 1, 1],\n [-1, 0, 1],\n [0, -1, 1],\n [-1, -1, math.sqrt(2)],\n [-1, 1, math.sqrt(2)],\n [1, -1, math.sqrt(2)],\n [1, 1, math.sqrt(2)]]\n\n return motion\n\ndef rotate(origin, point, angle):\n \"\"\"\n Rotate a point counterclockwise by a given angle around a given origin.\n\n The angle should be given in radians.\n \"\"\"\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy\n\ndef dist_cal(point_1, point_2):\n return math.sqrt((point_1[0]-point_2[0])**2 + (point_1[1]-point_2[1])**2)\n\n\ndef create_map(mapfile):\n \"\"\"\n x_half_width, y_half_width represent the arena\n\n y\n |\n |--- x\n \"\"\"\n ox, oy = [], [] # ox, oy stores the non-reachable coords\n\n \n with open(mapfile, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=\" \")\n gates = []\n for row in reader:\n each_gate = []\n for each_num in row:\n each_gate.append(float(each_num))\n gates.append(each_gate)\n# print(g1_lhs_x, g1_lhs_y, g1_rhs_x, g1_rhs_y)\n\n for i in range(MAX_RANGE):\n ox.append(i)\n oy.append(0)\n for i in range(MAX_RANGE):\n ox.append(i)\n oy.append(MAX_RANGE)\n for i in range(MAX_RANGE+1):\n ox.append(0)\n oy.append(i)\n # for i in range(MAX_RANGE+1):\n # ox.append(i)\n # oy.append(MAX_RANGE+1)\n for i in range(MAX_RANGE+1):\n ox.append(MAX_RANGE+1)\n oy.append(i)\n\n\n\n # x_half_width, y_half_width\n for each_gate in gates:\n xlen = each_gate[2] - each_gate[0]\n ylen = each_gate[3] - each_gate[1]\n dis = math.sqrt(xlen**2 + ylen**2)\n N = math.floor(dis/STEP)\n x_stp = xlen/N\n y_stp = ylen/N\n for i in range(N):\n ox.append(each_gate[0] + i*x_stp)\n oy.append(each_gate[1] + i*y_stp)\n return ox, oy, gates\n\ndef getTurningPts(rx, ry, start_point, enter_point, count):\n \"\"\"\n input: full list of waypoints, rx and ry\n output: turing points\n \"\"\"\n rx = np.asarray(rx)\n ry = 
np.asarray(ry)\n delta_rx = rx[1:] - rx[:-1]\n delta_ry = ry[1:] - ry[:-1]\n gradient = np.arctan2(delta_ry, delta_rx)\n\n turningPts = []\n\n for i in range(1, len(gradient)):\n if abs(gradient[i] - gradient[i-1]) > TURNING_MAX_RAD: # 30 degrees\n turningPts.append([rx[i], ry[i]])\n\n turningPts = turningPts[::-1]\n if count == 0:\n turningPts = turningPts + [[enter_point[0], enter_point[1]]]\n final_turningPts = [[turningPts[0][0], turningPts[0][1]]]\n else:\n turningPts = [[start_point[0], start_point[1]]] + turningPts + [[enter_point[0], enter_point[1]]]\n final_turningPts = [[start_point[0], start_point[1]]]\n \n point_idx = 0\n while (point_idx < len(turningPts)-1):\n flag = 0\n for point_idx_end in range(point_idx,len(turningPts)):\n if dist_cal(turningPts[point_idx], turningPts[point_idx_end]) > LEN_THRE:\n final_turningPts.append(turningPts[point_idx_end])\n point_idx = point_idx_end\n flag = 1\n break\n if flag == 0:\n break\n # print(turningPts)\n return final_turningPts\n\ndef flatten(x):\n if isinstance(x, collections.Iterable):\n return [a for i in x for a in flatten(i)]\n else:\n return [x]\n\ndef path_planning_loop(ox, oy, gates):\n # x_half_width, y_half_width\n start_point = np.asarray([sx, sy])\n count = 0\n rx_all = []\n ry_all = []\n for each_gate in gates:\n xlen = each_gate[2] - each_gate[0]\n ylen = each_gate[3] - each_gate[1]\n dis = math.sqrt(xlen**2 + ylen**2)\n N = math.floor(dis/STEP)\n x_stp = xlen/N\n y_stp = ylen/N\n center_gate = np.asarray([(each_gate[0]+each_gate[2])/2, (each_gate[1]+each_gate[3])/2])\n perpen_pt = np.asarray(rotate(center_gate, [each_gate[2],each_gate[3]], math.radians(-90)))\n\n enter_point = center_gate + (perpen_pt - center_gate)/dist_cal(perpen_pt, center_gate)*LEN_TO_GATE\n rx, ry = a_star_planning(float(start_point[0]), float(start_point[1]), float(enter_point[0]), float(enter_point[1]), ox, oy, grid_size, robot_size)\n turningpoint = getTurningPts(rx, ry,start_point, enter_point, count)\n start_point = center_gate + (perpen_pt - center_gate)/dist_cal(perpen_pt, center_gate)*(-LEN_TO_GATE)\n rx_all = rx_all + rx[::-1]\n ry_all = ry_all + ry[::-1]\n if count == 3:\n turningpoint = turningpoint + [[start_point[0], start_point[1]]]\n \n way_point.append(turningpoint)\n\n count += 1\n # way_point.append([enter_point[0], enter_point[1]]);\n\n \n # way_point.append([start_point[0], start_point[1]])\n \n #plot\n if show_animation:\n plt.plot(rx, ry, \"-r\")\n # plt.show()\n\n return way_point,rx_all, ry_all\n\n\ndef main():\n print(__file__ + \" start!!\")\n\n\n ox, oy, gates= create_map(\"mapfile\")\n\n if show_animation:\n plt.plot(ox, oy, \".k\")\n plt.plot(sx, sy, \"xr\")\n plt.plot(gx, gy, \"xb\")\n plt.grid(True)\n plt.axis(\"equal\")\n # plt.show()\n way_point,rx_all, ry_all = path_planning_loop(ox, oy, gates)\n\n\n print(way_point)\n\n flat_list = flatten(way_point)\n\n way_x =[]\n way_y =[]\n for idx in range(len(flat_list)):\n if idx % 2 == 0:\n way_x.append(flat_list[idx])\n else:\n way_y.append(flat_list[idx])\n\n if show_animation:\n plt.plot(way_x, way_y, \"*k\")\n\n print('waypoints:', flat_list)\n\n with open('way_points.dat', 'w') as the_file:\n the_file.write('\\n'.join(map(str, flat_list)))\n\n plt.savefig('final')\n plt.close()\n\n # for report\n rx_all = rx_all + [way_x[-1]]\n ry_all = ry_all + [way_y[-1]]\n newsx = (np.asarray(sx) - 180)/100\n newsy = (np.asarray(sy) - 180)/100\n ox = (np.asarray(ox) - 180)/100\n oy = (np.asarray(oy) - 180)/100\n rx_all = (np.asarray(rx_all) - 180)/100\n ry_all = (np.asarray(ry_all) 
- 180)/100\n plt.plot(newsx, newsy, \"ob\",markersize=8.0, label='start point')\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all, ry_all, \"-r\",linewidth=3.0, label='final path')\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n plt.legend()\n plt.show()\n\n\n print('rx_all:', rx_all)\n print('ry_all:', ry_all)\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[:13], ry_all[:13], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path1')\n plt.close()\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[12:14], ry_all[12:14], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path2')\n plt.close()\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[13:21], ry_all[13:21], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path3')\n plt.close()\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[20:22], ry_all[20:22], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path4')\n plt.close()\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[21:38], ry_all[21:38], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path5')\n plt.close()\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[37:39], ry_all[37:39], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path6')\n plt.close()\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[38:54], ry_all[38:54], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path7')\n plt.close()\n\n plt.plot(ox, oy, \".k\")\n plt.plot(rx_all[53:55], ry_all[53:55], \"-r\",linewidth=3.0)\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel('x[m]',fontsize=18)\n plt.ylabel('y[m]',fontsize=18)\n # plt.legend()\n # plt.show()\n plt.savefig('path8')\n plt.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"P2/path_planning/AStar/a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":13597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"371134761","text":"from typing import Optional, Type, Union\n\nimport h11\nimport trio\nimport wsproto\n\nfrom .base import HTTPServer\nfrom ..common.wsproto import WebsocketMixin\nfrom ..config import Config\nfrom ..typing import ASGIFramework\nfrom ..utils import WebsocketState\n\nMAX_RECV = 2 ** 16\n\n\nclass ConnectionClosed(Exception):\n pass\n\n\nclass WebsocketServer(HTTPServer, WebsocketMixin):\n\n def __init__(\n self,\n app: Type[ASGIFramework],\n config: Config,\n stream: trio.abc.Stream,\n *,\n upgrade_request: Optional[h11.Request]=None,\n ) -> None:\n super().__init__(stream, 'wsproto')\n self.app = app\n self.config = config\n self.connection = wsproto.connection.WSConnection(\n wsproto.connection.SERVER, extensions=[wsproto.extensions.PerMessageDeflate()],\n )\n 
self.app_queue = trio.Queue(10)\n self.response: Optional[dict] = None\n self.scope: Optional[dict] = None\n self.state = WebsocketState.HANDSHAKE\n\n self._buffer: Optional[Union[bytes, str]] = None\n\n if upgrade_request is not None:\n fake_client = h11.Connection(h11.CLIENT)\n self.connection.receive_bytes(fake_client.send(upgrade_request))\n\n async def handle_connection(self) -> None:\n try:\n request = await self.read_request()\n async with trio.open_nursery() as nursery:\n nursery.start_soon(self.handle_websocket, request)\n await self.read_messages()\n except (\n ConnectionClosed, trio.TooSlowError, trio.BrokenResourceError,\n trio.ClosedResourceError,\n ):\n await self.aclose()\n\n async def awrite(self, data: bytes) -> None:\n await self.stream.send_all(data)\n\n async def aclose(self) -> None:\n self.app_queue.put_nowait({'type': 'websocket.disconnect'})\n await super().aclose()\n\n async def read_request(self) -> wsproto.events.ConnectionRequested:\n for event in self.connection.events():\n if isinstance(event, wsproto.events.ConnectionRequested):\n return event\n else:\n raise ConnectionClosed()\n\n async def read_messages(self) -> None:\n while True:\n data = await self.stream.receive_some(MAX_RECV)\n self.connection.receive_bytes(data)\n for event in self.connection.events():\n if isinstance(event, wsproto.events.DataReceived):\n if self._buffer is None:\n if isinstance(event, wsproto.events.TextReceived):\n self._buffer = ''\n else:\n self._buffer = b''\n self._buffer += event.data\n if len(self._buffer) > self.config.websocket_max_message_size:\n self.connection.close(1009) # CLOSE_TOO_LARGE\n await self.asend()\n raise ConnectionClosed()\n if event.message_finished:\n if isinstance(event, wsproto.events.BytesReceived):\n await self.app_queue.put({\n 'type': 'websocket.receive',\n 'bytes': self._buffer,\n 'text': None,\n })\n else:\n await self.app_queue.put({\n 'type': 'websocket.receive',\n 'bytes': None,\n 'text': self._buffer,\n })\n self._buffer = None\n elif isinstance(event, wsproto.events.ConnectionClosed):\n raise ConnectionClosed()\n\n @property\n def scheme(self) -> str:\n return 'wss' if self._is_ssl else 'ws'\n","sub_path":"hypercorn/trio/wsproto.py","file_name":"wsproto.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"287344563","text":"def reversePrint(head):\n if head==None:\n print(\"nothing\")\n else:\n prev=None\n cur=head\n while(cur):\n nex=cur.next\n cur.next=prev\n prev=cur\n cur=nex\n head=prev\n curr=head\n while(curr!=None):\n print(curr.data)\n curr=curr.next","sub_path":"revalinkedlist.py","file_name":"revalinkedlist.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"617953392","text":"'''\niterative update training.\nChanged the definition of loss_degrad in create_architecture_adversarial function.\n'''\n\nimport sys\nsys.path.insert(0, '..')\n\nimport time\nimport datetime\nfrom six.moves import xrange\nimport input_data\nimport errno\nimport pprint\nimport itertools\nfrom degradlNet import residualNet\nfrom budgetNet import budgetNet\nfrom utilityNet import utilityNet\nfrom loss import *\nfrom utils import *\nfrom img_proc import _avg_replicate\nimport yaml\nfrom tf_flags import FLAGS\n\nfrom functions import placeholder_inputs, create_grad_accum_for_late_update, create_videos_reading_ops, create_summary_files\nfrom bcolors import 
bcolors\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=FLAGS.GPU_id\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='1,2'\nprint('Using GPU:', FLAGS.GPU_id)\nprint('GPU_NUM:', FLAGS.GPU_NUM)\n\n# job name:\nalgo_str = 'AlternativeUpdate'\nrestarting = False\nrest_str = 'Rest' if restarting else 'NoRest'\n\ndir_name = '{}-{}-M{}'.format(algo_str, rest_str, FLAGS.NBudget)\nif not os.path.isdir(dir_name):\n\tos.mkdir(dir_name)\n# end job name\n\n# dir name:\nsummary_dir = os.path.join(dir_name , 'summaries' + datetime.datetime.now().strftime(\"-%Y%m%d_%H%M%S\"))\nckpt_dir = os.path.join(dir_name, 'ckpt_dir')\n# vis_dir = os.path.join(dir_name, 'visualization')\n# log_dir = os.path.join(dir_name, 'tensorboard_events')\ntest_result_dir = os.path.join(dir_name, 'testing_results')\n# end dir names\n\nuse_pretrained_model = True # if True, load ckpts from pretrained fd and fT; if False: load ckpts from previous training stage of this run_adversarial_training function.\n\n\ndef create_architecture_adversarial(cfg, batch_size, multiplier_lst, logits_budget_lst_dct, loss_budget_lst_dct, scope, videos, utility_labels, budget_labels, dropout):\n\t'''\n\tCreate the architecture of the adversarial model in the graph\n\tis_training: whether it is in the adversarial training part. (include testing, not two-fold)\n\t'''\n\t# fd part:\n\tdegrad_videos = residualNet(videos, is_video=True)\n\tdegrad_videos = _avg_replicate(degrad_videos) if FLAGS.use_avg_replicate else degrad_videos\n\t# fd part ends\n\t# fT part:\n\tlogits_utility = utilityNet(degrad_videos, dropout, wd=0.001)\n\tloss_utility = tower_loss_xentropy_sparse(scope, logits_utility, utility_labels, use_weight_decay=True)\n\t# fT part ends\n\t# fb part:\n\tlogits_budget = tf.zeros([batch_size, cfg['DATA']['NUM_CLASSES_BUDGET']])\n\tloss_budget = 0.0\n\tbudget_logits_lst = []\n\tfor multiplier in multiplier_lst:\n\t\tprint(multiplier)\n\t\tlogits = budgetNet(degrad_videos, depth_multiplier=multiplier)\n\t\tbudget_logits_lst.append(logits)\n\t\tloss = tower_loss_xentropy_sparse(scope, logits, budget_labels, use_weight_decay=False)\n\t\tlogits_budget_lst_dct[str(multiplier)].append(logits)\n\t\tloss_budget_lst_dct[str(multiplier)].append(loss)\n\t\tlogits_budget += logits / FLAGS.NBudget\n\t\tloss_budget += loss / FLAGS.NBudget\n\t# fd part ends.\n\t# Find the largest budget loss of the M ensembled budget models:\n\targmax_adverse_budget_loss = None\n\t# finish finding max_adverse_budget_loss and argmax_adverse_budget_loss.\n\n\t# change the definition of loss_degrad:\n\tloss_degrad = -loss_budget\n\t\n\treturn loss_degrad, loss_budget, loss_utility, logits_budget, logits_utility, argmax_adverse_budget_loss\n\n# Training set for traning, validation set for validation.\n# lambda: weight for L1 loss (degrade model L1 loss)\ndef run_adversarial_training(cfg):\n\t'''\n\tAlgorithm 1 in the paper\n\t'''\n\n\tdef run_validation(input_op_list, summary_file, summary_info):\n\t\t'''\n\t\tValidation during training.\n\t\tValidation can be run on any set: training, validating or testing.\n\n\t\tInput:\n\t\tsess: run oprations in this session.\n\t\tinput_op_list: list. For example, when validating on training set, it is [tr_videos_op, tr_action_labels_op, tr_actor_labels_op]\n\t\tother_op_list: list. Always [accuracy_utility, accuracy_budget, loss_utility_op, loss_budget_op]\n\t\tsummary_file: put the validation summary in this file.\n\t\tsummary_info: string. 
Summary content.\n\n\t\tOutput:\n\t\tprint and write summary.\n\n\t\tReturn:\n\t\tacc_util_lst, acc_budget_lst\n\t\t'''\n\n\t\t# initialize timer and lists:\n\t\tstart_time = time.time()\n\t\tacc_util_lst, acc_budget_lst, loss_utility_lst, loss_budget_lst = [], [], [], []\n\t\t# late update:\n\t\tfor _ in itertools.repeat(None, FLAGS.n_minibatches_eval):\n\t\t\ttr_videos, tr_action_labels, tr_actor_labels = sess.run(input_op_list)\n\t\t\tacc_util, acc_budget, loss_utility_value, loss_budget_value = sess.run(\n\t\t\t\t\t\t\t\t\t[accuracy_utility, accuracy_budget, loss_utility_op, loss_budget_op],\n\t\t\t\t\t\t\t\t\tfeed_dict={videos_placeholder: tr_videos,\n\t\t\t\t\t\t\t\t\t\t\t\tutility_labels_placeholder: tr_action_labels,\n\t\t\t\t\t\t\t\t\t\t\t\tbudget_labels_placeholder: tr_actor_labels,\n\t\t\t\t\t\t\t\t\t\t\t\tdropout_placeholder: 1.0,\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t\t\tacc_util_lst.append(acc_util)\n\t\t\tacc_budget_lst.append(acc_budget)\n\t\t\tloss_utility_lst.append(loss_utility_value)\n\t\t\tloss_budget_lst.append(loss_budget_value)\n\t\t# Writing and printing summary part:\n\t\tsummary = summary_info.format(\n\t\t\t\ttime.time() - start_time, \n\t\t\t\tnp.mean(acc_util_lst), np.mean(acc_budget_lst),\n\t\t\t\tnp.mean(loss_utility_lst), np.mean(loss_budget_lst))\n\t\tprint(summary)\n\t\tsummary_file.write(summary + '\\n')\n\t\tprint('\\n')\n\t\t# End writing and printing summary part.\n\t\treturn acc_util_lst, acc_budget_lst\n\n\t# initialize multiplier_lst, logits_budget_lst_dct, loss_budget_lst_dct, which are used in both the graph and the session:\n\t# The depth multiplier list for creating different budget models ensemble (MobileNet with different depth.)\n\tmultiplier_lst = [0.60 - i * 0.02 for i in range(FLAGS.NBudget)]\n\t# The dict of logits and loss for each different budget model to get accuracy\n\tlogits_budget_lst_dct = {str(multiplier): [] for multiplier in multiplier_lst}\n\tloss_budget_lst_dct = {str(multiplier): [] for multiplier in multiplier_lst}\n\t# end initializing multiplier_lst, logits_budget_lst_dct, loss_budget_lst_dct.\n\n\t# mkdir for saving ckpt of the adversarial training process:\n\tif not os.path.exists(ckpt_dir):\n\t\tos.makedirs(ckpt_dir)\n\n\t# define graph\n\tgraph = tf.Graph()\n\twith graph.as_default():\n\t\t# global step:\n\t\tglobal_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)\n\t\t# placeholder inputs:\n\t\tvideos_placeholder, utility_labels_placeholder, budget_labels_placeholder, dropout_placeholder, _ = \\\n\t\t\t\t\t\t\t\t\t\tplaceholder_inputs(cfg['TRAIN']['BATCH_SIZE'] * FLAGS.GPU_NUM, cfg)\n\n\t\ttower_grads_degrad, tower_grads_utility, tower_grads_budget = [], [], []\n\n\t\t# Compute Acc (fT, fb logits output)\n\t\tlogits_utility_lst, logits_budget_lst = [], []\n\n\t\t# Compute Loss (LT, Lb_cross_entropy, Ld=LT+Lb_entropy?)\n\t\tloss_utility_lst, loss_budget_lst, loss_degrad_lst = [], [], []\n\n\t\t# Compute prediction with min entropy (most confident)\n\t\t# Use max uniform loss instead\n\t\targmax_adverse_budget_loss_lst = []\n\n\t\t# Optimizer for the 3 components respectively\n\t\topt_degrad = tf.train.AdamOptimizer(FLAGS.degradation_lr)\n\t\topt_utility = tf.train.AdamOptimizer(FLAGS.utility_lr)\n\t\topt_budget = tf.train.AdamOptimizer(FLAGS.budget_lr)\n\t\t\n\n\t\twith tf.variable_scope(tf.get_variable_scope()):\n\t\t\tfor gpu_index in range(0, FLAGS.GPU_NUM):\n\t\t\t\twith tf.device('/gpu:%d' % gpu_index):\n\t\t\t\t\tprint('/gpu:%d' % gpu_index)\n\t\t\t\t\twith 
tf.name_scope('%s_%d' % ('gpu', gpu_index)) as scope:\n\t\t\t\t\t\tvideos = videos_placeholder[gpu_index * cfg['TRAIN']['BATCH_SIZE']:(gpu_index + 1) * cfg['TRAIN']['BATCH_SIZE']]\n\t\t\t\t\t\tutility_labels = utility_labels_placeholder[gpu_index * cfg['TRAIN']['BATCH_SIZE']:(gpu_index + 1) * cfg['TRAIN']['BATCH_SIZE']]\n\t\t\t\t\t\tbudget_labels = budget_labels_placeholder[gpu_index * cfg['TRAIN']['BATCH_SIZE']:(gpu_index + 1) * cfg['TRAIN']['BATCH_SIZE']]\n\t\t\t\t\t\tloss_degrad, loss_budget, loss_utility, logits_budget, logits_utility, argmax_adverse_budget_loss = \\\n\t\t\t\t\t\t\t\t\tcreate_architecture_adversarial(cfg, cfg['TRAIN']['BATCH_SIZE'], multiplier_lst, logits_budget_lst_dct, loss_budget_lst_dct, scope, videos, utility_labels, budget_labels, dropout_placeholder)\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Reuse variables for the next tower.\n\t\t\t\t\t\ttf.get_variable_scope().reuse_variables()\n\t\t\t\t\t\t\n\t\t\t\tloss_degrad_lst.append(loss_degrad)\n\t\t\t\tloss_budget_lst.append(loss_budget)\n\t\t\t\tloss_utility_lst.append(loss_utility)\n\t\t\t\tlogits_budget_lst.append(logits_budget)\n\t\t\t\tlogits_utility_lst.append(logits_utility)\n\t\t\t\targmax_adverse_budget_loss_lst.append(argmax_adverse_budget_loss)\n\t\t\t\t# varlist:\n\t\t\t\tvarlist_degrad = [v for v in tf.trainable_variables() if any(x in v.name for x in [\"DegradationModule\"])]\n\t\t\t\tvarlist_utility = [v for v in tf.trainable_variables() if any(x in v.name for x in [\"UtilityModule\"])]\n\t\t\t\tvarlist_budget = [v for v in tf.trainable_variables() if any(x in v.name for x in [\"BudgetModule\"])]\n\n\t\t\t\tgrads_degrad = opt_degrad.compute_gradients(loss_degrad, varlist_degrad)\n\t\t\t\tgrads_budget = opt_budget.compute_gradients(loss_budget, varlist_budget)\n\t\t\t\tgrads_utility = opt_utility.compute_gradients(loss_utility, varlist_utility+varlist_degrad)\n\n\t\t\t\ttower_grads_degrad.append(grads_degrad)\n\t\t\t\ttower_grads_budget.append(grads_budget)\n\t\t\t\ttower_grads_utility.append(grads_utility)\n\n\t\t\t\t\t\t\n\t\t# argmax_adverse_budget_loss_op = tf.concat(argmax_adverse_budget_loss_lst, 0)\n\n\t\t# Average losses over each GPU:\n\t\tloss_utility_op = tf.reduce_mean(loss_utility_lst, name='softmax') # LT\n\t\tloss_budget_op = tf.reduce_mean(loss_budget_lst, name='softmax') # Lb\n\t\tloss_degrad_op = tf.reduce_mean(loss_degrad_lst, name='softmax') # Ld = -Lb\n\n\t\t# Concatenate the logits over all GPU:\n\t\tlogits_utility = tf.concat(logits_utility_lst, 0)\n\t\tlogits_budget = tf.concat(logits_budget_lst, 0)\n\t\t# acc\n\t\taccuracy_utility = accuracy(logits_utility, utility_labels_placeholder)\n\t\taccuracy_budget = accuracy(logits_budget, budget_labels_placeholder)\n\t\t# count how many testing samples are classified correctly:\n\t\tright_count_utility_op = correct_num(logits_utility, utility_labels_placeholder)\n\t\tright_count_budget_op = correct_num(logits_budget, budget_labels_placeholder)\n\n\t\t# operations on each budget model:\n\t\tloss_budget_op_lst = []\n\t\taccuracy_budget_list = []\n\t\tright_count_budget_op_lst = []\n\t\t# for each mobile-net:\n\t\tfor multiplier in multiplier_lst: # multiplier_lst has M elements -> each is the channel depth of a mobile net.\n\t\t\t# loss of each model:\n\t\t\tloss_budget_op_each_model = tf.reduce_mean(loss_budget_lst_dct[str(multiplier)]) # mean loss over multi-gpu of a certain mobile-net.\n\t\t\tloss_budget_op_lst.append(loss_budget_op_each_model)\n\t\t\t# logits of each model:\n\t\t\tbudget_logits_each_model = 
tf.concat(logits_budget_lst_dct['{}'.format(multiplier)], 0) # same budget model, concatenate over GPUs.\n\t\t\t# acc of each model\n\t\t\taccuracy_budget_each_model = accuracy(budget_logits_each_model, budget_labels_placeholder)\n\t\t\taccuracy_budget_list.append(accuracy_budget_each_model)\n\t\t\t# right count of each model:\n\t\t\tright_count_op = correct_num(budget_logits_each_model, budget_labels_placeholder)\n\t\t\tright_count_budget_op_lst.append(right_count_op)\n\n\t\t'''\n\t\tThe only thing changed here is tower_grads_degrad, the second parameter of the create_grad_accum_for_late_update function.\n\t\tThe trainable variable list needn't be changed, it is still varlist_degrad, which is the variables in fd network.\n\t\tThe loss is changed:\n\t\tzero_ops_degrad, accum_ops_degrad, apply_gradient_op_degrad <- tower_grads_degrad <- grads_degrade <- loss_degrad\n\t\tSo, we only need to change the definition of 'loss_degrad' in 'create_architecture_adversarial' function in main_import.py.\n\t\t'''\n\t\tzero_ops_degrad, accum_ops_degrad, apply_gradient_op_degrad = create_grad_accum_for_late_update(opt_degrad, tower_grads_degrad, varlist_degrad, global_step, decay_with_global_step=True)\n\t\tzero_ops_budget, accum_ops_budget, apply_gradient_op_budget = create_grad_accum_for_late_update(opt_budget, tower_grads_budget, varlist_budget, global_step, decay_with_global_step=False)\n\t\tzero_ops_utility, accum_ops_utility, apply_gradient_op_utility = create_grad_accum_for_late_update(opt_utility, tower_grads_utility, varlist_utility+varlist_degrad, global_step, decay_with_global_step=False)\n\n\t\ttr_videos_op, tr_action_labels_op, tr_actor_labels_op = create_videos_reading_ops(is_train=True, is_val=False, cfg=cfg)\n\t\tval_videos_op, val_action_labels_op, val_actor_labels_op = create_videos_reading_ops(is_train=False, is_val=True, cfg=cfg)\n\n\t# session config:\n\tconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n\tconfig.gpu_options.allow_growth = True\n\t# run session:\n\twith tf.Session(graph=graph, config=config) as sess:\n\t\tcoord = tf.train.Coordinator()\n\t\tthreads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\t\tinit_op = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())\n\t\tsess.run(init_op)\n\n\t\t# load ckpts: \n\t\tif use_pretrained_model: # load ckpts from pretrained fd and fT.(By run_pretraining_degrad and run_pretraining_utility functions.)\n\t\t\t# fT and fd part:\n\t\t\trestore_model_ckpt(sess, FLAGS.deg_target_models, varlist_utility+varlist_degrad) # FLAGS.deg_target_models is the dir storing ckpt of theta_T and theta_d\n\t\t\t# fb part:\n\t\t\trestore_model_ckpt(sess, FLAGS.budget_models, varlist_budget)\n\t\telse: # load ckpts from previous training stage of this run_adversarial_training function.\n\t\t\tsaver = tf.train.Saver(tf.trainable_variables())\n\t\t\tckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)\n\t\t\tif ckpt and ckpt.model_checkpoint_path:\n\t\t\t\tsaver.restore(sess, ckpt.model_checkpoint_path)\n\t\t\t\tprint('Session restored from trained model at {}!'.format(ckpt.model_checkpoint_path))\n\t\t\telse:\n\t\t\t\traise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), ckpt_dir)\n\t\t\n\t\t# saver and summary files:\n\t\tsaver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=10)\n\t\tloss_summary_file, validation_train_set_summary_file, validation_val_set_summary_file, model_restarting_summary_file = create_summary_files(summary_dir)\n\n\t\t# Adversarial 
training loop:\n\t\tfor step in xrange(cfg['TRAIN']['TOP_MAXSTEP']):\n\t\t\t\n\t\t\t# Part 0: Model restarting\n\t\t\tif ( step != 0 and (FLAGS.use_restarting and step % FLAGS.restarting_step == 0) ):\n\t\t\t\t# reinitialize fb:\n\t\t\t\tbudget_varlist = [v for v in tf.trainable_variables() if any(x in v.name for x in [\"BudgetModule\"])]\n\t\t\t\tinit_budget_op = tf.variables_initializer(budget_varlist)\n\t\t\t\tsess.run(init_budget_op)\n\t\t\t\t# finish reinitializing fb\n\t\t\t\t# Train theta_B using Lb(X,Y_B) for FLAGS.retraining_step steps:\n\t\t\t\tfor Restarting_step in range(0, FLAGS.retraining_step):\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tacc_util_lst, acc_budget_lst, loss_degrad_lst, loss_utility_lst, loss_budget_lst = [], [], [], [], []\n\t\t\t\t\tsess.run(zero_ops_budget)\n\t\t\t\t\t# accumulating gradient for late update:\n\t\t\t\t\tfor _ in itertools.repeat(None, 20):\n\t\t\t\t\t\t# placeholder inputs:\n\t\t\t\t\t\ttr_videos, tr_action_labels, tr_actor_labels = sess.run([tr_videos_op, tr_action_labels_op, tr_actor_labels_op])\n\t\t\t\t\t\t# run operations:\n\t\t\t\t\t\t_, acc_util, acc_budget, loss_degrad_value, loss_utility_value, loss_budget_value = sess.run(\n\t\t\t\t\t\t\t[accum_ops_budget, accuracy_utility, accuracy_budget, loss_degrad_op, loss_utility_op, loss_budget_op],\n\t\t\t\t\t\t\tfeed_dict={videos_placeholder: tr_videos,\n\t\t\t\t\t\t\t\t\t\tutility_labels_placeholder: tr_action_labels,\n\t\t\t\t\t\t\t\t\t\tbudget_labels_placeholder: tr_actor_labels,\n\t\t\t\t\t\t\t\t\t\tdropout_placeholder: 1.0,})\n\t\t\t\t\t\t# append:\n\t\t\t\t\t\tacc_util_lst.append(acc_util)\n\t\t\t\t\t\tacc_budget_lst.append(acc_budget)\n\t\t\t\t\t\tloss_degrad_lst.append(loss_degrad_value)\n\t\t\t\t\t\tloss_utility_lst.append(loss_utility_value)\n\t\t\t\t\t\tloss_budget_lst.append(loss_budget_value)\n\t\t\t\t\t# finish accumulating gradient for late update\n\t\t\t\t\t# after accumulating gradient, do the update on fb:\n\t\t\t\t\tsess.run(apply_gradient_op_budget)\n\t\t\t\t\t# finish update on fb\n\n\t\t\t\t\tassert not np.isnan(np.mean(loss_degrad_lst)), 'Model diverged with loss = NaN'\n\n\t\t\t\t\t# loss summary:\n\t\t\t\t\tloss_summary = 'Restarting (Budget), Step: {:4d}, Restarting_step: {:4d}, time: {:.4f}, budget loss: {:.8f}, ' \\\n\t\t\t\t\t\t\t\t\t'training budget accuracy: {:.5f}, utility loss: {:.8f}, training utility accuracy: {:.5f}'.format(\n\t\t\t\t\t\t\t\t\tstep, Restarting_step, time.time() - start_time, \n\t\t\t\t\t\t\t\t\tnp.mean(loss_budget_lst), np.mean(acc_budget_lst), np.mean(loss_utility_lst), np.mean(acc_util_lst))\n\t\t\t\t\t\n\t\t\t\t\tmodel_restarting_summary_file.write(loss_summary + '\\n')\n\t\t\t\t\tprint(loss_summary)\n\t\t\t\t\t# end of loss summary\n\t\t\t\t\t\n\t\t\t\t# finish training theta_B using Lb(X,Y_B) for FLAGS.retraining_step steps.\n\t\t\t\tprint('')\n\t\t\t\tloss_summary_file.write('\\n')\n\t\t\t# End part 0\n\n\n\t\t\t# Part 3: train Fb using L_b (cross entropy)\n\t\t\tfor L_b_step in range(0, cfg['TRAIN']['L_B_MAXSTEP']):\n\n\t\t\t\t# max step: optimize theta_d using L_b(X,Y_B)\n\t\t\t\tfor L_b_max_step in range(0, cfg['TRAIN']['L_B_MAX_PART_STEP']):\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tacc_util_lst, acc_budget_lst, loss_utility_lst, loss_budget_lst = [], [], [], []\n\t\t\t\t\tsess.run(zero_ops_degrad)\n\t\t\t\t\t# accumulating gradient for late update:\n\t\t\t\t\tfor _ in itertools.repeat(None, FLAGS.n_minibatches):\n\t\t\t\t\t\t# placeholder inputs:\n\t\t\t\t\t\ttr_videos, tr_action_labels, tr_actor_labels = 
sess.run([tr_videos_op, tr_action_labels_op, tr_actor_labels_op])\n\t\t\t\t\t\t# run operations:\n\t\t\t\t\t\t_, acc_util, acc_budget, loss_utility_value, loss_budget_value = sess.run(\n\t\t\t\t\t\t\t\t\t\t[accum_ops_degrad, accuracy_utility, accuracy_budget, loss_utility_op, loss_budget_op],\n\t\t\t\t\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\t\t\t\t\t\tvideos_placeholder: tr_videos,\n\t\t\t\t\t\t\t\t\t\t\t\tutility_labels_placeholder: tr_action_labels,\n\t\t\t\t\t\t\t\t\t\t\t\tbudget_labels_placeholder: tr_actor_labels,\n\t\t\t\t\t\t\t\t\t\t\t\tdropout_placeholder: 1.0,})\n\t\t\t\t\t\t# append loss and acc for budget model:\n\t\t\t\t\t\tacc_util_lst.append(acc_util)\n\t\t\t\t\t\tacc_budget_lst.append(acc_budget)\n\t\t\t\t\t\tloss_utility_lst.append(loss_utility_value)\n\t\t\t\t\t\tloss_budget_lst.append(loss_budget_value)\n\t\t\t\t\t# finish accumulating gradient for late update\n\t\t\t\t\t# after accumulating gradient, do the update on fd:\n\t\t\t\t\t_ = sess.run([apply_gradient_op_degrad])\n\t\t\t\t\t# finish update on fd\n\n\t\t\t\t\tassert not np.isnan(np.mean(loss_budget_value)), 'Model diverged with loss = NaN'\n\n\t\t\t\t\t# loss summary:\n\t\t\t\t\tif L_b_max_step % cfg['TRAIN']['L_B_MAX_PRINT_STEP'] == 0:\n\t\t\t\t\t\tloss_summary = 'Alternating Training (Budget L_b MAX), Step: {:4d}, L_b_step: {:4d}, L_b_max_step: {:4d} time: {:.4f}, ' \\\n\t\t\t\t\t\t\t\t\t'training utility accuracy: {:.5f}, training budget accuracy: {:.5f}, ' \\\n\t\t\t\t\t\t\t\t\t'utility loss: {:.8f}, budget loss: {:.8f}'.format(\n\t\t\t\t\t\t\t\t\tstep, L_b_step, L_b_max_step, time.time() - start_time, \n\t\t\t\t\t\t\t\t\tnp.mean(acc_util_lst), np.mean(acc_budget_lst),\n\t\t\t\t\t\t\t\t\tnp.mean(loss_utility_lst), np.mean(loss_budget_lst)\n\t\t\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\tprint(loss_summary)\n\t\t\t\t\t\tloss_summary_file.write(loss_summary + '\\n')\n\t\t\t\t\t# end loss summary\n\t\t\t\tprint()\n\t\t\t\t# End max step\n\t\t\t\t# min step: optimize theta_b using L_b(X,Y_B)\n\t\t\t\tfor L_b_min_step in range(0, cfg['TRAIN']['L_B_MIN_PART_STEP']):\n\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\tsess.run(zero_ops_budget)\n\t\t\t\t\tacc_budget_lst, loss_budget_lst = [], []\n\t\t\t\t\t# accumulating gradient for late update:\n\t\t\t\t\tfor _ in itertools.repeat(None, FLAGS.n_minibatches):\n\t\t\t\t\t\t# placeholder inputs:\n\t\t\t\t\t\ttr_videos, tr_actor_labels = sess.run([tr_videos_op, tr_actor_labels_op])\n\t\t\t\t\t\t# run operations:\n\t\t\t\t\t\t_, acc_budget, loss_budget_value = sess.run(\n\t\t\t\t\t\t\t\t\t[accum_ops_budget, accuracy_budget, loss_budget_op],\n\t\t\t\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\t\t\t\t\tvideos_placeholder: tr_videos,\n\t\t\t\t\t\t\t\t\t\t\t# utility_labels_placeholder: tr_action_labels,\n\t\t\t\t\t\t\t\t\t\t\tbudget_labels_placeholder: tr_actor_labels,\n\t\t\t\t\t\t\t\t\t\t\tdropout_placeholder: 1.0,})\n\t\t\t\t\t\t# append loss and acc for budget model:\n\t\t\t\t\t\tacc_budget_lst.append(acc_budget)\n\t\t\t\t\t\tloss_budget_lst.append(loss_budget_value)\n\t\t\t\t\t# finish accumulating gradient for late update\n\t\t\t\t\t\n\t\t\t\t\tassert not np.isnan(np.mean(loss_budget_lst)), 'Model diverged with loss = NaN'\n\n\t\t\t\t\t# Monitoring fb using training set\n\t\t\t\t\tif L_b_min_step % cfg['TRAIN']['MONITOR_STEP'] == 0:\n\t\t\t\t\t\tif np.mean(acc_budget_lst) >= FLAGS.highest_budget_acc_val:\n\t\t\t\t\t\t\tprint(bcolors.OKGREEN + 'pass budget acc bar!\\n' + bcolors.ENDC)\n\t\t\t\t\t\t\tloss_summary_file.write('pass budget acc bar!\\n')\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t# End 
monitoring fb on training set.\n\n\t\t\t\t\t# after accumulating gradient, do the update on fb, if it didn't pass the budget acc bar:\n\t\t\t\t\tsess.run([apply_gradient_op_budget])\n\t\t\t\t\t# finish update on fb\n\n\t\t\t\t\t# loss summary:\n\t\t\t\t\tif L_b_min_step % cfg['TRAIN']['MONITOR_STEP'] == 0:\n\t\t\t\t\t\tloss_summary = 'Alternating Training (Budget L_b MIN), Step: {:4d}, L_b_step: {:4d}, L_b_min_step: {:4d} time: {:.4f}, ' \\\n\t\t\t\t\t\t\t\t\t'training budget accuracy: {:.5f}, budget loss: {:.8f}'.format(\n\t\t\t\t\t\t\t\t\tstep, L_b_step, L_b_min_step, time.time() - start_time, \n\t\t\t\t\t\t\t\t\tnp.mean(acc_budget_lst), np.mean(loss_budget_lst)\n\t\t\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\tprint(loss_summary)\n\t\t\t\t\t\tloss_summary_file.write(loss_summary + '\\n')\n\t\t\t\t\t# end loss summary\n\t\t\t\t\n\t\t\tprint('')\n\t\t\tloss_summary_file.write('\\n')\n\t\t\t# End part 3\n\n\n\t\t\t# Part 2: End-to-end train Ft and Fd using L_T\n\t\t\tfor L_T_step in range(0, cfg['TRAIN']['L_T_MAXSTEP']):\n\n\t\t\t\t# Monitoring LT using validation set:\n\t\t\t\tif L_T_step % cfg['TRAIN']['MONITOR_STEP'] == 0:\n\t\t\t\t\tacc_util_lst, _ = run_validation(input_op_list=[val_videos_op, val_action_labels_op, val_actor_labels_op], \n\t\t\t\t\t\t\t\t\t\t\tsummary_file=loss_summary_file, \n\t\t\t\t\t\t\t\t\t\t\tsummary_info=\"Monitoring L_T:\\n\" \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"Step: %d, L_T_step: %d, time: {:.4f}, \" \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"validation utility accuracy: {:.5f}, validation budget accuracy: {:.5f}, \" \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"utility loss: {:.8f}, budget loss: {:.8f}\" % (step, L_T_step))\n\t\t\t\t\t# breaking condition: (if performance on L_T is still good)\n\t\t\t\t\tif np.mean(acc_util_lst) >= FLAGS.highest_util_acc_val:\n\t\t\t\t\t\tprint(bcolors.OKGREEN + 'pass utility acc bar!\\n' + bcolors.ENDC)\n\t\t\t\t\t\tloss_summary_file.write('pass utility acc bar!\\n')\n\t\t\t\t\t\tbreak\n\t\t\t\t# End of monitoring LT\n\n\t\t\t\t# Optimizing LT (if necessary) using training set: (This is one batch=FLAGS.n_minibatches, each minibatch has FLAGS.GPU_NUM*cfg['TRAIN']['BATCH_SIZE'] video clips.) 
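\n\t\t\t\t# The next block uses an accumulate-then-apply pattern (op names are from this\n\t\t\t\t# file; the description is an editorial assumption): zero_ops_utility clears the\n\t\t\t\t# gradient buffers, each accum_ops_utility run adds one minibatch's gradients\n\t\t\t\t# into them, and apply_gradient_op_utility takes a single optimizer step on the\n\t\t\t\t# accumulated sum, emulating a batch too large to fit on the GPUs at once.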
\n\t\t\t\tstart_time = time.time()\n\t\t\t\tsess.run(zero_ops_utility)\n\t\t\t\tacc_util_lst, acc_budget_lst, loss_utility_lst, loss_budget_lst = [], [], [], []\n\t\t\t\t# accumulating gradient for late update:\n\t\t\t\tfor _ in itertools.repeat(None, FLAGS.n_minibatches):\n\t\t\t\t\ttr_videos, tr_action_labels, tr_actor_labels = sess.run([tr_videos_op, tr_action_labels_op, tr_actor_labels_op])\n\t\t\t\t\t_, acc_util, acc_budget, loss_utility_value, loss_budget_value = sess.run(\n\t\t\t\t\t\t\t\t[accum_ops_utility, accuracy_utility, accuracy_budget, loss_utility_op, loss_budget_op],\n\t\t\t\t\t\t\t\tfeed_dict={videos_placeholder: tr_videos,\n\t\t\t\t\t\t\t\t\t\t\tutility_labels_placeholder: tr_action_labels,\n\t\t\t\t\t\t\t\t\t\t\tbudget_labels_placeholder: tr_actor_labels,\n\t\t\t\t\t\t\t\t\t\t\tdropout_placeholder: 0.5,})\n\t\t\t\t\tacc_util_lst.append(acc_util)\n\t\t\t\t\tacc_budget_lst.append(acc_budget)\n\t\t\t\t\tloss_utility_lst.append(loss_utility_value)\n\t\t\t\t\tloss_budget_lst.append(loss_budget_value)\n\t\t\t\t# finish accumulating gradient for late update\n\t\t\t\t# after accumulating gradient, do the update on fT and fd:\n\t\t\t\tsess.run([apply_gradient_op_utility])\n\t\t\t\t# finish update on fT and fd\n\n\t\t\t\tassert not np.isnan(np.mean(loss_utility_lst)), 'Model diverged with loss = NaN'\n\n\t\t\t\t# loss summary:\n\t\t\t\tloss_summary = 'Alternating Training (Utility), Step: {:4d}, L_T_step: {:4d}, time: {:.4f}, ' \\\n\t\t\t\t\t\t\t'training utility accuracy: {:.5f}, training budget accuracy: {:.5f}, ' \\\n\t\t\t\t\t\t\t'utility loss: {:.8f}, budget loss: {:.8f}'.format(\n\t\t\t\t\t\t\tstep, L_T_step, time.time() - start_time,\n\t\t\t\t\t\t\tnp.mean(acc_util_lst), np.mean(acc_budget_lst),\n\t\t\t\t\t\t\tnp.mean(loss_utility_lst), np.mean(loss_budget_lst)\n\t\t\t\t\t\t\t)\n\n\t\t\t\tprint(loss_summary)\n\t\t\t\tloss_summary_file.write(loss_summary + '\\n')\n\t\t\t\t# end of loss summary\n\t\t\t\t# End of optimizing LT.\n\t\t\t\n\t\t\tprint('')\n\t\t\tloss_summary_file.write('\\n')\n\t\t\t# End part 2\n\n\n\t\t\t# Do validation (on training set and validation set):\n\t\t\tif step % cfg['TRAIN']['VAL_STEP'] == 0:\n\t\t\t\t\n\t\t\t\trun_validation(input_op_list=[tr_videos_op, tr_action_labels_op, tr_actor_labels_op], \n\t\t\t\t\t\t\tsummary_file=validation_train_set_summary_file, \n\t\t\t\t\t\t\tsummary_info=\"Validation train_set summary\\n\" \\\n\t\t\t\t\t\t\t\t\t\t\"Step: %d, time: {:.4f}, \" \\\n\t\t\t\t\t\t\t\t\t\t\"training utility accuracy: {:.5f}, training budget accuracy: {:.5f}, \" \\\n\t\t\t\t\t\t\t\t\t\t\"utility loss: {:.8f}, budget loss: {:.8f}\" % step)\n\t\t\t\trun_validation(input_op_list=[val_videos_op, val_action_labels_op, val_actor_labels_op], \n\t\t\t\t\t\t\tsummary_file=validation_val_set_summary_file, \n\t\t\t\t\t\t\tsummary_info=\"Validation val_set summary\\n\" \\\n\t\t\t\t\t\t\t\t\t\t\"Step: %d, time: {:.4f}, \" \\\n\t\t\t\t\t\t\t\t\t\t\"validation utility accuracy: {:.5f}, validation budget accuracy: {:.5f}, \" \\\n\t\t\t\t\t\t\t\t\t\t\"utility loss: {:.8f}, budget loss: {:.8f}\" % step)\n\n\t\t\t# End evaluation\n\t\t\t# Save ckpt for kb_adversarial learning:\n\t\t\tif step % cfg['TRAIN']['SAVE_STEP'] == 0 or (step + 1) == cfg['TRAIN']['TOP_MAXSTEP']:\n\t\t\t\tcheckpoint_path = os.path.join(ckpt_dir, 'model.ckpt')\n\t\t\t\tsaver.save(sess, checkpoint_path, global_step=step)\n\t\t\t# End 
evaluation\n\n\t\tloss_summary_file.close()\n\t\tvalidation_train_set_summary_file.close()\n\t\tvalidation_val_set_summary_file.close()\n\t\tcoord.request_stop()\n\t\tcoord.join(threads)\n\tprint(\"done\")\n\n# Testing the degradation model: eval+testing\ndef run_adversarial_testing(cfg):\n\t'''\n\tRun testing of the trained model (direct test without any retraining, different from the two-fold-evaluation proposed in the paper)\n\tIt will give the utility task accuracy and the privacy budget task accuracy\n\t'''\n\t# initialize multiplier_lst, logits_budget_lst_dct, loss_budget_lst_dct, which are used in both the graph and the session:\n\t# The depth multiplier list for creating different budget models ensemble (MobileNet with different depth.)\n\tmultiplier_lst = [0.60 - i * 0.02 for i in range(FLAGS.NBudget)]\n\t# The dict of logits and loss for each different budget model to get accuracy\n\tlogits_budget_lst_dct = {str(multiplier): [] for multiplier in multiplier_lst}\n\tloss_budget_lst_dct = {str(multiplier): [] for multiplier in multiplier_lst}\n\t# end initializing multiplier_lst, logits_budget_lst_dct, loss_budget_lst_dct.\n\n\tgraph = tf.Graph()\n\twith graph.as_default():\n\t\t# placeholder inputs:\n\t\tvideos_placeholder, utility_labels_placeholder, budget_labels_placeholder, dropout_placeholder, _ = placeholder_inputs(cfg['TEST']['BATCH_SIZE'] * FLAGS.GPU_NUM, cfg)\n\n\t\t# Compute Acc\n\t\tlogits_utility_lst, logits_budget_lst = [], []\n\n\t\twith tf.variable_scope(tf.get_variable_scope()) as scope:\n\t\t\t# get the logits_budget and logits_utility on each gpu:\n\t\t\tfor gpu_index in range(0, FLAGS.GPU_NUM):\n\t\t\t\twith tf.device('/gpu:%d' % gpu_index):\n\t\t\t\t\tprint('/gpu:%d' % gpu_index)\n\t\t\t\t\twith tf.name_scope('%s_%d' % ('gpu', gpu_index)) as scope:\n\t\t\t\t\t\tvideos = videos_placeholder[gpu_index * cfg['TEST']['BATCH_SIZE']:(gpu_index + 1) * cfg['TEST']['BATCH_SIZE']]\n\t\t\t\t\t\tutility_labels = utility_labels_placeholder[gpu_index * cfg['TEST']['BATCH_SIZE']:(gpu_index + 1) * cfg['TEST']['BATCH_SIZE']]\n\t\t\t\t\t\tbudget_labels = budget_labels_placeholder[gpu_index * cfg['TEST']['BATCH_SIZE']:(gpu_index + 1) * cfg['TEST']['BATCH_SIZE']]\n\t\t\t\t\t\t_, _, _, logits_budget, logits_utility, _ = create_architecture_adversarial(cfg, cfg['TEST']['BATCH_SIZE'], multiplier_lst, logits_budget_lst_dct, loss_budget_lst_dct, scope, videos, utility_labels, budget_labels, dropout_placeholder)\n\t\t\t\t\t\tlogits_budget_lst.append(logits_budget)\n\t\t\t\t\t\tlogits_utility_lst.append(logits_utility)\n\t\t\t\t\t\t# print('len(logits_utility_lst):', len(logits_utility_lst))\n\t\t\t\t\t\ttf.get_variable_scope().reuse_variables()\n\n\t\t# concatnate the logits of each gpu:\n\t\tlogits_utility = tf.concat(logits_utility_lst, 0)\n\t\tlogits_budget = tf.concat(logits_budget_lst, 0)\n\t\t\n\t\t# count how many testing samples are classified correctly:\n\t\tright_count_utility_op = correct_num(logits_utility, utility_labels_placeholder)\n\t\tright_count_budget_op = correct_num(logits_budget, budget_labels_placeholder)\n\n\t\t# operations on each budget model:\n\t\tright_count_budget_op_lst = []\n\t\tfor multiplier in multiplier_lst:\n\t\t\t# right count of each model:\n\t\t\tbudget_logits_each_model = tf.concat(logits_budget_lst_dct['{}'.format(multiplier)], 0) # same budget model, concatenate over GPUs.\n\t\t\tright_count_op = correct_num(budget_logits_each_model, budget_labels_placeholder)\n\t\t\tright_count_budget_op_lst.append(right_count_op)\n\n\t\tvideos_op, 
action_labels_op, actor_labels_op = create_videos_reading_ops(is_train=False, is_val=False, cfg=cfg)\n\n\t# session config:\n\tconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n\tconfig.gpu_options.allow_growth = True\n\n\t# run session:\n\twith tf.Session(graph=graph, config=config) as sess:\n\t\t# initialization:\n\t\tinit_op = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())\n\t\tsess.run(init_op)\n\t\t# initialization part should be put outside the multi-threads part! But why?\n\t\t\n\t\t# multi-threads:\n\t\tcoord = tf.train.Coordinator()\n\t\tthreads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\t\t\n\t\t# Create a saver for loading trained checkpoints:\n\t\tsaver = tf.train.Saver(tf.trainable_variables())\n\t\tckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)\n\t\t# load trained checkpoints:\n\t\tif ckpt and ckpt.model_checkpoint_path:\n\t\t\tsaver.restore(sess, ckpt.model_checkpoint_path)\n\t\t\tprint('Session restored from trained model at {}!'.format(ckpt.model_checkpoint_path))\n\t\telse:\n\t\t\traise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), ckpt_dir)\n\t\t\n\t\ttotal_v = 0.0 # total number of testing samples\n\n\t\ttest_correct_num_utility = 0.0 # how many testing samples get correct utility label prediction.\n\t\ttest_correct_num_budget = 0.0 # how many testing samples get correct budget label prediction.\n\t\ttest_correct_num_budget_lst = [0.0] * FLAGS.NBudget\n\n\n\t\tprint('coord.should_stop():', coord.should_stop())\n\t\ttry:\n\t\t\tc = 0\n\t\t\tbatch_size = cfg['TEST']['BATCH_SIZE'] * FLAGS.GPU_NUM\n\t\t\twhile not coord.should_stop():\n\t\t\t\tc += 1\n\t\t\t\tprint('in while loop ', str(c))\n\t\t\t\t# input operations:\n\t\t\t\ttest_videos, test_action_labels, test_actor_labels = sess.run([videos_op, action_labels_op, actor_labels_op])\n\t\t\t\ttotal_v += test_action_labels.shape[0]\n\t\t\t\t# padding:\n\t\t\t\tif test_videos.shape[0] < batch_size: # the last batch of testing data\n\t\t\t\t\ttest_videos = np.pad(test_videos, ((0,batch_size-test_videos.shape[0]),(0,0),(0,0),(0,0),(0,0)), 'constant', constant_values=0)\n\t\t\t\t\ttest_actor_labels = np.pad(test_actor_labels, ((0,batch_size-test_actor_labels.shape[0])), 'constant', constant_values=-1)\n\t\t\t\t\ttest_action_labels = np.pad(test_action_labels, ((0,batch_size-test_action_labels.shape[0])), 'constant', constant_values=-1)\n\t\t\t\t# the padded videos will never be true, since it can never be classified as -1\n\t\t\t\tprint('test_videos:', test_videos.shape)\n\t\t\t\tprint('test_action_labels:', test_action_labels.shape)\n\t\t\t\tprint('test_actor_labels:', test_actor_labels.shape)\n\t\t\t\t# placeholders:\n\t\t\t\tfeed_dict = {videos_placeholder: test_videos, budget_labels_placeholder: test_actor_labels,\n\t\t\t\t\t\tutility_labels_placeholder: test_action_labels,\n\t\t\t\t\t\tdropout_placeholder: 1.0}\n\t\t\t\t# feed dorward:\n\t\t\t\tright_counts = sess.run([right_count_utility_op, right_count_budget_op] + right_count_budget_op_lst, feed_dict=feed_dict)\n\t\t\t\tprint('right_counts:', right_counts)\n\n\t\t\t\ttest_correct_num_utility += right_counts[0]\n\t\t\t\ttest_correct_num_budget += right_counts[1]\n\t\t\t\t# testing acc for each one of N budget models:\n\t\t\t\tfor i in range(FLAGS.NBudget):\n\t\t\t\t\ttest_correct_num_budget_lst[i] += right_counts[i + 2]\n\t\t\t\t# end testing acc for each one of N budget models.\n\t\t\t# end try\n\t\texcept tf.errors.OutOfRangeError:\n\t\t\tprint('Done testing on all 
the examples')\n\t\tfinally:\n\t\t\tcoord.request_stop()\n\n\t\t# print and write file:\n\t\ttest_result_str = ('test_acc_utility: {}, test_correct_num_utility: {}, total_v: {}\\n' \n\t\t\t\t\t\t'test_acc_budget: {}, test_correct_num_budget: {}, total_v: {}\\n').format(\n\t\t\t\t\t\ttest_correct_num_utility/total_v, test_correct_num_utility, total_v,\n\t\t\t\t\t\ttest_correct_num_budget/total_v, test_correct_num_budget, total_v)\n\t\tprint(test_result_str)\n\t\tif not os.path.exists(test_result_dir):\n\t\t\tos.makedirs(test_result_dir)\n\t\ttest_result_file = open(test_result_dir+'/EvaluationResuls.txt', 'w')\n\t\t# write testing result to file:\n\t\ttest_result_file.write(test_result_str)\n\n\t\tfor i in range(FLAGS.NBudget):\n\t\t\ttest_result_file.write('Budget{} test acc: {},\\ttest_correct_num: {}\\t: total_v: {}\\n'.format(\n\t\t\t\tmultiplier_lst[i], test_correct_num_budget_lst[i] / total_v,\n\t\t\t\ttest_correct_num_budget_lst[i], total_v))\n\t\t# finish writing testing result to file.\n\t\tcoord.join(threads)\n\t\tsess.close()\n\ndef main():\n\t# config:\n\tcfg = yaml.load(open('params.yml'))\n\tpp = pprint.PrettyPrinter()\n\t# pp.pprint(FLAGS.__flags)\n\t# pp.pprint(cfg)\n\n\t# adverserial training:\n\trun_adversarial_training(cfg)\n\n\t# testing learned fd:\n\t# run_adversarial_testing(cfg)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"SBU-exp/legacy/main_alter_update.py","file_name":"main_alter_update.py","file_ext":"py","file_size_in_byte":32848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"582385218","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\n__all__ = [\n \"test_gradient\", \"test_prediction\", \"test_repeated_prediction_cache\",\n \"test_apply_inverse\",\n]\n\nimport numpy as np\n\nfrom george import kernels, GP, BasicSolver, HODLRSolver\n\n\ndef _test_gradient(seed=123, N=100, ndim=3, eps=1.32e-3, solver=BasicSolver,\n **kwargs):\n np.random.seed(seed)\n\n # Set up the solver.\n kernel = 1.0 * kernels.ExpSquaredKernel(0.5, ndim=ndim)\n gp = GP(kernel, solver=solver, **kwargs)\n\n # Sample some data.\n x = np.random.rand(N, ndim)\n y = gp.sample(x)\n gp.compute(x, yerr=0.1)\n\n # Compute the initial gradient.\n grad0 = gp.grad_log_likelihood(y)\n vector = gp.get_parameter_vector()\n\n for i, v in enumerate(vector):\n # Compute the centered finite difference approximation to the gradient.\n vector[i] = v + eps\n gp.set_parameter_vector(vector)\n lp = gp.lnlikelihood(y)\n\n vector[i] = v - eps\n gp.set_parameter_vector(vector)\n lm = gp.lnlikelihood(y)\n\n vector[i] = v\n gp.set_parameter_vector(vector)\n\n grad = 0.5 * (lp - lm) / eps\n assert np.abs(grad - grad0[i]) < 5 * eps, \\\n \"Gradient computation failed in dimension {0} ({1})\\n{2}\" \\\n .format(i, solver.__name__, np.abs(grad - grad0[i]))\n\n\ndef test_gradient(**kwargs):\n _test_gradient(solver=BasicSolver, **kwargs)\n _test_gradient(solver=HODLRSolver, **kwargs)\n\n _test_gradient(solver=BasicSolver, white_noise=0.1, fit_white_noise=True,\n **kwargs)\n _test_gradient(solver=HODLRSolver, white_noise=0.1, fit_white_noise=True,\n **kwargs)\n\n\ndef _test_prediction(solver=BasicSolver):\n \"\"\"Basic sanity checks for GP regression.\"\"\"\n\n kernel = kernels.ExpSquaredKernel(1.0)\n gp = GP(kernel, solver=solver)\n\n x = np.array((-1, 0, 1))\n gp.compute(x)\n\n y = x/x.std()\n mu, cov = gp.predict(y, x)\n\n assert np.allclose(y, mu), \\\n \"GP must predict noise-free training data exactly ({0}).\\n({1})\" 
\\\n .format(solver.__name__, y - mu)\n\n assert np.all(cov > -1e-15), \\\n \"Covariance matrix must be nonnegative ({0}).\\n{1}\" \\\n .format(solver.__name__, cov)\n\n var = np.diag(cov)\n assert np.allclose(var, 0), \\\n \"Variance must vanish at noise-free training points ({0}).\\n{1}\" \\\n .format(solver.__name__, var)\n\n t = np.array((-.5, .3, 1.2))\n var = np.diag(gp.predict(y, t)[1])\n assert np.all(var > 0), \\\n \"Variance must be positive away from training points ({0}).\\n{1}\" \\\n .format(solver.__name__, var)\n\n\ndef test_prediction(**kwargs):\n _test_prediction(solver=BasicSolver, **kwargs)\n _test_prediction(solver=HODLRSolver, **kwargs)\n\n\ndef test_repeated_prediction_cache():\n kernel = kernels.ExpSquaredKernel(1.0)\n gp = GP(kernel)\n\n x = np.array((-1, 0, 1))\n gp.compute(x)\n\n t = np.array((-.5, .3, 1.2))\n\n y = x/x.std()\n mu0, mu1 = (gp.predict(y, t, return_cov=False) for _ in range(2))\n assert np.array_equal(mu0, mu1), \\\n \"Identical training data must give identical predictions \" \\\n \"(problem with GP cache).\"\n\n y2 = 2*y\n mu2 = gp.predict(y2, t, return_cov=False)\n assert not np.array_equal(mu0, mu2), \\\n \"Different training data must give different predictions \" \\\n \"(problem with GP cache).\"\n\n a0 = gp._alpha\n gp.kernel[0] += 0.1\n gp.recompute()\n gp._compute_alpha(y2)\n a1 = gp._alpha\n assert not np.allclose(a0, a1), \\\n \"Different kernel parameters must give different alphas \" \\\n \"(problem with GP cache).\"\n\n mu, cov = gp.predict(y2, t)\n _, var = gp.predict(y2, t, return_var=True)\n assert np.allclose(np.diag(cov), var), \\\n \"The predictive variance must be equal to the diagonal of the \" \\\n \"predictive covariance.\"\n\n\ndef _test_apply_inverse(seed=1234, N=100, ndim=3, solver=BasicSolver,\n yerr=0.1):\n np.random.seed(seed)\n\n # Set up the solver.\n kernel = 1.0 * kernels.ExpSquaredKernel(0.5, ndim=ndim)\n gp = GP(kernel, solver=solver)\n\n # Sample some data.\n x = np.random.rand(N, ndim)\n y = gp.sample(x)\n gp.compute(x, yerr=yerr, sort=True)\n\n K = gp.get_matrix(x)\n K[np.diag_indices_from(K)] += yerr**2\n b1 = np.linalg.solve(K, y)\n b2 = gp.apply_inverse(y)\n assert np.allclose(b1, b2), \\\n \"Apply inverse with a sort isn't working\"\n\n\ndef test_apply_inverse(**kwargs):\n _test_apply_inverse(solver=BasicSolver, **kwargs)\n _test_apply_inverse(solver=HODLRSolver, **kwargs)\n","sub_path":"tests/test_gp.py","file_name":"test_gp.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"615941374","text":"# difflib --> It can be used for example, for comparing files, and can produce difference information in various formats\nimport difflib\n\n# Open 2 files for comparing \nwith open('file1.txt') as text1, open('file2.txt') as text2:\n # Compare line by line\n diff = difflib.ndiff(text1.readlines(), text2.readlines())\n # Store output in Output file\nwith open('output', 'w') as result:\n for line in diff:\n result.write(line)\n","sub_path":"compare_two_files_and_show_difference.py","file_name":"compare_two_files_and_show_difference.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"217725730","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, request, abort, Response, json\nfrom flask_cors import CORS\nimport uuid, os\nimport configparser\nimport subprocess\nimport shlex\nimport re\n\napp = 
Flask(__name__)\nCORS(app)\n\nglobal busy\nbusy=0\n\nAM_PATH = '/opt/models/AM'\nLM_PATH = '/opt/models/LM'\nTEMP_FILE_PATH = '/opt/tmp' #/opt/wavs\nTEMP_FILE_PATH1= '/opt/models'\n\n\ndef dockerId():\n with open('/proc/self/cgroup') as f:\n lines = f.readlines() \n for l in lines:\n if '/docker/' in l:\n return l.split('/')[2][:20]\n\ndef run_shell_command(command_line):\n try:\n command_line_args = shlex.split(command_line)\n process = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output, error = process.communicate()\n return True, output\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n return False, ''\n except ValueError:\n print(\"data error.\")\n return False, ''\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n return False, ''\n\ndef decode(wav_file,wav_name):\n b,o=run_shell_command(\"sox \"+wav_file+\".wav -t wav -b 16 -r 16000 -c 1 \"+wav_file+\"_tmp.wav\")\n if not b:\n return False, ''\n b,o=run_shell_command(\"mv \"+wav_file+\"_tmp.wav \"+wav_file+\".wav\")\n if not b:\n return False, ''\n\n\n decode_conf = TEMP_FILE_PATH1+\"/online.conf\"\n decode_mdl = AM_PATH+\"/\"+AM_FILE_PATH+\"/final.mdl\"\n decode_graph = LM_PATH+\"/HCLG.fst\"\n decode_words = LM_PATH+\"/words.txt\"\n\n if DECODER_SYS == 'dnn3':\n b,o=run_shell_command(\"kaldi-nnet3-latgen-faster --do-endpointing=false --frame-subsampling-factor=\"+DECODER_FSF+\" --frames-per-chunk=20 --online=false --config=\"+decode_conf+\" --minimize=false --min-active=\"+DECODER_MINACT+\" --max-active=\"+DECODER_MAXACT+\" --beam=\"+DECODER_BEAM+\" --lattice-beam=\"+DECODER_LATBEAM+\" --acoustic-scale=\"+DECODER_ACWT+\" --word-symbol-table=\"+decode_words+\" \"+decode_mdl+\" \"+decode_graph+\" \\\"ark:echo \"+wav_name+\" \"+wav_name+\"|\\\" \\\"scp:echo \"+wav_name+\" \"+wav_file+\"|\\\" ark:\"+TEMP_FILE_PATH+\"/\"+wav_name+\".lat\")\n elif DECODER_SYS == 'dnn2' or DECODER_SYS == 'dnn':\n b,o=run_shell_command(\"kaldi-nnet2-latgen-faster --do-endpointing=false --online=false --config=\"+decode_conf+\" --min-active=\"+DECODER_MINACT+\" --max-active=\"+DECODER_MAXACT+\" --beam=\"+DECODER_BEAM+\" --lattice-beam=\"+DECODER_LATBEAM+\" --acoustic-scale=\"+DECODER_ACWT+\" --word-symbol-table=\"+decode_words+\" \"+decode_mdl+\" \"+decode_graph+\" \\\"ark:echo \"+wav_name+\" \"+wav_name+\"|\\\" \\\"scp:echo \"+wav_name+\" \"+wav_file+\"|\\\" ark:\"+TEMP_FILE_PATH+\"/\"+wav_name+\".lat\")\n else:\n b=False\n o='KaldiFatalError decode param is not recognized'\n\n if not b or 'KaldiFatalError' in o:\n print(o)\n return False, ''\n\n hypothesis = re.findall('\\n'+wav_name+'.*',o)\n #app.logger.info(hypothesis)\n o=re.sub(wav_name,'',hypothesis[0]).strip()\n o=re.sub(r\"#nonterm:[^ ]* \", \"\", o)\n\n return True, o\n\n@app.route('/transcribe', methods=['POST'])\ndef transcribe():\n global busy\n busy=1\n fileid = str(uuid.uuid4())\n if 'wavFile' in request.files.keys():\n file = request.files['wavFile']\n filename = TEMP_FILE_PATH+'/'+fileid+'.wav'\n file.save(filename)\n b, out = decode(filename,fileid)\n if not b:\n busy=0\n abort(403)\n else:\n busy=0\n return 'No wave file was uploaded', 404\n\n # Delete temporary files\n for file in os.listdir(TEMP_FILE_PATH):\n os.remove(TEMP_FILE_PATH+\"/\"+file)\n busy=0\n json_string = json.dumps(out, ensure_ascii=False)\n return Response(json_string,content_type=\"application/json; charset=utf-8\" ), 200\n\n@app.route('/check', methods=['GET'])\ndef check():\n return '1', 200\n\n@app.route('/stop', methods=['POST'])\ndef stop():\n 
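# Spin until any in-flight /transcribe request has cleared the module-global busy\n # flag, then kill PID 1; assuming the service runs as the init process of its\n # Docker container (see dockerId above), this stops the whole container.\n 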
while(busy==1):\n continue\n subprocess.call(\"kill 1\",shell=True)\n return '1', 200\n\nif __name__ == '__main__':\n SERVICE_PORT = os.environ['SERVICE_PORT']\n\n #Decoder parameters applied for both GMM and DNN based ASR systems\n decoder_settings = configparser.ConfigParser()\n decoder_settings.read(AM_PATH+'/decode.cfg')\n DECODER_SYS = decoder_settings.get('decoder_params', 'decoder')\n AM_FILE_PATH = decoder_settings.get('decoder_params', 'ampath')\n DECODER_MINACT = decoder_settings.get('decoder_params', 'min_active')\n DECODER_MAXACT = decoder_settings.get('decoder_params', 'max_active')\n DECODER_BEAM = decoder_settings.get('decoder_params', 'beam')\n DECODER_LATBEAM = decoder_settings.get('decoder_params', 'lattice_beam')\n DECODER_ACWT = decoder_settings.get('decoder_params', 'acwt')\n DECODER_FSF = decoder_settings.get('decoder_params', 'frame_subsampling_factor')\n\n #Prepare config files\n AM_FINAL_PATH=AM_PATH+\"/\"+AM_FILE_PATH\n with open(AM_FINAL_PATH+\"/conf/online.conf\") as f:\n values = f.readlines()\n with open(TEMP_FILE_PATH1+\"/online.conf\", 'w') as f:\n for i in values:\n f.write(i)\n f.write(\"--ivector-extraction-config=\"+TEMP_FILE_PATH1+\"/ivector_extractor.conf\\n\")\n f.write(\"--mfcc-config=\"+AM_FINAL_PATH+\"/conf/mfcc.conf\")\n\n with open(AM_FINAL_PATH+\"/conf/ivector_extractor.conf\") as f:\n values = f.readlines()\n with open(TEMP_FILE_PATH1+\"/ivector_extractor.conf\", 'w') as f:\n for i in values:\n f.write(i)\n f.write(\"--splice-config=\"+AM_FINAL_PATH+\"/conf/splice.conf\\n\")\n f.write(\"--cmvn-config=\"+AM_FINAL_PATH+\"/conf/online_cmvn.conf\\n\")\n f.write(\"--lda-matrix=\"+AM_FINAL_PATH+\"/ivector_extractor/final.mat\\n\")\n f.write(\"--global-cmvn-stats=\"+AM_FINAL_PATH+\"/ivector_extractor/global_cmvn.stats\\n\")\n f.write(\"--diag-ubm=\"+AM_FINAL_PATH+\"/ivector_extractor/final.dubm\\n\")\n f.write(\"--ivector-extractor=\"+AM_FINAL_PATH+\"/ivector_extractor/final.ie\")\n\n\n #Run server\n app.run(host='0.0.0.0', port=SERVICE_PORT, debug=True, threaded=False, processes=1)\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"20143349","text":"import datetime\nimport urllib.request\nimport json\n\n\ndef main(context):\n \"\"\"\n Fetch the information about the available releases.\n\n To set up the extension, use:\n\n ```yaml\n releases:\n kind: github\n repo_url: datapythonista/pysuerga\n ```\n\n This will make available in the context the next information:\n\n ```\n {% for version in releases.versions %}\n {{ version.name }}\n {{ version.tag }}\n {{ version.published }}\n {{ version.url }}\n {% endfor %}\n ```\n \"\"\"\n context['releases']['versions'] = []\n config = context['releases']\n if config.get('kind') == 'github':\n url = f'https://api.github.com/repos/{config[\"repo_url\"]}/releases'\n resp = json.loads(urllib.request.urlopen(url).read())\n for version in resp:\n if version['prerelease']:\n continue\n context['releases']['versions'].append({\n 'name': version['tag_name'].lstrip('v'),\n 'tag': version['tag_name'],\n 'published': datetime.datetime.strptime(version['published_at'],\n '%Y-%m-%dT%H:%M:%SZ'),\n 'url': version['assets'][0]['browser_download_url'] if version['assets'] else ''})\n\n else:\n raise ValueError(\n 'The value of releases.config in congif.yml is missing or unknown. '\n 'Found: \"{config.get(\"kind\")}\". 
Supported: \"github\"'\n )\n return context\n","sub_path":"pysuerga/contrib/releases.py","file_name":"releases.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"205570060","text":"# Atom object\n# Derek Fujimoto\n# April 2017\n\nfrom inspect import currentframe, getframeinfo\nimport operator\nfrom numpy import array, concatenate\nimport numpy as np\nimport pandas as pd\n\nclass Atom(object):\n \"\"\"\n The atom object. Stores atom-like properties.\n \n Data Fields\n id_num: some unique identifier \n species = -1 (default). Ex: \"C\" or 6\n pos: DataFrame. Uses timestamps as indexes. Columns: x,y,z,ix,iy,iz\n mass = -1 (default)\n charge = 0 (default)\n is_wrapped = True (default) are atom coordinates wrapped or unwrapped? \n \n Derek Fujimoto\n April 2017\n \"\"\"\n\n# ============================================================================ # \n def __init__(self,id_num,species=-1,mass=-1,charge=0):\n \"\"\"Constructor.\"\"\"\n \n self.id_num = id_num\n self.species = species \n self.mass = mass\n self.charge = charge\n self.pos = pd.DataFrame() # positions\n self.is_wrapped = True\n \n return\n \n# ============================================================================ # \n def __del__(self):\n del self.pos \n \n \n# ============================================================================ # \n def __print_line_error__(self,frameinfo,message):\n \"\"\"Print error message based on line number. \n Call with frameinfo = getframeinfo(currentframe()).\n \"\"\"\n print(frameinfo.filename + \"::\" + frameinfo.function + \\\n \"::line \" + repr(frameinfo.lineno) + \": \" + message)\n return\n \n# ============================================================================ # \n def check_time_duplicate(self):\n \"\"\"Check for duplicate timestamps.\n Returns a list of duplicate times\n \"\"\"\n return pos.index.duplicated()\n \n# ============================================================================ # \n def del_position(self,time):\n \"\"\"Delete a position by timestamp.\"\"\"\n \n self.pos = self.pos.drop(time)\n return \n\n# ============================================================================ # \n def edit_position(self,time,new_pos):\n \"\"\"Change atom positions by time.\"\"\"\n self.pos.loc[time][['x','y','z']] = new_pos\n return\n\n# ============================================================================ # \n def get_image(self,time):\n \"\"\"Get the image box that the atom belongs to by time. \n return [ix,iy,iz]\n \"\"\"\n return np.array(self.pos.loc[time][['ix','iy','iz']])\n\n# ============================================================================ # \n def get_position(self,time):\n \"\"\"Get atom position by time. \n return [x,y,z]\n \"\"\"\n return np.array(self.pos.loc[time][['x','y','z']])\n\n# ============================================================================ # \n def get_time_index(self,time):\n \"\"\"Get the index corresponding with a given time. \n \"\"\" \n return self.pos.loc[time].get_loc['t']\n\n# ============================================================================ # \n def get_unwrap_position(self,boxsize):\n \"\"\"Get atom positions, unwrapped by images, by time.\n boxsize = [Lx,Ly,Lz] (box side lengths)\n if index=True, treat \"time\" as an index. 
\n return [x,y,z]\n \"\"\"\n\n # check boxsize\n if len(boxsize) < 3:\n self.__print_line_error__(getframeinfo(currentframe()),\\\n \"boxsize needs to be of format [Lx,Ly,Lz] (side lengths).\")\n return\n \n # rescale and return\n return np.array([self.pos['x']+boxsize[0]*self.pos['ix'],\\\n self.pos['y']+boxsize[1]*self.pos['iy'],\\\n self.pos['z']+boxsize[2]*self.pos['iz']])\n\n# ============================================================================ #\n def get_xyz(self):\n \"\"\"Get positions in the format [time][xyz], as opposed to the inverse \n indexing you would get from doing [Atom.x,Atom.y,Atom.z]. \n \"\"\"\n return np.array(self.pos[['x','y','z']])\n \n# ============================================================================ #\n def set_position(self,time,pos,img=[0,0,0]):\n \"\"\"Set atom position. \n pos = [x,y,z]\n img = [ix,iy,iz]\n \n Note: there is no check for duplicate time stamps. \n \"\"\"\n \n # make arrays\n time = np.array(time)\n pos = np.array(pos)\n img = np.array(img)\n \n # make data frames\n tdata = pd.DataFrame(time,columns=['t'])\n pdata = pd.DataFrame(pos,columns=['x','y','z'])\n idata = pd.DataFrame(img,columns=['ix','iy','iz'])\n \n # save memory\n tdata = tdata.apply(pd.to_numeric,downcast='integer')\n pdata = pdata.apply(pd.to_numeric,downcast='float')\n idata = idata.apply(pd.to_numeric,downcast='integer')\n \n # add to existing\n self.pos = self.pos.append(tdata.join((pdata,idata)).set_index('t'))\n\n # clear arrays\n del time,pos,img,tdata,pdata,idata\n \n return\n \n# ============================================================================ #\n def sort(self,key='t'):\n \"\"\"Sort storage position and time arrays by t,x,y, or z. \n key = \"t\",\"x\",\"y\",\"z\",\"ix\",\"iy\",\"iz\"\n \"\"\"\n \n if key == 't':\n self.pos = self.pos.sort_index()\n else:\n self.pos = self.pos.sort_values(by=[key])\n \n return\n\n# ============================================================================ # \n def unwrap(self,boxsizes):\n \"\"\"Unwrap atom positions by images.\n boxsizes = [[Lx,Ly,Lz]] (box side lengths) Ordered by timestep.\n \"\"\"\n \n # check wrapped status\n if not self.is_wrapped:\n print(\"Atom coordinates already unwrapped. \")\n return\n \n # check boxsize\n if len(boxsizes[0]) < 3:\n self.__print_line_error__(getframeinfo(currentframe()),\\\n \"boxsize needs to be of format [[Lx,Ly,Lz]] (side lengths).\")\n return\n \n if len(boxsizes) != len(self.pos):\n self.__print_line_error__(getframeinfo(currentframe()),\\\n \"len(boxsize) must equal number of timesteps in Atom object.\")\n return\n \n # rescale and return\n boxsizes = np.array(boxsizes)\n self.pos['x'] = self.pos['x']+boxsizes[:,0]*self.pos['ix']\n self.pos['y'] = self.pos['y']+boxsizes[:,1]*self.pos['iy']\n self.pos['z'] = self.pos['z']+boxsizes[:,2]*self.pos['iz']\n \n # set wrapped\n self.is_wrapped = False\n \n return \n\n# ============================================================================ # \n def wrap(self,boxsizes):\n \"\"\"Wrap atom positions by images.\n boxsize = [[Lx,Ly,Lz]] (box side lengths) Ordered by timestep.\n \"\"\"\n \n # check wrapped status\n if self.is_wrapped:\n print(\"Atom coordinates already wrapped. 
\")\n return\n \n # check boxsize\n if len(boxsizes[0]) < 3:\n self.__print_line_error__(getframeinfo(currentframe()),\\\n \"boxsize needs to be of format [[Lx,Ly,Lz]] (side lengths).\")\n return\n \n if len(boxsizes) != len(self.pos):\n self.__print_line_error__(getframeinfo(currentframe()),\\\n \"len(boxsize) must equal number of timesteps in Atom object.\")\n return\n \n \n # rescale and return\n boxsizes = array(boxsizes)\n self.pos['x'] = self.pos['x']-boxsizes[:,0]*self.pos['ix']\n self.pos['y'] = self.pos['y']-boxsizes[:,1]*self.pos['iy']\n self.pos['z'] = self.pos['z']-boxsizes[:,2]*self.pos['iz']\n \n # set wrapped\n self.is_wrapped = True\n \n return\n","sub_path":"Atom.py","file_name":"Atom.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"581784792","text":"def mz_building_kind_detail(val):\n # TODO should this be in yaml instead?\n if val in (\n 'bangunan',\n 'building',\n 'other',\n 'rumah',\n 'Rumah',\n 'Rumah Masyarakat',\n 'rumah_penduduk',\n 'true',\n 'trullo',\n 'yes'):\n return None\n\n if val in (\n 'abandoned',\n 'administrative',\n 'agricultural',\n 'airport',\n 'allotment_house',\n 'apartments',\n 'arbour',\n 'bank',\n 'barn',\n 'basilica',\n 'beach_hut',\n 'bell_tower',\n 'boathouse',\n 'brewery',\n 'bridge',\n 'bungalow',\n 'bunker',\n 'cabin',\n 'carport',\n 'castle',\n 'cathedral',\n 'chapel',\n 'chimney',\n 'church',\n 'civic',\n 'clinic',\n 'clubhouse',\n 'collapsed',\n 'college',\n 'commercial',\n 'construction',\n 'container',\n 'convent',\n 'cowshed',\n 'dam',\n 'damaged',\n 'depot',\n 'destroyed',\n 'detached',\n 'disused',\n 'dormitory',\n 'duplex',\n 'factory',\n 'farm',\n 'farm_auxiliary',\n 'fire_station',\n 'garage',\n 'garages',\n 'gazebo',\n 'ger',\n 'glasshouse',\n 'government',\n 'grandstand',\n 'greenhouse',\n 'hangar',\n 'healthcare',\n 'hermitage',\n 'hospital',\n 'hotel',\n 'house',\n 'houseboat',\n 'hut',\n 'industrial',\n 'kindergarten',\n 'kiosk',\n 'library',\n 'mall',\n 'manor',\n 'manufacture',\n 'mobile_home',\n 'monastery',\n 'mortuary',\n 'mosque',\n 'museum',\n 'office',\n 'outbuilding',\n 'parking',\n 'pavilion',\n 'power',\n 'prison',\n 'proposed',\n 'pub',\n 'public',\n 'residential',\n 'restaurant',\n 'retail',\n 'roof',\n 'ruin',\n 'ruins',\n 'school',\n 'semidetached_house',\n 'service',\n 'shed',\n 'shelter',\n 'shop',\n 'shrine',\n 'silo',\n 'slurry_tank',\n 'stable',\n 'stadium',\n 'static_caravan',\n 'storage',\n 'storage_tank',\n 'store',\n 'substation',\n 'summer_cottage',\n 'summer_house',\n 'supermarket',\n 'synagogue',\n 'tank',\n 'temple',\n 'terrace',\n 'tower',\n 'train_station',\n 'transformer_tower',\n 'transportation',\n 'university',\n 'utility',\n 'veranda',\n 'warehouse',\n 'wayside_shrine',\n 'works'):\n return val\n\n if val == 'barne':\n return 'barn'\n if val == 'commercial;residential':\n return 'mixed_use'\n if val == 'constructie':\n return 'construction'\n if val == 'dwelling_house':\n return 'house'\n if val == 'education':\n return 'school'\n if val == 'greenhouse_horticulture':\n return 'greenhouse'\n if val in ('apartment', 'flat'):\n return 'apartments'\n if val in ('houses', 'residences', 'residence', 'perumahan permukiman',\n 'residentiel1'):\n return 'residential'\n if val in ('semi_detached', 'semi-detached', 'semi'):\n return 'semidetached_house'\n if val == 'offices':\n return 'office'\n if val == 'prefab_container':\n return 'container'\n if val == 'public_building':\n return 'public'\n if 
val == 'railway_station':\n return 'train_station'\n if val == 'roof=permanent':\n return 'roof'\n if val == 'stables':\n return 'stable'\n if val == 'static caravan':\n return 'static_caravan'\n if val == 'station':\n return 'transportation'\n if val == 'storage tank':\n return 'storage_tank'\n if val == 'townhome':\n return 'terrace'\n\n\ndef mz_building_part_kind_detail(val):\n if val in ('yes', 'part', 'church:part', 'default'):\n return None\n if val in (\n 'arch',\n 'balcony',\n 'base',\n 'column',\n 'door',\n 'elevator',\n 'entrance',\n 'floor',\n 'hall',\n 'main',\n 'passageway',\n 'pillar',\n 'porch',\n 'ramp',\n 'roof',\n 'room',\n 'steps',\n 'stilobate',\n 'tier',\n 'tower',\n 'verticalpassage',\n 'wall',\n 'window'):\n return val\n if val in ('corridor', 'Corridor', 'vertical', 'verticalpassage'):\n return 'verticalpassage'\n if val in ('stairs', 'stairway'):\n return 'steps'\n\n\n# these functions were used in the yaml, but are worked around at the\n# moment by continuing to call the sql and having the output\n# calculation simply pick up the sql values\n# def mz_get_rel_networks(osm_id):\n# return []\n# def mz_cycling_network(props, osm_id):\n# pass\n","sub_path":"vectordatasource/meta/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"379960197","text":"import time\nimport multiprocessing\nfrom multiprocessing import Process, Pipe\nimport boto3\nimport json\n\n\ns3_resource = boto3.resource('s3')\naws_region = 'us-east-1'\n\ndef read_ec2_list_from_s3():\n #This function can put in Multiprocessing\n obj = s3_resource.Object('limliht-config','config/'+aws_region+'/ec2_default_document.json')\n read_data = obj.get()['Body'].read()\n loads_data = json.loads(read_data)\n print(loads_data)\n\ndef read_asg_list_from_s3():\n #This function can put in Multiprocessing\n obj = s3_resource.Object('limliht-config','config/'+aws_region+'/asg_default_document.json')\n read_data = obj.get()['Body'].read()\n loads_data = json.loads(read_data)\n print(loads_data)\n \nif __name__ == \"__main__\":\n \n t1 = time.time()\n p_read_ec2_list = Process(target=read_ec2_list_from_s3)\n p_read_asg_lsit = Process(target=read_asg_list_from_s3)\n \n p_read_ec2_list.start()\n p_read_asg_lsit.start()\n \n p_read_ec2_list.join()\n p_read_asg_lsit.join()\n print('total time spend = ', time.time()-t1 )\n \n t2 = time.time()\n read_asg_list_from_s3()\n read_ec2_list_from_s3()\n print('total time spend = ', time.time()-t2)\n \n print('Done')","sub_path":"multiprocess.py","file_name":"multiprocess.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"80293524","text":"#!/usr/bin/env python\n\n# stdlib imports\nfrom datetime import datetime\nimport re\nimport copy\n\n# third party imports\nimport numpy as np\nfrom scipy import constants\nfrom obspy.core.utcdatetime import UTCDateTime\n\n# local\nfrom gmprocess.stationstream import StationStream\nfrom gmprocess.stationtrace import StationTrace, PROCESS_LEVELS\nfrom gmprocess.io.seedname import get_channel_name\n\n\nTIMEFMT = '%d/%m/%Y %H:%M:%S.%f'\nFLOATRE = \"[-+]?[0-9]*\\.?[0-9]+\"\nINTRE = \"[-+]?[0-9]*\"\n\n# 20/07/2017 22:30:58.000000\nTIME_RE = '[0-9]{2}/[0-9]{2}/[0-9]{4} [0-9]{2}:[0-9]{2}:[0-9]{2}\\.?[0-9]*'\n\nTEXT_HDR_ROWS = 18\n\nCOLWIDTH = 12\nNCOLS = 3\n\nSOURCE = 'National Strong-Motion Network of Turkey 
(TR-NSMN)'\nSOURCE_FORMAT = 'NSMN'\nNETWORK = 'TK'\n\nLEVELS = {'VOL1DS': 'V1'}\n\nDECIG_TO_GALS = (constants.g * 100) / 10\n\nMARKER = 'STRONG GROUND MOTION RECORDS OF TURKIYE'\n\nENCODING = 'ISO-8859-1'\n# ENCODING = 'utf-16-be'\n\n\ndef is_nsmn(filename):\n with open(filename, 'rt', encoding=ENCODING) as f:\n line = f.readline()\n if MARKER in line:\n return True\n\n return False\n\n\ndef read_nsmn(filename):\n \"\"\"Read the Turkish NSMN strong motion data format.\n\n Args:\n filename (str): path to NSMN data file.\n\n Returns:\n list: Sequence of one StationStream object containing 3 StationTrace objects.\n \"\"\"\n header = _read_header(filename)\n header1 = copy.deepcopy(header)\n header2 = copy.deepcopy(header)\n header3 = copy.deepcopy(header)\n header1['standard']['horizontal_orientation'] = 0.0\n header1['channel'] = get_channel_name(header['sampling_rate'],\n True,\n False,\n True)\n header2['standard']['horizontal_orientation'] = 90.0\n header2['channel'] = get_channel_name(header['sampling_rate'],\n True,\n False,\n False)\n header3['standard']['horizontal_orientation'] = 0.0\n header3['channel'] = get_channel_name(header['sampling_rate'],\n True,\n True,\n False)\n # three columns of NS, EW, UD\n # data = np.genfromtxt(filename, skip_header=TEXT_HDR_ROWS,\n # delimiter=[COLWIDTH] * NCOLS, encoding=ENCODING)\n data = np.loadtxt(filename,\n skiprows=TEXT_HDR_ROWS,\n encoding=ENCODING)\n data1 = data[:, 0]\n data2 = data[:, 1]\n data3 = data[:, 2]\n trace1 = StationTrace(data=data1, header=header1)\n trace2 = StationTrace(data=data2, header=header2)\n trace3 = StationTrace(data=data3, header=header3)\n stream = StationStream(traces=[trace1, trace2, trace3])\n return [stream]\n\n\ndef _read_header(filename):\n header = {}\n standard = {}\n coords = {}\n format_specific = {}\n with open(filename, 'rt', encoding=ENCODING) as f:\n lines = [next(f) for x in range(TEXT_HDR_ROWS)]\n # fill out the standard dictionary\n standard['source'] = SOURCE\n standard['source_format'] = SOURCE_FORMAT\n standard['instrument'] = lines[9].split(':')[1].strip()\n standard['sensor_serial_number'] = lines[10].split(':')[1].strip()\n standard['process_level'] = PROCESS_LEVELS['V1']\n standard['process_time'] = ''\n standard['station_name'] = lines[1].split(':')[1].strip()\n standard['structure_type'] = ''\n standard['corner_frequency'] = np.nan\n standard['units'] = 'acc'\n standard['instrument_period'] = np.nan\n standard['instrument_damping'] = np.nan\n standard['horizontal_orientation'] = np.nan\n standard['comments'] = ' '.join(lines[15:17]).replace('\\n', '')\n\n # fill out the stats stuff\n stimestr = re.search(TIME_RE, lines[11]).group()\n # 20/07/2017 22:30:58.000000 (GMT)\n stime = datetime.strptime(stimestr, TIMEFMT)\n header['starttime'] = stime\n header['npts'] = int(lines[12].split(':')[1].strip())\n header['delta'] = float(lines[13].split(':')[1].strip())\n header['sampling_rate'] = 1 / header['delta']\n header['duration'] = header['npts'] * header['delta']\n header['channel'] = ''\n header['station'] = lines[6].split(':')[1].strip()\n header['location'] = '--'\n header['network'] = NETWORK\n\n coordstr = lines[7].split(':')[1].replace('-', '')\n lat_str, lon_str = re.findall(FLOATRE, coordstr)\n altparts = lines[8].split(':')\n altitude = 0.0\n if len(altparts) > 1 and len(altparts[1].strip()):\n altitude = float(altparts[1].strip())\n coords = {'latitude': float(lat_str),\n 'longitude': float(lon_str),\n 'elevation': altitude}\n\n header['coordinates'] = coords\n header['standard'] = 
standard\n header['format_specific'] = format_specific\n\n return header\n\n\ndef _read_header_lines(filename, offset):\n \"\"\"Read the header lines for each channel.\n\n Args:\n filename (str): \n Input BHRC file name.\n offset (int): \n Number of lines to skip from the beginning of the file.\n\n Returns:\n tuple: (header dictionary containing Stats dictionary with extra sub-dicts, \n updated offset rows)\n \"\"\"\n with open(filename, 'rt') as f:\n for _ in range(offset):\n next(f)\n lines = [next(f) for x in range(TEXT_HDR_ROWS)]\n\n offset += TEXT_HDR_ROWS\n\n header = {}\n standard = {}\n coords = {}\n format_specific = {}\n\n # get the sensor azimuth with respect to the earthquake\n # this data has been rotated so that the longitudinal channel (L)\n # is oriented at the sensor azimuth, and the transverse (T) is\n # 90 degrees off from that.\n station_info = lines[7][lines[7].index('Station'):]\n (lat_str, lon_str,\n alt_str, lstr, tstr) = re.findall(FLOATRE, station_info)\n component = lines[4].strip()\n if component == 'V':\n angle = np.nan\n elif component == 'L':\n angle = float(lstr)\n else:\n angle = float(tstr)\n coords = {'latitude': float(lat_str),\n 'longitude': float(lon_str),\n 'elevation': float(alt_str)}\n\n # fill out the standard dictionary\n standard['source'] = SOURCE\n standard['source_format'] = SOURCE_FORMAT\n standard['instrument'] = lines[1].split('=')[1].strip()\n standard['sensor_serial_number'] = ''\n volstr = lines[0].split()[1].strip()\n if volstr not in LEVELS:\n raise KeyError('Volume %s files are not supported.' % volstr)\n standard['process_level'] = PROCESS_LEVELS[LEVELS[volstr]]\n standard['process_time'] = ''\n station_name = lines[7][0:lines[7].index('Station')].strip()\n standard['station_name'] = station_name\n standard['structure_type'] = ''\n standard['corner_frequency'] = np.nan\n standard['units'] = 'acc'\n period_str, damping_str = re.findall(FLOATRE, lines[9])\n standard['instrument_period'] = float(period_str)\n standard['instrument_damping'] = float(damping_str)\n standard['horizontal_orientation'] = angle\n standard['comments'] = ''\n\n # fill out the stats stuff\n # we don't know the start of the trace\n header['starttime'] = UTCDateTime(1970, 1, 1)\n npts_str, dur_str = re.findall(FLOATRE, lines[10])\n header['npts'] = int(npts_str)\n header['duration'] = float(dur_str)\n header['delta'] = header['duration'] / (header['npts'] - 1)\n header['sampling_rate'] = 1 / header['delta']\n if np.isnan(angle):\n header['channel'] = get_channel_name(\n header['sampling_rate'],\n is_acceleration=True,\n is_vertical=True,\n is_north=False)\n elif (angle > 315 or angle < 45) or (angle > 135 and angle < 225):\n header['channel'] = get_channel_name(\n header['sampling_rate'],\n is_acceleration=True,\n is_vertical=False,\n is_north=True)\n else:\n header['channel'] = get_channel_name(\n header['sampling_rate'],\n is_acceleration=True,\n is_vertical=False,\n is_north=False)\n\n part1 = lines[0].split(':')[1]\n stationcode = part1.split('/')[0].strip()\n header['station'] = stationcode\n header['location'] = '--'\n header['network'] = NETWORK\n\n header['coordinates'] = coords\n header['standard'] = standard\n header['format_specific'] = format_specific\n\n offset += INT_HDR_ROWS\n offset += FLOAT_HDR_ROWS\n\n return (header, offset)\n\n\ndef _read_data(filename, offset, header):\n \"\"\"Read acceleration data from BHRC file.\n\n Args:\n filename (str): \n BHRC strong motion filename.\n offset (int):\n Number of rows from the beginning of the file to 
skip.\n header (dict):\n Dictionary for given channel with number of points.\n\n Returns:\n tuple: (Acceleration data (in gals), updated offset)\n \"\"\"\n widths = [COLWIDTH] * COLS_PER_ROW\n npoints = header['npts']\n nrows = int(np.ceil(npoints / COLS_PER_ROW))\n data = np.genfromtxt(filename, skip_header=offset,\n max_rows=nrows, filling_values=np.nan,\n delimiter=widths)\n data = data.flatten()\n data = data[0:header['npts']]\n\n # convert data to cm/s^2\n data *= DECIG_TO_GALS\n\n offset += nrows + 1 # there is an end of record marker line\n return (data, offset)\n","sub_path":"gmprocess/io/nsmn/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":9599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"229195954","text":"import torch as t\nimport torch.nn as nn\nimport torchvision as tv\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nfrom tqdm import tqdm\n\n# from models.LeNet5 import LeNet5 as Net\n# NET_NAME = 'LeNet5_MNIST'\n# transform = transforms.Compose([\n# transforms.ToTensor()\n# ])\n\n# from models.AlexNet import AlexNet as Net\n# NET_NAME = 'AlexNet_MNIST'\n# transform = transforms.Compose([\n# # transforms.Resize((227,227)),\n# transforms.ToTensor(),\n# ])\nfrom models.VGGNet import VGGNet as Net\nNET_NAME = 'VGGNet_MNIST'\ntransform = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n])\nnet = Net('vgg11', 1, 10)\n\ndevice = t.device(\"cuda\" if t.cuda.is_available() else \"cpu\")\n\ntestset = tv.datasets.MNIST(\n root='./data_tmp/',\n train=False,\n download=True,\n transform=transform,\n)\ntestloader = DataLoader(\n testset,\n batch_size=1,\n shuffle=False,\n)\n\nnet = net.to(device)\n# net.load(NET_NAME,device)\ncriterion = nn.CrossEntropyLoss(reduction='sum')\n\nac_num = 0\nnum = 0\n\nif __name__ == '__main__':\n for data in tqdm(testloader):\n input, label = data\n # print(input.shape,label.shape)\n input, label = input.to(device), label.to(device)\n output = net(input)\n _, index = t.max(output,1)\n if index == label:\n ac_num += 1\n num += 1\n\n print(ac_num/num*100,'%')","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"51123921","text":"#API setup\nfrom picraft import Vector\nfrom picraft import World, Block\n\ndef face_to_dataval(face):\n\t#map the face that was hit to a stair data value (0-3); -1 means don't build.\n\t#assumption: picraft reports faces as 'x+', 'x-', 'z+', 'z-', 'y+' or 'y-',\n\t#and oak-stair (id 53) data values 0-3 point the steps east/west/south/north\n\treturn {'x+': 0, 'x-': 1, 'z+': 2, 'z-': 3}.get(face, -1)\n\ndef operation(world, position, data):\n\t#center block\n\tworld.blocks[position] = Block(53, data)\n\n\t#extend the seat one block to each side, perpendicular to the facing\n\t#direction (a guess at the intended bench shape)\n\tside = Vector(0, 0, 1) if data in (0, 1) else Vector(1, 0, 0)\n\t#left block\n\tworld.blocks[position - side] = Block(53, data)\n\t#right block\n\tworld.blocks[position + side] = Block(53, data)\n\n\ndef main():\n\t#API setup\n\tworld = World()\n\n\twhile True:\n\t\t#get recent sword hits\n\t\thits = world.events.poll()\n\t\t\n\t\tfor hit in hits:\n\t\t\t#get position and orientation for stairs\n\t\t\tposition = hit.pos\n\t\t\tdata = face_to_dataval(hit.face)\n\t\t\t\n\t\t\t#call the building function\n\t\t\tif data != -1:\n\t\t\t\toperation(world, position, data)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Raspberry Jam /Manchester Raspberry Jam/Workshops/006_ Picraft API/Files/4_bench.py","file_name":"4_bench.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"454733564","text":"# -*- coding: utf-8 -*-\n\nfrom . 
import BaseHandler\nimport random\nimport string\nfrom urllib import unquote_plus\nfrom xml.etree import ElementTree as ET\nimport logging\nfrom tornado.options import options\nfrom autumn.sms import CouponSMSMessage\nfrom autumn.utils import send_email\n\n\nclass Notify(BaseHandler):\n def post(self):\n notify_data = unquote_plus(self.get_argument('notify_data').encode('utf-8'))\n notify_data_xml = ET.fromstring(notify_data)\n\n status_set = {\n 'WAIT_BUYER_PAY': 0,\n 'TRADE_CLOSED': 1,\n 'TRADE_SUCCESS': 2,\n 'TRADE_PENDING': 3,\n 'TRADE_FINISHED': 4\n }\n\n order_id = notify_data_xml.findtext('out_trade_no')\n trade_status = notify_data_xml.findtext('trade_status')\n\n status = status_set.get(trade_status, -1)\n\n if status in [2, 4]:\n shipping = self.db.get('select oi.shipping_info_id, oi.goods_id, o.distributor_shop_id, s.sales_id, g.*, '\n 'o.id oid, oi.id oiid, o.payment, o.order_no, o.mobile, oi.num '\n 'from orders o, order_item oi, goods g, supplier s '\n 'where o.id = oi.order_id and oi.goods_id = g.id and g.supplier_id = s.id '\n 'and o.status = 0 and o.id = %s', order_id)\n if shipping:\n self.db.execute('update orders set paid_at = NOW(), status = 1 where id = %s', order_id)\n if shipping.shipping_info_id:\n self.db.execute('update order_shipping_info set paid_at = NOW() where id = %s',\n shipping.shipping_info_id)\n\n item_ids = []\n for i in range(shipping.num):\n item_id = self.db.execute('insert into item(status, goods_name, goods_id, distr_id, distr_shop_id, '\n 'sp_id, sales_id, order_id, order_item_id, order_no, face_value, payment, '\n 'sales_price, purchase_price, created_at) '\n 'values(1, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW())',\n shipping.short_name, shipping.goods_id, options.distributor_id_weixin,\n shipping.distributor_shop_id, shipping.supplier_id, shipping.sales_id, shipping.oid,\n shipping.oiid, shipping.order_no, shipping.face_value, shipping.payment,\n shipping.sales_price, shipping.purchase_price)\n item_ids.append(item_id)\n # 如果是电子券,生成item_coupon 并发货\n if shipping.type == 'E':\n # 导入券,获得导入的券号,密码\n if shipping.generate_type == 'IMPORT':\n coupon_imported = self.db.query('select * from coupon_imported where goods_id=%s and used=0 '\n 'limit %s', shipping.goods_id, shipping.num)\n # 导入券库存不足,直接返回\n if len(coupon_imported) < shipping.num:\n send_email(redis=self.redis,\n subject='weixin coupon generation failed:out of stock for imported coupon',\n to_list='dev@uhuila.com',\n html='order id=%s and goods id=%s' % (shipping.oid, shipping.goods_id))\n logging.error('imported goods id=%s out of stock' % shipping.goods_id)\n self.write('success')\n return\n\n imported_ids = [c.id for c in coupon_imported]\n self.db.execute('update coupon_imported set used=1 where id in (%s)'\n % ','.join(['%s']*len(imported_ids)), *imported_ids)\n coupon_sns = [c.coupon_sn for c in coupon_imported]\n coupon_pwds = [c.coupon_pwd for c in coupon_imported]\n\n # 生成电子券\n for i in range(int(shipping.num)):\n if shipping.generate_type == 'IMPORT':\n coupon_sn = coupon_sns[i]\n coupon_pwd = coupon_pwds[i]\n else:\n coupon_pwd = ''\n while True:\n coupon_sn = ''.join([random.choice(string.digits) for z in range(10)])\n # 没有重复,停止\n if not self.db.get('select id from item_coupon where sn=%s', coupon_sn):\n break\n item_coupon_field = {\n 'mobile': shipping.mobile,\n 'sn': coupon_sn,\n 'pwd': coupon_pwd,\n 'distr_sn': None,\n 'distr_pwd': None,\n 'sms_sent_count': 0,\n 'expire_at': shipping.expire_at,\n 'item_id': item_ids.pop()\n }\n self.db.execute('insert into 
item_coupon(%s) values (%s)'\n % (','.join(item_coupon_field.keys()), ','.join(['%s']*len(item_coupon_field))),\n *item_coupon_field.values())\n\n # 发送电子券\n all_order_items = self.db.query('select * from order_item where order_id=%s', order_id)\n for item in all_order_items:\n CouponSMSMessage(self.db, self.redis, order_item=item).remark('微商城发送券号短信').send()\n\n # 删除redis对应的值\n self.redis.delete('o:alipay:%s' % order_id)\n logging.info('order: %s, paid success', order_id)\n else:\n logging.info('order: %s, can not find order', order_id)\n else:\n logging.info('order: %s, paid failed. status: %s', order_id, trade_status)\n\n self.write('success')","sub_path":"apps/api/controllers/alipay.py","file_name":"alipay.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"406088839","text":"\"\"\"\nTiles at the edge of the image also have this border, but the outermost edges won't line up with any other tiles.\n^ without this, i'd probably die lol\n\"\"\"\nimport re\nfrom typing import List\nfrom math import sqrt\nimport numpy as np\n\n\n# returns 0 if they don't line up at any place, \n# 1 if they match like for the first edge pair and 2 for the second\ndef lineUp(edge1: List[List[str]], edge2: List[List[str]]) -> int:\n \"\"\"\n returns 0 if they can't line up at all\n returns 1 if the first edge pair of edge1 is valid\n and 2 if the second edge pair is valid\n \"\"\"\n otherEdges = edge2[0] + edge2[1]\n for e in edge1[0]:\n for otherE in otherEdges:\n if e == otherE or e == otherE[::-1]:\n return 1\n for e in edge1[1]:\n for otherE in otherEdges:\n if e == otherE or e == otherE[::-1]:\n return 2\n return 0\n\n\ndef verticalNeighbors(side: int, r: int, c: int) -> List[List[int]]:\n return [p for p in [[r + 1, c], [r - 1, c]] if 0 <= p[0] < side]\n\n\ndef horizontalNeighbors(side: int, r: int, c: int) -> List[List[int]]:\n return [p for p in [[r, c + 1], [r, c - 1]] if 0 <= p[1] < side]\n\n\ndef neighbors(side: int, r: int, c: int) -> List[List[int]]:\n return verticalNeighbors(side, r, c) + horizontalNeighbors(side, r, c)\n\n\ndef orientations(grid: List[str]) -> List[List[str]]:\n \"\"\"\n gives all possible ways a tile can be in\n copied from https://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-array-in-python\n \"\"\"\n possible = []\n rotated = grid.copy()\n for _ in range(4):\n possible.append(rotated)\n rotated = list(''.join(r) for r in zip(*rotated[::-1]))\n\n rotated = [r for r in reversed(grid)]\n for _ in range(4):\n possible.append(rotated)\n rotated = list(''.join(r) for r in zip(*rotated[::-1]))\n\n rotated = [r[::-1] for r in grid]\n for _ in range(4):\n possible.append(rotated)\n rotated = list(''.join(r) for r in zip(*rotated[::-1]))\n return possible\n\n\ntiles = {}\nwith open('gonnaStayUp.txt') as read:\n for rawTile in read.read().split('\\n\\n'):\n rawTile = rawTile.strip().split('\\n')\n tile = []\n tiles[int(''.join(c for c in rawTile[0] if c.isdigit()))] = rawTile[1:]\n sideLen = int(sqrt(len(tiles)))\n assert sideLen == sqrt(len(tiles)), 'just please give me a square'\n\nedges = {}\nfor tileID, tile in tiles.items():\n edges[tileID] = [[tile[0], tile[-1]], [''.join(r[0] for r in tile), ''.join(r[-1] for r in tile)]]\n\nadjacentIDs = {}\nprod = 1\nstart = -1\nfor id1, tile1 in edges.items():\n adjacentIDs[id1] = [[], []]\n matches = []\n for id2, tile2 in edges.items():\n if id1 == id2:\n continue\n result = lineUp(tile1, tile2)\n if result != 0:\n 
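# lineUp's nonzero result (1 or 2) records which of tile1's edge pairs matched;\n # a tile whose matches are exactly [1, 2] borders only two others, i.e. a corner\n 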
matches.append(result)\n adjacentIDs[id1][result - 1].append(id2)\n # only matches 2, so it must be a corner piece\n if sorted(matches) == [1, 2]:\n start = id1 # idk, just assign a random corner for our start\n prod *= id1\nassert start != -1, \"there's gotta be a corner bro\"\nprint(\"this is the worst camera design ever made in the history of mankind: %i\" % prod)\n\npicIDs = np.full((sideLen, sideLen), -1)\npicIDs[0][0] = start\nfrontier = [[[0, 0], start]]\nwhile frontier: # first build the picture out of pure IDs\n curr = frontier.pop(0)\n upDown, leftRight = [set(i) for i in adjacentIDs[curr[-1]]]\n row, col = curr[0]\n firstMatch = len(upDown)\n secondMatch = len(leftRight)\n vNeighbors = verticalNeighbors(sideLen, row, col)\n hNeighbors = horizontalNeighbors(sideLen, row, col)\n for vn in vNeighbors:\n val = picIDs[vn[0]][vn[1]]\n if val != -1 and val not in upDown:\n upDown, leftRight = leftRight, upDown\n break\n\n # clear out the ones that've alr been processed, then fill in the undone ones\n for vr, vc in vNeighbors:\n if picIDs[vr, vc] != -1:\n upDown.remove(picIDs[vr, vc])\n for vr, vc in vNeighbors:\n if picIDs[vr, vc] == -1:\n picIDs[vr, vc] = upDown.pop()\n frontier.append([[vr, vc], picIDs[vr, vc]])\n\n for hr, hc in hNeighbors:\n if picIDs[hr, hc] != -1:\n leftRight.remove(picIDs[hr, hc])\n for hr, hc in hNeighbors:\n if picIDs[hr, hc] == -1:\n picIDs[hr, hc] = leftRight.pop()\n frontier.append([[hr, hc], picIDs[hr, hc]])\n\n# try all the starting orientations because we don't really know which one it is\nfor startingO in orientations(tiles[picIDs[0, 0]]):\n try:\n actualPic = [[None for _ in range(sideLen)] for _ in range(sideLen)]\n actualPic[0][0] = startingO\n for r in range(sideLen):\n for c in range(sideLen):\n if actualPic[r][c] is not None: # just to handle (0, 0)\n continue\n thisTile = tiles[picIDs[r][c]]\n filledAlr = [p for p in neighbors(sideLen, r, c) if actualPic[p[0]][p[1]] is not None].pop()\n tile = actualPic[filledAlr[0]][filledAlr[1]]\n if filledAlr[0] != r: # it's upwards\n matchUp = tile[-1]\n for o in orientations(thisTile):\n if o[0] == matchUp:\n actualPic[r][c] = o\n break\n\n else: # it's to the left of the thing\n matchUp = ''.join(r[-1] for r in tile)\n for o in orientations(thisTile):\n if ''.join(r[0] for r in o) == matchUp:\n actualPic[r][c] = o\n break\n break # wow, everything matched up!\n except IndexError: # well, that starting orientation wasn't valid, let's try another\n pass\n\nfinal = []\nfor bigR in actualPic:\n for i in range(1, len(bigR[0]) - 1): # exclude the top & bottom borders\n final.append(''.join(r[i][1:-1] for r in bigR))\n\nnessie = [\n \"..................#.\", # the ending dots are so the bodyLen is consistent\n \"#....##....##....###\",\n \".#..#..#..#..#..#...\"\n]\nbodyLen = len(nessie[0])\nfor o in orientations(final):\n partOfNessie = np.zeros((len(final), len(final[0])))\n nessieCount = 0\n for i in range(len(final) - len(nessie) + 1):\n for s in range(len(o[0]) - bodyLen + 1):\n for v, r in enumerate(o[i:i + len(nessie)]):\n r = r[s: s + bodyLen]\n if re.match(nessie[v], r) is None:\n break\n else:\n for v1 in range(len(nessie)):\n for v2 in range(bodyLen):\n partOfNessie[i + v1][s + v2] = nessie[v1][v2] == '#'\n nessieCount += 1\n if nessieCount: # let's assume there's only ONE valid orientation\n final = o\n break\n\nrough = 0\nfor r in range(len(final)):\n for c in range(len(final[0])):\n rough += not partOfNessie[r][c] and final[r][c] == '#'\nprint(\"i can't believe this took 200 lines of code: %i\" % 
rough)\n","sub_path":"y2020/day20/definitelyIs.py","file_name":"definitelyIs.py","file_ext":"py","file_size_in_byte":7055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"218437657","text":"# -*- coding: utf-8 -*-\n#\n# ramstk.controllers.function.analysismanager.py is part of The RAMSTK\n# Project\n#\n# All rights reserved.\n# Copyright 2019 Doyle Rowland doyle.rowland reliaqual com\n\"\"\"Function Controller Package analysis manager.\"\"\"\n\n# Standard Library Imports\nfrom typing import Any, Dict\n\n# Third Party Imports\nfrom pubsub import pub\n\n# RAMSTK Package Imports\nfrom ramstk.analyses import improvementfactor\nfrom ramstk.configuration import RAMSTKUserConfiguration\nfrom ramstk.controllers import RAMSTKAnalysisManager\n\n\nclass AnalysisManager(RAMSTKAnalysisManager):\n \"\"\"Contain the attributes and methods of the Function analysis manager.\n\n This class manages the functional analysis for functional hazards\n analysis (FHA). Attributes of the function Analysis Manager are:\n \"\"\"\n def __init__(self, configuration: RAMSTKUserConfiguration,\n **kwargs: Dict[str, Any]) -> None:\n \"\"\"Initialize an instance of the function analysis manager.\n\n :param configuration: the Configuration instance associated with the\n current instance of the RAMSTK application.\n \"\"\"\n super().__init__(configuration, **kwargs)\n\n # Initialize private dictionary attributes.\n\n # Initialize private list attributes.\n\n # Initialize private scalar attributes.\n\n # Initialize public dictionary attributes.\n\n # Initialize public list attributes.\n\n # Initialize public scalar attributes.\n\n # Subscribe to PyPubSub messages.\n pub.subscribe(super().on_get_all_attributes,\n 'succeed_get_stakeholder_attributes')\n pub.subscribe(super().on_get_tree, 'succeed_get_stakeholder_tree')\n\n pub.subscribe(self.do_calculate_stakeholder,\n 'request_calculate_stakeholder')\n\n def do_calculate_stakeholder(self, node_id: int) -> None:\n \"\"\"Calculate improvement factor and weight for currently selected item.\n\n :param node_id: the node (stakeholder) ID to calculate.\n :return: None\n :rtype: None\n \"\"\"\n # Retrieve all the attributes from all the RAMSTK data tables for the\n # requested stakeholder. 
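# Aside: PyPubSub dispatch is synchronous, so the sendMessage call below has
# already populated self._attributes (via the on_get_all_attributes
# subscription) by the time _do_calculate_improvement runs. A minimal
# self-contained sketch of that request/reply pattern; the topic names in
# this sketch are hypothetical:
from pubsub import pub

class TinyManager:
    def __init__(self):
        self._attributes = {}
        pub.subscribe(self._on_get, 'succeed_get_attributes')

    def _on_get(self, attributes):
        self._attributes = attributes

def data_manager(node_id):
    # stand-in for the data manager that answers the request
    pub.sendMessage('succeed_get_attributes', attributes={'node': node_id})

mgr = TinyManager()
pub.subscribe(data_manager, 'request_get_attributes')
pub.sendMessage('request_get_attributes', node_id=7)
assert mgr._attributes == {'node': 7}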
We need to build a comprehensive dict of\n # attributes to pass to the various analysis methods/functions.\n pub.sendMessage(\n 'request_get_all_stakeholder_attributes',\n node_id=node_id,\n )\n\n self._do_calculate_improvement()\n\n pub.sendMessage(\n 'succeed_calculate_stakeholder',\n node_id=node_id,\n package={'improvement': self._attributes['improvement']},\n )\n pub.sendMessage(\n 'succeed_calculate_stakeholder',\n node_id=node_id,\n package={'overall_weight': self._attributes['overall_weight']},\n )\n\n def _do_calculate_improvement(self) -> None:\n \"\"\"Calculate improvement factor and weight for currently selected item.\n\n :return: None\n :rtype: None\n \"\"\"\n (self._attributes['improvement'], self._attributes['overall_weight']\n ) = improvementfactor.calculate_improvement(\n self._attributes['planned_rank'],\n self._attributes['customer_rank'],\n self._attributes['priority'],\n user_float_1=self._attributes['user_float_1'],\n user_float_2=self._attributes['user_float_2'],\n user_float_3=self._attributes['user_float_3'],\n user_float_4=self._attributes['user_float_4'],\n user_float_5=self._attributes['user_float_5'])\n","sub_path":"src/ramstk/controllers/stakeholder/analysismanager.py","file_name":"analysismanager.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"608290299","text":"import asyncio\nimport socket\nimport sys\nimport os\nimport time\n\nport = sys.argv[1]\ncommand = sys.argv[2]\narg = sys.argv[3] if len(sys.argv) == 4 else 0\ndirs = '/Users/mario/IdeaProjects/pythonLearn/pytest/file/p3/client/'\nif not os.path.exists(dirs):\n os.makedirs(dirs)\n\n\nasync def tcp_echo_client(command, arg, loop):\n message = command + \" \" + arg\n reader, writer = await asyncio.open_connection(socket.gethostname(), port,\n loop=loop)\n print('Send: %r' % message)\n writer.write(message.encode(\"UTF-8\"))\n data = await reader.read(1024)\n print('Received: %r' % data.decode(\"UTF-8\"))\n count = len(os.listdir(dirs))\n newText = dirs + \"bank_response_\" + str(count + 1) + \".txt\"\n content = str(time.time()) + '\\n' + command + \"\\n\" + data.decode(\"UTF-8\")\n\n with open(newText, 'w') as f:\n f.write(content)\n writer.close()\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(tcp_echo_client(command, str(arg), loop))\nloop.close()\n","sub_path":"pytest/pytest_bankclient.py","file_name":"pytest_bankclient.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"540974847","text":"from django.urls import path, include\nfrom rest_framework_nested import routers\n\nfrom .views import UserViewSet, RoleViewSet, CustomJWTPairView, CustomJWTRefreshView\n\nrouter = routers.SimpleRouter()\nrouter.register(\"users\", UserViewSet)\nrouter.register(\"roles\", RoleViewSet)\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n path(\"token/\", CustomJWTPairView.as_view(), name=\"token_obtain_pair\"),\n path(\"token/refresh/\", CustomJWTRefreshView.as_view(), name=\"token_refresh\"),\n]\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"315560597","text":"import os\nimport io\n\nfrom ansible_runner.streaming import Transmitter, Worker, Processor\n\n\ndef test_remote_job_interface(tmpdir, test_data_dir):\n worker_dir = 
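# Aside: the tcp_echo_client a couple of records above pairs naturally with
# an asyncio echo server. A minimal self-contained loopback sketch using the
# modern asyncio.run() API; host, port, and payload are illustrative only:
import asyncio

async def handle(reader, writer):
    data = await reader.read(1024)
    writer.write(data)          # echo the payload back unchanged
    await writer.drain()
    writer.close()

async def main():
    server = await asyncio.start_server(handle, '127.0.0.1', 8899)
    async with server:
        reader, writer = await asyncio.open_connection('127.0.0.1', 8899)
        writer.write(b'balance 1')
        data = await reader.read(1024)
        assert data == b'balance 1'
        writer.close()

asyncio.run(main())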
str(tmpdir.mkdir('for_worker'))\n process_dir = str(tmpdir.mkdir('for_process'))\n\n original_dir = os.path.join(test_data_dir, 'debug')\n\n outgoing_buffer = io.BytesIO()\n\n # Intended AWX and Tower use case\n transmitter = Transmitter(\n _output = outgoing_buffer,\n private_data_dir = original_dir,\n playbook = 'debug.yml'\n )\n\n print(transmitter.kwargs)\n assert transmitter.kwargs.get('playbook', '') == 'debug.yml'\n\n status, rc = transmitter.run()\n assert rc in (None, 0)\n assert status == 'unstarted'\n\n outgoing_buffer.seek(0) # rewind so we can start reading\n\n sent = outgoing_buffer.getvalue()\n assert sent # should not be blank at least\n assert b'zipfile' in sent\n\n incoming_buffer = io.BytesIO()\n\n worker = Worker(\n _input = outgoing_buffer,\n _output = incoming_buffer,\n private_data_dir = worker_dir\n )\n worker.run()\n\n assert set(os.listdir(worker_dir)) == set(['artifacts', 'inventory', 'project']), outgoing_buffer.getvalue()\n\n incoming_buffer.seek(0) # again, be kind, rewind\n\n processor = Processor(\n _input = incoming_buffer,\n private_data_dir = process_dir\n )\n processor.run()\n\n assert set(os.listdir(process_dir)) == set(['artifacts']), outgoing_buffer.getvalue()\n","sub_path":"test/integration/test_transmit_worker_process.py","file_name":"test_transmit_worker_process.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"567260265","text":"'''\n22. 括号生成\n数字 n 代表生成括号的对数,请你设计一个函数,用于能够生成所有可能的并且 有效的 括号组合。\n\n示例:\n\n输入:n = 3\n输出:[\n \"((()))\",\n \"(()())\",\n \"(())()\",\n \"()(())\",\n \"()()()\"\n ]\n'''\n\nfrom typing import List\n\n\nclass Solution:\n\n\n def generateParenthesis(self, n: int) -> List[str]:\n if n == 0:\n return []\n total_l = []\n total_l.append([None]) # 0组括号时记为None\n total_l.append([\"()\"]) # 1组括号只有一种情况\n for i in range(2,n+1): # 开始计算i组括号时的括号组合\n l = []\n for j in range(i): # 开始遍历 p q ,其中p+q=i-1 , j 作为索引\n now_list1 = total_l[j] # p = j 时的括号组合情况\n now_list2 = total_l[i-1-j] # q = (i-1) - j 时的括号组合情况\n for k1 in now_list1:\n for k2 in now_list2:\n if k1 == None:\n k1 = \"\"\n if k2 == None:\n k2 = \"\"\n el = \"(\" + k1 + \")\" + k2\n l.append(el) # 把所有可能的情况添加到 l 中\n total_l.append(l) # l这个list就是i组括号的所有情况,添加到total_l中,继续求解i=i+1的情况\n return total_l[n]\n\nsolution= Solution()\nnum=solution.generateParenthesis(9)\nprint(num)","sub_path":"acwing/python版本/generate-parentheses.py","file_name":"generate-parentheses.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"267239315","text":"import os, sys\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nimport numpy as np\nimport scipy.io as sio\nfrom model.utils import load_embedding_vectors_word2vec_gensim as load_word2vec_matias\nfrom math import floor\nfrom sklearn.metrics import roc_auc_score\nimport random\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport gc\nimport pickle\nfrom model.model import *\nfrom model.utils import *\nfrom model.AL_utils import *\nimport ast\n\ndef get_entropy(p):\n\t#p=np.array(prob)\n\tentropy=(-p * np.log2(p)-(1-p)*np.log2(1-p))\n\tentropy=np.nan_to_num(entropy)\n\treturn entropy\n\nclass Options(object):\n\tdef __init__(self):\n\t\tself.GPUID = 0\n\t\tself.dataset = None\n\t\tself.fix_emb = True\n\t\tself.restore = False\n\t\tself.W_emb = None\n\t\tself.W_class_emb = None\n\t\tself.maxlen = 538\n\t\tself.n_words = None\n\t\tself.embed_size = 
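# Aside: the generateParenthesis solution above (its comments are in Chinese)
# builds every valid string via the recurrence P(i) = "(" + P(j) + ")" +
# P(i-1-j), so the result count is the i-th Catalan number. A compact
# backtracking equivalent, with the record's own n = 3 sample as a check:
def gen_parens(n):
    out = []
    def walk(cur, opened, closed):
        if len(cur) == 2 * n:
            out.append(cur)
            return
        if opened < n:
            walk(cur + '(', opened + 1, closed)
        if closed < opened:
            walk(cur + ')', opened, closed + 1)
    walk('', 0, 0)
    return out

assert gen_parens(3) == ["((()))", "(()())", "(())()", "()(())", "()()()"]
assert len(gen_parens(9)) == 4862   # Catalan(9)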
300\n\t\tself.lr = 1e-3\n\t\tself.batch_size = 50\n\t\tself.max_epochs = 300\n\t\tself.dropout = 0.2\n\t\tself.part_data = False\n\t\tself.portion = 1.0 \n\t\tself.save_path = \"./model/kidney/model/\"\n\t\tself.log_path = \"./model/log/\"\n\t\tself.print_freq = 100\n\t\tself.valid_freq = 10\n\n\t\tself.optimizer = 'Adam'\n\t\tself.clip_grad = None\n\t\tself.class_penalty = 1.0\n\t\tself.ngram = 60\n\t\tself.H_dis = 64\n\n\n\tdef __iter__(self):\n\t\tfor attr, value in self.__dict__.iteritems():\n\t\t\tyield attr, value\n\ndef emb_classifier(x, x_mask, y, dropout, opt, class_penalty):\n# comment notation\n\t# b: batch size, s: sequence length, e: embedding dim, c : num of class\n\tx_emb, W_norm = embedding(x, opt) # b * s * e\n\ty_pos = tf.argmax(y, -1)\n\ty_emb, W_class = embedding_class(y_pos, opt, 'class_emb') # b * e, c * e\n\tW_class_tran = tf.transpose(W_class, [1,0]) # e * c\n\tx_emb = tf.expand_dims(x_emb, 3) # b * s * e * 1\n\tH_enc = att_emb_ngram_encoder_maxout(x_emb, x_mask, W_class, W_class_tran, opt)\n\t#H_enc = tf.squeeze(H_enc)\n\tlogits, last_layer1 = discriminator_2layer(H_enc, opt, dropout, prefix='classify_', num_outputs=opt.num_class, is_reuse=False)\t# b * c\n\tlogits_class, last_layer2 = discriminator_2layer(W_class, opt, dropout, prefix='classify_', num_outputs=opt.num_class, is_reuse=True)\n\t# prob = tf.nn.softmax(logits)\n\tprob = tf.nn.sigmoid(logits)\n\tclass_y = tf.constant(name='class_y', shape=[opt.num_class, opt.num_class],\n\t\t\t\t\t\t dtype=tf.float32, value=np.identity(opt.num_class),)\n\t# correct_prediction = tf.equal(tf.argmax(prob, 1), tf.argmax(y, 1))\n\t# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\tloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)) +\t\t\t class_penalty * tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=class_y, logits=logits_class))\n\n\tglobal_step = tf.Variable(0, trainable=False)\n\ttrain_op = layers.optimize_loss(\n\t\tloss,\n\t\tglobal_step=global_step,\n\t\toptimizer=opt.optimizer,\n\t\tlearning_rate=opt.lr)\n\treturn prob, logits, loss, train_op, W_norm, global_step, H_enc, last_layer1\n\ndef get_model(train, train_lab,val, val_lab,test, test_lab, opt, max_auc, max_test_, det_data ):\n\t\"\"\"\n\tRetrained the model and saved it in the original folder.\n\tinputs:\n\t\ttrain: X training; train_lab: y train; val: X validation; test: X test; test_lab: y test; opt: class containing the hyper-parameters\n\toutput:\n\t\ttest auc, validation auc, list of determinants for the unlabeled set\n\t\"\"\"\n\tupdated=False\n\ttf.reset_default_graph()\n\tpred_batch_size = 80\n\twith tf.device('/gpu:0'):\n\t\tx_ = tf.placeholder(tf.int32, shape=[None, opt.maxlen])\n\t\tx_mask_ = tf.placeholder(tf.float32, shape=[None, opt.maxlen])\n\t\tkeep_prob = tf.placeholder(tf.float32)\n\t\ty_ = tf.placeholder(tf.float32, shape=[None, opt.num_class])\n\t\tclass_penalty_ = tf.placeholder(tf.float32, shape=())\t \n\t\tprob_, logits_, loss_, train_op, W_norm_, global_step, H_enc_, last_layer_ = emb_classifier(\n\t\t\tx_, x_mask_, y_, keep_prob, opt, class_penalty_)\n\n\n\tconfig = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True, )\n\tconfig.gpu_options.allow_growth = True\n\tnp.set_printoptions(precision=3)\n\tnp.set_printoptions(threshold=np.inf)\n\tsaver = tf.train.Saver()\n\n\tcovariance_input=None\n\tvalidation_list=[]\n\ttest_list=[]\n\tuidx = 0\n\tmax_val_auc_mean = max_auc\n\tmax_test_auc_mean = 0\n\ttest_auc_mean = max_test_\n\ttest_auc_mean_list = 
[]\n\tval_auc_mean_list = []\n\ttest_auc_lists = []\n\tval_auc_lists = []\n\n\twith tf.Session(config=config) as sess:\n\t\t\ttrain_writer = tf.summary.FileWriter(opt.log_path + '/train', sess.graph)\n\t\t\ttest_writer = tf.summary.FileWriter(opt.log_path + '/test', sess.graph)\n\t\t\tsess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\n\t\t\tsaver = tf.train.Saver()\n\n\t\t\tif opt.restore:\n\t\t\t\ttry:\n\t\t\t\t\tt_vars = tf.trainable_variables()\n\t\t\t\t\tsave_keys = tensors_key_in_file(opt.save_path)\n\t\t\t\t\tss = set([var.name for var in t_vars]) & set([s + \":0\" for s in save_keys.keys()])\n\t\t\t\t\tcc = {var.name: var for var in t_vars}\n\t\t\t\t\t# only restore variables with correct shape\n\t\t\t\t\tss_right_shape = set([s for s in ss if cc[s].get_shape() == save_keys[s[:-2]]])\n\n\t\t\t\t\tloader = tf.train.Saver(var_list=[var for var in t_vars if var.name in ss_right_shape])\n\t\t\t\t\tloader.restore(sess, opt.save_path)\n\n\t\t\t\t\tprint(\"Loading variables from '%s'.\" % opt.save_path)\n\t\t\t\t\tprint(\"Loaded variables:\" + str(ss))\n\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"No saving session, using random initialization\")\n\t\t\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\t\t\tsess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\n\n\t\t\ttry:\n\t\t\t\t#opt.max_epochs = 320\n\t\t\t\tepoch_num_iter = 5000\n\t\t\t\topt.valid_freq=1\n\t\t\t\tfor epoch in range(opt.max_epochs):\n\t\t\t\t\tprint(\"Starting epoch %d\" % epoch)\n\t\t\t\t\ttrain_loss_list = val_loss_list = []\n\t\t\t\t\t#kf = get_balanced_batch_idx(len(train), train_lab, opt.batch_size, opt.num_class, epoch_num_iter)\n\t\t\t\t\tkf=get_minibatches_idx(len(train), len(train), shuffle=False)\n\t\t\t\t\t#kf=get_balanced_batch(train_lab, len(train_lab))\n\t\t\t\t\tfor _, train_index in kf:\n\t\t\t\t\t\tuidx += 1\n\t\t\t\t\t\tsents = [train[t] for t in train_index]\n\t\t\t\t\t\tx_labels = [train_lab[t] for t in train_index]\n\t\t\t\t\t\tx_labels = np.array(x_labels)\n\t\t\t\t\t\tx_labels = x_labels.reshape((len(x_labels), opt.num_class))\n\t\t\t\t\t\tx_batch, x_batch_mask = prepare_data_for_emb(sents, opt)\n\t\t\t\t\t\t_, train_loss, step = sess.run([train_op, loss_, global_step], \n\t\t\t\t\t\t\t\t\t\t\t\t\t feed_dict={x_: x_batch, x_mask_: x_batch_mask, y_: x_labels,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t keep_prob: opt.dropout, class_penalty_:opt.class_penalty})\n\t\t\t\t\t\ttrain_loss_list.append(train_loss)\n\n\t\t\t\t\t\tif uidx % opt.valid_freq == 0:\n\t\t\t\t\t\t\ttrain_logits_list = []\n\t\t\t\t\t\t\ttrain_prob_list = []\n\t\t\t\t\t\t\ttrain_true_list = []\n\n\t\t\t\t\t\t\tkf_train = get_minibatches_idx(len(train), len(train), shuffle=False)\n\t\t\t\t\t\t\tfor _, train_index in kf_train:\n\t\t\t\t\t\t\t\ttrain_sents = [train[t] for t in train_index]\n\t\t\t\t\t\t\t\ttrain_labels = [train_lab[t] for t in train_index]\n\t\t\t\t\t\t\t\ttrain_labels = np.array(train_labels)\n\t\t\t\t\t\t\t\ttrain_labels = train_labels.reshape((len(train_labels), opt.num_class))\n\t\t\t\t\t\t\t\tx_train_batch, x_train_batch_mask = prepare_data_for_emb(train_sents, opt)\n\t\t\t\t\t\t\t\ttrain_prob, train_logits , train_last_layer= sess.run([prob_, logits_,last_layer_],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeed_dict={x_: x_train_batch, x_mask_: x_train_batch_mask, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_: train_labels, keep_prob: 1.0, class_penalty_:0.0})\n\t\t\t\t\t\t\t\t#last_layer needs to be appended if not full 
batches\n\t\t\t\t\t\t\t\t#print(type(train_last_layer))\n\t\t\t\t\t\t\t\t#train_last_layer=train_last_layer.numpy()\n\t\t\t\t\t\t\t\ttrain_logits_list += train_logits.tolist()\n\t\t\t\t\t\t\t\ttrain_prob_list += train_prob.tolist()\n\t\t\t\t\t\t\t\ttrain_true_list += train_labels.tolist()\n\n\t\t\t\t\t\t\ttrain_logits_array = np.asarray(train_logits_list)\n\t\t\t\t\t\t\ttrain_prob_array = np.asarray(train_prob_list)\n\t\t\t\t\t\t\ttrain_true_array = np.asarray(train_true_list)\n\t\t\t\t\t\t\ttrain_auc_list = []\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tfor i in range(opt.num_class):\n\t\t\t\t\t\t\t\ttrain_auc = roc_auc_score(y_true = train_true_array[:,i], y_score = train_logits_array[:,i],)\n\t\t\t\t\t\t\t\ttrain_auc_list.append(train_auc)\n\n\t\t\t\t\t\t\ttrain_auc_mean = np.mean(train_auc_list)\n\n\t\t\t\t\t\t\t#print(\"Iteration %d: Training Loss %f \" % (uidx, train_loss))\n\t\t\t\t\t\t\t#print(\"--\tTrain AUC Mean %f \" % train_auc_mean)\n\n\n\t\t\t\t\t\t\tval_loss = 0.\n\t\t\t\t\t\t\tval_logits_list = []\n\t\t\t\t\t\t\tval_prob_list = []\n\t\t\t\t\t\t\tval_true_list = []\n\n\t\t\t\t\t\t\tkf_val = get_minibatches_idx(len(val), len(val), shuffle=False)\n\t\t\t\t\t\t\tfor _, val_index in kf_val:\n\t\t\t\t\t\t\t\tval_sents = [val[t] for t in val_index]\n\t\t\t\t\t\t\t\tval_labels = [val_lab[t] for t in val_index]\n\t\t\t\t\t\t\t\tval_labels = np.array(val_labels)\n\t\t\t\t\t\t\t\tval_labels = val_labels.reshape((len(val_labels), opt.num_class))\n\t\t\t\t\t\t\t\tx_val_batch, x_val_batch_mask = prepare_data_for_emb(val_sents, opt)\n\t\t\t\t\t\t\t\tval_prob, val_logits, val_loss_ = sess.run([prob_, logits_, loss_],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t feed_dict={x_: x_val_batch, x_mask_: x_val_batch_mask, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_: val_labels, keep_prob: 1.0, class_penalty_:0.0})\n\t\t\t\t\t\t\t\tval_loss += val_loss_ * len(val_index)\n\n\t\t\t\t\t\t\t\tval_logits_list += val_logits.tolist()\n\t\t\t\t\t\t\t\tval_prob_list += val_prob.tolist()\n\t\t\t\t\t\t\t\tval_true_list += val_labels.tolist()\n\n\n\t\t\t\t\t\t\tval_loss_list.append(val_loss/len(val))\n\n\t\t\t\t\t\t\tval_logits_array = np.asarray(val_logits_list)\n\t\t\t\t\t\t\tval_prob_array = np.asarray(val_prob_list)\n\t\t\t\t\t\t\tval_true_array = np.asarray(val_true_list)\n\t\t\t\t\t\t\tval_auc_list = []\n\n\t\t\t\t\t\t\tfor i in range(opt.num_class):\n\t\t\t\t\t\t\t\tval_auc = roc_auc_score(y_true = val_true_array[:,i], y_score = val_logits_array[:,i],)\n\t\t\t\t\t\t\t\tval_auc_list.append(val_auc)\n\n\t\t\t\t\t\t\tval_auc_mean = np.mean(val_auc_list)\n\t\t\t\t\t\t\tvalidation_list.append(val_auc_mean)\n\t\t\t\t\t\t\tprint(\"-- Validation AUC Mean %f \" % val_auc_mean)\n\n\t\t\t\t\t\t\tif val_auc_mean > max_val_auc_mean:\n\t\t\t\t\t\t\t\tmax_val_auc_mean = val_auc_mean\n\t\t\t\t\t\t\t\tval_auc_mean_list.append(val_auc_mean)\n\t\t\t\t\t\t\t\tval_auc_lists.append(val_auc_list)\n\t\t\t\t\t\t\t\tsave_path = saver.save(sess, opt.save_path)\n\t\t\t\t\t\t\t\tprint('Max Validation AUC: '+str(max_val_auc_mean))\n\t\t\t\t\t\t\t\tprint ('Model saved...')\n\t\t\t\t\t\t\t\tupdated=True\n\t\t\t\t\t\t\t\t#last layer\n\t\t\t\t\t\t\t\tcovariance_phi=train_last_layer\n\t\t\t\t\t\t\t\tcovariance_sigma=train_prob_array\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\ttest_logits_list = []\n\t\t\t\t\t\t\t\ttest_prob_list = []\n\t\t\t\t\t\t\t\ttest_true_list = []\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tkf_test = get_minibatches_idx(len(test), len(test), shuffle=False)\n\t\t\t\t\t\t\t\tfor _, 
test_index in kf_test:\n\t\t\t\t\t\t\t\t\ttest_sents = [test[t] for t in test_index]\n\t\t\t\t\t\t\t\t\ttest_labels = [test_lab[t] for t in test_index]\n\t\t\t\t\t\t\t\t\ttest_labels = np.array(test_labels)\n\t\t\t\t\t\t\t\t\ttest_labels = test_labels.reshape((len(test_labels), opt.num_class))\n\t\t\t\t\t\t\t\t\tx_test_batch, x_test_batch_mask = prepare_data_for_emb(test_sents, opt)\n\t\t\t\t\t\t\t\t\ttest_prob, test_logits = sess.run([prob_, logits_],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t feed_dict={x_: x_test_batch, x_mask_: x_test_batch_mask, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_: test_labels, keep_prob: 1.0,class_penalty_:0.0})\n\n\t\t\t\t\t\t\t\t\ttest_logits_list += test_logits.tolist()\n\t\t\t\t\t\t\t\t\ttest_prob_list += test_prob.tolist()\n\t\t\t\t\t\t\t\t\ttest_true_list += test_labels.tolist()\n\n\t\t\t\t\t\t\t\ttest_logits_array = np.asarray(test_logits_list)\n\t\t\t\t\t\t\t\ttest_prob_array = np.asarray(test_prob_list)\n\t\t\t\t\t\t\t\ttest_true_array = np.asarray(test_true_list)\n\t\t\t\t\t\t\t\ttest_auc_list = []\n\n\t\t\t\t\t\t\t\tfor i in range(opt.num_class):\n\t\t\t\t\t\t\t\t\ttest_auc = roc_auc_score(y_true = test_true_array[:,i], y_score= test_logits_array[:,i],)\n\t\t\t\t\t\t\t\t\ttest_auc_list.append(test_auc)\n\n\t\t\t\t\t\t\t\ttest_auc_mean = np.mean(test_auc_list)\n\t\t\t\t\t\t\t\ttest_auc_mean_list.append(test_auc_mean)\n\t\t\t\t\t\t\t\ttest_auc_lists.append(test_auc_list)\n\t\t\t\t\t\t\t\tprint(\"-- Test AUC%f \" % test_auc_mean)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpass\t\n\t\t\t\t\t\t\ttest_list.append(test_auc_mean)\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tprint('Training interupted')\n\t\t\t\tprint(\"Max VAL AUC Mean %f \" % max_val_auc_mean)\t\t\t\t\n\tif not updated:\n\t\treturn test_auc_mean, max_val_auc_mean, None\n\telse:\n\t\tcov_matrix=covariance_matrix(covariance_sigma, covariance_phi)\n\t\tsigma, phi=input_determinants(det_data, opt)\n\t\tdeterminants=get_determinants(cov_matrix, phi, sigma)\n\t\tprint ('determinants gotten.....')\n\t\treturn test_auc_mean, max_val_auc_mean, determinants\ndef main():\n\tlabels=pickle.load(open('model/new_dict_updt.p', 'rb'))\n\tmax_auc=pd.read_csv('model/kidney/max_auc.csv')\n\tmax_auc_val=max_auc['AUC_val'].iloc[0]\n\tmax_auc_test=max_auc['AUC_test'].iloc[0]\n\n\t#import files created by Guoyin\n\twith open('model/kidney/train.p', 'rb') as f:\n\t\tdata=pickle.load(f)\n\t#dictionary token to id\n\tword2id=data[3]\n\tid2word=data[4]\n\tembeddings=load_word2vec_matias(word2id, 'model/random_sampling/CTword2vec_clean')\n\tx_full=data[0]\n\ty=data[2]\n\t#class_weight={0:(1.0/np.sum(y[:,0])), 1:(1.0/np.sum(y[:,1])), 2:(1.0/np.sum(y[:,2]))}\n\t#import files\n\twith open('model/random_sampling/test.p', 'rb') as f:\n\t\ttest=pickle.load(f)\n\tnoteix_0=x_full\n\tnoteix_test=test[0]\n\ty_test=test[2]\n\twordtoix=word2id\n\tixtoword=id2word\n\tmax_test=test[-1]\n\tmax_len_0=max(data[-1], max_test)\n\tkidney=labels[0][0]\n\n\topt = Options()\n\n\topt.num_class = 1\n\topt.class_name = [kidney]\n\n\topt.n_words = len(ixtoword)\n\tos.environ['CUDA_VISIBLE_DEVICES'] = str(opt.GPUID)\n\topt.W_emb = np.float32(embeddings)\n\topt.W_class_emb = load_embb_new(wordtoix, opt)\n\tprint('Total words: %d' % opt.n_words)\n\n\topt.maxlen=max_len_0\n\topt.emb_size=300\n\topt.H_dis=150\n\topt.restore=True\n\topt.max_epochs=25\n\n\ttrain=noteix_0\n\tval=noteix_test[:100]\n\ttrain_lab=y[:]\n\tval_lab=y_test[:100]\n\ttest=noteix_test[100:]\n\ttest_lab=y_test[100:]\n\tdatabase=pd.read_csv('database.csv', 
encoding=\"latin-1\")\n\tdet_data=database.loc[database['valid'],'tokens_num'].values\n\tdet_data=[ast.literal_eval(x) for x in det_data]\n\n\treturn get_model(train, train_lab, val, val_lab, test, test_lab, opt, max_auc_val, max_auc_test, det_data)\n\n\n\n","sub_path":"model/get_auc.py","file_name":"get_auc.py","file_ext":"py","file_size_in_byte":13557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"64401331","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Tablib - ODF Support.\n\"\"\"\n\nimport sys\n\n\nif sys.version_info[0] > 2:\n from io import BytesIO\nelse:\n from cStringIO import StringIO as BytesIO\n\nfrom tablib.compat import opendocument, style, table, text, unicode\n\ntitle = 'ods'\nextensions = ('ods',)\n\nbold = style.Style(name=\"bold\", family=\"paragraph\")\nbold.addElement(style.TextProperties(fontweight=\"bold\", fontweightasian=\"bold\", fontweightcomplex=\"bold\"))\n\ndef export_set(dataset):\n \"\"\"Returns ODF representation of Dataset.\"\"\"\n\n wb = opendocument.OpenDocumentSpreadsheet()\n wb.automaticstyles.addElement(bold)\n\n ws = table.Table(name=dataset.title if dataset.title else 'Tablib Dataset')\n wb.spreadsheet.addElement(ws)\n dset_sheet(dataset, ws)\n\n stream = BytesIO()\n wb.save(stream)\n return stream.getvalue()\n\n\ndef export_book(databook):\n \"\"\"Returns ODF representation of DataBook.\"\"\"\n\n wb = opendocument.OpenDocumentSpreadsheet()\n wb.automaticstyles.addElement(bold)\n\n for i, dset in enumerate(databook._datasets):\n ws = table.Table(name=dset.title if dset.title else 'Sheet%s' % (i))\n wb.spreadsheet.addElement(ws)\n dset_sheet(dset, ws)\n\n\n stream = BytesIO()\n wb.save(stream)\n return stream.getvalue()\n\n\ndef dset_sheet(dataset, ws):\n \"\"\"Completes given worksheet from given Dataset.\"\"\"\n _package = dataset._package(dicts=False)\n\n for i, sep in enumerate(dataset._separators):\n _offset = i\n _package.insert((sep[0] + _offset), (sep[1],))\n\n for i, row in enumerate(_package):\n row_number = i + 1\n odf_row = table.TableRow(stylename=bold, defaultcellstylename='bold')\n for j, col in enumerate(row):\n try:\n col = unicode(col, errors='ignore')\n except TypeError:\n ## col is already unicode\n pass\n ws.addElement(table.TableColumn())\n\n # bold headers\n if (row_number == 1) and dataset.headers:\n odf_row.setAttribute('stylename', bold)\n ws.addElement(odf_row)\n cell = table.TableCell()\n p = text.P()\n p.addElement(text.Span(text=col, stylename=bold))\n cell.addElement(p)\n odf_row.addElement(cell)\n\n # wrap the rest\n else:\n try:\n if '\\n' in col:\n ws.addElement(odf_row)\n cell = table.TableCell()\n cell.addElement(text.P(text=col))\n odf_row.addElement(cell)\n else:\n ws.addElement(odf_row)\n cell = table.TableCell()\n cell.addElement(text.P(text=col))\n odf_row.addElement(cell)\n except TypeError:\n ws.addElement(odf_row)\n cell = table.TableCell()\n cell.addElement(text.P(text=col))\n odf_row.addElement(cell)","sub_path":"desktop/core/ext-py/tablib-0.10.0/tablib/formats/_ods.py","file_name":"_ods.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"61467055","text":"from django.conf.urls import url\nfrom computer.views import *\n\napp_name = 'computer'\n\nurlpatterns = [\n url(r'^$', Computer.Index, name='Homepage'),\n url(r'^matlab$', Computer.Matlab, name='Matlab'),\n url(r'^simulink$', Computer.Simulink, name='Simulink'),\n url(r'^psat$', Computer.Psat, 
name='Psat'),\n\n url(r'^psatinstall$', Computer.PsatInstall, name='PsatInstall'),\n url(r'^psatplot$', Computer.PsatPlot, name='PsatPlot'),\n url(r'^psatintro$', Computer.PsatIntro, name='PsatIntro'),\n url(r'^psatloadflow$', Computer.PsatLoadFlow, name='PsatLoadFlow'),\n url(r'^psatexample$', Computer.PsatExample, name='PsatExample'),\n url(r'^psatgraph$', Computer.PsatGraph, name='PsatGraph'),\n\n\n\n url(r'^matinverse$', Matlab.InverseMatrix, name='MatInverse'),\n\n\n\n url(r'^simrccircuit$', Simulink.SeriesRC, name='SimRC'),\n\n]\n","sub_path":"computer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"3768993","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse\nfrom Buy.models import *\nimport json\nimport datetime\nimport requests\nimport os\nfrom dateutil.parser import parse\nfrom django.forms.models import model_to_dict\nimport re\nfrom math import ceil\nfrom django.utils import timezone\nimport csv\n\ndef update_bearer():\n if Token.objects.all().exists():\n if Token.objects.latest('expires').expires.date() > datetime.datetime.today().date():\n return Token.objects.latest('expires')\n r = requests.get('https://api.tcgplayer.com/token', headers={'Content-Type':'application/x-www-form-urlencoded', 'X-Tcg-Access-Token':os.environ['access_token']}, data={'grant_type':'client_credentials', 'client_id':os.environ['public_key'], 'client_secret':os.environ['private_key']})\n response_dict = r.json()\n print(response_dict)\n token = Token(bearer=response_dict['access_token'], expires=parse(response_dict['.expires']))\n token.save()\n return token\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html')\n\ndef buy(request):\n #Ensure TCGPlayer bearer token is valid\n bearer_token = update_bearer()\n return render(request, 'buy.html', {'bearer':bearer_token.bearer})\n\ndef sell(request):\n #Ensure TCGPlayer bearer token is valid\n bearer_token = update_bearer()\n return render(request, 'sell.html', {'bearer':bearer_token.bearer, 'store_key':os.environ['store_key']})\n\ndef trade(request):\n return HttpResponse(\"Oops! 
This isn't implemented yet.\")\n\ndef seller_info(request):\n if request.method != \"GET\":\n return redirect(\"/\")\n name = request.GET[\"name\"]\n try:\n seller = Seller.objects.get(name=name)\n return JsonResponse({'found':True, 'email':seller.email, 'phone':seller.phone, 'notes':seller.notes, 'id':seller.id})\n except:\n return JsonResponse({'found':False})\n\ndef query_price(request):\n if request.method != \"GET\":\n return redirect(\"/\")\n card = {'name':request.GET['name'], 'tcgplayer_NM_id':request.GET['NM_id'], 'tcgplayer_LP_id':request.GET['LP_id'], 'tcgplayer_card_id':request.GET['card_id']}\n results = price_check(card)\n results['context'] = request.GET['context']\n return JsonResponse(results)\n\ndef report_buy(request):\n if request.method != \"POST\":\n return redirect(\"/\")\n print(request.POST)\n try:\n if 'seller_id' in request.POST.keys():\n seller = Seller.objects.get(pk=request.POST['seller_id'])\n else:\n name_search = Seller.objects.filter(name=request.POST['seller_name'])\n if len(name_search) == 1:\n seller = name_search[0]\n else:\n seller = Seller(\n name = request.POST['seller_name'],\n email = request.POST['seller_email'],\n phone = request.POST['seller_phone']\n )\n seller.save()\n if 'seller_notes' in request.POST.keys() and request.POST['seller_notes'] != \"\":\n seller.notes = request.POST['seller_notes']\n if 'seller_email' in request.POST.keys() and request.POST['seller_email'] != \"\":\n seller.email = request.POST['seller_email']\n if 'seller_phone' in request.POST.keys() and request.POST['seller_phone'] != \"\":\n seller.phone = request.POST['seller_phone']\n seller.save()\n\n block = CardPurchaseBlock(seller=seller, payment_method=request.POST['paymentmethod'])\n block.save()\n index = 0\n errors = \"\"\n total_buy = 0\n bearer = \"bearer \" + update_bearer().bearer\n #Regex parse each key named 'card_name_/d', use those indices, catch specific errors\n\n for key in request.POST.keys():\n match = re.match(r'card_name_(\\d+)', key)\n if match:\n index = match.group(1)\n if (request.POST['card_name_'+str(index)] != \"\") and (int(request.POST['quantity_'+str(index)]) > 0):\n try:\n for i in range(0, int(request.POST['quantity_'+str(index)])):\n single = SingleCardPurchase(\n block = block,\n name = request.POST['card_name_'+str(index)],\n expansion = request.POST['expansion_'+str(index)],\n tcgplayer_card_id = request.POST['tcgplayer_card_id_'+str(index)],\n tcgplayer_NM_id = request.POST['tcgplayer_nm_id_'+str(index)],\n tcgplayer_LP_id = request.POST['tcgplayer_lp_id_'+str(index)],\n buy_price = float(request.POST['price_'+str(index)]),\n initial_sell_price = float(request.POST['sell_price_'+str(index)]),\n base_price = float(request.POST['sell_price_'+str(index)]),\n lowest_listing_at_buy = float(request.POST['lowest_listing_at_buy_'+str(index)]),\n lowest_direct_at_buy = float(request.POST['lowest_direct_at_buy_'+str(index)]),\n market_price_at_buy = float(request.POST['market_price_'+str(index)])\n )\n single.save()\n total_buy += float(request.POST['price_'+str(index)])\n except:\n errors += single.name + \" not auto-listed
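# Aside: the auto-list branch just below does a read-modify-write against the
# TCGplayer SKU inventory endpoints (GET the current quantity, then PUT
# price+quantity). A self-contained sketch of that step factored into a
# helper; the URL shape and payload keys are copied from the source, while
# the helper name and its arguments are mine:
import os
import requests

def bump_sku(bearer, sku_id, add_qty, price):
    base = ("http://api.tcgplayer.com/v1.10.0/stores/"
            + os.environ['store_key'] + "/inventory/skus/" + str(sku_id))
    r = requests.get(base + "/quantity", headers={'Authorization': bearer})
    qty = 0
    if r.status_code == 200 and r.json().get('success'):
        qty = int(r.json()['results'][0]['quantity'])
    return requests.put(base,
                        headers={'Authorization': bearer,
                                 'Content-Type': 'application/json'},
                        json={'price': price, 'quantity': qty + add_qty,
                              'channelId': 0})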
\"\n continue\n if ('auto_list_'+str(index) in request.POST.keys() and request.POST['auto_list_'+str(index)] == \"on\"):\n old_quantity = 0\n r = requests.get(\"http://api.tcgplayer.com/v1.10.0/stores/\"+os.environ['store_key']+\"/inventory/skus/\"+single.tcgplayer_card_id+\"/quantity\", headers={'Authorization':bearer})\n if (r.status_code == 200 and r.json()['success']):\n old_quantity = int(r.json()['results'][0]['quantity'])\n quantity = old_quantity + int(request.POST['quantity_'+str(index)])\n r = requests.put(\"http://api.tcgplayer.com/v1.10.0/stores/\"+os.environ['store_key']+\"/inventory/skus/\"+single.tcgplayer_card_id, headers={'Authorization':bearer, 'Content-Type':'application/json'}, json={'price':(single.initial_sell_price * 1.1 if single.initial_sell_price * 1.1 <= single.initial_sell_price + 5 else single.initial_sell_price + 5), 'quantity':quantity, 'channelId':0})\n if (r.status_code != 200):\n errors += single.name + \" price/quantity not updated
\"\n else:\n if single.initial_sell_price > 1:\n errors += single.name + \" to case
\"\n else:\n errors += single.name + \" to box
\"\n else:\n errors += single.name + \" not auto-listed
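# Aside: the PUT above lists at the buy-side sell estimate marked up 10% but
# capped at +$5, i.e. min(p * 1.1, p + 5); as a tiny helper (name mine):
def listing_price(p):
    return p * 1.1 if p * 1.1 <= p + 5 else p + 5

assert listing_price(100.0) == 105.0              # cap wins above $50
assert abs(listing_price(10.0) - 11.0) < 1e-9     # markup wins below $50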
\"\n except Exception as e:\n print(e)\n\n if request.POST['paymentmethod'] == 'Store Credit':\n errors += \"
Give \" + seller.name + \" $\" + str(total_buy) + \" Store Credit\"\n else:\n errors += \"
Give \" + seller.name + \" $\" + str(total_buy) + \" Cash\"\n\n    return render(request, 'post.html', {'message':errors})\n\ndef report_sell(request):\n    if request.method != \"POST\":\n        return redirect(\"/\")\n    print(request.POST)\n    if 'buyer_id' in request.POST.keys():\n        buyer = Seller.objects.get(pk=request.POST['buyer_id'])\n    else:\n        buyer = Seller(\n            name = request.POST['buyer_name'],\n            email = request.POST['buyer_email'],\n            phone = request.POST['buyer_phone']\n        )\n        buyer.save()\n    if 'buyer_notes' in request.POST.keys() and request.POST['buyer_notes'] != \"\":\n        buyer.notes = request.POST['buyer_notes']\n    if 'buyer_email' in request.POST.keys() and request.POST['buyer_email'] != \"\":\n        buyer.email = request.POST['buyer_email']\n    if 'buyer_phone' in request.POST.keys() and request.POST['buyer_phone'] != \"\":\n        buyer.phone = request.POST['buyer_phone']\n    buyer.save()\n\n    bearer = \"bearer \" + update_bearer().bearer\n\n    errors = \"\"\n    total_price = 0\n    for key in request.POST.keys():\n        match = re.match(r'card_name_(\\d+)', key)\n        if match:\n            index = match.group(1)\n            if (request.POST['card_name_'+str(index)] != \"\") and (int(request.POST['quantity_'+str(index)]) > 0):\n                r = requests.get(\"http://api.tcgplayer.com/v1.10.0/stores/\"+os.environ['store_key']+\"/inventory/skus/\"+request.POST['tcgplayer_card_id_'+str(index)]+\"/quantity\", headers={\"Authorization\":bearer})\n                try:\n                    print(r.json())\n                    inStock = r.json()['results'][0]['quantity']\n                    foil_cond_name = request.POST['condition_'+str(index)] + \" \" + request.POST['card_name_'+str(index)]\n                    if inStock < int(request.POST['quantity_'+str(index)]):\n                        new_quantity = 0\n                        errors += \"Only have \" + str(inStock) + \" \" + foil_cond_name + \" from \" + request.POST['expansion_'+str(index)] + \" available to sell
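# Aside: this branch clamps a sale to the stock on hand; the quantity really
# sold is min(requested, in_stock), the same quantity the min(...) call
# further down uses when marking purchases as sold. As a tiny helper (the
# function name is mine, not from the source):
def clamp_sale(requested, in_stock):
    sold = min(requested, in_stock)
    return sold, in_stock - sold

assert clamp_sale(4, 10) == (4, 6)   # enough stock: sell all requested
assert clamp_sale(4, 3) == (3, 0)    # short stock: sell what is left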
\"\n total_price += inStock * float(request.POST['price_'+str(index)])\n else:\n new_quantity = inStock - int(request.POST['quantity_'+str(index)])\n errors += \"Sold \" + request.POST['quantity_'+str(index)] + \" \" + foil_cond_name + \" from \" + request.POST['expansion_'+str(index)] + \"
\"\n total_price += int(request.POST['quantity_'+str(index)]) * float(request.POST['price_'+str(index)])\n update = requests.post(\"http://api.tcgplayer.com/v1.10.0/stores/\" + os.environ['store_key'] + \"/inventory/skus/\" + request.POST['tcgplayer_card_id_'+str(index)] + \"/quantity\", headers={\"Authorization\":bearer}, json={\"quantity\":new_quantity - inStock})\n #update = requests.put(\"http://api.tcgplayer.com/v1.10.0/stores/\" + os.environ['store_key'] +\"/inventory/skus/\" + request.POST['tcgplayer_card_id_'+str(index)], headers={\"Authorization\":bearer, 'Content-Type':'application/json'}, json={'quantity':new_quantity, 'price':float(request.POST['price_'+str(index)]), 'channelId':0})\n singles = SingleCardPurchase.objects.filter(tcgplayer_card_id=int(request.POST['tcgplayer_card_id_'+str(index)]), sold_on=None)\n spec = requests.get(\"http://api.tcgplayer.com/v1.10.0/pricing/sku/\" + request.POST['tcgplayer_card_id_'+str(index)], headers={\"Authorization\":bearer})\n for i in range(min(inStock, int(request.POST['quantity_'+str(index)]))):\n if len(singles) > i:\n singles[i].sold_on = timezone.now()\n singles[i].sell_price = float(request.POST['price_'+str(index)])\n if spec.json()['success']:\n if spec.json()['results'][0]['marketPrice'] != None:\n singles[i].market_price_at_sell = spec.json()['results'][0]['marketPrice']\n else:\n singles[i].market_price_at_sell = 0\n if spec.json()['results'][0]['directLowPrice'] != None:\n singles[i].lowest_direct_at_sell = spec.json()['results'][0]['directLowPrice']\n else:\n singles[i].lowest_direct_at_sell = 0\n if spec.json()['results'][0]['lowestListingPrice'] != None:\n singles[i].lowest_listing_at_sell = spec.json()['results'][0]['lowestListingPrice']\n else:\n singles[i].lowest_listing_at_sell = 0\n singles[i].in_house_sale = True\n singles[i].save()\n else:\n sale = UntrackedCardSale( name = request.POST['card_name_'+str(index)],\n expansion = request.POST['expansion_'+str(index)],\n tcgplayer_card_id = int(request.POST['tcgplayer_card_id_'+str(index)]),\n sold_on = timezone.now(),\n sell_price = float(request.POST['price_'+str(index)]),\n in_house_sale = True)\n if spec.json()['success']:\n if spec.json()['results'][0]['marketPrice'] != None:\n sale.market_price_at_sell = spec.json()['results'][0]['marketPrice']\n else:\n sale.market_price_at_sell = 0\n if spec.json()['results'][0]['directLowPrice'] != None:\n sale.lowest_direct_at_sell = spec.json()['results'][0]['directLowPrice']\n else:\n sale.lowest_direct_at_sell = 0\n if spec.json()['results'][0]['lowestListingPrice'] != None:\n sale.lowest_listing_at_sell = spec.json()['results'][0]['lowestListingPrice']\n else:\n sale.lowest_listing_at_sell = 0\n sale.save()\n except Exception as e:\n errors += request.POST['card_name_'+str(index)] + \" not sold
\"\n print(e)\n\n errors += \"
Ring up \" + str(total_price) + \" in Crystal Commerce (under Roboklein Magic Singles)\"\n\n return render(request, 'post.html', {'message':errors})\n\ndef download_results(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"sales_' + str(timezone.now()) + '_data.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['Name', 'Expansion', 'Bought On', 'Buy Price', 'Lowest Listing (buy)', 'Lowest Direct (buy)', 'Market (buy)', 'Initial Sell Price', 'Sold On', 'Final Sell Price', 'Market (sell)', 'Lowest Listing (sell)', 'Lowest Direct (sell)', 'In House'])\n\n for card in SingleCardPurchase.objects.all():\n writer.writerow([card.name, card.expansion, card.block.bought_on, card.buy_price, card.lowest_listing_at_buy, card.lowest_direct_at_buy, card.market_price_at_buy, card.initial_sell_price, card.sold_on, card.sell_price, card.market_price_at_sell, card.lowest_listing_at_sell, card.lowest_direct_at_sell, card.in_house_sale])\n\n return response\n\ndef price_check(card):\n context = \"\"\n try:\n card = model_to_dict(card)\n except:\n pass\n\n bearer = \"bearer \" + update_bearer().bearer\n\n if card['tcgplayer_NM_id'] and card['tcgplayer_LP_id']:\n r = requests.get(\"http://api.tcgplayer.com/v1.10.0/pricing/sku/\" + str(card['tcgplayer_NM_id']) + \",\" + str(card['tcgplayer_LP_id']), headers={\"Authorization\":bearer})\n if r.json()['success']:\n standard = False\n standard_list = [thing.name for thing in StandardSet.objects.all()]\n card_details = requests.get(\"http://api.tcgplayer.com/v1.10.0/catalog/products\", headers={\"Authorization\":bearer}, data={'categoryId':1, 'productName':card['name'], 'limit':50})\n if card_details.json()['success']:\n group_id_string = \"\"\n for data in card_details.json()['results']:\n group_id_string = group_id_string + \",\" + str(data['groupId'])\n card_standard = requests.get(\"http://api.tcgplayer.com/v1.10.0/catalog/groups/\" + group_id_string, headers={\"Authorization\":bearer})\n if card_standard.json()['success']:\n for group in card_standard.json()['results']:\n if group['name'] in standard_list:\n standard = True\n\n premium = 1\n market = 0\n direct_low = 0\n low = 0\n #TODO make the algorithm happen\n if r.json()['results'][0]['directLowPrice'] and r.json()['results'][1]['directLowPrice']:\n if standard:\n direct_low = (r.json()['results'][0]['directLowPrice'] + r.json()['results'][1]['directLowPrice'])/2\n if r.json()['results'][0]['marketPrice'] and r.json()['results'][1]['marketPrice']:\n market = (r.json()['results'][0]['marketPrice'] + r.json()['results'][1]['marketPrice'])/2\n elif r.json()['results'][0]['marketPrice']:\n market = r.json()['results'][0]['marketPrice']\n else:\n market = r.json()['results'][1]['marketPrice']\n elif r.json()['results'][0]['directLowPrice'] < r.json()['results'][1]['directLowPrice']:\n direct_low = r.json()['results'][0]['directLowPrice']\n market = r.json()['results'][0]['marketPrice']\n else:\n direct_low = r.json()['results'][1]['directLowPrice']\n market = r.json()['results'][1]['marketPrice']\n elif r.json()['results'][1]['directLowPrice']:\n direct_low = r.json()['results'][1]['directLowPrice']\n market = r.json()['results'][1]['marketPrice']\n elif r.json()['results'][0]['directLowPrice']:\n direct_low = r.json()['results'][0]['directLowPrice']\n market = r.json()['results'][0]['marketPrice']\n\n if r.json()['results'][0]['lowestListingPrice'] and r.json()['results'][1]['lowestListingPrice']:\n if standard:\n low = 
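# Aside: download_results above streams a CSV straight into an HttpResponse,
# which csv.writer accepts because the response object is file-like. The same
# pattern in miniature (field list shortened; helper name mine):
import csv
from django.http import HttpResponse

def tiny_csv(rows):
    resp = HttpResponse(content_type='text/csv')
    resp['Content-Disposition'] = 'attachment; filename="sales.csv"'
    writer = csv.writer(resp)
    writer.writerow(['Name', 'Buy Price', 'Sell Price'])
    for row in rows:
        writer.writerow(row)
    return resp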
(r.json()['results'][0]['lowestListingPrice'] + r.json()['results'][1]['lowestListingPrice'])/2\n if r.json()['results'][0]['marketPrice'] and r.json()['results'][1]['marketPrice']:\n market = (r.json()['results'][0]['marketPrice'] + r.json()['results'][1]['marketPrice'])/2\n elif r.json()['results'][0]['marketPrice']:\n market = r.json()['results'][0]['marketPrice']\n else:\n market = r.json()['results'][1]['marketPrice']\n elif r.json()['results'][0]['lowestListingPrice'] < r.json()['results'][1]['lowestListingPrice']:\n low = r.json()['results'][0]['lowestListingPrice']\n market = r.json()['results'][0]['marketPrice']\n else:\n low = r.json()['results'][1]['lowestListingPrice']\n market = r.json()['results'][1]['marketPrice']\n elif r.json()['results'][0]['lowestListingPrice']:\n low = r.json()['results'][0]['lowestListingPrice']\n market = r.json()['results'][0]['marketPrice']\n elif r.json()['results'][1]['lowestListingPrice']:\n low = r.json()['results'][1]['lowestListingPrice']\n market = r.json()['results'][1]['marketPrice']\n\n if direct_low == 0 and low == 0:\n return {\"error\":\"Cannot Price \"+card['name']}\n\n if direct_low == None:\n direct_low = 0\n if low == None:\n low = 0\n min_base = max(direct_low, low)\n if abs(direct_low-low)>=5 and direct_low != 0 and low != 0:\n premium = premium - 0.05\n\n if not standard:\n premium = premium + 0.07\n\n card_cond = requests.get(\"http://api.tcgplayer.com/v1.10.0/catalog/skus/\" + str(card['tcgplayer_card_id']), headers={\"Authorization\":bearer})\n if card_cond.json()['success']:\n if card_cond.json()['results'][0]['conditionId'] == 1:\n min_base = min_base * 1.05\n if card_cond.json()['results'][0]['conditionId'] == 3:\n min_base = min_base * 0.95\n elif card_cond.json()['results'][0]['conditionId'] == 4:\n min_base = min_base * 0.75\n elif card_cond.json()['results'][0]['conditionId'] == 5:\n min_base = min_base * 0.55\n\n if card_cond.json()['results'][0]['variantId'] == 2:\n premium = premium - 0.07\n\n final_price = ceil(min_base * premium * 100)/100.0\n if final_price > min_base + 5:\n final_price = min_base + 5\n elif final_price < min_base - 5:\n final_price = min_base - 5\n\n return {\"price\":final_price}\n else:\n return {\"error\":\"Could not connect to pricing data\"}\n return {\"error\":\"Insufficient Data\"}\n","sub_path":"Buy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"109513164","text":"from channels.generic.websocket import WebsocketConsumer\nfrom asgiref.sync import async_to_sync\nfrom .serializers import MessageSerializer\nimport json\nfrom .models import *\n\n\nclass ChatConsumer(WebsocketConsumer):\n def connect(self):\n if self.scope.get('user') is None:\n self.close(code=403)\n return\n\n self.user = self.scope['user']\n self.roomId = str(self.scope['url_route']['kwargs']['roomId'])\n\n async_to_sync(self.channel_layer.group_add)(\n self.roomId,\n self.channel_name\n )\n\n self.accept()\n\n def disconnect(self, code):\n async_to_sync(self.channel_layer.group_discard)(\n self.roomId,\n self.channel_name\n )\n\n def receive(self, text_data=None, bytes_data=None):\n message = json.loads(text_data)\n new_message = Message(author=self.user, text=message['text'], room_id=self.roomId)\n new_message.save()\n\n async_to_sync(self.channel_layer.group_send)(\n self.roomId,\n {\n 'type': 'chat_message',\n 'message': MessageSerializer(new_message).data\n }\n )\n\n def chat_message(self, 
event):\n message = event['message']\n self.send(text_data=json.dumps({\n 'message': message\n }))\n","sub_path":"pychat/api/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"396399449","text":"# -*- coding: utf-8 -*-\r\n#!/usr/bin/python3\r\n\r\n###################################################################\r\n# Mise en place d'un client simple\r\n# simulation d'une connexion client/serveur\r\n#\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" version threadee par SABRI MJAHED \"\"\"\"\"\"\"\"\"\"\"#\r\n\r\nimport socket,sys,_thread,threading\r\n\r\ndef send():\r\n msgClient = \"\"\r\n\r\n #On demande le nom du client\r\n msgClient = input(\"Veuillez entrer votre nom : \")\r\n nomClient = msgClient\r\n msgClient=msgClient.encode()\r\n sock.send(msgClient)\r\n\r\n while msgServer.upper() != 'FIN':\r\n\r\n msgClient=input(nomClient+ \" : \")\r\n msgClient=msgClient.encode()\r\n \r\n sock.send(msgClient)\r\n \r\ndef receive():\r\n global msgServer\r\n msgServer = \"\"\r\n\r\n while msgServer.upper() != 'FIN':\r\n data = sock.recv(1024)\r\n msgServer = data.decode()\r\n \r\n #On affiche pas les message de confirmation \r\n if msgServer != \"MSG ACK\":\r\n print(msgServer)\r\n \r\n #Quand on deconnecte le serveur, celui-ci envoi un ''\r\n if msgServer == '':\r\n msgServer = \"FIN\"\r\n\r\n\r\n if msgServer.upper() == 'FIN':\r\n print (\" Fermeture de la connexion \")\r\n sock.close()\r\n \r\nif __name__ == '__main__':\r\n # création d'un socket pour la connexion avec le serveur en local\r\n sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n try:\r\n # connexion au serveur, bloc surveillé, et gestion de l'exception\r\n sock.connect(('127.0.0.1',12810))\r\n\r\n except socket.error:\r\n print(\"la connexion a échoué.......\")\r\n sys.exit()\r\n\r\n print(\"--- Connexion établie avec le serveur ---\")\r\n\r\n thread_receive = threading.Thread(target=receive)\r\n thread_receive.start()\r\n \r\n thread_send = threading.Thread(target=send)\r\n thread_send.start()\r\n\r\n thread_send.join()\r\n thread_receive.join()\r\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"569584900","text":"from graphviz import Digraph\n\nclass LightGraph:\n\n def __init__(self, ligh_shapes, advance_light_distr, req_level_name):\n self.ligh_shapes = ligh_shapes\n self.light_distr = advance_light_distr # assume basic light distribution is Lambert, i.e. 
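# Aside: the chat client above (its comments are in French: it threads send()
# and receive() over one socket until the message "FIN") boils down to the
# pattern below. A self-contained loopback demo; names and payloads are
# illustrative, and port 0 lets the OS pick a free port:
import socket
import threading

def pump(sock, chunks):
    for c in chunks:
        sock.sendall(c)

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(('127.0.0.1', 0))
srv.listen(1)
cli = socket.create_connection(srv.getsockname())
conn, _ = srv.accept()
threading.Thread(target=pump, args=(conn, [b'hello ', b'FIN'])).start()
data = b''
while not data.endswith(b'FIN'):     # the client above also stops on 'FIN'
    data += cli.recv(1024)
print(data.decode())                 # -> hello FIN
for s in (cli, conn, srv):
    s.close()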
Diffuse.\n self.req_level = req_level_name\n self.brdfs = [\"lambert\", \"mirror\", \"ggx\"]\n self.basic_shapes = [\"rect\",\"disk\",\"sphere\"]\n self.allow_cust_dist = [\"rect\",\"disk\",\"sphere\",\"point\"]\n\n self.edges = [(brdf,shape) for brdf in self.brdfs \n for shape in self.ligh_shapes]\n\n self.vertices = self.brdfs + self.ligh_shapes + self.light_distr\n\n self.g = Digraph('G', filename=req_level_name)\n self.g.attr(nodesep='1', ranksep='1')\n \n def ShapesWithDistr(self, a_shapes):\n for shape in a_shapes:\n if shape in self.ligh_shapes:\n for distr in self.light_distr:\n self.edges.append((shape,distr))\n \n def PrintTests(self):\n arrow_ends = [x[1] for x in self.edges]\n for v in self.vertices:\n if v in arrow_ends:\n print(\"test(\",v,\"\\tWITH_FEATURES:\\t(\", end='')\n for e in self.edges:\n if e[1] == v:\n print(e[0], end=',')\n print(\") )\")\n \n def Draw(self):\n \n with self.g.subgraph(name='cluster_1') as c:\n for lshape in self.ligh_shapes:\n c.node(lshape, shape='doublecircle')\n c.attr(label='Light Shapes (core)')\n\n with self.g.subgraph(name='cluster_3') as c:\n for brdf in self.brdfs:\n c.node(brdf)\n c.attr(label='BRDFs (core)')\n\n with self.g.subgraph(name='cluster_4') as c:\n for distr in self.light_distr:\n num_arrows = 0\n for (a,b) in self.edges:\n if distr == a or distr == b: \n num_arrows=num_arrows+1\n #print(\"num_arrows = \", num_arrows)\n if num_arrows <= 1:\n c.node(distr)\n else: \n c.node(distr, shape='doublecircle') \n\n c.attr(label='Light Distribution')\n \n for pair in self.edges:\n self.g.edge(pair[0], pair[1])\n\n self.g.node('END', shape='Msquare', label=self.req_level)\n self.g.edge('sphere', 'END')\n \n for distr in self.light_distr:\n self.g.edge(distr, 'END')\n\n for shape in [shape for shape in self.ligh_shapes if shape not in self.allow_cust_dist and shape]:\n self.g.edge(shape, 'END')\n \n self.g.view()\n\n\n#mygraph = LightGraph([\"rect\",\"disk\",\"sphere\"], \n# [\"diffuse\"], \n# \"LIGHT_LEVEL_1.0\")\n\n#mygraph = LightGraph([\"rect\",\"disk\",\"sphere\"], \n# [\"diffuse\",\"ies\",\"spot\"], \n# \"LIGHT_LEVEL_1.1\")\n\nmygraph = LightGraph([\"rect\",\"disk\",\"sphere\",\"env\",\"direct\",\"point\"], \n [\"diffuse\",\"spot\",\"ies\"], \n \"LIGHT_LEVEL_1.2\")\n\nmygraph.ShapesWithDistr([\"rect\", \"disk\", \"point\"])\nmygraph.PrintTests()\nmygraph.Draw()\n\n\n\n","sub_path":"doc/doc_hydra_standart/graph_lights.py","file_name":"graph_lights.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"313733971","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n@Author: lemons\r\n@Date:\r\n@Description: Deal with training and validation\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport random\r\nfrom config import *\r\nfrom utils.file_util import *\r\nfrom models.general_model import Lemon_Model\r\n\r\n\r\nclass Trainer:\r\n def __init__(self, train_dt=None, dev_dt=None):\r\n self.train_dt = train_dt\r\n self.dev_dt = dev_dt\r\n self.model = Lemon_Model()\r\n self.log_file = ...\r\n\r\n def do_train(self):\r\n \"\"\"\r\n 训练指定轮数之后评测出结果提交平台\r\n :return:\r\n \"\"\"\r\n random.seed(RANDOM_SEED) # 保证别人再次跑的时候有相同的效果\r\n criterion = nn.CrossEntropyLoss()\r\n optimizer = optim.Adam(self.model.parameters(), lr=LR, weight_decay=L2_penalty)\r\n optimizer.zero_grad()\r\n batch_scores = None\r\n batch_labels = None\r\n loss_distribution = []\r\n iter_count = 0\r\n batch_count = 0\r\n loss_ = 0.\r\n for epoch in 
range(EPOCH_ALL):\r\n random.shuffle(self.train_dt)\r\n for document in self.train_dt:\r\n iter_count += 1\r\n doc_score = self.model(document)\r\n batch_scores = doc_score if batch_scores is None else torch.cat((batch_scores, doc_score), 0)\r\n batch_labels = document.title_word_ids if batch_labels is None else \\\r\n torch.cat((batch_labels, document.title_word_ids), 0)\r\n loss_ += criterion(batch_scores, torch.Tensor(batch_labels).long())\r\n if iter_count % BATCH_SIZE == 0 and iter_count > 0:\r\n batch_count += 1\r\n # compute the grads of the loss function according to this batch of data\r\n optimizer.zero_grad()\r\n # Margin loss\r\n loss_.backward(retain_graph=True) # retain_graph=True\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n loss_of_batch = loss_.data.item() / BATCH_SIZE\r\n loss_str = \"the train loss_tran is: \" + str(loss_of_batch)\r\n print_(loss_str, self.log_file)\r\n # loss分布分析\r\n loss_distribution.append(loss_of_batch)\r\n loss_ = 0.\r\n self.do_eval()\r\n self.report()\r\n # loss 存储\r\n save_data(loss_distribution, TRAIN_loss_path)\r\n\r\n def do_eval(self):\r\n random.shuffle(self.dev_dt)\r\n for doc_ids in self.dev_dt:\r\n doc_score = self.model(doc_ids)\r\n title_predicted = self.ids2words(doc_score)\r\n write_append(title_predicted, ..., type_=\"over\")\r\n print(\"请将生成的标题提交至评测平台...\")\r\n\r\n @staticmethod\r\n def ids2words(doc_score):\r\n words_of_title = \"\"\r\n for ids in doc_score:\r\n word = ...\r\n words_of_title += word + \" \"\r\n return words_of_title\r\n\r\n def report(self):\r\n ...\r\n","sub_path":"models/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"141191133","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 26 21:08:39 2018\n\n@author: yuta\n\"\"\"\nimport time\nfrom Q_H03 import*\nimport itertools\nfrom scipy.stats import binom\nfrom numpy.random import*\nimport matplotlib.pyplot as plt\n\"\"\"\nこのクラスはベイズ推定のためのクラスです.\nクロスワイヤのドライブハミルトニアンを生成できます.\n効用はベイズリスク、エントロピーが選べます。\n確率のルックアップテーブルは十字に計算するか、全て計算するか選ぶことが出来ます。\n\"\"\"\nclass Bayes_Function(Q_H):\n \"\"\"\n ベイズ推定のメソッドをまとめたクラス\n \"\"\"\n def __init__(self):\n \"\"\"\n 変数にデフォルト値を代入\n \"\"\"\n Q_H.__init__(self)\n self.ex=100 #試行回数\n self.i=0 #現在の試行回数\n self.n={\"a1\":5,\"b1\":5,\"a2\":5,\"b2\":5,\"w_theta\":5,\"D0\":5,\"AN\":5,\"QN\":5,\"Bz\":5} #推定基底毎のパーティクルの数 a1,b1,a2,b2,w_theta,D0,An,Qn,Bz\n self.g={\"V1\":5,\"V2\":5,\"phi\":30,\"MWwidth\":5,\"MWfreq\":5} #量子操作において変更するパラメータの分割数 V1,V2,phi,MWwidth,MWfreq\n self.d=1000 #一度の推定に使用する実験データの数\n self.a=0.75 #パーティクルの再配分における移動強度\n self.resample_threshold=0.5 #パーティクルの再配分を行う判断をする閾値\n self.approx_ratio=0.98 #不要なパーティクルを削除する際の残す割合\n self.bayes_threshold=1 #推定を終えるベイズリスクの閾値\n self.mode=1 #Expsimの測定モード(1:確率,0:射影測定)\n self.state=0 #0:ms=0で射影測定,1:ms=±1で射影測定\n self.flag1=False #パーティクルの数が変化したらTrue\n self.flag2=False #実験設計の数が変化したらTrue\n self.Q=0 #ベイズリスクの重み行列\n self.U=0 #効用\n self.B_R=0 #ベイズリスク\n self.Utility=\"binomial\"\n self.ptable_mode=\"all\" #all or cross\n #結果格納配列\n self.i_list=[]\n self.ptable_C=[]\n self.ptable_x=[]\n self.ptable=[]\n self.risk=[]\n #パーティクル\n self.w=0 #現在のパーティクルの重みs\n self.ParamH={\"a1\":0,\"b1\":0,\"a2\":0,\"b2\":0,\"w_theta\":0,\"D0\":1,\"AN\":0,\"QN\":0,\"Bz\":0} #変更するパーティクルのパラメータ\n self.RangeH={\"a1\":5,\"b1\":3,\"a2\":10,\"b2\":5,\"w_theta\":2*np.pi,\"D0\":10,\"AN\":0,\"QN\":0,\"Bz\":0} #変更する範囲\n #実験設計\n self.V1=1 #ワイヤ1の電圧[V]\n self.V2=1 #ワイヤ2の電圧[V]\n self.phi=180*pi/180 #ワイヤ間の位相差[rad]\n 
self.t=0.05 #MWパルス長[us]\n self.MWf=2870 #MW周波数の中心[MHz]\n self.ParamC={\"V1\":0,\"V2\":0,\"phi\":1,\"MWwidth\":1,\"MWfreq\":1} #V1,V2,phi,MWwidth,MWfreq #変更する量子操作のパラメータ\n self.RangeC={\"V1\":1,\"V2\":1,\"phi\":360,\"MWwidth\":0.05,\"MWfreq\":10} #変更する範囲\n self.C_best=0\n self.C_best_i=0\n #GRAPE\n self.omega_j=[] #GRAPEの結果を格納する配列\n self.U_grape=[] #GRAPEの真値に対する時間発展演算子\n \n def params(self):\n self.params_list=[\"a1\",\"b1\",\"a2\",\"b2\",\"w_theta\",\"D0\",\"AN\",\"QN\",\"Bz\"]\n self.x0_dict={\"a1\":self.a1,\"b1\":self.b1,\"a2\":self.a2,\"b2\":self.b2,\"w_theta\":self.w_theta\n ,\"D0\":self.D0,\"AN\":self.AN,\"QN\":self.QN,\"Bz\":self.Be+self.Bo} #真のハミルトニアン\n self.x0=[self.x0_dict[\"a1\"],self.x0_dict[\"b1\"],self.x0_dict[\"a2\"],self.x0_dict[\"b2\"],self.x0_dict[\"w_theta\"]\n ,self.x0_dict[\"D0\"],self.x0_dict[\"AN\"],self.x0_dict[\"QN\"],self.x0_dict[\"Bz\"]]\n self.D=np.empty([self.d,1])\n self.x_dict={\"a1\":self.a1+self.a1/5,\"b1\":self.b1-self.b1/20,\"a2\":self.a2-self.a2/10,\"b2\":self.b2+self.b2/5,\"w_theta\":self.w_theta\n ,\"D0\":self.D0+3,\"AN\":self.AN,\"QN\":self.QN,\"Bz\":self.Be+self.Bo} #現在のパーティクル\n self.x=[self.x_dict[\"a1\"],self.x_dict[\"b1\"],self.x_dict[\"a2\"],self.x_dict[\"b2\"],self.x_dict[\"w_theta\"]\n ,self.x_dict[\"D0\"],self.x_dict[\"AN\"],self.x_dict[\"QN\"],self.x_dict[\"Bz\"]]\n self.x_first=self.x_dict\n \n \n def n_particles(self):\n \"\"\"\n パーティクルの数を返す\n \"\"\"\n n_p=1\n if self.i==0:\n for p in self.n:\n if self.ParamH[p]==1:\n n_p=n_p * self.n[p] \n else:\n n_p=len(self.w)\n return n_p\n \n def n_exp(self):\n \"\"\"\n 実験設計の数を返す\n \"\"\"\n n_C=1\n if self.i==0:\n for c in self.g:\n if self.ParamC[c]==1:\n n_C=n_C*self.g[c]\n else:\n n_C=len(self.U)\n return n_C\n \n def Mean(self,w,x): #重み付き平均を計算する関数\n \"\"\"\n w:重み\n x:パラメータ\n wで重みづけされたxの平均を返す\n \"\"\"\n i=0\n n=len(w)\n m=len(x[0])\n mu=np.zeros([1,m])\n for i in range(n):\n mu=mu+w[i][0]*x[i]\n return mu\n \n def init_C(self):\n \"\"\"\n 実験設計の初期値代入\n \"\"\"\n self.C=[self.V1, self.V2, self.phi, self.t, self.MWf]\n \n def init_w(self):\n \"\"\"\n 重みを一様分布に初期化\n \"\"\"\n n_p=self.n_particles()\n self.w=np.ones([n_p,1])\n \n def init_U(self):\n \"\"\"\n 効用を一様分布に初期化\n \"\"\"\n n_C=self.n_exp()\n self.U=np.ones([n_C,1])\n \n def init_R(self):\n \"\"\"\n ベイズリスクを一様分布に初期化\n \"\"\"\n n_C=self.n_exp()\n self.B_R=np.ones([n_C,1])\n \n def init_x(self):\n \"\"\"\n パーティクルの中心を生成\n ここでは炭素の超微細相互作用を追加\n \"\"\"\n for Ac in self.Ac_list:\n self.x.append(Ac)\n \n \n def init_x0(self):\n \"\"\"\n パーティクルの真値を生成\n ここでは炭素の超微細相互作用を追加\n \"\"\"\n N=len(self.Ac_list)\n for i in range(N):\n self.params_list.append(\"Ac\"+str(i))\n self.x0_dict[\"Ac\"+str(i)]=self.Ac_list[i]\n self.x0.append(self.Ac_list[i])\n \n def weighting_matrix(self):\n \"\"\"\n ベイズリスクの重み行列を作成\n ParamHの要素が1ならば対応する重み行列の要素も1\n つまり、ベイズリスクを考慮する\n \"\"\"\n self.Q=np.zeros([len(self.x0), len(self.x0)])\n for i,p in enumerate(self.ParamH):\n px=self.ParamH[p]\n self.Q[i][i]=px\n \n def resample(self,w,x): #パーティクルの移動と重みの再配分を行う関数\n \"\"\"\n a:resample強度\n 各パーティクルをx=a*x+(1-a)*x_averageに移動させる\n つまり、各パーティクルを強度aで分布の中心に寄せる\n \"\"\"\n i=0\n n=len(w)\n m=len(x[0])\n mu=self.Mean(w,x)\n mui=np.zeros([1,m])\n for i in range(n):\n if w[i]<1.0/n*self.a:\n mui=self.a*x[i]+(1-self.a)*mu\n x[i]=mui\n w[i][0]=1.0/n\n print (\"resample\")\n return w,x\n \n def reapprox_par(self): #不要となったパーティクルを削除する関数\n \"\"\"\n m:残すパーティクルの数\n ws:昇順に並び替えた重み\n wsの(m+1)番目の要素よりも大きい重みのパーティクルは残す\n \"\"\"\n n=len(self.w)\n ws=sorted(self.w)\n m=floor(n*(1.0-self.approx_ratio))\n if m<1:\n m=0\n j=0\n 
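# collect the indices of the m smallest weights; those particles are deleted from w and x below\n        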
delist=[]\n while j!=m:\n i=0\n for i in range(n):\n if self.w[i]==ws[j] and n!=0:\n delist.append(i)\n j=j+1\n if j==m:\n break\n self.w=np.delete(self.w,delist,0)\n self.w=self.w/sum(self.w)\n self.x=np.delete(self.x,delist,0)\n if n != len(self.w):\n self.flag1=True\n print(\"reapprox_exp\")\n else:\n self.flag1=False\n \n def reapprox_exp(self): #不要となったパーティクルを削除する関数\n \"\"\"\n m:残すパーティクルの数\n ws:昇順に並び替えた重み\n wsの(m+1)番目の要素よりも大きい重みのパーティクルは残す\n \"\"\"\n n=len(self.U)\n Us=sorted(self.U)\n m=floor(n*(1.0-self.approx_ratio))\n if m<1:\n m=0\n j=0\n delist=[]\n while j!=m:\n i=0\n for i in range(n):\n if self.U[i]==Us[j] and n!=0:\n delist.append(i)\n j=j+1\n if j==m:\n break\n self.U=np.delete(self.U,delist,0)\n self.U=self.U/sum(self.U)\n self.U=np.delete(self.U,delist,0)\n if n != len(self.U):\n self.flag2=True\n print(\"reapprox_exp\")\n else:\n self.flag2=False\n \n def Particlemaker(self,x,n,Param,Range): #パーティクルを生成する関数\n #itertools.product与えられた変数軍([x1,x2,x3],[y1,y2])の総重複なし組み合わせを配列として出力\n \"\"\"\n パーティクルを生成する関数\n 最小値:x-Range/2\n 最大値:x+Range/2\n \"\"\"\n N=len(x)\n temp=[]\n for i,p in enumerate(Param):\n if(Param[p]==1):\n temp.append(np.linspace(x[i]-Range[p]/2,x[i]+Range[p]/2,n[p]))\n else:\n temp.append([x[i]])\n return(np.array(list(itertools.product(*temp))))\n \n def Expsim(self,x,C): #実験と同様のシーケンスを行いデータの生成を行う関数\n \"\"\"\n パーティクルxに実験Cで実験シミュレーションを行う関数\n \"\"\"\n self.H(x)\n self.H_rot(C[4]) #C[4]:MW周波数\n self.Vdrive_all(x,C[0],C[1],C[2]) #C[0]:V1, C[1]:V2, C[2]:ワイヤ間の位相差phi\n rhof=self.Tevo(C[3]) #C[3]:MWwidth\n expect0=self.exp(rhof) #ms=0で測定\n if self.mode==0:\n if rand()>=expect0:\n mes=1 #ms=+1,-1\n else:\n mes=0 #ms=0\n else:\n if self.state==0:\n mes=expect0\n else:\n mes=1.0-expect0\n if mes<0:\n mes=0\n return mes\n \n def Prob_Lookup(self,x,C): #確率のルックアップテーブルを作る。もっと軽量化したい\n \"\"\"\n 確率のルックアップテーブルを作成する\n allの場合,ルックアップテーブルの形は(パーティクル数 * 実験設計数)\n \"\"\"\n self.mode=1 #Expsimでms=0の確率を算出\n if self.ptable_mode==\"cross\":\n if len(x) == self.n_particles():#各パーティクルについてある実験Cで実験を行う\n self.ptable_x=np.zeros([self.n_particles(),1])\n for i in range(self.n_particles()):\n self.ptable_x[i]=self.Expsim(self.x[i],C) \n else: #あるパーティクルについて全実験設計で実験を行う\n if self.i==0:\n self.C_best_i=int(self.n_exp()*np.random.random())\n self.C_best=self.C[self.C_best_i]\n self.ptable_C=np.zeros([self.n_exp(),1])\n for i in range(self.n_exp()):\n self.ptable_C[i]=self.Expsim(x,self.C[i])\n \n elif (self.ptable_mode==\"all\"):\n self.ptable=np.zeros([self.n_particles(),self.n_exp()])\n for i in range(self.n_exp()):\n for j in range(self.n_particles()):\n self.ptable[j,i]=self.Expsim(self.x[j],self.C[i])\n \n def Entropy(self,p):\n \"\"\"\n p(確率が格納された配列)の各要素について平均情報量を計算する関数\n \"\"\"\n return -p*np.log2(p)-(1-p)*np.log2(1-p)\n\n def UtilIG(self):\n \"\"\"\n 各パーティクルについてエントロピーを計算する。\n エントロピーをかけて効用の分布を更新する\n \"\"\"\n ent_table = self.Entropy(self.ptable_C)#-self.ptable_C*np.log2(self.ptable_C) #エントロピーテーブル(各要素は各々の実験でのエントロピー)\n ent_array = (np.reshape(ent_table,[len(ent_table),1])) #2D配列に変換\n self.U=ent_array*self.U\n self.U=self.U/np.sum(self.U)\n self.C_best_i=np.argmax(self.U)\n self.C_best=self.C[self.C_best_i]\n \n def UtilIG_bayes_risk(self):\n \"\"\"\n 効用としてベイズリスクを計算する関数\n 重みの仮更新がこれで良いのか議論の余地あり\n \"\"\"\n m=np.argmax(self.w)\n p_exp=self.Expsim(self.x[m],self.C_best)#真値におけるms0の確立\n num=binomial(self.d, p_exp)#実験をd回行いms=0であった回数\n L_w=np.ones(self.n_particles())\n dU=np.zeros([self.n_exp(),1])\n for i in range(self.n_exp()):\n L_w[m]=binom.pmf(num,n=self.d,p=self.ptable_C[i])#各パーティクルでの実験でms=0にいた確率\n 
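# tentatively reweight the particles with this experiment's likelihood, then score it by the resulting Bayes risk\n            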
w_new=self.w*L_w.reshape([len(L_w),1]) #重みの更新\n x_infer=self.Mean(w_new,self.x)\n dU[i]=np.trace(self.Q*np.dot((self.x - x_infer[0]).T,(self.x - x_infer[0])))\n self.U=dU*self.U\n self.U=self.U/np.sum(self.U)\n self.C_best_i=np.argmin(self.U)\n self.C_best=self.C[self.C_best_i]\n\n def Update(self): #ベイズ推定を行う関数 引数(self,Ci)\n \"\"\"\n パーティクルの重みを更新する関数\n \"\"\"\n self.mode=1\n p_exp=self.Expsim(self.x0,self.C_best)#真値におけるms0の確立\n num=binomial(self.d, p_exp)#実験をd回行いms=0であった回数\n if self.ptable_mode==\"cross\":\n temp=binom.pmf(num,n=self.d,p=self.ptable_x)#各パーティクルでの実験でms=0にいた確率\n self.w=self.w*temp.reshape([len(temp),1]) #重みの更新\n elif self.ptable_mode==\"all\":\n temp=binom.pmf(num,n=self.d,p=self.ptable)[:,self.C_best_i]#各パーティクルでの実験でms=0にいた確率\n self.w=self.w*temp #重みの更新\n self.w=self.w/np.sum(self.w) #重みの規格化\n \n def Bayes_risk(self): #ベイズリスクを計算する関数\n \"\"\"\n ベイズリスクを計算する関数\n \"\"\"\n x_infer=self.Mean(self.w,self.x) \n self.risk.append(np.trace(self.Q*np.dot((self.x - x_infer[0]).T,(self.x - x_infer[0]))))\n \n #=============================結果を描画する関数=============================\n def show_w(self):\n \"\"\"\n 現在の重みを描画する関数\n \"\"\"\n wi=np.linspace(1,self.n_particles(),self.n_particles())\n plt.plot(wi,self.w)\n plt.xlabel(\"particle\")\n plt.ylabel(\"weight (a.u.)\")\n plt.title(\"weight\", fontsize=24)\n plt.show()\n \n def show_U(self):\n \"\"\"\n 現在の効用を描画する関数\n \"\"\"\n Ui=np.linspace(1,self.n_exp(),self.n_exp())\n plt.plot(Ui,self.U)\n plt.xlabel(\"experiment\")\n plt.ylabel(\"Utility (a.u.)\")\n plt.title(\"Utility\", fontsize=24)\n plt.show()\n \n def show_r(self):\n \"\"\"\n ベイズリスクの推移を描画する関数\n \"\"\"\n plt.plot(self.i_list,self.risk)\n plt.xlabel(\"experiment\")\n plt.ylabel(\"Bayes_risk \")\n plt.yscale(\"log\") #y軸をlogスケールに\n plt.title(\"Bayes_risk\", fontsize=24)\n plt.show()\n \n def show_hyper_parameter(self):\n \"\"\"\n ハイパーパラメータを出力する関数\n \"\"\"\n print(\"============================ハイパーパラメータの表示======================\\n\")\n print(\"実験回数:%d\" %(self.i))\n print(\"リサンプリング強度 %f\" %(self.a))\n print(\"リサンプリング閾値 %f\" %(self.resample_threshold))\n print(\"ベイズリスクの閾値 %f\" %(self.bayes_threshold))\n print(\"それぞれのハミルトニアンパラメータの分割数\")\n print(self.n)\n print(\"それぞれの実験パラメータの分割数\")\n print(self.g)\n print(\"パーティクルの真の値\")\n print(self.x0)\n print(\"始めのパーティクルの中心\")\n print(self.x_first)\n print(\"推定したパラメータD0, AN, QN, Bz, Ac_list\")\n print(self.ParamH)\n print(self.RangeH)\n print(\"変化させた実験設計とその範囲\")\n print(self.ParamC)\n print(self.RangeC)\n print(\"現在のパーティクルの数:%d\" %(self.n_particles()))\n print(\"ルックアップテーブルの表式 %s\" %(self.ptable_mode))\n \n \n #=================================GRAPE====================================\n def Tevo_operator(self,U_init,width):\n HI=Vd+H0-HfMW\n U=(-2*pi*1j*HI*width).expm()\n Ud=U*U_init\n return Ud\n \n def GRAPEpulse(self):\n m=np.argmax(self.w)\n C_known=[]\n for i in range(len(self.Ac_list)):\n if self.Ac_list[i] != 0:\n C_known.append(self.Ac_list[i])\n grape=GRAPE(self.x[m][0],self.x[m][1],0,C_known,C_inhomo,Be,theta,phi1,phi2,state_init,\n state_goal,weighting_value,permax,pulse_time,t_div,target_list)\n \n phi_array, self.omega_array=grape.optimize()\n return self.omega_array\n \n def GRAPE_operator(self,x,omega_j):\n H0=self.H(x)\n HfMW=self.H_rot(C[2])\n C_list=[]\n for i in range(len(self.Ac_list)):\n C_list.append(2)\n U0=tensor(III,III,self.C_mat)\n U=U0\n for i in range(len(omega_j)):\n VdMW=self.Vdrive_all(omega_j[i])\n U=self.Tevo_operator(H0,VdMW,HfMW,U,t_div)\n return 
U","sub_path":"機械学習/サブpy/Bayes_function09_1.py","file_name":"Bayes_function09_1.py","file_ext":"py","file_size_in_byte":18326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"28319758","text":"import json\nimport glob\n\njson_files = glob.glob('poetry_json/*.json')\n\nwith open('train.txt', 'w') as w:\n for file in json_files:\n with open(file, 'r') as f:\n str_data = f.read()\n dict_data = json.loads(str_data)\n for i in range(len(dict_data)):\n content = ''\n for j in range(len(dict_data[i]['paragraphs'])):\n content += dict_data[i]['paragraphs'][j]\n w.write(content+'\\n')\n","sub_path":"data/tangshi/process_poetry.py","file_name":"process_poetry.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"115346267","text":"# https://codeforces.com/problemset/problem/1288/A\n\n\nfrom math import ceil\n\n\ndef read_tokens():\n return input().strip().split(' ')\n\n\ndef read_ints():\n return [int(s) for s in read_tokens()]\n\n\ndef f(n: int, d: int) -> bool:\n good = 0\n bad = n\n\n while bad - good > 1:\n t = (good + bad) // 2\n if good_enough(n, d, t):\n good = t\n else:\n bad = t\n\n return good_enough(n, d, good)\n\n\ndef good_enough(n: int, d: int, x: int) -> bool:\n new_time = ceil(d / (x+1))\n total = x + new_time\n return total <= n\n\n\nT, = read_ints()\nfor _ in range(T):\n n, d = read_ints()\n yes = f(n, d)\n print(\"YES\" if yes else \"NO\")\n","sub_path":"bsearch/2-1288A.py","file_name":"2-1288A.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"56631396","text":"def fact(n):\n f=1\n print(n)\n if int(n) >=1:\n for i in range(1,int(n)+1):\n f = f*i\n print(f)\n return(f)\n \nn=145\nres=0\nfor i in str(n):\n \n res+=fact(i)\n \nprint(\"res t\",res)\nif res==n:\n print(\"strong\")\nelse:\n print(\"not striong\")\n \n","sub_path":"programmingInPython/nqt strong no.py","file_name":"nqt strong no.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"87263220","text":"import argparse\nimport os\nimport sys\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\n\nsys.path.append(os.getcwd())\n\nimport models\nimport utils\n\ntry:\n dataset_path = os.environ[\"DATASETS_ABS_PATH\"]\n sys.path.append(dataset_path)\nexcept KeyError:\n print(\"Datasets.py absolute path not found in PATH\")\nimport Datasets\n\n\ndef main(args):\n\n print(\"Loading config file: \", args.config)\n params = utils.load_config_file(args.config)\n params[\"dataset_paths\"] = utils.format_dataset_path(\n params[\"dataset_paths\"])\n if \"nyu\" not in params:\n params[\"nyu\"] = False\n\n # Data loading code\n print(\"Creating data loaders...\")\n if params[\"nyu\"]:\n from dataloaders.nyu import NYUDataset\n val_dataset = NYUDataset(params[\"dataset_paths\"], split='val')\n else:\n val_dataset = Datasets.FastDepthDataset(params[\"dataset_paths\"],\n split='val',\n depth_min=params[\"depth_min\"],\n depth_max=params[\"depth_max\"],\n input_shape_model=(224, 224),\n random_crop=False)\n\n # set batch size to be 1 for validation\n data_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=params[\"num_workers\"],\n pin_memory=True)\n\n # Set GPU\n params[\"device\"] = torch.device(\n 
\"cuda:{}\".format(params[\"device\"])\n if params[\"device\"] >= 0 and torch.cuda.is_available() else \"cpu\")\n print(\"Using device\", params[\"device\"])\n\n print(\"Loading models...\")\n models = []\n model_names = []\n for model_dict in params[\"models\"]:\n model_names.append(Path(model_dict[\"model_path\"]).stem)\n model, _ = utils.load_model(model_dict, model_dict[\"model_path\"], params[\"device\"])\n model.to(params[\"device\"])\n models.append(model)\n\n # Create output directory\n output_directory = os.path.join(params[\"save_folder\"], \".\".join(model_names))\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n params[\"output_directory\"] = output_directory\n print(\"Saving results to \" + output_directory)\n\n compare_models(params, data_loader, models)\n\n\ndef compare_models(params, loader, models):\n with torch.no_grad():\n for i, (input, target) in enumerate(loader):\n input, target = input.to(params[\"device\"]), target.to(\n params[\"device\"])\n\n # Inference\n predictions = []\n for model in models:\n prediction = model(input)\n\n # Clip prediction\n prediction[\n prediction > params[\"depth_max\"]] = params[\"depth_max\"]\n prediction[\n prediction < params[\"depth_min\"]] = params[\"depth_min\"]\n\n predictions.append(prediction)\n\n # Convert tensors to np arrays\n rgb = utils.tensor_to_rgb(input)\n prediction_images = []\n for prediction in predictions:\n prediction_images.append(utils.tensor_to_depth(prediction))\n gt = utils.tensor_to_depth(target)\n colored_predictions, gt = utils.visualize_depth_compare(prediction_images, gt)\n \n # Combine predictions and ground truth into one image\n combined = np.hstack([rgb * 255, *colored_predictions, gt])\n\n # Save combined image\n filename = os.path.join(params[\"output_directory\"],\n \"image_{}.png\".format(i))\n utils.save_image(combined, filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Compare two Fastdepth models.')\n parser.add_argument('-c',\n '--config',\n type=str,\n default=\"config/evaluate.json\",\n help=\"Path to config JSON.\")\n args = parser.parse_args()\n main(args)\n","sub_path":"fastdepth/scripts/compare_models.py","file_name":"compare_models.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"137069971","text":"from itertools import izip\nfrom collections import defaultdict\nfrom collections import Counter\n\ndef multi_dimensions(n, type):\n if n<=1:\n return type()\n return defaultdict(lambda:multi_dimensions(n-1, type))\n\ndef initial(filename_e,filename_f):\n fi_e = open(filename_e,\"r\")\n fi_f = open(filename_f,\"r\")\n trans=multi_dimensions(3,float)\n for line_e,line_f in izip(fi_e,fi_f):\n words_e=line_e.strip().split()\n words_f=line_f.strip().split()\n for w1 in words_e:\n for w2 in words_f:\n trans[w1][w2]=0 \n for w1 in trans.iterkeys():\n l=len(trans[w1])\n for w2 in trans[w1].iterkeys():\n trans[w1][w2]=float(1)/l\n fi_e.close()\n fi_f.close()\n return trans\n \n\ndef train(filename_e,filename_f,trans):\n # fi_ e = open(filename_e,\"r\")\n # fi_f = open(filename_f,\"r\")\n for ind in range(5):\n cef=multi_dimensions(3,float)\n ce={}\n with open(filename_e,\"r\") as fi_e,open(filename_f,\"r\") as fi_f: \n for line_e,line_f in izip(fi_e,fi_f):\n words_e=line_e.strip().split()\n words_f=line_f.strip().split()\n f={}\n g=multi_dimensions(3,float)\n for w2 in words_f:\n for w1 in words_e:\n f[w2]=f.get(w2,0)+trans[w1][w2]\n 
for w2 in words_f:\n for w1 in words_e:\n g[w1][w2]=float(trans[w1][w2])/f[w2]\n\n for w2 in words_f:\n for w1 in words_e:\n cef[w1][w2]+=g[w1][w2]\n ce[w1]=ce.get(w1,0)+g[w1][w2]\n # import pdb\n # pdb.set_trace()\n for w1 in trans.iterkeys():\n for w2 in trans[w1].iterkeys():\n trans[w1][w2]=float(cef[w1][w2])/ce[w1]\n fi_e.close()\n fi_f.close()\n return trans\n\ndef decode(file_dev_e,file_dev_f,file_out,trans):\n fi_e=open(file_dev_e,\"r\")\n fi_f=open(file_dev_f,\"r\")\n fo=open(file_out,\"w\")\n index0=0\n for line_e,line_f in izip(fi_e,fi_f):\n index0+=1\n words_e=line_e.strip().split()\n words_f=line_f.strip().split()\n index2=0\n for w2 in words_f:\n index2+=1\n a=-1\n b=-1\n index1=0\n for w1 in words_e:\n index1+=1\n if a < trans[w1][w2]:\n a=trans[w1][w2]\n b=index1\n fo.write(str(index0)+\" \"+str(b)+\" \"+str(index2)+\"\\n\")\n fo.close()\n fi_f.close()\n fi_e.close()\n\n\ntrans=initial(\"corpus.en\",\"corpus.es\")\ntrans=train(\"corpus.en\",\"corpus.es\",trans)\n# import pdb\n# pdb.set_trace()\ndecode(\"dev.en\",\"dev.es\",\"dev.out\",trans)\n","sub_path":"2-python-practice/7-IBM/IBM/ibm.py","file_name":"ibm.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"404431380","text":"# 117)\t(С.С. Поля��ов) Исполнитель Калькулятор преобразует число на экране.\n# У исполнителя есть три команды, которым присвоены номера:\n# 1. Прибавить 1\n# 2. Прибавить 5\n# 3. Умножить на 3\n# Сколько разных чисел может быть получено из числа 1 с помощью программ,\n# состоящих из 7 команд?\n\nm=set()\ndef f(a,k):\n if k==7:\n m.add(a)\n else:\n f(a+1,k+1)\n f(a+5,k+1)\n f(a*3,k+1)\nf(1,0) #Начальное значение 1, шаг 0\nprint(len(m))\n","sub_path":"23/23_кол-во_прог.py","file_name":"23_кол-во_прог.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"198498626","text":"\n###########################################################################\n# cc and ctc\n###########################################################################\n\ndef convert_cc_to_ctc_with_fixed_diagrams(cc,diagrams):\n\tctc = {}\n\tname = cc[\"name\"]\n\tctc[\"name\"] = name\n\tfor key in [\"pf1\",\"pf2\",\"pi2\",\"pi1\",\"gammas\"]:\n\t\tif key in cc:\n\t\t\tctc[key] = cc[key]\n\n\tif name == \"pi-pi\":\n\t\tctc[\"correlator_quantities\"] = [\"sumDiagSumXC\"]\n\telse:\n\t\tctc[\"correlator_quantities\"] = [\"sumDiagSumXPCP\",\"sumDiagSumXTracePCP\"]\n\n\tif cc[\"source-timeslices\"] == \"random.max_timeslice_sep\":\n\t\tctc[\"source-timeslices\"] = \"auto\"\n\n\tctc[\"diagrams\"] = diagrams\n\treturn ctc\n\ndef append_ctcs_for_cc(cc,ctcs):\n\tctcs.append(convert_cc_to_ctc_with_fixed_diagrams(cc,cc[\"diagrams\"]))\n#\tfor diagram in cc[\"diagrams\"]:\n#\t\tctcs.append(convert_cc_to_ctc_with_fixed_diagrams(cc,[diagram]))\n\ndef get_ctc_list_from_cc_list(ccs):\n\tctcs = []\n\tfor cc in ccs:\n\t\tappend_ctcs_for_cc(cc,ctcs)\n\treturn ctcs\n\n###########################################################################\n# cc to btc\n###########################################################################\n\ndef convert_cc_to_btc_with_fixed_diagrams(cc,diagrams):\n\tbtc = {}\n\tname = cc[\"name\"]\n\tbtc[\"name\"] = name\n\tfor key in [\"pf1\",\"pf2\",\"pi2\",\"pi1\",\"gammas\"]:\n\t\tif key in cc:\n\t\t\tbtc[key] = cc[key]\n\n\tif name == \"pi-pi\":\n\t\tbtc[\"correlator_quantities\"] = [\"sumDiagSumXC\"]\n\t\tbtc[\"bstrap_quantities\"] 
= [\"sumDiagSumXC\"]\n\telse:\n\t\tbtc[\"correlator_quantities\"] = [\"sumDiagSumXPCP\",\"sumDiagSumXTracePCP\"]\n\t\tbtc[\"bstrap_quantities\"] = [\"sumDiagSumXTracePCP\"]\n\n\tbtc[\"diagrams\"] = diagrams\n\treturn btc\n\ndef append_btcs_for_cc(cc,btcs):\n\tbtcs.append(convert_cc_to_btc_with_fixed_diagrams(cc,cc[\"diagrams\"]))\n\tfor diagram in cc[\"diagrams\"]:\n\t\tbtcs.append(convert_cc_to_btc_with_fixed_diagrams(cc,[diagram]))\n\ndef append_btcs_for_cc_with_this_diagram_sum(cc,btcs,diagrams):\n\tfor dia in diagrams:\n\t\tassert(dia in cc[\"diagrams\"])\n\tbtcs.append(convert_cc_to_btc_with_fixed_diagrams(cc,diagrams))\n\ndef get_btc_list_from_cc_list(ccs):\n\tbtcs = []\n\tfor cc in ccs:\n\t\tappend_btcs_for_cc(cc,btcs)\n\treturn btcs\n\ndef get_btc_list_from_cc_list_with_this_diagram_sum(ccs,diagrams):\n\tbtcs = []\n\tfor cc in ccs:\n\t\tappend_btcs_for_cc_with_this_diagram_sum(cc,btcs,diagrams)\n\treturn btcs\n\n###########################################################################\n# Argument list to btc\n###########################################################################\n\ndef setup_btc_momentum(btc,mom_name,mom):\n\tif not mom is None:\n\t\tbtc[mom_name] = mom\n\ndef get_btc_for_process_with_momenta_gammas_and_diagrams(quantity_string,process_name,pi1,pi2,pf1,pf2,gammas,diagrams):\n\tbtc = {}\n\tbtc[\"name\"] = process_name\n\tif process_name == \"pi-pi\":\n\t\tsetup_btc_momentum(btc,\"pi1\",pi1)\n\tsetup_btc_momentum(btc,\"pi2\",pi2)\n\tsetup_btc_momentum(btc,\"pf1\",pf1)\n\tsetup_btc_momentum(btc,\"pf2\",pf2)\n\tif not gammas is None:\n\t\tbtc[\"gammas\"] = gammas\n\tbtc[\"correlator_quantities\"] = quantity_string\n\tbtc[\"bstrap_quantities\"] = quantity_string\n\tbtc[\"diagrams\"] = diagrams\n\treturn btc\n\n\n","sub_path":"communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"606493875","text":"import numpy\nimport scipy as sp\nimport property_gen as pg\ndef weight(m,n,R,w):\n #R is the radius of the circle\n\n Lx = 2*R+2*m+1 # this specifies an Lx x Ly lattice to embed the circle in\n Ly = 2*R+2*n+1\n\n c = pg.square_lattice(R) \n\n lattice = sp.zeros( (Ly,Lx), dtype = 'int' )\n\n for i in c:\n lattice[ i[0]+R+n,i[1]+R+m ] = 1 # + count #This assigns '1' to the region A, offset by R\n \n w_mxn_name = '%02d%02d'%(m,n)\n\n # First term in weight of mxn is property of mxn\n w[w_mxn_name] = pg.property(m,n,Lx,Ly,lattice)\n #if (m != n):w[w_mxn_name] *= 2\n #print w[w_mxn_name],\n\n wformula = \"W%02d%02d=P%02d%02d\"%(m,n,m,n)\n\n for y in range (1,n+1):\n for x in range (y,m+1):\n if (y < n or x < m) and x > 1:\n if x > n: coeff = (m-x+1)*(n-y+1) # drop last term otherwise get negative coeff\n else: coeff = (m-x+1)*(n-y+1)+(m-y+1)*(n-x+1)\n \n if x==y: coeff = coeff/2 # different coefficents for squares\n\n wformula += \"%+d*W%02d%02d\"%(-coeff,x,y)\n\n w[w_mxn_name] -= coeff * w['%02d%02d'%(x,y)]\n \n #print w[w_mxn_name]\n #print wformula\n\n return w\n","sub_path":"mxn_weight.py","file_name":"mxn_weight.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"342959812","text":"# -*- coding=utf-8 -*-\nimport os\nimport objc, Quartz\nfrom AppKit import NSBitmapImageRep\nfrom Quartz.CoreGraphics import CGMainDisplayID\nimport pyautogui as pag\nimport send\nimport time\nfrom config import image_path,client_socket_port\n\ndef 
get_pixel_color_init():\n global mainID\n objc.parseBridgeSupport( \"\"\"\n\n\n \n \n \n \n \n \n \n \n\n\"\"\", globals(), '/System/Library/Frameworks/ApplicationServices.framework/Frameworks/CoreGraphics.framework')\n mainID = CGMainDisplayID()\n\ndef get_color_pixel(x,y):\n image = CGDisplayCreateImageForRect(mainID, ((x-1,y-1), (x+1,y+1)))\n bitmap = NSBitmapImageRep.alloc()\n bitmap.initWithCGImage_(image)\n # Get the RGB color (float values from 0 to 1 per color, plus alpha) at a particular point\n return bitmap.colorAtX_y_(1, 1)\n\ndef is_white(x,y):\n # Get the RGB color (float values from 0 to 1 per color, plus alpha) at a particular point\n results = get_color_pixel(x,y)\n result_list = str(results).split(' ')\n for i in range(1,4,1):\n if float(result_list[i])<245/255:\n return False\n return True\n\nif __name__ == '__main__':\n white_times = 0\n get_pixel_color_init()\n print(\"开始等待题目\")\n while True:\n time.sleep(0.1)\n if is_white(133,63) and is_white(385,505):\n if white_times%2 == 0:\n print(\"发现题目,获取提示中:{}\".format(time.time()))\n white_times += 1\n question_img = pag.screenshot(region=(40,200,345,310))\n question_img.save(image_path)\n send.send_file(image_path)\n print(\"图片发送完毕: {}\".format(time.time()))\n print(\" 开始15秒的等待时间\")\n for i in range(15):\n print(i)\n time.sleep(1)\n else:\n print(\"答案公布时间\")\n white_times += 1\n print(\" 开始13秒的等待时间\")\n for i in range(13):\n print(i)\n time.sleep(1)\n","sub_path":"find_question.py","file_name":"find_question.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"160368401","text":"import pandas as pd\n\ndef read_csv_where_row_has_value(csv, usecol=None, value=None, chunksize=None):\n assert usecol is not None\n assert value is not None\n \n def get_chunks():\n chunks = pd.read_csv(csv, chunksize=chunksize) \n if chunksize is None:\n return [chunks]\n else:\n return chunks \n \n store = []\n for chunk in get_chunks():\n assert usecol in chunk.columns\n store.append(chunk[chunk[usecol] == value])\n df = pd.concat(store)\n df.index = range(len(df))\n return df","sub_path":"pylabtools/pandas.py","file_name":"pandas.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"355583770","text":"\"\"\"\nThe solution to the \"when_it_rains_it_pours\" problem.\n\"\"\"\n\ndef answer(numbers):\n waterLevel = 0\n numbers2=[0]*len(numbers)\n for loop in range(1,len(numbers)-1):\n numbers2[loop] = min(max(numbers[:loop]),max(numbers[loop:]))\n for loop in range(1,len(numbers)-1):\n if numbers2[loop] > numbers[loop]:\n waterLevel += numbers2[loop] - numbers[loop]\n return waterLevel\n","sub_path":"when_it_rains_it_pours/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"448537849","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 28 17:43:14 2020\n\n@author: carolc24\n\"\"\"\n\nimport numpy as np\nimport commSelect\nimport os\nimport time as tm\n\nnum_wells = 10; \nnum_cycles = 1;\nparallel = False;\nrun_id = \"default\"\n\n# time params\ndt = 0.05;\ncycle_duration = 17;\n# number of integration steps for mature cycle, positive int\nnsteps = int(np.ceil(cycle_duration/dt));\n\n#mutable params initial conditions, non-negative float\nfp_init = 0.1;\nK_MR_init = 1.;\nK_MB_init = 1./3 * 
500;\nb_Mmax_init = 0.7 / 1.2;\nb_Hmax_init = 0.3 / 1.2;\nK_HR_init = 1.;\n\n#mutable params upper bounds, non-negative float\nfp_bound = 1.;\nK_MR_bound = 1./3;\nK_MB_bound = 100./3;\nK_HR_bound = 0.2;\nb_Mmax_bound = 0.7;\nb_Hmax_bound = 0.8;\n\n#note that the Monod constants are saved as their reciprocals.\n#This is because the mutation function biases towards deleterious mutations\n#and each trait needs to be more advantageous at higher values to\n#preserve this effect. A higher Monod constant represents slower growth at\n#low resource.\ntraits_manu_bound = np.array([fp_bound, b_Mmax_bound, 1./K_MR_bound, 1./K_MB_bound]);\ntraits_help_bound = np.array([b_Hmax_bound, 1./K_HR_bound]);\ntraits_bound = [traits_manu_bound, traits_help_bound];\n\n#mutable params lower bounds, non-negative float\ntraits_manu_lowerbound = np.array([0., 0., 0., 0.]);\ntraits_help_lowerbound = np.array([0., 0.]);\ntraits_lowerbound = [traits_manu_lowerbound, traits_help_lowerbound];\n\n#constant growth + metabolite params, any type\nc_BM = 1./3;\nc_RM = 10**-4;\nc_RH = 10**-4;\nr_P = 1.;\n\n#death rates, non-negative float\nd_M = 3.5 * 10**-3;\nd_H = 1.5 * 10**-3;\n\n#metabolite initial conc, non-negative float\nR_init = 10.;\n\n#cell initial pop size, non-negative int\nM_init = 60;\nH_init = 40;\n\n#mutation params, non-negative float\np_mut = 2 * 10**-3; # mutation rate\nfrac_null = 0.5; # fraction of mutations that are null (set trait to 0.)\nsp0 = 0.05; # 'positive' mutation factor\nsn0 = 0.067; # 'negative' mutation factor\nmut_params = [p_mut, frac_null, sp0, sn0];\n\n#reproduction params\nnD = 100; # dilution factor for fixed fold pipetting, positive int\nBM_target = 100.; # target biomass for regular pipetting, positive float\ntop_tier = 1.; #top percent of adults chosen for reproduction, float >= 1.\nnewborns_per_adult = int(np.floor(num_wells / top_tier)); # max newborns per adult, positive int\n\n#system of diffeqs that describe community dynamics\n#arg t: timestep (required for solve_ivp)\n#arg y: state vector (required for solve_ivp)\n#arg manuCell: manufacturer CellType\n#arg helpCell: helper CellType\n#returns: dydt\ndef RBPFG_prime(t, y, manuCell, helpCell):\n \n #biomass vectors\n #length depends on how many mutants of each type there are\n Bio_M = manuCell.biomass(); #vector (multiple variants of M)\n Bio_H = helpCell.biomass(); #vector (multiple variants of H)\n \n #trait vectors with same length as Bio_M\n fp = manuCell.traits[:,0];\n b_Mmax = manuCell.traits[:,1];\n K_MR = manuCell.traits[:,2];\n K_MB = manuCell.traits[:,3];\n \n #trait vectors with same length as Bio_H\n b_Hmax = helpCell.traits[:,0];\n K_HR = helpCell.traits[:,1];\n \n #preventing division error\n K_HR[K_HR < 1e-9] = 1e-9;\n K_MR[K_MR < 1e-9] = 1e-9;\n K_MB[K_MB < 1e-9] = 1e-9;\n \n #metabolites (P is not needed for diffeqs)\n R = y[0];\n B = y[1];\n \n # birth rate coefficients \n # calculated separately for each mutant based on their traits\n #vector with same length as Bio_H\n b_Hcoef = R * K_HR / (R * K_HR + 1); # one value for each variant\n \n #vectors with same length as Bio_M\n R_M = R * K_MR;\n B_M = B * K_MB;\n b_Mcoef = (R_M * B_M) / (R_M + B_M) * (1. / (1 + R_M) + 1. 
/ (1 + B_M));\n \n #rate of change of metabolites\n #find the production/consumption rate of each metabolite for each mutant\n #by multiplying vectors elementwise\n #then use sum() to add up all fluxes and find overall rate of change\n R_prime = -sum(b_Hmax * b_Hcoef * Bio_H) * c_RH \\\n -sum(b_Mmax * b_Mcoef * Bio_M) * c_RM;\n B_prime = sum(b_Hmax * b_Hcoef * Bio_H) \\\n -sum(b_Mmax * b_Mcoef * Bio_M) * c_BM;\n P_prime = sum(b_Mmax * b_Mcoef * fp * Bio_M) * r_P;\n \n #cell birth rates per biomass for each mutant\n #vector with same length as Bio_M\n M_prime_coef = (1 - fp) * b_Mmax * b_Mcoef;\n #vector with same length as Bio_H\n H_prime_coef = b_Hmax * b_Hcoef;\n \n metabol_prime = [R_prime, B_prime, P_prime]; # order is same as before\n bio_prime = np.append(M_prime_coef, H_prime_coef).tolist();\n \n #dydt (must be list, not numpy array)\n dydt = metabol_prime + bio_prime; #metabolites first, then cells\n\n return dydt;\n\n#community function = Pfinal\n#arg data: 3D array of time series data of all adults\n#axis 0: community index\n#axis 1: data type (M total biomass, H total biomass, R, B, P)\n#axis 2: time\ndef commFunc(data):\n return data[:,-1,-1]; #P conc at the end of maturation\n #for each community\n \nif __name__ == \"__main__\":\n \n #initialize output dir\n init_seed = np.random.randint(1e7);\n np.random.seed(init_seed);\n run_id += str(init_seed);\n os.mkdir(run_id);\n \n # initialize community structure\n ic_traits_manu = np.array([[fp_init, b_Mmax_init, 1./K_MR_init, 1./K_MB_init]]);\n ic_L_manu = np.array([1.]); #cell length\n ic_N_manu = np.array([M_init]); #number of cells\n ic_traits_help = np.array([[b_Hmax_init, 1./K_HR_init]]);\n ic_L_help = np.array([1.]);\n ic_N_help = np.array([H_init]);\n N0 = ic_L_manu * ic_N_manu + ic_L_help * ic_N_help; #total number of cells\n \n #args for construction of CellType: traits, L, N, death rate\n manu = commSelect.CellType.CellType(ic_traits_manu, ic_L_manu, ic_N_manu, d_M * dt);\n manu.traits_bound = traits_manu_bound;\n manu.traits_lowerbound = traits_manu_lowerbound;\n \n helper = commSelect.CellType.CellType(ic_traits_help, ic_L_help, ic_N_help, d_H * dt);\n helper.traits_bound = traits_help_bound;\n helper.traits_lowerbound = traits_help_lowerbound;\n \n #cell types and metabolites\n newbData = [manu, helper]; # cell types order: M, H\n ic_metabol = [R_init, 0., 0.]; # metabolites order: R, B, P\n \n # make (num_wells) copies of CellTypes\n newbDataAll = [];\n for i in range(num_wells):\n newbDataAll += [list(map(commSelect.CellType.CellType.copy, newbData))];\n \n print(\"first newborns initialized\");\n \n #cycle through\n for c in range(num_cycles):\n print(\"cycle %d...\" % c);\n \n #save newborn data\n if (c % 10 == 0):\n commSelect.save.saveNewborns(newbDataAll, run_id + \"/newb_%d.txt\" % c);\n \n #mature\n #args for mature function: \n #newbDataAll (list of newborn communities)\n #ic_metabol (initial metabolite concs)\n #RBPFG_prime (diffeq method)\n #nsteps (num timesteps per maturation phase)\n #dt (timestep length)\n #mut_params (mutation parameters)\n #parallel (boolean, True if using parallel processing)\n \n #returns:\n #adultDataAll (list of adult communities)\n #data (time series data of biomass and metabolites for each community)\n #data structure: [community index, 'data type', time]\n #'data type' is total biomass of each cell type and conc of each metabolite\n #biomass comes first (M, H) then metabolites (R, B, P)\n #so for example data[:,1,-1] is the final H biomass of each community\n start = 
tm.time();\n adultDataAll, data = commSelect.mature.mature(newbDataAll, ic_metabol, \\\n RBPFG_prime, nsteps, dt, mut_params, parallel);\n end = tm.time();\n print(\"Cycle completed in %.2f secs\" % (end - start));\n \n #save adult data\n if (c % 10 == 0):\n commSelect.save.saveAdults(adultDataAll, data, run_id + \"/adult_%d.txt\" % c); \n\n #evaluate community function for each adult\n P = commFunc(data);\n P_sorted = np.argsort(P);\n \n #re-initialize newborns\n newbDataAll = [];\n for i in range(num_wells):\n newbDataAll += [list(map(commSelect.CellType.CellType.copy, newbData))];\n \n #reproduce based on community function\n #arg adultDataAll: adults to reproduce\n #arg newbDataAll: newborns to fill with cells\n #P_sorted: indices of adults sorted from lowest to highest P\n #num_wells: number of wells\n #BM_target: target biomass per newborn\n #newborns_per_adult: max number of newborns to be made with 1 adult\n #cs: True if cell sorting, False if pipetting\n newbDataAll = commSelect.reproduce.reproduce(adultDataAll, newbDataAll, P_sorted, \\\n num_wells, BM_target, newborns_per_adult, False);\n\n","sub_path":"HMSixPhenotypes.py","file_name":"HMSixPhenotypes.py","file_ext":"py","file_size_in_byte":8893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"139082602","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Post\nfrom taggit.models import Tag\nfrom django.core.paginator import Paginator\n\n\n# Create your views here.\n\n\ndef post_list(request, tag_slug=None):\n posts = Post.objects.filter(published=True)\n tag = None\n\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n posts = posts.filter(tags__in=[tag])\n\n paginator = Paginator(posts, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'blog/post/list.html', {'posts': posts, 'tag': tag, 'page_obj': page_obj})\n\n\ndef post_detail(request, post_id):\n post = Post.objects.get(id=post_id)\n return render(request, 'blog/post/detail.html', {'post': post})\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"227190483","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nfrom dash.exceptions import PreventUpdate\r\nimport plotly.express as px\r\nimport indoors as ind\r\nfrom indoors import Indoors\r\n\r\n\"\"\"\r\nmain.py contains the core functionality of the Dash app. It is responsible for taking inputs,\r\nfeeding those inputs to the model (indoors.py), and displaying the model outputs in an effective, concise\r\nmanner.\r\n\r\nProperties: \r\nDash App Setup\r\nCOVID-19 Calculator Setup\r\nDropdown Preset Values\r\nTab CSS Styles\r\nCustom HTML Headers\r\nMain App \r\n\r\nMethods: \r\ndef update_figure: Calculate model & update displayed values\r\ndef update_risk_tol_disp: Update risk tolerance display value\r\ndef update_air_frac_disp: Update air fraction display value\r\n\"\"\"\r\n\r\n# Dash App Setup\r\napp = dash.Dash(__name__)\r\n# Used for Heroku deployment\r\nserver = app.server\r\n\r\n# COVID-19 Calculator Setup\r\nmyInd = ind.Indoors()\r\nresults_df = myInd.calc_n_max_series(2, 100, 1.0)\r\nfig = px.line(results_df, x=\"Maximum Exposure Time (hours)\", y=\"Maximum Occupancy\",\r\n title=\"Occupancy vs. 
Exposure Time\",\r\n height=400, color_discrete_map={\"Maximum Occupancy\": \"#de1616\"})\r\n\r\n# Dropdown Preset Values\r\nventilation_default = 3\r\nis_custom_vent = False\r\nventilation_types = [\r\n {'label': \"Custom (see Advanced)\", 'value': -1},\r\n {'label': \"Bedroom, closed windows (0.34 ACH)\", 'value': 0.34},\r\n {'label': \"Mechanical Ventilation (3 ACH)\", 'value': 3},\r\n {'label': \"Mechanical Ventilation (8 ACH)\", 'value': 8},\r\n {'label': \"Laboratory, Restaurant (9 ACH)\", 'value': 9},\r\n {'label': \"Bar (15 ACH)\", 'value': 15},\r\n {'label': \"Hospital (18 ACH)\", 'value': 18},\r\n {'label': \"Toxic Laboratory (25 ACH)\", 'value': 25},\r\n]\r\n\r\nfilter_default = 2\r\nis_custom_filter = False\r\nfilter_types = [\r\n {'label': \"Custom (see Advanced)\", 'value': -1},\r\n {'label': \"None\", 'value': 0},\r\n {'label': \"Residential Window AC (MERV 1-4)\", 'value': 2},\r\n {'label': \"Residential/Commercial/Industrial (MERV 5-8)\", 'value': 6},\r\n {'label': \"Residential/Commercial/Hospital (MERV 9-12)\", 'value': 10},\r\n {'label': \"Hospital & General Surgery (MERV 13-16)\", 'value': 14},\r\n {'label': \"HEPA (MERV 17-20)\", 'value': 17}\r\n]\r\n\r\nexertion_types = [\r\n {'label': \"Resting\", 'value': 0.49},\r\n {'label': \"Standing\", 'value': 0.54},\r\n {'label': \"Light Exercise\", 'value': 1.38},\r\n {'label': \"Moderate Exercise\", 'value': 2.35},\r\n {'label': \"Heavy Exercise\", 'value': 3.30},\r\n]\r\n\r\nexpiratory_types = [\r\n {'label': \"Breathing (nose)\", 'value': 1.1},\r\n {'label': \"Breathing (nose-nose)\", 'value': 8.8},\r\n {'label': \"Breathing (deep-fast)\", 'value': 4.2},\r\n {'label': \"Breathing (fast-deep)\", 'value': 8.5},\r\n {'label': \"Speaking (quiet speech)\", 'value': 29},\r\n {'label': \"Speaking (whispered counting)\", 'value': 37},\r\n {'label': \"Speaking (normal speech)\", 'value': 72},\r\n {'label': \"Speaking (voiced counting)\", 'value': 72},\r\n {'label': \"Speaking (loud speech)\", 'value': 142},\r\n {'label': \"Singing (whispered 'aah')\", 'value': 103},\r\n {'label': \"Singing (voiced 'aah')\", 'value': 970},\r\n]\r\n\r\nmask_types = [\r\n {'label': \"None (100% passage)\", 'value': 1},\r\n {'label': \"Cloth (15% passage)\", 'value': 0.15},\r\n {'label': \"N95 Surgical (5% passage)\", 'value': 0.05},\r\n]\r\n\r\npresets = [\r\n {'label': \"House\", 'value': 'house'},\r\n {'label': \"Restaurant\", 'value': 'restaurant'},\r\n {'label': \"Lecture Hall\", 'value': 'lecture_hall'}\r\n]\r\n\r\n# Nmax values for main red text output\r\nmodel_output_n_vals = [2, 3, 5, 10, 25, 50, 100]\r\n\r\n# CSS Styles for Tabs (currently known issue in Dash with overriding default css)\r\ntab_style = {\r\n 'padding-left': '1em',\r\n 'padding-right': '1em'\r\n}\r\n\r\n# Custom HTML Header\r\napp.index_string = '''\r\n\r\n\r\n \r\n {%metas%}\r\n \r\n COVID-19 Indoor Safety\r\n {%favicon%}\r\n {%css%}\r\n \r\n \r\n \r\n \r\n \r\n {%app_entry%}\r\n
\r\n {%config%}\r\n {%scripts%}\r\n {%renderer%}\r\n
\r\n \r\n'''\r\n\r\n# Main App\r\napp.layout = html.Div(children=[\r\n html.H1(children='MIT COVID-19 Indoor Safety Guideline'),\r\n html.Div([\r\n html.Div(children='''\r\n Kasim Khan (2020)\r\n '''),\r\n html.Div(children='''\r\n https://github.com/kawesomekhan/covid-indoor\r\n '''),\r\n html.Div(children='''\r\n Martin Z. Bazant and John W. M. Bush, medRxiv preprint (2020):\r\n \"Beyond Six Feet: A Guideline to Limit Indoor Airborne Transmission of COVID-19\"\r\n '''),\r\n html.Div('''\r\n http://web.mit.edu/bazant/www/COVID-19/\r\n '''),\r\n ], style={'font-size': '13px'}),\r\n\r\n html.Br(),\r\n html.Div(\r\n className='grid',\r\n children=[\r\n html.Div(\r\n className='card',\r\n children=[\r\n dcc.Tabs(className='custom-tabs-container',\r\n value='tab-1', children=[\r\n dcc.Tab(\r\n label='About',\r\n className='custom-tab',\r\n children=[\r\n html.H6(\"About: \"),\r\n html.Div('''\r\n COVID-19 has been spreading in homes, restaurants, bars, classrooms, and other\r\n enclosed spaces via tiny, infective aerosol droplets suspended in the air.\r\n To mitigate this spread, official public health guidelines have taken the form \r\n of minimum social distancing rules (6 feet in the U.S.) or maximum occupancy \r\n (25 people in Massachusetts). \r\n '''),\r\n html.Br(),\r\n html.Div('''\r\n However, public health has been slow to catch up with rapidly advancing science.\r\n Naturally, the risk of COVID-19 transmission would not only depend on physical \r\n distance, but also on factors such as exposure time, mask usage, and ventilation\r\n systems, among other factors.\r\n '''),\r\n html.Br(),\r\n html.Div('''\r\n This app uses a mathematical model, developed by MIT professors Martin Z. Bazant \r\n and John Bush, to improve upon\r\n current distancing guidelines by providing a more accurate description of\r\n indoor COVID-19 transmission risk.\r\n '''),\r\n html.Br(),\r\n html.Div('''\r\n Adjust parameters in the other tabs and see how different spaces handle\r\n indoor COVID-19 transmission.\r\n '''),\r\n ],\r\n style=tab_style,\r\n selected_style=tab_style\r\n ),\r\n dcc.Tab(\r\n label='Room Specifications',\r\n className='custom-tab',\r\n children=[\r\n html.H6(\"Room Specifications: \"),\r\n html.Br(),\r\n html.Div([\"Floor Area (sq. 
ft.): \",\r\n dcc.Input(id='floor-area', value=900, type='number')]),\r\n html.Br(),\r\n html.Div([\"Ceiling Height (ft.): \",\r\n dcc.Input(id='ceiling-height', value=12, type='number')]),\r\n html.Br(),\r\n html.Div(className='card-dropdown',\r\n children=[html.Div([\"Ventilation System: \"])]),\r\n html.Div(className='card-dropdown',\r\n children=[dcc.Dropdown(id='ventilation-type',\r\n options=ventilation_types,\r\n value=ventilation_default,\r\n searchable=False,\r\n clearable=False)]),\r\n html.Br(),\r\n html.Div([\"Filtration System: \",\r\n dcc.Dropdown(id='filter-type',\r\n options=filter_types,\r\n value=filter_default,\r\n searchable=False,\r\n clearable=False)]),\r\n html.Br(),\r\n html.Div([\"Outdoor Air Fraction: \",\r\n html.Span(id='air-fraction-output'),\r\n dcc.Slider(id='outdoor-air-fraction',\r\n min=0.01,\r\n max=1,\r\n step=0.01,\r\n value=0.2,\r\n marks={\r\n 0.01: {'label': '0 (closed room)'},\r\n 1: {'label': '1 (outdoors)'}\r\n })])\r\n ],\r\n style=tab_style,\r\n selected_style=tab_style\r\n ),\r\n dcc.Tab(\r\n label='Human Behavior',\r\n className='custom-tab',\r\n children=[\r\n html.H6(\"Human Behavior: \"),\r\n html.Br(),\r\n html.Div([\"Exertion Level: \",\r\n dcc.Dropdown(id='exertion-level',\r\n options=exertion_types,\r\n value=0.49,\r\n searchable=False,\r\n clearable=False)]),\r\n html.Br(),\r\n html.Div([\"Expiratory Activity: \",\r\n dcc.Dropdown(id='exp-activity',\r\n options=expiratory_types,\r\n value=29,\r\n searchable=False,\r\n clearable=False)]),\r\n html.Br(),\r\n html.Div([\"Masks? \",\r\n dcc.Dropdown(id='mask-type',\r\n options=mask_types,\r\n value=0.15,\r\n searchable=False,\r\n clearable=False)]),\r\n html.Br(),\r\n html.Div([\"Risk Tolerance: \",\r\n html.Span(id='risk-tolerance-output'),\r\n html.Div('''\r\n This represents the number of expected transmissions during the\r\n occupancy period. A vulnerable population, due to age or\r\n preexisting medical conditions, will generally require\r\n a lower risk tolerance. \r\n ''', style={'font-size': '13px', 'margin-left': '20px'}),\r\n dcc.Slider(id='risk-tolerance',\r\n min=0.01,\r\n max=1,\r\n step=0.01,\r\n value=0.1,\r\n marks={\r\n 0.01: {'label': '0.01: Contact Tracing'},\r\n 1: {'label': '1.0: Unsafe'}\r\n })\r\n ])\r\n ],\r\n style=tab_style,\r\n selected_style=tab_style\r\n ),\r\n dcc.Tab(\r\n label='Advanced',\r\n className='custom-tab',\r\n children=[\r\n html.H6(\"Advanced Input: \"),\r\n html.Div(['''\r\n Know your specific ACH or MERV specifications? 
Input them here:\r\n ''']),\r\n html.Br(),\r\n html.Div([\"Ventilation System (ACH): \",\r\n dcc.Input(id='ventilation-type-adv', value=ventilation_default,\r\n type='number')]),\r\n html.Br(),\r\n html.Div([\"Filtration System (MERV): \",\r\n dcc.Input(id='filtration-type-adv', value=filter_default,\r\n type='number')]),\r\n html.Br(),\r\n html.H6(\"Graph Output: \"),\r\n html.Div([\r\n dcc.Graph(\r\n id='safety-graph',\r\n figure=fig\r\n ),\r\n ])\r\n ],\r\n style=tab_style,\r\n selected_style=tab_style\r\n )\r\n ],\r\n colors={\r\n \"border\": \"#c9c9c9\",\r\n \"primary\": \"#de1616\"\r\n }),\r\n html.Br()\r\n ]),\r\n html.Div(\r\n className='card',\r\n children=[\r\n html.Div([\r\n html.H3([\r\n '''Based on this model, it should be safe for this room to have:\r\n ''']),\r\n html.H4(className='model-output-text', id='model-text-1'),\r\n html.H4(className='model-output-text', id='model-text-2'),\r\n html.H4(className='model-output-text', id='model-text-3'),\r\n html.H4(className='model-output-text', id='model-text-4'),\r\n html.H4(className='model-output-text', id='model-text-5'),\r\n html.H4(className='model-output-text', id='model-text-6'),\r\n html.H4(className='model-output-text', id='model-text-7'),\r\n html.Br(),\r\n html.H3([\r\n '''In comparison, current six feet distancing guidelines recommend no more than''',\r\n html.Span(id='six-ft-output', children=''' 2 people ''', style={'color': '#de1616'}),\r\n ''' in this room.''']),\r\n ]),\r\n ], style={'padding-top': '0px'}),\r\n ]\r\n ),\r\n])\r\n\r\n\r\n# Model Update & Calculation\r\n# See indoors.py def set_default_params(self) for parameter descriptions.\r\n@app.callback(\r\n [Output('safety-graph', 'figure'),\r\n Output('model-text-1', 'children'),\r\n Output('model-text-2', 'children'),\r\n Output('model-text-3', 'children'),\r\n Output('model-text-4', 'children'),\r\n Output('model-text-5', 'children'),\r\n Output('model-text-6', 'children'),\r\n Output('model-text-7', 'children'),\r\n Output('six-ft-output', 'children')],\r\n [Input('floor-area', 'value'),\r\n Input('ceiling-height', 'value'),\r\n Input('ventilation-type', 'value'),\r\n Input('outdoor-air-fraction', 'value'),\r\n Input('filter-type', 'value'),\r\n Input('exertion-level', 'value'),\r\n Input('exp-activity', 'value'),\r\n Input('mask-type', 'value'),\r\n Input('risk-tolerance', 'value'),\r\n Input('ventilation-type-adv', 'value'),\r\n Input('filtration-type-adv', 'value')]\r\n)\r\ndef update_figure(floor_area, ceiling_height, air_exchange_rate, outdoor_air_fraction, merv,\r\n breathing_flow_rate, infectiousness, mask_passage_prob, risk_tolerance, ach_adv, merv_adv):\r\n # Check if any custom values are selected; if so, grab the ach from the advanced tab instead.\r\n if air_exchange_rate == -1:\r\n air_exchange_rate = ach_adv\r\n\r\n if merv == -1:\r\n merv = merv_adv\r\n\r\n # Update model with newly-selected parameters\r\n aerosol_radius = 2\r\n\r\n myInd.physical_params = [floor_area, ceiling_height, air_exchange_rate, outdoor_air_fraction,\r\n Indoors.merv_to_eff(merv, aerosol_radius)]\r\n myInd.physio_params = [breathing_flow_rate, aerosol_radius]\r\n myInd.disease_params = [infectiousness, 0.3]\r\n myInd.prec_params = [mask_passage_prob, risk_tolerance]\r\n\r\n # Update the figure with a new model calculation\r\n new_df = myInd.calc_n_max_series(2, 100, 1.0)\r\n new_fig = px.line(new_df, x=\"Maximum Exposure Time (hours)\", y=\"Maximum Occupancy\",\r\n title=\"Occupancy vs. 
Exposure Time\", height=400,\r\n color_discrete_map={\"Maximum Occupancy\": \"#de1616\"})\r\n new_fig.update_layout(transition_duration=500)\r\n\r\n # Update the red text output with new model calculations\r\n model_output_text = [\"\", \"\", \"\", \"\", \"\", \"\", \"\"]\r\n index = 0\r\n for n_val in model_output_n_vals:\r\n max_time = myInd.calc_max_time(n_val) # hours\r\n units = 'hours'\r\n if max_time < 1:\r\n units = 'minutes'\r\n max_time = max_time * 60\r\n\r\n if round(max_time) == 1:\r\n units = units[:-1]\r\n\r\n base_string = '{n_val} people for {val:.0f} ' + units + ','\r\n model_output_text[index] = base_string.format(n_val=n_val, val=max_time)\r\n index += 1\r\n\r\n model_output_text[-2] = model_output_text[-2] + ' or'\r\n model_output_text[-1] = model_output_text[-1][:-1] + '.'\r\n\r\n six_ft_people = myInd.get_six_ft_n()\r\n if six_ft_people == 1:\r\n six_ft_text = ' {} person'.format(six_ft_people)\r\n else:\r\n six_ft_text = ' {} people'.format(six_ft_people)\r\n\r\n # Update all relevant display items (figure, red output text)\r\n return new_fig, model_output_text[0], model_output_text[1], model_output_text[2], model_output_text[3], \\\r\n model_output_text[4], model_output_text[5], model_output_text[6], six_ft_text\r\n\r\n\r\n# Update Advanced ventilation setting based on dropdown selection.\r\n# If the custom preset is selected, update the custom value to the default.\r\n@app.callback(\r\n Output('ventilation-type-adv', 'value'),\r\n Input('ventilation-type', 'value')\r\n)\r\ndef update_adv_ventilation_fwd(air_exchange_rate):\r\n global is_custom_vent\r\n if air_exchange_rate == -1:\r\n is_custom_vent = True\r\n raise PreventUpdate\r\n else:\r\n is_custom_vent = False\r\n return air_exchange_rate\r\n\r\n\r\n# Update Advanced ventilation dropdown if set to a custom value\r\n@app.callback(\r\n Output('ventilation-type', 'value'),\r\n Input('ventilation-type-adv', 'value')\r\n)\r\ndef update_adv_ventilation_rev(air_exchange_rate):\r\n for vent_type in ventilation_types:\r\n if vent_type['value'] == air_exchange_rate:\r\n return air_exchange_rate\r\n\r\n return -1\r\n\r\n\r\n# Update Advanced filtration setting based on dropdown selection.\r\n# If the custom preset is selected, update the custom value to the default.\r\n@app.callback(\r\n Output('filtration-type-adv', 'value'),\r\n Input('filter-type', 'value')\r\n)\r\ndef update_adv_filtration_fwd(merv):\r\n global is_custom_filter\r\n if merv == -1:\r\n is_custom_filter = True\r\n raise PreventUpdate\r\n else:\r\n is_custom_filter = False\r\n return merv\r\n\r\n\r\n# Update Advanced filtration dropdown if set to a custom value\r\n@app.callback(\r\n Output('filter-type', 'value'),\r\n Input('filtration-type-adv', 'value')\r\n)\r\ndef update_adv_filtration_rev(merv):\r\n for filter_type in filter_types:\r\n if filter_type['value'] == merv:\r\n return merv\r\n\r\n return -1\r\n\r\n\r\n# Risk tolerance slider value display\r\n@app.callback(\r\n [Output('risk-tolerance-output', 'children')],\r\n [Input('risk-tolerance', 'value')]\r\n)\r\ndef update_risk_tol_disp(risk_tolerance):\r\n return [\"{:.2f}\".format(risk_tolerance)]\r\n\r\n\r\n# Outdoor Air Fraction slider value display\r\n@app.callback(\r\n [Output('air-fraction-output', 'children')],\r\n [Input('outdoor-air-fraction', 'value')]\r\n)\r\ndef update_air_frac_disp(outdoor_air_fraction):\r\n return [\"{:.2f}\".format(outdoor_air_fraction)]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
app.run_server(debug=False)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"406047886","text":"import collections\n\n\ndef count_unique_words(filename):\n # your code here \n words = {}\n with open(filename, 'rt') as infile:\n for line in infile:\n normalized = line.split()\n for word in normalized:\n if word not in words:\n words[word] = 0\n words[word] += 1 \n return words\n \n\n\nif __name__ == '__main__':\n words = count_unique_words('IntermidiatePython/hamlet.txt')\n print(words['the'])\n","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"520572057","text":"# -*- coding: utf-8 -*-\nclass Student:\n \n def __init__(self, id, name, gender, clazz, birthday):\n self.id = id\n self.name = name\n self.gender = gender\n self.clazz = clazz\n self.birthday = birthday\n self.desc = \"没有对这个人的任何备注\"\n \n \n \n","sub_path":"schoolManager/vo/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"256149307","text":"logging_messages = {0: {}, 1: {}}\ndo_print = False # For debugging\nvery_verbose = False\n\n\ndef log(level, group, message):\n global logging_messages\n\n if group in logging_messages[level]:\n logging_messages[level][group].add(message)\n else:\n logging_messages[level].update({group: {message}})\n\n\ndef warn(target, vulnerability, msg=\"\"):\n message = \"Warning: \" + target + \" \" + vulnerability + \" \" + msg\n\n if do_print:\n print(message)\n else:\n log(0, vulnerability, message)\n\n\ndef vulnerability(target, vulnerability, msg=\"\"):\n message = \"Vulnerability: \" + target + \" \" + vulnerability + \" \" + msg\n\n if do_print:\n print(message)\n else:\n log(1, vulnerability, message)\n\n\ndef info(target, msg=\"\"):\n print(\"Information: \" + target + \" \" + msg)\n","sub_path":"webvulnscan/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"392897452","text":"# show fps camera\n\n\nimport cv2\nimport numpy as np \nfrom imutils.video import FPS\nimport imutils\nimport face_recognition\n\n\nvc = cv2.VideoCapture(0)\n\n\nwhile True:\n\tfps = FPS().start()\n\tret, frame = vc.read()\n\n\trgb = frame[:, :, ::-1]\n\n\tface_locations = face_recognition.face_locations(rgb)\n\n\tfor (top, right, bottom, left) in face_locations:\n\t\tcv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 1)\n\n\tfps.update()\n\tfps.stop()\n\tfont = cv2.FONT_HERSHEY_DUPLEX\n\tcv2.putText(frame, \"FPS:{}\".format(fps.fps()), (10, 20), font, 1.0, (255, 255, 255), 1)\n\tcv2.imshow(\"frame\", frame)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\n\n# Release handle to the webcam\nvc.release()\ncv2.destroyAllWindows()\n","sub_path":"test_n1.py","file_name":"test_n1.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"315964691","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nimport sys, os\n\nfor path in sys.argv[1:]:\n\tts, pixels = np.loadtxt(path, unpack=True)\n\tts -= 
ts[0]\n\n\ttotal_angle_pix = 224\n\n\t# 113(3) 126(3) 179(5)\n\tfov1 = np.arctan(113 / 179)\n\tfov2 = np.arctan(126 / 179)\n\n\tfov = fov1 + fov2\n\n\t# relative to horizontal\n\tpixels = total_angle_pix - pixels\n\n\tangles = pixels * fov / total_angle_pix - fov2\n\n\tys = (1.81 - 0.25) * np.tan(angles)\n\n\t# 0 means highest point\n\tys = np.mean(ys[:3]) - ys\n\n\tdef fitter(t, t0, k, fps=1000, g=9.81):\n\t\tfact = (np.sign(t - t0) + 1) / 2\n\t\treturn fact * np.log(np.cosh(((t - t0) / fps) * np.sqrt(g * k))) / k\n\n\tpopt, pcov = curve_fit(fitter, ts, ys, p0 = (0, 0.3), bounds=([-100, 0], [100, np.inf]))\n\tk_over_m = popt[1]\n\tS_k_over_m = np.sqrt(np.diag(pcov))[1]\n\n\tprint('k/m = ', k_over_m, '+-', S_k_over_m)\n\n\t#def freefall(t, t0, fps=1000, g=9.81):\n\t#\treturn 0.5 * g * ((t - t0) / fps)**2\n\n\t#if os.isatty(1):\n\t#\tplt.plot(ts, freefall(ts, popt[0]))\n\n\tplt.plot(ts, fitter(ts, popt[0], popt[1]), color='r', linewidth=3)\n\t#plt.plot(ts, fitter(ts, popt[0], popt[1] * 1.5), color='r', linewidth=3)\n\t#plt.plot(ts, fitter(ts, popt[0], popt[1] / 1.5), color='r', linewidth=3)\n\n\tplt.scatter(ts, ys, marker='.', color='k')\n\tplt.show()\n","sub_path":"scripts/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"74389362","text":"import zipfile\nimport urllib\nimport os\nimport spacy\n\n\nclass TargetClassifier(object):\n def __init__(self):\n \"\"\"\n During initialization, spaCy models are loaded and kept ready for classifying a sentence to a topic\n \"\"\"\n\n modelInfoFromEnv = os.environ['KATECHEO_NER']\n '''\n Parse the String in the environment variable.\n - Each model information is separated by ','\n - A specific model name and it's NER model location URL is separated by '='\n '''\n modelInfos = [\n sentence.split('=') for sentence in modelInfoFromEnv.split(',')\n ]\n\n nlpModels = {}\n\n # Iterate through each entry with the model information.\n for modelInfo in modelInfos:\n modelName = modelInfo[0]\n modelURL = modelInfo[1]\n\n # Download the model\n modelRootDirectory = \"./\" + modelName\n\n # Check if the model files already exists.\n if (modelName not in os.listdir(\".\")):\n urllib.request.urlretrieve(modelURL, modelName + \".zip\")\n zipRef = zipfile.ZipFile(modelName + \".zip\", 'r')\n zipRef.extractall(modelRootDirectory)\n zipRef.close()\n\n # Get the internal directory of the NER model.\n modelMainDirectory = os.listdir('./' + modelName)[0]\n\n # Check if the model directory has been downloaded.\n if (modelMainDirectory):\n\n # Load the spaCy models.\n nlpModels[modelName] = spacy.load(\n os.path.join(modelRootDirectory, modelMainDirectory))\n\n self.models = nlpModels\n\n \"\"\"\n Returns a string with data passed on from the previous models.\n\n Parameters\n ----------\n X : list of input texts\n feature_names : list of feature names\n meta : object with additional tags\n \"\"\"\n def predict(self, X, feature_names, meta):\n\n # logic from parent\n if 'tags' in meta and 'proceed' in meta['tags'] and meta['tags'][\n 'proceed']:\n\n topicName = \"\"\n matchedEntities = []\n\n # Get the text string that is to be classified.\n messageText = str(X[0])\n\n # Iterate through all the models\n for topic, model in self.models.items():\n\n # Get the inference result from the NER model for a question.\n doc = model(messageText)\n\n # Check if the model has recognised the trained entities in the question.\n if doc.ents:\n topicName = topic\n 
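# record this topic's matched entities; classification proceeds only if exactly one topic matches\n                    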
matchedEntities.append(doc.ents)\n\n # TODO: List out all the topics with a percentage of the match confidence.\n # Currently we would like to return classification result\n # only if it matches a single topic.\n if len(matchedEntities) == 1:\n self.result = {'proceed': True}\n self.result['topic'] = topicName\n return X\n else:\n self.result = {'proceed': False}\n self.result['point_of_failure'] = 'No Matching Topic'\n return X\n\n else:\n self.result = meta['tags']\n return X\n\n def tags(self):\n return self.result\n","sub_path":"target-classifier/TargetClassifier.py","file_name":"TargetClassifier.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"332735771","text":"#\n# ParamSet.py -- Groups of widgets holding parameters\n# \n# Eric Jeschke (eric@naoj.org)\n#\n# Copyright (c) Eric R. Jeschke. All rights reserved.\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nfrom ginga.misc import Widgets, Callback, Bunch\n\n\nclass ParamSet(Callback.Callbacks):\n def __init__(self, logger, params):\n super(ParamSet, self).__init__()\n \n self.logger = logger\n self.paramlst = []\n self.params = params\n self.widgets = {}\n\n for name in ('changed', ):\n self.enable_callback(name)\n \n def build_params(self, paramlst, orientation='vertical'):\n # construct a set of widgets for the parameters\n captions = []\n for param in paramlst:\n title = param.get('time', param.name)\n\n captions.append((title+':', 'label', param.name, 'entry'))\n\n w, b = Widgets.build_info(captions, orientation=orientation)\n\n # fill with default values and tool tips\n for param in paramlst:\n name = param.name\n\n # if we have a cached value for the parameter, use it\n if name in self.params:\n value = self.params[name]\n b[name].set_text(str(value))\n\n # otherwise initialize to the default value, if available\n elif 'default' in param:\n value = param.default\n b[name].set_text(str(value))\n self.params[name] = value\n\n if 'description' in param:\n b[name].set_tooltip(param.description)\n\n b[name].add_callback('activated', self._value_changed_cb)\n \n self.paramlst = paramlst\n self.widgets = b\n\n return w\n\n def _get_params(self):\n for param in self.paramlst:\n w = self.widgets[param.name]\n value = w.get_text()\n if 'type' in param:\n value = param.type(value)\n self.params[param.name] = value\n\n def sync_params(self):\n for param in self.paramlst:\n key = param.name\n w = self.widgets[key]\n if key in self.params:\n value = self.params[key]\n w.set_text(str(value))\n\n def get_params(self):\n self._get_params()\n return self.params\n \n def _value_changed_cb(self, w):\n self._get_params()\n self.make_callback('changed', self.params)\n \n\n#END\n","sub_path":"ginga/misc/ParamSet.py","file_name":"ParamSet.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"533138944","text":"#!/usr/bin/env python3\nimport os.path\nimport sys\nimport json\nimport jsonschema\nimport re\n\nschema_relative_path = \"..\"\nschema_dir = os.path.abspath(schema_relative_path).replace('\\\\', '/') + \"/\"\nspec_path = os.path.join(schema_dir, \"spec.md\")\nprint(schema_dir)\n\ndef check_example(schema_path, example):\n full_path = os.path.join(schema_dir, schema_path)\n with open(full_path, 'r') as f:\n try: \n schema = json.load(f)\n except Exception as e:\n raise Exception(\"There 
was an error parsing the JSON schema\", e)\n    \n    resolver = jsonschema.RefResolver(base_uri = 'file:///' + full_path, referrer = schema)\n    jsonschema.validate(example, schema, resolver = resolver)\n\ndef get_next(iterator):\n    try:\n        return next(iterator)\n    except StopIteration: \n        return None\n\ndef get_next_non_blank(iterator):\n    line = \" \"\n    while line.isspace():\n        line = get_next(iterator)\n    return line\n\ndef get_example(lines):\n    next_line = get_next_non_blank(lines)\n    if re.match(\"#* ?Example$\", next_line):\n        return get_example_body(lines)\n    else:\n        return None\n\ndef get_example_body(lines):\n    line = get_next_non_blank(lines)\n    json = \"\"\n    while line and is_preformatted(line):\n        json += line[4:]\n        line = get_next(lines)\n    if json.isspace():\n        return None\n    else:\n        return json\n\ndef is_preformatted(line):\n    return line[0:4].isspace() and (not line.isspace())\n\ndef validate(url, example):\n    print(\"Checking [{}] against {}\".format(url, example.strip()))\n    try:\n        data = json.loads(example)\n    except Exception as e:\n        raise Exception(\"There was an error parsing the example JSON\", e)\n    check_example(url, data) \n\ndef check_spec(spec_path):\n    with open(spec_path, 'r') as f:\n        spec = f.readlines()\n    pattern = re.compile(\"Schema: \\[.+\\]\\((?P<url>.+)\\)$\", flags=re.MULTILINE) # \\[`(\\w|\\.)+`\\]\\((\\w|\\.+)\\)\n    lines = iter(spec)\n    line = get_next(lines)\n    while line:\n        match = pattern.match(line)\n        if match:\n            url = match.group('url')\n            example = get_example(lines)\n            if example:\n                validate(url, example) \n            else:\n                raise Exception(\"No example given for {}\".format(url))\n            \n        line = get_next(lines)\n\ncheck_spec(spec_path)\nprint(\"Finished without errors ☺\")\n","sub_path":"spec/test/test-schema.py","file_name":"test-schema.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"423765030","text":"#!/usr/bin/env python3\nd = { 'A': 'T', 'T' : 'A', 'G' : 'C' , 'C': 'G'}\n#print (d)\n\nsequence= input (\"enter your sequence: \") # Exercise4-script1 modified.\nmySeq = list (str(sequence)) \n#print (mySeq)\nmySeq.reverse()\n#print (mySeq)\ncomplement = [d[base] for base in mySeq]\nR_complement = \"\".join(complement)\nprint (R_complement) ##CK add a message","sub_path":"Exercise-Scripts/Exercise4_script1.py","file_name":"Exercise4_script1.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"218169165","text":"import pygame\n\n\nclass itemBase(object):\n\tdef __init__(self, Type, img = 0):\n\t\ttry:\n\t\t\tself.img = pygame.image.load(img).convert_alpha()\n\t\texcept:\n\t\t\tself.img = pygame.image.load('img/test.png').convert_alpha()\n\t\tself.Type = Type\n\t\tself.uses = -1\n\t\tself.imgFile = img\n\t\tself.draw1 = False\n\t\tself.x = 0\n\t\tself.y = 0\n\n\tdef drop(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.draw1 = True\n\n\tdef pickup(self):\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.draw1 = False\n\n\tdef draw(self, window, x=0, y=0):\n\t\tif self.draw1 == True:\n\t\t\twindow.blit(self.img, ((self.x * 32) - 1, (self.y*32) - 1))\n\t\telif self.draw1 == False:\n\t\t\twindow.blit(self.img, (x, y))\n\tdef save(self):\n\t\tself.img = self.imgFile\n\n\t\t\t","sub_path":"items/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"63467675","text":"# -*- coding: utf-8 
-*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport sqlite3\n\nclass TiebaPipeline(object):\n def open_spider(self,spider):\n self.con=sqlite3.connect(\"sqlite.sqlite3\")\n self.cur=self.con.cursor()\n sql=\"create table if not exists tieba(title varchar(100),content text)\"\n self.cur.execute(sql)\n def close_spider(self,spider):\n self.con.close()\n def process_item(self, item, spider):\n sql=u\"insert into tieba values('{0}','{1}')\"\n sq=sql.format(item[\"title\"],item[\"content\"])\n self.cur.execute(sq)\n self.con.commit()\n return item\n","sub_path":"TiebaCrawl/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"552893801","text":"import chainer\nfrom rnn_classify import MultiLayerLSTM, Classifier\nfrom sklearn.model_selection import train_test_split\nimport time\nimport numpy as np\nfrom chainer import Variable, optimizers\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\nfrom TrainingSet import TrainRNN, TrainMLP\nimport os\nimport pickle\nimport random\nimport winsound\n\n\"\"\" Trains a Recurrent Neural Network\n Args:\n Requires time series, each time sample labelled.\n Requires input and training parameters to be adjusted.\n Return:\n An RNN model. \"\"\"\n\n\"\"\" Data should be three dimensional array\n data: [num_samples x length of time signal x number of channels]\n label: [num_samples x 1 x length of time signal]\"\"\"\n\nprint_name = '\\D12a3a-session2-MLP5'\n\n# As we are using reshape form Matrix dimensions should be as this\nsamples_o = sio.loadmat('data.mat')\nlabels_o = sio.loadmat('label.mat')\ntargets_o = sio.loadmat('tar.mat')\nraw_samples = samples_o['data_m']\nraw_labels = np.concatenate(targets_o['target_m']).astype(np.int32)\n\n# Parameters\nnum_layer = 20 # Number of LSTM layers (depth)\nsize_layer = 50\nsize_out = 2\nmax_num_iter = 200\ntest_size = .1\n\nMLP = TrainMLP(raw_samples, raw_labels, ratio_batch=0.2, test_size=test_size,\n valid_size=0.1, num_layer=num_layer, max_num_iter=max_num_iter,\n valid_epoch=2)\n\nif os.path.exists('models' + print_name + '.p'):\n MLP.min_val_model = pickle.load(open(\"models\" + print_name + \".p\", \"rb\"))\nelse:\n MLP.train()\n pickle.dump(MLP.min_val_model, open(\"models\" + print_name + \".p\", \"wb\"))\n plt.figure()\n plt.plot(np.arange(1, MLP.max_num_iter + 1), MLP.train_loss,\n label='train loss')\n plt.plot(np.arange(MLP.valid_epoch, MLP.max_num_iter + 1,\n step=MLP.valid_epoch, ), MLP.valid_loss,\n label='valid loss')\n plt.xlabel('iteration[number]')\n plt.ylabel('loss[softmax cross entropy]')\n plt.legend(loc='upper right', shadow=True)\n plt.grid(True)\n plt.savefig('.\\FigDummy' + print_name + '_Train.pdf', format='pdf')\n\nMLP.test()\nprint('Accuracy: %{}'.format(100 * MLP.acc[0]))\n","sub_path":"RDat_Demo_MLP.py","file_name":"RDat_Demo_MLP.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"535652311","text":"import sys,time\nimport numpy as np\ndef convert_list(word):\n return list(word)\n\n# message=\"Common sense is not so common.\"\n\nf = open(\"text.txt\",\"r\")\nmessage = f.read()\n\nm=message.replace(\" \",\"#\")\n\nkey=8\ncol=len(message)\n\nrem=col%key\n\nquo=col//key\nif(rem!=0):\n quo=quo+1\nelse:\n 
quo=quo\n\nn=convert_list(m)\nadd_zero=(quo*key)-col\n\nfor i in range(0,add_zero):\n    n.append(0)\n\nnp.array(n)\no=np.reshape(n,(quo,key))\np=o.transpose()\nb=\" \"\n\n\nfor k in range(0,p.shape[0]):\n    for l in range(0,p.shape[1]-1):\n        b+=str(p[k,l])\n    b+=str(p[k,-1])\nres_1=b.replace(\"#\",\" \")\nres_2=res_1.replace(\"0\",\" \")\n# print(res_2)\nf.close()\n\ng = open(\"text.encript.txt\",\"w\")\ng.write(res_2)\ng.close()","sub_path":"CEASER_FILE_ENCRIPT_DECRIPT/File_ceaser_encry.py","file_name":"File_ceaser_encry.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"386320898","text":"import argparse\nimport datetime\nimport hashlib\nimport os\nimport sys\ntry:\n    from math import isclose\nexcept (ImportError, AttributeError): # not in Python 2.7\n    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\nfrom . import Path\n\nfrom dateutil import parser as date_parser\n\nfrom materials_commons.api import get_all_projects\nfrom materials_commons.etl.common.worksheet_data import ExcelIO\nfrom materials_commons.etl.common.metadata import Metadata\nfrom materials_commons.etl.common.meta_data_verify import MetadataVerification\n\n\nclass Compare:\n\n    def __init__(self):\n        self.project = None\n        self.experiment = None\n        self.metadata = Metadata()\n        self.upload = None\n        self.download = None\n        self.do_files = False\n        self.checksum = False\n\n    def set_options(self, do_files=False, upload=None, download=None, checksum=False):\n        if do_files and not (upload and download):\n            do_files = False\n            print(\"Both upload and download must be set to compare files\")\n        self.do_files = do_files\n        self.upload = upload\n        self.download = download\n        self.checksum = checksum\n\n    def compare(self, project_name, experiment_name, input_file_path, output_file_path, apikey):\n\n        check_ok = self.set_up_project_experiment_metadata(project_name, experiment_name, apikey=apikey)\n        if not check_ok:\n            return\n\n        print('Input --', input_file_path)\n        excel_io_controller = ExcelIO()\n        excel_io_controller.read_workbook(input_file_path)\n        sheet_name_list = excel_io_controller.sheet_name_list()\n        sheet_name = excel_io_controller.set_current_worksheet_by_index(0)\n        print(\"Input -- Selecting: \", sheet_name, \"(from\", sheet_name_list, \")\")\n        data1 = excel_io_controller.read_entire_data_from_current_sheet()\n        excel_io_controller.close()\n        data1 = self.check_for_end_tag(data1)\n        print(\"Input data size:\", len(data1), len(data1[0]))\n\n        print('Output --', output_file_path)\n        excel_io_controller = ExcelIO()\n        excel_io_controller.read_workbook(output_file_path)\n        sheet_name_list = excel_io_controller.sheet_name_list()\n        sheet_name = excel_io_controller.set_current_worksheet_by_index(0)\n        print(\"Output -- Selecting: \", sheet_name, \"(from\", sheet_name_list, \")\")\n        data2 = excel_io_controller.read_entire_data_from_current_sheet()\n        excel_io_controller.close()\n        data2 = self.check_for_end_tag(data2)\n        print(\"Output data size:\", len(data2), len(data2[0]))\n\n        metadata = self.metadata\n        print('----')\n        if self.compare_data_shape(data1, data2):\n            self.compare_headers(metadata, data1, data2)\n            self.compare_first_col(metadata, data1, data2)\n            self.compare_data_area(metadata, data1, data2)\n            if self.do_files:\n                self.compare_files(metadata, data1, data2)\n\n    def set_up_project_experiment_metadata(self, project_name, experiment_name, apikey):\n        project_list = 
get_all_projects(apikey=apikey)\n        for proj in project_list:\n            if proj.name == project_name:\n                self.project = proj\n        if not self.project:\n            print(\"Can not find project with name = \" + str(project_name) + \". Quitting.\")\n            return False\n        experiment_list = self.project.get_all_experiments()\n        found = []\n        for exp in experiment_list:\n            if exp.name == experiment_name:\n                found.append(exp)\n        if not found:\n            print(\"Can not find Experiment with name = \" + str(experiment_name) + \". Quitting.\")\n            return False\n        if len(found) > 1:\n            print(\"Found more than one Experiment with name = \" + str(experiment_name) + \";\")\n            print(\"Rename experiment so that '\" + str(experiment_name) + \"' is unique.\")\n            print(\"Quitting.\")\n            return False\n        self.experiment = found[0]\n        check_ok = self.metadata.read(self.experiment.id)\n        if not check_ok:\n            print(\"There was no ETL metadata for the experiment '\" + str(experiment_name) + \"';\")\n            print(\"This experiment does not appear to have been created using ETL input.\")\n            print(\"Quitting.\")\n            return False\n        metadata = MetadataVerification().verify(self.metadata)\n        if not metadata:\n            return False\n        self.metadata = metadata\n        return True\n\n    @staticmethod\n    def compare_data_shape(data1, data2):\n        mismatch = False\n        if len(data1) == 0 or len(data2) == 0:\n            if len(data1) == 0 and len(data2) == 0:\n                print(\"No data in either spreadsheet (zero length)\")\n            elif len(data1) == 0:\n                print(\"No data in input spreadsheet (zero length)\")\n            else:\n                print(\"No data in output spreadsheet (zero length)\")\n            return False\n        else:\n            if not len(data1) == len(data2):\n                print(\"Number of data rows differ: \", len(data1), len(data2))\n                mismatch = True\n            if not len(data1[0]) == len(data2[0]):\n                print(\"Number of data cols differ: \", len(data1[0]), len(data2[0]))\n                mismatch = True\n            if not mismatch:\n                print(\"Data number of rows and cols match\")\n            return not mismatch\n\n    @staticmethod\n    def compare_headers(metadata, data1, data2):\n        len1 = len(data1)\n        len2 = len(data2)\n        if len1 < metadata.header_row_end:\n            print(\"Missing header data (length), input\")\n        if len2 < metadata.header_row_end:\n            print(\"Missing header data (length), output\")\n        if len1 >= metadata.header_row_end and len2 >= metadata.header_row_end:\n            row_length_check = True\n            for row in range(0, metadata.header_row_end):\n                check1 = len(data1[row]) >= metadata.data_col_end\n                check2 = len(data2[row]) >= metadata.data_col_end\n                if not check1:\n                    print(\"Header row shorter than expected, input, row \" + str(row))\n                if not check2:\n                    print(\"Header row shorter than expected, output, row \" + str(row))\n                row_length_check = row_length_check and check1 and check2\n            identical = True\n            if row_length_check:\n                for row in range(0, metadata.header_row_end):\n                    row_data1 = data1[row]\n                    row_data2 = data2[row]\n                    for col in range(0, metadata.data_col_end):\n                        match = (row_data1[col] == row_data2[col])\n                        identical = identical and match\n                        if not match:\n                            print(\"Header mismatch at row = \" + str(row) + \", col = \" + str(col) + \": \"\n                                  + str(row_data1[col]) + \", \" + str(row_data2[col]))\n            if identical:\n                print(\"Headers match\")\n\n    @staticmethod\n    def compare_first_col(metadata, data1, data2):\n        len1 = len(data1)\n        len2 = len(data2)\n        if len1 < metadata.data_row_end:\n            print(\"Missing rows (first col matching), input \" + str(len1))\n        if len2 < metadata.data_row_end:\n            print(\"Missing rows (first col matching), output \" + str(len2))\n        if len1 >= metadata.data_row_end and len2 >= metadata.data_row_end:\n            identical = True\n            for row in range(0, 
metadata.data_row_end):\n row_data1 = data1[row]\n row_data2 = data2[row]\n match = (row_data1[0] == row_data2[0])\n identical = identical and match\n if not match:\n print(\"First col mismatch at row = \" + str(row) + \": \"\n + str(row_data1[0]) + \", \" + str(row_data2[0]))\n if identical:\n print(\"First cols match\")\n\n @staticmethod\n def check_for_end_tag(data):\n first_row = data[0]\n index = 0\n missing_end = True\n end_col = len(first_row)\n for col in first_row:\n if str(col).startswith(\"END\"):\n print(\"Found END marker at column \" + str(index)\n + \", updating data end to this location\")\n end_col = index\n missing_end = False\n break\n index += 1\n if missing_end:\n return data\n # else\n update = []\n for data_row in data:\n update_row = []\n for index in range(0, end_col):\n update_row.append(data_row[index])\n update.append(update_row)\n return update\n\n def compare_data_area(self, metadata, data1, data2):\n len1 = min(len(data1), metadata.data_row_end)\n len2 = min(len(data2), metadata.data_row_end)\n\n end_row = len1\n if not (len1 == len2):\n print(\"Data number of rows differ: \" + str(len1) + \", \" + str(len2))\n if len2 < len1:\n end_row = len2\n\n if end_row < metadata.data_row_end:\n if len1 < metadata.data_row_end:\n print(\"Missing data rows, input, expected \"\n + str(metadata.data_row_end) + \", found\" + str(len1))\n if len2 < metadata.data_row_end:\n print(\"Missing data rows, output, expected \"\n + str(metadata.data_row_end) + \", found\" + str(len2))\n types1 = data1[1]\n types2 = data2[1]\n\n types = [\"\"] # element at index zero is ignored\n\n for col in range(1, metadata.data_col_end):\n type1 = types1[col]\n type2 = types2[col]\n data_type = type1\n if not type1 == type2:\n print(\"Data type mismatch, col \" + str(col) + \":\", type1, type2)\n if self.type_expect_data(type1):\n data_type = type1\n elif self.type_expect_data(type2):\n data_type = type2\n types.append(data_type)\n\n identical = True\n for row in range(metadata.data_row_start, end_row):\n row_data1 = data1[row]\n row_data2 = data2[row]\n row_len1 = min(len(row_data1), metadata.data_col_end)\n row_len2 = min(len(row_data2), metadata.data_col_end)\n if not (row_len1 == row_len2):\n print(\"Data row \" + str(row) + \", lengths differ: \"\n + str(row_len1) + \", \" + str(row_len2))\n end_col = row_len1\n if end_col < metadata.data_col_end:\n if row_len1 < metadata.data_col_end:\n print(\"Missing data - row \" + str(row) + \" is short, data1, expected \"\n + str(metadata.data_col_end) + \", found \" + str(row_len1))\n print(row_data1)\n if row_len2 < metadata.data_col_end:\n print(\"Missing data - row \" + str(row) + \" is short, data2, expected \"\n + str(metadata.data_col_end) + \", found \" + str(row_len2))\n print(row_data2)\n if row_len2 < row_len1:\n end_col = row_len2\n\n for col in range(1, end_col):\n if not self.type_expect_data(types[col]):\n continue\n probe1 = row_data1[col]\n probe2 = row_data2[col]\n if isinstance(probe1, datetime.datetime):\n if isinstance(probe2, str):\n probe2 = date_parser.parse(probe2)\n match = probe1.isoformat() == probe2.isoformat()\n elif isinstance(probe1, float) or isinstance(probe2, float):\n if isinstance(probe1, str):\n probe1 = float(probe1)\n if isinstance(probe2, str):\n probe2 = float(probe2)\n match = isclose(probe1, probe2)\n elif (probe1 is None) or (probe2 is None):\n if isinstance(probe1, str) and probe1 == \"None\":\n probe1 = None\n if isinstance(probe2, str) and probe2 == \"None\":\n probe2 = None\n match = (probe1 is None) and 
(probe2 is None)\n else:\n match = (probe1 == probe2)\n identical = identical and match\n if not match:\n obj_type1 = type(probe1)\n obj_type2 = type(probe2)\n print(\"Data mismatch at row = \" + str(row) + \", col = \" + str(col) + \": \"\n + str(probe1) + \", \" + str(probe2) + \", \"\n + str(obj_type1) + \", \" + str(obj_type2))\n\n if identical:\n print(\"Data values match\")\n\n @staticmethod\n def type_expect_data(data_type):\n return data_type == \"MEAS\" or data_type == \"PARAM\" or \\\n data_type == \"SAMPLES\" or data_type == \"FILES\"\n\n def compare_files(self, metadata, data1, data2):\n compare_list = self.get_compare_list(metadata, data1, data2)\n if not compare_list:\n print(\"Skipping check on file content (no files found to compare).\")\n return\n compare_list = self.compare_file_paths(compare_list)\n matching_by = \"names\"\n if self.checksum:\n matching_by += \" and checksums\"\n compare_list = self.compare_file_checksum(compare_list)\n if not compare_list:\n print(\"No matching files were found.\")\n return\n\n print(\"All compared files match (by \" + matching_by + \").\")\n# matching = []\n# for record in compare_list:\n# matching.append(record['path'])\n# print(\"Matching files/directories (by \" + matching_by + \"): \" + \", \".join(matching))\n\n def get_compare_list(self, metadata, data1, data2):\n types1 = data1[1]\n types2 = data2[1]\n\n types = [\"\"] # element at index zero is ignored\n\n for col in range(1, metadata.data_col_end):\n type1 = types1[col]\n type2 = types2[col]\n if (not type1 == \"FILES\") and (not type2 == \"FILES\"):\n types.append(None)\n continue\n data_type = type1\n if not type1 == type2:\n print(\"Data FILES headers mismatch, ignoring col \" + str(col) + \":\", type1, type2)\n data_type = None\n types.append(data_type)\n\n path_strings1 = []\n path_strings2 = []\n len1 = min(len(data1), metadata.data_row_end)\n len2 = min(len(data2), metadata.data_row_end)\n\n end_row = min(len1, len2)\n\n for row in range(metadata.data_row_start, end_row):\n row_data1 = data1[row]\n row_data2 = data2[row]\n row_len1 = min(len(row_data1), metadata.data_col_end)\n row_len2 = min(len(row_data2), metadata.data_col_end)\n col_end = min(row_len1, row_len2)\n for col in range(1, col_end):\n if types[col] == 'FILES':\n elem1 = row_data1[col]\n if elem1:\n for p_string in elem1.split(\",\"):\n p_string = p_string.strip()\n if p_string not in path_strings1:\n path_strings1.append(p_string)\n elem2 = row_data2[col]\n if elem2:\n for p_string in elem2.split(\",\"):\n p_string = p_string.strip()\n if p_string not in path_strings2:\n path_strings2.append(p_string)\n path_strings = []\n for p in path_strings2:\n if p not in path_strings1:\n print(\"File upload spec from output not in input, ignoring: \" + p)\n elif p not in path_strings:\n path_strings.append(p)\n for p in path_strings1:\n if p not in path_strings2:\n print(\"File upload spec from input not in output, ignoring: \" + p)\n\n check_records = []\n base_download = Path(os.path.abspath(self.download))\n base_upload = Path(os.path.abspath(self.upload))\n for p in path_strings:\n record = {\n 'path': p,\n 'upload': Path(base_upload, p),\n 'download': Path(base_download, p)\n }\n check_records.append(record)\n more_records = []\n for r in check_records:\n path = r['path']\n upload = r['upload']\n download = r['download']\n if upload.is_dir() and download.is_dir():\n more_records += self.compare_expand_dir(path, upload, download)\n if download.is_dir() and (not upload.is_dir()):\n print(\"Expected upload path to 
be directory, it is not, ignoring: \", path)\n if upload.is_dir() and (not download.is_dir()):\n print(\"Expected download path to be directory, it is not, ignoring: \", path)\n return check_records + more_records\n\n def compare_expand_dir(self, path, upload, download):\n more_records = []\n upload_children = os.listdir(upload)\n download_children = os.listdir(download)\n names = []\n for name in upload_children:\n if name not in download_children:\n print(\"In upload directory, file/directory not in download directory, ignoring: \" + path + '/' + name)\n else:\n names.append(name)\n for name in download_children:\n if name not in upload_children:\n print(\"In download directory, file/directory not in upload directory, ignoring: \" + path + '/' + name)\n for name in names:\n n_path = path + \"/\" + name\n n_upload = Path(upload, name)\n n_download = Path(download, name)\n if n_upload.is_dir() and n_download.is_dir():\n more_records += self.compare_expand_dir(n_path, n_upload, n_download)\n elif n_download.is_dir() and (not n_upload.is_dir()):\n print(\"Expected upload path to be directory, it is not, ignoring: \", n_path)\n elif n_upload.is_dir() and (not n_download.is_dir()):\n print(\"Expected download path to be directory, it is not, ignoring: \", n_path)\n else:\n record = {\n 'path': n_path,\n 'upload': n_upload,\n 'download': n_download\n }\n more_records.append(record)\n return more_records\n\n @staticmethod\n def compare_file_paths(compare_list):\n update_compare_list = []\n for record in compare_list:\n path = record['path']\n upload = record['upload']\n download = record['download']\n if not (upload.exists() and download.exists()):\n if not upload.exists():\n print(\"File in spreadsheet not found in upload directory, ignoring: \" + path)\n if not download.exists():\n print(\"File in spreadsheet not found in download directory, ignoring: \" + path)\n else:\n update_compare_list.append(record)\n return update_compare_list\n\n def compare_file_checksum(self, compare_list):\n update_compare_list = []\n for record in compare_list:\n path = record['path']\n upload = record['upload']\n download = record['download']\n if upload.is_dir():\n continue\n check_upload = self.md5(upload)\n check_download = self.md5(download)\n if not check_upload == check_download:\n print(\"Checksum mismatch for path: \" + path)\n else:\n update_compare_list.append(record)\n return update_compare_list\n\n @staticmethod\n def md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n\ndef _verify_data_dir(dir_path):\n path = Path(dir_path)\n check_ok = path.exists() and path.is_dir()\n return check_ok\n\n\nif __name__ == '__main__':\n\n argv = sys.argv\n parser = argparse.ArgumentParser(\n description='Build a workflow from given (well formatted) Excel spreadsheet')\n parser.add_argument('proj', type=str, help=\"Project Name\")\n parser.add_argument('exp', type=str, help=\"Experiment Name\")\n parser.add_argument('input', type=str,\n help='Path to input EXCEL file')\n parser.add_argument('output', type=str,\n help='Path to output EXCEL file')\n parser.add_argument('--apikey', type=str, help=\"User's APIKEY\")\n parser.add_argument('--upload', type=str,\n help=\"Path to dir for uploading files; if none, files are not compared\")\n parser.add_argument('--download', type=str,\n help=\"Path to dir for downloaded files; if none, files are not compared\")\n parser.add_argument('--checksum', 
action='store_true',\n                        help=\"In comparing upload/download files, also compare checksum; optional\")\n\n    args = parser.parse_args(argv[1:])\n\n    args.input = os.path.abspath(args.input)\n    args.output = os.path.abspath(args.output)\n\n    print(\"Path to input EXCEL file: \" + args.input)\n    print(\"Path to output EXCEL file: \" + args.output)\n\n    if args.upload:\n        args.upload = os.path.abspath(args.upload)\n        print(\"Path to uploaded files: \" + args.upload)\n    if args.download:\n        args.download = os.path.abspath(args.download)\n        print(\"Path to download files: \" + args.download)\n\n    if args.upload or args.download:\n        if not _verify_data_dir(args.upload):\n            print(\"Path to upload directory is not valid; ignoring.\")\n            args.upload = \"\"\n        if not _verify_data_dir(args.download):\n            print(\"Path to download directory is not valid; ignoring.\")\n            args.download = \"\"\n    ok = True\n    missing = \"\"\n    if not (args.upload or args.download):\n        missing = \"both upload and download\"\n        ok = False\n    elif not args.upload:\n        args.upload = \"\"\n        missing = \"upload\"\n        ok = False\n    elif not args.download:\n        args.download = \"\"\n        missing = \"download\"\n        ok = False\n    if not ok:\n        print(\"To compare files, you must specify both optional arguments upload and download; missing\", missing)\n        print(\"Files compare will be skipped!\")\n    else:\n        print(\"Files and directories on upload and download path will be compared\")\n        if args.checksum:\n            print(\"In addition, file checksums will be computed and compared\")\n\n    ok_to_upload = (args.upload is not None) and (args.download is not None)\n\n    c = Compare()\n    c.set_options(do_files=ok_to_upload, upload=args.upload, download=args.download, checksum=args.checksum)\n    c.compare(args.proj, args.exp, args.input, args.output, apikey=args.apikey)\n","sub_path":"mcetl/cli_utils/compare_spreadsheets.py","file_name":"compare_spreadsheets.py","file_ext":"py","file_size_in_byte":23352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"281452941","text":"#\n# @lc app=leetcode id=206 lang=python\n#\n# [206] Reverse Linked List\n#\n# https://leetcode.com/problems/reverse-linked-list/description/\n#\n# algorithms\n# Easy (59.32%)\n# Total Accepted:    838.9K\n# Total Submissions: 1.4M\n# Testcase Example:  '[1,2,3,4,5]'\n#\n# Reverse a singly linked list.\n# \n# Example:\n# \n# \n# Input: 1->2->3->4->5->NULL\n# Output: 5->4->3->2->1->NULL\n# \n# \n# Follow up:\n# \n# A linked list can be reversed either iteratively or recursively. 
Could you\n# implement both?\n# \n#\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def r_node(self, last_node, node):\n if node.next is None:\n node.next = last_node\n return node\n next_node = node.next\n node.next = last_node\n return self.r_node(node, next_node)\n\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head is None:\n return head\n if head.next is None:\n return head\n next_node = head.next\n head.next = None\n return self.r_node(head, next_node)\n","sub_path":"206.reverse-linked-list.py","file_name":"206.reverse-linked-list.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"189200373","text":"#!/usr/bin/env python\n\n__author__ = 'frankojis'\n\nimport json\nimport threading\nfrom core.exhaust import mongo\nfrom core.fuel.facebook import scraper\nfrom core.engine.nlp import processor\nimport analyzer\n\n\ndef process_stream(data):\n event_data = json.loads(data)\n for key, value in event_data.iteritems():\n if key == \"posts\":\n process_post(value['data'])\n elif key == \"lazycomments\":\n analyzer.lazy_comments(value)\n\n\ndef process_post(posts):\n for post in xrange(len(posts)):\n post_data = dict()\n post_id = posts[post]['id']\n post_data['post_id'] = post_id\n try:\n post_message = posts[post]['message']\n except KeyError:\n post_message = None\n post_data['post_message'] = post_message\n post_data['post_message_sentiment'] = processor.Trainer().get_sentiment(post_message) if post_message is not None else \"NA\"\n post_data['created_time'] = posts[post]['created_time']\n post_data['post_from'] = posts[post]['from']\n post_data['post_type'] = posts[post]['type']\n post_data['source_link'] = posts[post]['link']\n post_data['status_type'] = posts[post]['status_type']\n mongo.Mongo().update_document(\"POSTS\", key='post_id', value=post_id, document=post_data)\n try:\n post_shares = posts[post]['shares']\n except KeyError:\n post_shares = 0\n mongo.Mongo().update_document(\"SHARES\", key='post_id', value=post_id, document=dict(post_id=post_id, shares_count=post_shares))\n try:\n post_likes = posts[post]['likes']\n like_thread = threading.Thread(target=process_post_attributes, args=(post_id, \"likes\", ))\n like_thread.start()\n except KeyError:\n post_likes = 0\n mongo.Mongo().update_document(\"LIKES\", key='post_id', value=post_id, document=dict(post_id=post_id, likes_count=post_likes))\n try:\n post_comments = posts[post]['comments']\n comment_thread = threading.Thread(target=process_post_attributes, args=(post_id, \"comments\", ))\n comment_thread.start()\n except KeyError:\n post_comments = 0\n mongo.Mongo().update_document(\"COMMENTS\", key='post_id', value=post_id, document=dict(post_id=post_id, comments_count=post_comments))\n mongo.Mongo().update_document(\"COMMENT_SENTIMENT_FLAG\", key='post_id', value=post_id, document={'flag': 0})\n\n\ndef process_post_attributes(post_id, attr_type):\n collection_name = attr_type.upper()\n count_key = attr_type + \"_count\"\n data_key = attr_type + \"_data\"\n document_dict = dict()\n document_dict['post_id'] = post_id\n if attr_type == 'comments':\n document_dict['sentiment_analysed'] = 0\n scraper_instance = scraper.Scraper(post_id, None)\n a_count, a_data = scraper_instance.scrape_data(attr_type)\n if a_count is not None:\n document_dict[count_key] = a_count\n 
document_dict[data_key] = a_data\n    else:\n        document_dict[count_key] = 0\n    mongo.Mongo().update_document(collection_name, key='post_id', value=post_id, document=document_dict)\n\n","sub_path":"core/engine/spark/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"334295649","text":"\n\ndef valor_carta(n):\n    if n == 'A':\n        n = 1\n    elif n == 'T':\n        n = 10\n    elif n == 'J':\n        n = 11\n    elif n == 'Q':\n        n = 12\n    elif n == 'K':\n        n = 13\n    else:\n        n = int(n)\n    return n\n\n\ndef valor_palo(n):\n    if n == 'E':\n        n = 1\n    elif n == 'C':\n        n = 2\n    elif n == 'T':\n        n = 3\n    elif n == 'D':\n        n = 4\n    else:\n        n = int(n)\n    return n\n\ndef merge_sort(arr):\n    \n    if len(arr) > 1:\n        \n        mid = len(arr) // 2\n        \n        izquierda = arr[:mid]\n        derecha = arr[mid:]\n\n        \n        merge_sort(izquierda)\n        merge_sort(derecha)\n\n        i = j = k = 0\n        \n        while i < len(izquierda) and j < len(derecha):\n            \n            valor_izquierdo = izquierda[i]\n            valor_derecho = derecha[j]\n            \n            if valor_izquierdo[0] == valor_derecho[0]:\n                if valor_palo(valor_izquierdo[1]) < valor_palo(valor_derecho[1]):\n                    arr[k] = izquierda[i]\n                    i += 1\n                else:\n                    arr[k] = derecha[j]\n                    j += 1\n            \n            elif valor_carta(valor_izquierdo[0]) < valor_carta(valor_derecho[0]):\n                arr[k] = izquierda[i]\n                i += 1\n            else:\n                arr[k] = derecha[j]\n                j += 1\n            k += 1\n\n        \n        while i < len(izquierda):\n            arr[k] = izquierda[i]\n            i += 1\n            k += 1\n        while j < len(derecha):\n            arr[k]=derecha[j]\n            j += 1\n            k += 1\n\nvalor = ['A',2,3,4,5,6,7,8,9,'T','J','Q','K']\n\npalo = ['E','C','T','D']\n\narr = []\n\nvueltas= int(input(''))\n\nfor i in range(0,vueltas):\n    arr2 = input('').split(' ')\n    arr.append(arr2)\n    arr[i].pop(0)\n\nimprimir = ''\nfor i in range(0,vueltas):\n    merge_sort(arr[i])\n    imprimir += ' '.join(arr[i])\n    imprimir += '\\n'\nprint(imprimir)\n","sub_path":"ene-jun-2020/Carlos.Leos/Practica4/Practica4.py","file_name":"Practica4.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"202415708","text":"# database configuration\n# mongodb\nfrom pymongo import MongoClient\n\nMG = MongoClient('127.0.0.1', 27017)\nmongo = MG['AI_test']\n\n# redis\nfrom redis import Redis\n\nredis_cli = Redis(host='127.0.0.1', port=6379, db=6)\n\n# avatar storage directory config\nICON_PATH = 'icon'\nCHAT_PATH = 'chat/rec'\n\n\n# Baidu AI\nfrom aip import AipSpeech\n\n\"\"\" your APPID AK SK \"\"\"\nAPP_ID = '16981700'\nAPI_KEY = 'cHLC0p7dsOUVA0idSWQxV0lf'\nSECRET_KEY = 'VZCXoeuHfViaAA2EThwIHLukAYcT0pf9'\n\nAI_CLIENT = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n\nVOICE = {\n    'vol': 5,\n    'spd': 5,\n    'pit': 6,\n    'per': 4,\n}\n\n\n# Tuling chatbot\nTU_SERVER_URL = 'http://openapi.tuling123.com/openapi/api/v2'\nDATA = {\n\n    \"perception\": {\n        \"inputText\": {\n            \"text\": \"附近的酒店\"\n        }\n    },\n\n    \"userInfo\": {\n        \"apiKey\": \"2213889293634c759484cac88a91c170\",\n        \"userId\": \"110\"\n    }\n}\n","sub_path":"AI_test/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"526017731","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# vim: expandtab tabstop=4 shiftwidth=4\n\nimport sys\nimport re\nimport math\nimport json\n\nfrom StringIO import StringIO\n\nclass Item:\n    \"\"\"\n    Contains values and comments of a define ... 
{ } nagios\n block.\n\n Use to detect type of nagios object file lines.\n \"\"\"\n\n RE_START_BLOCK = re.compile(\"^.*define[ \\t]*([a-z-_]*)[ \\t]*{.*$\")\n RE_END_BLOCK = re.compile(\"^[# \\t]*}.*$\")\n RE_KEY_VAL = re.compile(\"^[^a-zA-Z_\\-]*([^ \\t]*)[ \\t]*(.*)[ \\t]*.*$\")\n RE_COMMENT = re.compile(\"^[ \\t]*#(.*)\\n*$\")\n RE_INACTIVE = re.compile(\"^[ \\t]*[#].*\")\n\n @staticmethod\n def is_blockstart(line):\n \"\"\"\n Is the line starting to define something ?\n \"\"\"\n if Item.RE_START_BLOCK.match(line):\n return True\n return False\n\n @staticmethod\n def is_blockend(line):\n \"\"\"\n Is the line the end of block define ?\n \"\"\"\n if Item.RE_END_BLOCK.match(line):\n return True\n return False\n\n @staticmethod\n def is_confline(line):\n \"\"\"\n Is the line a configuration/comment line ?\n \"\"\"\n if not Item.is_blockstart(line) and not Item.is_blockend(line):\n return True\n return False\n\n def __init__(self, mintabs=3, tabsize=8):\n self.datas = {} # key value, key is nagios' keyword and the value... the value.\n self.active = True # an inactive block is a commented block, but with a valid definition.\n self.type = \"\" # host, service...\n self.comments = \"\"\n self.mintabs = mintabs\n self.tabsize = tabsize\n\n def define(self, val):\n \"\"\"\n Find if the block is an active on\n and get the block's type\n \"\"\"\n if Item.RE_INACTIVE.match(val):\n self.active = False\n\n typeof = Item.RE_START_BLOCK.match(val)\n self.type = typeof.group(1).strip(' \\t\\r\\n')\n return self.active\n\n def try_get_val(self, sort_by):\n \"\"\"\n Get a value from a nagios block (nobject)\n If the key does not exists, will return\n empty strings tuple.\n\n sort_by is an ARRAY and the first valid value\n will be given if [sort_by[x]] exists in nobject.\n \"\"\"\n res = (\"\", \"\")\n for sort in sort_by:\n try:\n val = self.datas[sort]\n res = (sort, val)\n break\n except KeyError:\n pass\n return res\n\n def add(self, line):\n \"\"\"\n Add datas from the block.\n \"\"\"\n com = Item.RE_COMMENT.match(line)\n if com: # if comment...\n comchar = \"\"\n if self.active:\n comchar = \"#\"\n\n kvr = Item.RE_KEY_VAL.match(line) # ...test validity...\n if kvr: # ...yes, take it as a valid one.\n self.datas[comchar + kvr.group(1)] = kvr.group(2)\n else: # ...no, put it in a single data where all ugly comments are.\n self.comments += comchar + com.group(1) + \"\\n\"\n else: # if not comment...\n kvr = Item.RE_KEY_VAL.match(line)\n if kvr: # ...test validity and then take it.\n self.datas[kvr.group(1)] = kvr.group(2)\n else: # ...owmygad, your nagios worked like this ? Should never appear.\n sys.stderr.write(\"/!\\ problem with: \" + line)\n sys.stderr.flush()\n\n def __repr__(self):\n \"\"\"\n Print nagios blocks the correct way.\n\n [mintabs] minimal number of tabs to insert between\n left and right.\n\n [tabsize] is the size the tabulation will appear\n in his spaced equivalent.\n \"\"\"\n output = ''\n\n startchar = \"#\"\n if self.active:\n startchar = \"\"\n\n output += startchar + \"define \" + self.type + \" {\\n\"\n\n keylist = self.datas.keys()\n keylist.sort()\n for i in keylist:\n tabs = 1\n len_i = 0\n try:\n len_i = len(i.decode('utf-8'))\n except:\n len_i = len(i)\n # .<- here is start ........ and end here ->.\n # blah <- took 4 spaces ... 
want end here ->.\n # How much spaces should I have since I want\n # [mintabs] tabs of [tabsize] size,\n # knowing [len(i) == len(\"blah\")] ?\n empty = self.mintabs * self.tabsize - len_i - 1 # here is the answer\n # Then convert computed spaces to tabs\n tabs += self.mintabs + int(math.floor(float(empty) / float(self.tabsize)))\n # And of course, ensure we have at least one space between left and right values\n if tabs < 1:\n tabs = 1\n output += startchar + \"\\t\" + i + \"\\t\" * tabs + self.datas[i] + \"\\n\"\n\n if self.comments != \"\":\n output += startchar + \"\\t\" + self.comments\n output += startchar + \"}\"\n return output\n\n def tojson(self):\n pjson = {\n 'metadatas':{},\n 'datas':{}\n }\n pjson['metadatas']['type'] = self.type\n pjson['metadatas']['active'] = self.active\n for key, val in self.datas.items():\n pjson['datas'][key] = val.split(',')\n return pjson\n\ndef getItems(datas):\n nobjects = []\n in_block = False\n nobj = None\n lines = StringIO(datas).readlines()\n for line in lines:\n if not in_block and Item.is_blockstart(line):\n nobj = Item()\n nobj.define(line)\n in_block = True\n\n elif in_block and Item.is_confline(line):\n nobj.add(line)\n\n elif in_block and Item.is_blockend(line):\n in_block = False\n nobjects.append(nobj)\n\n return nobjects\n","sub_path":"nagios.py","file_name":"nagios.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"557888147","text":"from django.db import models\nfrom django.utils import timezone\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel\nfrom wagtail.wagtailsearch import index\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom district.models import Location\n\n\n@register_snippet\nclass PersonalObj(models.Model):\n personal_code = models.CharField('Лицевой счет', max_length=31)\n name = models.CharField('Имя собственника', max_length=128)\n created = models.DateTimeField('Дата создания', blank=False, default=timezone.now)\n date_lastvisit = models.DateTimeField('Дата последнего входа', blank=True, null=True)\n district = models.ForeignKey('district.Location', verbose_name=\"Прямой родитель\", on_delete=models.SET_NULL, blank=True, null=True)\n\n panels = [\n FieldPanel('personal_code'),\n FieldPanel('name'),\n FieldPanel('created'),\n FieldPanel('date_lastvisit'),\n FieldPanel('district')\n ]\n\n search_fields = [\n index.SearchField('name'),\n index.SearchField('personal_code'),\n ]\n\n class Meta:\n verbose_name = 'Лицевой счет'\n verbose_name_plural = 'Лицевые счета'\n\n def __str__(self):\n return self.personal_code + \" (\" + self.name + \")\"\n","sub_path":"dsk/personalobject/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"324756697","text":"import unittest\nimport numpy as np\n\nfrom robo.models.dngo import DNGO\n\n\nclass TestDNGO(unittest.TestCase):\n\n def setUp(self):\n self.X = np.random.rand(10, 1)\n y = self.X * 2\n self.y = y[:, 0]\n self.model = DNGO(num_epochs=10, burnin_steps=10, chain_length=20)\n self.model.train(self.X, self.y, do_optimize=False)\n\n def test_predict(self):\n X_test = np.random.rand(10, 1)\n\n m, v = self.model.predict(X_test)\n\n assert len(m.shape) == 1\n assert m.shape[0] == X_test.shape[0]\n assert len(v.shape) == 1\n assert v.shape[0] == X_test.shape[0]\n\n def test_marginal_log_likelihood(self):\n theta = np.array([np.log(1), 
np.log(1000)])\n mll = self.model.marginal_log_likelihood(theta)\n\n def test_negative_mll(self):\n theta = np.array([np.log(1), np.log(1000)])\n mll = self.model.negative_mll(theta)\n\n def test_get_incumbent(self):\n inc, inc_val = self.model.get_incumbent()\n\n b = np.argmin(self.y)\n np.testing.assert_almost_equal(inc, self.X[b], decimal=5)\n assert inc_val == self.y[b]\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_models/test_dngo.py","file_name":"test_dngo.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"2252893","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('signup/', views.signup, name='signup'),\n path('login/', views.login_user, name='login'),\n path('logout/', views.logout_user, name='logout'),\n path('profile/', views.profile_view, name='profile'),\n path('profile/update', views.profile_update, name='profile_update'),\n ]\n","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"249063661","text":"#\n# Copyright (c) 2018 Wind River Systems, Inc.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_service import service\nfrom oslo_service import wsgi\nimport pecan\n\nfrom inventory.api import config\nfrom inventory.api import middleware\nfrom inventory.common.i18n import _\nfrom inventory.common import policy\n\nCONF = cfg.CONF\n\nLOG = log.getLogger(__name__)\n\n_launcher = None\n_launcher_pxe = None\n\n\ndef get_pecan_config():\n # Set up the pecan configuration\n filename = config.__file__.replace('.pyc', '.py')\n return pecan.configuration.conf_from_file(filename)\n\n\ndef setup_app(config=None):\n policy.init_enforcer()\n\n if not config:\n config = get_pecan_config()\n\n pecan.configuration.set_config(dict(config), overwrite=True)\n app_conf = dict(config.app)\n\n app = pecan.make_app(\n app_conf.pop('root'),\n debug=CONF.debug,\n logging=getattr(config, 'logging', {}),\n force_canonical=getattr(config.app, 'force_canonical', True),\n guess_content_type_from_ext=False,\n wrap_app=middleware.ParsableErrorMiddleware,\n **app_conf\n )\n return app\n\n\ndef load_paste_app(app_name=None):\n \"\"\"Loads a WSGI app from a paste config file.\"\"\"\n if app_name is None:\n app_name = cfg.CONF.prog\n\n loader = wsgi.Loader(cfg.CONF)\n app = loader.load_app(app_name)\n return app\n\n\ndef app_factory(global_config, **local_conf):\n return setup_app()\n\n\ndef serve(api_service, conf, workers=1):\n global _launcher\n\n if _launcher:\n raise RuntimeError(_('serve() _launcher can only be called once'))\n\n _launcher = service.launch(conf, api_service, workers=workers)\n\n\ndef serve_pxe(api_service, conf, workers=1):\n global _launcher_pxe\n\n if _launcher_pxe:\n raise RuntimeError(_('serve() _launcher_pxe can only be called once'))\n\n _launcher_pxe = service.launch(conf, api_service, workers=workers)\n\n\ndef wait():\n _launcher.wait()\n\n\ndef wait_pxe():\n _launcher_pxe.wait()\n","sub_path":"inventory/inventory/inventory/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"321214848","text":"# Copyright 2021 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may 
not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n\nimport os\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\nfrom flasc.dataframe_operations import (\n    dataframe_filtering as dff,\n    dataframe_manipulations as dfm,\n)\n\n\ndef load_data():\n    # Load the data\n    print(\"Loading .ftr data. This may take a minute or two...\")\n    root_path = os.path.dirname(os.path.abspath(__file__))\n    data_path = os.path.join(root_path, \"data\", \"01_in_common_df_format\")\n    return pd.read_feather(os.path.join(data_path, \"scada_data_60s.ftr\"))\n\n\nif __name__ == \"__main__\":\n    # In this script, we do some very basic filtering steps, such as filtering\n    # for negative wind speeds and power productions. We also filter the data\n    # by one or multiple variables that inherently already tell us if data\n    # is good or bad according to the data logger/turbine itself. In our case,\n    # this self-flagged variable is \"is_operation_normal_00x\".\n\n    # Load data and get properties\n    df = load_data()\n    num_turbines = dfm.get_num_turbines(df)\n\n    root_path = os.path.dirname(os.path.abspath(__file__))\n    out_path = os.path.join(root_path, \"data\", \"02_basic_filtered\")\n    figs_path = os.path.join(out_path, \"figures\")\n    os.makedirs(figs_path, exist_ok=True)\n\n    # Basic filters: address self flags and obviously wrong points\n    for ti in range(num_turbines):\n        # Specify filtering conditions\n        conds = [\n            ~df[\"is_operation_normal_{:03d}\".format(ti)], # Self-status\n            df[\"ws_{:03d}\".format(ti)] <= 0.0, # Non-negative wind speeds\n            df[\"pow_{:03d}\".format(ti)] <= 0.0,\n        ] # Non-negative powers\n\n        # Retrieve a single, combined condition array\n        conds_combined = conds[0]\n        for cond in conds:\n            conds_combined = conds_combined | cond\n\n        # Plot time vs filtered data\n        fig, ax = dff.plot_highlight_data_by_conds(df, conds, ti)\n        ax.legend(\n            [\"All data\", \"Bad self-status\", \"Negative WS\", \"Negative power\"]\n        )\n        fp = os.path.join(figs_path, \"basic_filtering_%03d.png\" % ti)\n        print(\"Saving figure to {:s} for turbine {:03d}.\".format(fp, ti))\n        fig.savefig(fp, dpi=200)\n        plt.close(fig)\n\n        # Apply filtering to dataframe\n        df = dff.df_mark_turbdata_as_faulty(\n            df, conds_combined, ti, verbose=True\n        )\n\n    # Remove unnecessary columns after filtering\n    self_status_cols = [\n        \"is_operation_normal_%03d\" % ti for ti in range(num_turbines)\n    ]\n    df = df.drop(columns=self_status_cols) # Remove self status columns\n\n    # Save as a single file and as batch files\n    fout = os.path.join(out_path, \"scada_data_60s.ftr\")\n    print(\"Saving filtered data to {:s}.\".format(fout))\n    os.makedirs(out_path, exist_ok=True)\n    df = df.reset_index(drop=(\"time\" in df.columns))\n    df.to_feather(fout)\n","sub_path":"examples_artificial_data/raw_data_processing/a_02_basic_filters.py","file_name":"a_02_basic_filters.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"521114677","text":"# Copyright (c) 2020 brainlife.io\n#\n# Author: Amin Saberi\n\n# set up environment\nimport json\nimport nilearn.image as nlimg\n\n# 
load inputs from config.json\nwith open('config.json') as config_json:\n\tconfig = json.load(config_json)\n\n# Load into variables predefined code inputs\nin_filename = str(config['bold'])\n \n# Get the number of volumes to be removed\nn_vols = int(config['n_vols'])\n\n# Load the input fMRI image and remove the first n_vols volumes\nin_func_img = nlimg.load_img(in_filename)\nout_func_img = in_func_img.slicer[:,:,:,n_vols:]\n\n# save the output file (with the new resolution) to disk\nout_func_img.to_filename('out_dir/bold.nii.gz')\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"90965716","text":"import numpy as np\n\nfrom mot.common.gaussian_density import GaussianDensity\nfrom mot.common.state import Gaussian\nfrom mot.configs import SensorModelConfig\nfrom mot.measurement_models import MeasurementModel\nfrom mot.motion_models import MotionModel\nfrom .base_single_object_tracker import SingleObjectTracker\n\n\nclass NearestNeighbourTracker(SingleObjectTracker):\n def __init__(\n self,\n gating_size: float,\n meas_model: MeasurementModel,\n sensor_model: SensorModelConfig,\n motion_model: MotionModel,\n *args,\n **kwargs,\n ) -> None:\n self.meas_model = meas_model\n self.sensor_model = sensor_model\n self.motion_model = motion_model\n self.gating_size = gating_size\n super().__init__()\n\n @property\n def name(self):\n return \"Nearest Neighbout SOT\"\n\n def estimate(self, initial_state: Gaussian, measurements):\n \"\"\"Tracks a single object using nearest neighbour association\n\n For each filter recursion iteration implemented next steps:\n 1) gating\n 2) calculates the predicted likelihood for each measurement in the gate\n 3) find the nearest neighbour measurement\n 4) compares the weight of the missed detection hypotheses and\n the weight of the object detection hypothesis created using\n the nearest neigbour measurement\n 5) if the object detection hypothesis using the nearest neighbour\n measurement has the hightes weight, perform Kalman update\n 6) extract object state estimate\n 7) prediction\n \"\"\"\n prev_state = initial_state\n estimations = [None for x in range(len(measurements))]\n for timestep, measurements_in_scene in enumerate(measurements):\n estimations[timestep] = self.estimation_step(\n predicted_state=prev_state,\n current_measurements=np.array(measurements_in_scene),\n )\n prev_state = GaussianDensity.predict(state=estimations[timestep], motion_model=self.motion_model, dt=1.0)\n return tuple(estimations)\n\n def estimation_step(self, predicted_state: Gaussian, current_measurements: np.ndarray):\n # 1. Gating\n\n (meas_in_gate, _) = GaussianDensity.ellipsoidal_gating(\n state_prev=predicted_state,\n z=current_measurements,\n measurement_model=self.meas_model,\n gating_size=self.gating_size,\n )\n if meas_in_gate.size == 0: # number of hypothesis\n current_step_state = predicted_state\n\n else:\n # 2. Calculate the predicted likelihood for each measurement in the gate\n\n predicted_likelihood = GaussianDensity.predict_loglikelihood(\n state_pred=predicted_state,\n z=meas_in_gate,\n measurement_model=self.meas_model,\n )\n\n # Hypothesis evaluation\n # detection\n w_theta_factor = np.log(self.sensor_model.P_D / self.sensor_model.intensity_c)\n w_theta_k = predicted_likelihood + w_theta_factor\n # misdetection\n w_theta_0 = 1 - self.sensor_model.P_D\n\n # 3. 
Compare the weight of the missed detection\n            # hypothesis and the weight of the object detection hypothesis\n            # using the nearest neighbour measurement\n            max_k = np.argmax(w_theta_k)\n            max_w_theta_k = w_theta_k[max_k]\n\n            if w_theta_0 < max_w_theta_k:\n                # nearest neighbour measurement\n                z_NN = meas_in_gate[max_k]\n                z_NN = np.atleast_2d(z_NN)\n                current_step_state = GaussianDensity.update(\n                    state_pred=predicted_state,\n                    z=z_NN,\n                    measurement_model=self.meas_model,\n                )\n            else:\n                current_step_state = predicted_state\n        estimation = current_step_state\n        return estimation\n","sub_path":"src/mot/trackers/single_object_trackers/nearest_neighbour_tracker.py","file_name":"nearest_neighbour_tracker.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"500105010","text":"from __future__ import print_function\n\nimport os\nimport numpy as np\nimport cv2\n\nfrom preprocess import elastic_transform \n\ndata_path = './'\n\nimage_rows = 420\nimage_cols = 580\n\n\ndef create_train_data():\n\ttrain_data_path = os.path.join(data_path, 'train')\n\timages = os.listdir(train_data_path)\n\ttotal = len(images) // 2\n\n\timgs = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)\n\timgs_mask = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)\n\n\timgs_t = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)\n\timgs_mask_t = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)\n\n\ti = 0\n\tprint('-'*30)\n\tprint('Creating training images...')\n\tprint('-'*30)\n\tfor image_name in images:\n\t\tif 'mask' in image_name:\n\t\t\tcontinue\n\t\timage_mask_name = image_name.split('.')[0] + '_mask.tif'\n\t\timg = cv2.imread(os.path.join(train_data_path, image_name), cv2.IMREAD_GRAYSCALE)\n\t\timg_mask = cv2.imread(os.path.join(train_data_path, image_mask_name), cv2.IMREAD_GRAYSCALE)\n\n\t\timg = np.array([img])\n\t\timg_mask = np.array([img_mask])\n\n\t\timgs[i] = img\n\t\timgs_mask[i] = img_mask\n\n\t\t# do elastic transform\n\t\timg_new = cv2.imread(os.path.join(train_data_path, image_name), -1)\n\t\timg_mask_new = cv2.imread(os.path.join(train_data_path, image_mask_name), -1)\n\t\timg_merge = np.concatenate((img_new[...,None], img_mask_new[...,None]), axis=2)\n\t\timg_merge_t = elastic_transform(img_merge, img_merge.shape[1] * 2, img_merge.shape[1] * 0.08, img_merge.shape[1] * 0.08)\n\t\timgs_t[i] = img_merge_t[...,0]\n\t\timgs_mask_t[i] = img_merge_t[...,1]\n\n\t\tif i % 100 == 0:\n\t\t\tprint('Done: {0}/{1} images'.format(i, total))\n\t\ti += 1\n\tprint('Loading done.')\n\n\tnp.save('imgs_train.npy', imgs)\n\tnp.save('imgs_mask_train.npy', imgs_mask)\n\tprint('Saving to .npy files done.')\n\n\tnp.save('imgs_train_t.npy', imgs_t)\n\tnp.save('imgs_mask_train_t.npy', imgs_mask_t)\n\tprint('Saving of elastic transform files to .npy done.')\n\ndef load_train_data():\n\timgs_train = np.load('imgs_train.npy')\n\timgs_mask_train = np.load('imgs_mask_train.npy')\n\timgs_train_t = np.load('imgs_train_t.npy')\n\timgs_mask_train_t = np.load('imgs_mask_train_t.npy')\n\timgs_res = np.vstack((imgs_train, imgs_train_t))\n\timgs_res_mask = np.vstack((imgs_mask_train, imgs_mask_train_t))\n\treturn imgs_res, imgs_res_mask\n\n\ndef create_test_data():\n\ttrain_data_path = os.path.join(data_path, 'test')\n\timages = os.listdir(train_data_path)\n\ttotal = len(images)\n\n\timgs = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)\n\timgs_id = np.ndarray((total, ), dtype=np.int32)\n\n\ti = 
0\n\tprint('-'*30)\n\tprint('Creating test images...')\n\tprint('-'*30)\n\tfor image_name in images:\n\t\timg_id = int(image_name.split('.')[0])\n\t\timg = cv2.imread(os.path.join(train_data_path, image_name), cv2.IMREAD_GRAYSCALE)\n\n\t\timg = np.array([img])\n\n\t\timgs[i] = img\n\t\timgs_id[i] = img_id\n\n\t\tif i % 100 == 0:\n\t\t\tprint('Done: {0}/{1} images'.format(i, total))\n\t\ti += 1\n\tprint('Loading done.')\n\n\tnp.save('imgs_test.npy', imgs)\n\tnp.save('imgs_id_test.npy', imgs_id)\n\tprint('Saving to .npy files done.')\n\n\ndef load_test_data():\n\timgs_test = np.load('imgs_test.npy')\n\timgs_id = np.load('imgs_id_test.npy')\n\treturn imgs_test, imgs_id\n\nif __name__ == '__main__':\n\tcreate_train_data()\n\tcreate_test_data()\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"604455899","text":"#################################\n## NETWORK MODEL ##\n#################################\n\nNETWORK_MODEL = \"swta\"\nNETWORK_PARAMS = dict( # overwrite model default settings\n) \n\n\n#################################\n## SIMULATION CHAIN ##\n#################################\n\n# Params:\n# data: path to data file (string)\n# simTime: simulation time in sec (float)\n# learning: enable/disable learning (bool)\n# result: path to result file (string)\n# init: path to data file to init model (string)\nSIMULATION_CHAIN = [\n dict(data=\"data/training\", simTime= 400., learning=True, result=\"results/training\"),\n]\n\n\n#################################\n## SIMULATION PARAMETERS ##\n#################################\n\nDT = 0.001 # simulation time step in sec\nSIMULATION_SEED = 42\n\n#################################\n## SIMULATION VISUALIZATION ##\n#################################\n\nSHOW_LEARNING_PROGRESS = True # if True show network input weights every 10% of simulation time\n\n","sub_path":"simulations/oriented_bars/simulation_settings.py","file_name":"simulation_settings.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"608678334","text":"print('Loading Modules..')\nimport pandas as pd\nimport numpy as np\nimport os\nimport json\nimport sys\nfrom glob import glob\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nimport keras\nimport keras_preprocessing.image as KPImage\nfrom PIL import Image\nimport pydicom\nfrom keras.applications.resnet50 import ResNet50 as PTModel, preprocess_input\nfrom keras.layers import (Input, BatchNormalization, GlobalAveragePooling2D,\n Dropout, Dense)\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n\n# Load data \nprint('Loading Data..')\nimage_bbox_df = pd.read_csv('./input/stage_1_image_bbox_full.csv')\nwith open('params.json') as f:\n params = json.load(f)\n\n# Encode labels\nprint('Encoding labels..')\nclass_enc = LabelEncoder()\nimage_bbox_df['class_idx'] = class_enc.fit_transform(image_bbox_df['class'])\noh_enc = OneHotEncoder(sparse=False)\nimage_bbox_df['class_vec'] = oh_enc.fit_transform(\n image_bbox_df['class_idx'].values.reshape(-1, 1)).tolist()\n\n# Create train/validation dsets\nprint('Creating train/validation sets..')\ntrain_df, val_df = train_test_split(image_bbox_df,\n 
stratify=image_bbox_df['class_idx'])\n\n# Balance training data\nprint('Balancing Data..')\ntrain_df = train_df.groupby('class_idx'). \\\n apply(lambda x: x.sample(params['TRAIN_SIZE'] // 3)). \\\n reset_index(drop=True)\n\n\n# Fix Keras for DICOM\ndef read_dicom(infile):\n img = pydicom.read_file(infile).pixel_array\n return img / img.max()\n\n\nclass MedicalPIL:\n @staticmethod\n def open(infile):\n if infile.endswith('.dcm'):\n char_slice = read_dicom(infile)\n int_slice = (255 * char_slice).clip(0, 255).astype(np.uint8)\n return Image.fromarray(int_slice)\n return Image.open(infile)\n\n fromarray = Image.fromarray\n\n\nKPImage.pil_image = MedicalPIL\n\n# Prepare datasets\nprint('Preparing Data..')\nimg_gen_params = dict(horizontal_flip=True,\n height_shift_range=0.05,\n width_shift_range=0.02,\n rotation_range=3.0,\n shear_range=0.01,\n zoom_range=0.05,\n preprocessing_function=preprocess_input\n )\nimg_gen = KPImage.ImageDataGenerator(**img_gen_params)\n\n\ndef flow_from_dataframe(img_data_gen, in_df, path_col, y_col,\n seed=None, **dflow_args):\n base_dir = os.path.dirname(in_df[path_col].values[0])\n df_gen = img_data_gen.flow_from_directory(base_dir,\n class_mode='sparse',\n seed=seed,\n **dflow_args)\n df_gen.filenames = in_df[path_col].values\n df_gen.classes = np.stack(in_df[y_col].values, 0)\n df_gen.samples = in_df.shape[0]\n df_gen.n = in_df.shape[0]\n df_gen._set_index_array()\n df_gen.directory = '' # since we have the full path\n print('Reinserting dataframe: {} images'.format(in_df.shape[0]))\n return df_gen\n\n\n# For training\ntrain_gen = flow_from_dataframe(img_gen, train_df,\n path_col='path',\n y_col='class_vec',\n target_size=params['IMG_SIZE'],\n color_mode='rgb',\n batch_size=params['BATCH_SIZE'])\n# For validation\nval_gen = flow_from_dataframe(img_gen, val_df,\n path_col='path',\n y_col='class_vec',\n target_size=params['IMG_SIZE'],\n color_mode='rgb',\n batch_size=256)\n# For test\nvalid_X, valid_Y = next(flow_from_dataframe(img_gen, val_df,\n path_col='path',\n y_col='class_vec',\n target_size=params['IMG_SIZE'],\n color_mode='rgb',\n batch_size=params['TEST_SIZE']))\n\n# Build model\nprint('Building Model Structure..')\nt_x, t_y = next(train_gen)\n\n# Base\nbase_model = PTModel(input_shape=t_x.shape[1:], include_top=False)\nbase_model.trainable = False\n\n# Attentional\nbase_shape = base_model.get_output_shape_at(0)[1:]\nlayers = [Input(base_shape, name='feature_input')]\nlayers.append(BatchNormalization()(layers[-1]))\nlayers.append(GlobalAveragePooling2D()(layers[-1]))\nlayers.append(Dropout(params['DROPOUT'])(layers[-1]))\nlayers.append(Dense(params['DENSE_COUNT'], activation='elu')(layers[-1]))\nlayers.append(Dropout(params['DROPOUT'])(layers[-1]))\nlayers.append(Dense(t_y.shape[1], activation='softmax')(layers[-1]))\nattn_model = Model(inputs=[layers[0]], outputs=[layers[-1]],\n name='trained_model')\n\n# Stitch Models Together\nmodel = Sequential(name='combined_model')\nmodel.add(base_model)\nmodel.add(attn_model)\nmodel.compile(optimizer=Adam(lr=params['LEARNING_RATE']),\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n\n# Set up training callbacks\nweight_file = \"lung_opacity_weights.best.hd5\"\ncheckpoint = ModelCheckpoint(weight_file, verbose=1, save_best_only=True,\n save_weights_only=True)\nreduceLR = ReduceLROnPlateau(factor=0.8, verbose=1, cooldown=5, min_lr=0.0001)\nearlystop = EarlyStopping(patience=10)\ncallbacks = [checkpoint, reduceLR, earlystop]\n\n# Train model\nif 'train' in sys.argv:\n print('Fitting Model')\n 
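# NOTE (added comment): fit_generator is the Keras 2 API for generator-based training; epochs=20 and workers=2 are the author's fixed choices here, not tuned values.\n 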
model.fit_generator(train_gen, validation_data=(valid_X, valid_Y),\n epochs=20, callbacks=callbacks, workers=2)\nelse:\n print('Not fitting model')\n\n# Save model\nmodel.load_weights(weight_file)\nmodel.save('full_model.h5')\n\n# Make prediction\ninputdir = './input'\ntest_dicom_dir = '/'.join([inputdir, 'stage_1_test_images'])\n\nsub_df = pd.DataFrame({'path': glob(os.path.join(test_dicom_dir, '*.dcm'))})\nsub_df['patientId'] = sub_df['path'].map(\n lambda x: os.path.splitext(os.path.basename(x))[0])\n\nsub_gen = flow_from_dataframe(img_gen, sub_df,\n path_col='path',\n y_col='patientId',\n target_size=params['IMG_SIZE'],\n color_mode='rgb',\n batch_size=params['BATCH_SIZE'],\n shuffle=False)\n\nsteps = 2*sub_df.shape[0]//params['BATCH_SIZE']\nout_ids, out_vec = [], []\nprint(\"Making prediction..\")\nfor _, (t_x, t_y) in zip(tqdm(range(steps)), sub_gen):\n out_vec += [model.predict(t_x)]\n out_ids += [t_y]\nout_vec = np.concatenate(out_vec, 0)\nout_ids = np.concatenate(out_ids, 0)\n\npred_df = pd.DataFrame(out_vec, columns=class_enc.classes_)\npred_df['patientId'] = out_ids\npred_avg_df = pred_df.groupby('patientId').agg('mean').reset_index()\nprint(\"Saving submission..\")\npred_avg_df['PredictionString'] = pred_avg_df['Lung Opacity'].map(\n lambda x: ('%2.2f 0 0 1024 1024' % x) if x>0.5 else '')\nsub_file = 'submission.csv'\npred_avg_df[['patientId', 'PredictionString']].to_csv(sub_file, index=False)\nprint(\"Submission saved as\", sub_file)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"479571718","text":"# Copyright 2020 Curtin University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Author: Richard Hosking\n\n\nimport os\nfrom datetime import timedelta\nfrom unittest.mock import MagicMock, patch\n\nimport pendulum\nfrom click.testing import CliRunner\n\nfrom oaebu_workflows.config import test_fixtures_folder\nfrom oaebu_workflows.workflows.oapen_workflow import OapenWorkflow, OapenWorkflowRelease\nfrom observatory.platform.utils.file_utils import load_jsonl\nfrom observatory.platform.utils.gc_utils import (\n run_bigquery_query,\n)\nfrom observatory.platform.utils.test_utils import (\n ObservatoryEnvironment,\n ObservatoryTestCase,\n Table,\n bq_load_tables,\n make_dummy_dag,\n)\nfrom observatory.platform.utils.workflow_utils import (\n make_dag_id,\n)\n\n\nclass TestOapenWorkflow(ObservatoryTestCase):\n \"\"\"\n Test the OapenWorkflow class.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.org_name = \"OAPEN Press\"\n self.gcp_project_id = \"project_id\"\n self.data_location = os.getenv(\"TESTS_DATA_LOCATION\")\n\n # Release Object Defaults for reference\n self.ao_gcp_project_id = \"academic-observatory\"\n\n @patch(\"oaebu_workflows.workflows.oapen_workflow.OapenWorkflow.make_release\")\n @patch(\"observatory.platform.utils.gc_utils.select_table_shard_dates\")\n def test_cleanup(self, 
mock_sel_table_suffixes, mock_mr):\n mock_sel_table_suffixes.return_value = [pendulum.datetime(2021, 1, 1)]\n with CliRunner().isolated_filesystem():\n wf = OapenWorkflow()\n\n mock_mr.return_value = OapenWorkflowRelease(\n release_date=pendulum.datetime(2021, 1, 1),\n gcp_project_id=self.gcp_project_id,\n )\n\n release = wf.make_release(execution_date=pendulum.datetime(2021, 1, 1))\n wf.cleanup(release)\n\n def test_dag_structure(self):\n\n with CliRunner().isolated_filesystem():\n wf = OapenWorkflow()\n dag = wf.make_dag()\n self.assert_dag_structure(\n {\n \"oapen_irus_uk_oapen_press_sensor\": [\"check_dependencies\"],\n \"oapen_metadata_sensor\": [\"check_dependencies\"],\n \"check_dependencies\": [\"create_onix_formatted_metadata_output_tasks\"],\n \"create_onix_formatted_metadata_output_tasks\": [\"create_oaebu_book_product_table\"],\n \"create_oaebu_book_product_table\": [\"export_oaebu_table.book_product_list\"],\n \"export_oaebu_table.book_product_list\": [\"export_oaebu_table.book_product_metrics\"],\n \"export_oaebu_table.book_product_metrics\": [\"export_oaebu_table.book_product_metrics_country\"],\n \"export_oaebu_table.book_product_metrics_country\": [\n \"export_oaebu_table.book_product_metrics_institution\"\n ],\n \"export_oaebu_table.book_product_metrics_institution\": [\"export_oaebu_table.institution_list\"],\n \"export_oaebu_table.institution_list\": [\"export_oaebu_table.book_product_metrics_city\"],\n \"export_oaebu_table.book_product_metrics_city\": [\n \"export_oaebu_table.book_product_metrics_referrer\"\n ],\n \"export_oaebu_table.book_product_metrics_referrer\": [\n \"export_oaebu_table.book_product_metrics_events\"\n ],\n \"export_oaebu_table.book_product_metrics_events\": [\n \"export_oaebu_table.book_product_publisher_metrics\"\n ],\n \"export_oaebu_table.book_product_publisher_metrics\": [\n \"export_oaebu_table.book_product_subject_bic_metrics\"\n ],\n \"export_oaebu_table.book_product_subject_bic_metrics\": [\n \"export_oaebu_table.book_product_subject_bisac_metrics\"\n ],\n \"export_oaebu_table.book_product_subject_bisac_metrics\": [\n \"export_oaebu_table.book_product_subject_thema_metrics\"\n ],\n \"export_oaebu_table.book_product_subject_thema_metrics\": [\n \"export_oaebu_table.book_product_year_metrics\"\n ],\n \"export_oaebu_table.book_product_year_metrics\": [\n \"export_oaebu_table.book_product_subject_year_metrics\"\n ],\n \"export_oaebu_table.book_product_subject_year_metrics\": [\n \"export_oaebu_table.book_product_author_metrics\"\n ],\n \"export_oaebu_table.book_product_author_metrics\": [\"cleanup\"],\n \"cleanup\": [],\n },\n dag,\n )\n\n\nclass TestOapenWorkflowFunctional(ObservatoryTestCase):\n \"\"\"Functionally test the workflow.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.timestamp = pendulum.now()\n self.oapen_table_id = \"oapen\"\n\n self.data_location = os.getenv(\"TEST_GCP_DATA_LOCATION\")\n\n self.ao_gcp_project_id = \"academic-observatory\"\n self.oapen_metadata_dataset_id = \"oapen\"\n self.oapen_metadata_table_id = \"metadata\"\n self.public_book_metadata_dataset_id = \"observatory\"\n self.public_book_metadata_table_id = \"book\"\n\n self.org_name = OapenWorkflow.ORG_NAME\n self.gcp_project_id = os.getenv(\"TEST_GCP_PROJECT_ID\")\n\n self.gcp_dataset_id = \"oaebu\"\n self.irus_uk_dag_id_prefix = \"oapen_irus_uk\"\n self.irus_uk_table_id = \"oapen_irus_uk\"\n\n self.irus_uk_dataset_id = \"fixtures\"\n\n def setup_fake_data(self, settings_dataset_id: str, release_date: 
pendulum.DateTime):\n country = load_jsonl(test_fixtures_folder(\"onix_workflow\", \"country.jsonl\"))\n schema_path = test_fixtures_folder(\"onix_workflow\", \"schema\")\n tables = [\n Table(\n \"country\",\n False,\n settings_dataset_id,\n country,\n \"country\",\n schema_path,\n ),\n ]\n\n bq_load_tables(\n tables=tables, bucket_name=self.gcp_bucket_name, release_date=release_date, data_location=self.data_location\n )\n\n def test_run_workflow_tests(self):\n \"\"\"Functional test of the OAPEN workflow\"\"\"\n\n # Setup Observatory environment\n env = ObservatoryEnvironment(self.gcp_project_id, self.data_location, enable_api=False)\n org_name = self.org_name\n\n # Create datasets\n oaebu_intermediate_dataset_id = env.add_dataset(prefix=\"oaebu_intermediate\")\n oaebu_output_dataset_id = env.add_dataset(prefix=\"oaebu\")\n oaebu_onix_dataset_id = env.add_dataset(prefix=\"oaebu_onix_dataset\")\n oaebu_elastic_dataset_id = env.add_dataset(prefix=\"data_export\")\n oaebu_settings_dataset_id = env.add_dataset(prefix=\"settings\")\n\n # Create the Observatory environment and run tests\n with env.create(task_logging=True):\n self.gcp_bucket_name = env.transform_bucket\n\n # Setup workflow\n start_date = pendulum.datetime(year=2021, month=5, day=9)\n workflow = OapenWorkflow(\n oaebu_onix_dataset=oaebu_onix_dataset_id,\n oaebu_dataset=oaebu_output_dataset_id,\n oaebu_intermediate_dataset=oaebu_intermediate_dataset_id,\n oaebu_elastic_dataset=oaebu_elastic_dataset_id,\n irus_uk_dataset_id=self.irus_uk_dataset_id,\n start_date=start_date,\n country_project_id=self.gcp_project_id,\n country_dataset_id=oaebu_settings_dataset_id,\n )\n\n # Override sensor grace period and dag check\n for sensor in workflow.operators[0]:\n sensor.grace_period = timedelta(seconds=1)\n sensor.check_exists = False\n\n # Make DAG\n workflow_dag = workflow.make_dag()\n\n # If the DAG you are monitoring doesn't exist in dagrun database, it will return success to skip waiting.\n expected_state = \"success\"\n with env.create_dag_run(workflow_dag, start_date):\n ti = env.run_task(f\"{make_dag_id(self.irus_uk_dag_id_prefix, org_name)}_sensor\")\n self.assertEqual(expected_state, ti.state)\n\n ti = env.run_task(f\"oapen_metadata_sensor\")\n self.assertEqual(expected_state, ti.state)\n\n # Run Dummy Dags\n expected_state = \"success\"\n execution_date = pendulum.datetime(year=2021, month=5, day=16)\n release_date = pendulum.datetime(year=2021, month=5, day=22)\n\n # Setup fake data\n self.setup_fake_data(oaebu_settings_dataset_id, release_date)\n\n dag = make_dummy_dag(make_dag_id(self.irus_uk_dag_id_prefix, org_name), execution_date)\n with env.create_dag_run(dag, execution_date):\n # Running all of a DAGs tasks sets the DAG to finished\n ti = env.run_task(\"dummy_task\")\n self.assertEqual(expected_state, ti.state)\n\n dag = make_dummy_dag(\"oapen_metadata\", execution_date)\n with env.create_dag_run(dag, execution_date):\n # Running all of a DAGs tasks sets the DAG to finished\n ti = env.run_task(\"dummy_task\")\n self.assertEqual(expected_state, ti.state)\n\n # Run end to end tests for the DAG\n with env.create_dag_run(workflow_dag, execution_date):\n # Test that sensors go into 'success' state as the DAGs that they are waiting for have finished\n ti = env.run_task(f\"{make_dag_id(self.irus_uk_dag_id_prefix, org_name)}_sensor\")\n self.assertEqual(expected_state, ti.state)\n\n ti = env.run_task(f\"oapen_metadata_sensor\")\n self.assertEqual(expected_state, ti.state)\n\n # Check dependencies\n ti = 
env.run_task(\"check_dependencies\")\n self.assertEqual(expected_state, ti.state)\n\n # Mock make_release\n workflow.make_release = MagicMock(\n return_value=OapenWorkflowRelease(\n release_date=release_date,\n gcp_project_id=self.gcp_project_id,\n )\n )\n\n # Format OAPEN Metadata like ONIX to enable the next steps\n ti = env.run_task(workflow.create_onix_formatted_metadata_output_tasks.__name__)\n self.assertEqual(expected_state, ti.state)\n\n # Create oaebu output tables\n ti = env.run_task(workflow.create_oaebu_book_product_table.__name__)\n self.assertEqual(expected_state, ti.state)\n\n # Export oaebu elastic tables\n export_tables = [\n \"book_product_list\",\n \"book_product_metrics\",\n \"book_product_metrics_country\",\n \"book_product_metrics_institution\",\n \"institution_list\",\n \"book_product_metrics_city\",\n \"book_product_metrics_referrer\",\n \"book_product_metrics_events\",\n \"book_product_publisher_metrics\",\n \"book_product_subject_bic_metrics\",\n \"book_product_subject_bisac_metrics\",\n \"book_product_subject_thema_metrics\",\n \"book_product_year_metrics\",\n \"book_product_subject_year_metrics\",\n \"book_product_author_metrics\",\n ]\n\n for table in export_tables:\n ti = env.run_task(f\"{workflow.export_oaebu_table.__name__}.{table}\")\n self.assertEqual(expected_state, ti.state)\n\n # Test conditions\n release_suffix = release_date.strftime(\"%Y%m%d\")\n\n # Check records in book_product and book_product_list match\n sql = (\n f\"SELECT COUNT(*) from {self.gcp_project_id}.{oaebu_output_dataset_id}.book_product{release_suffix}\"\n )\n records = run_bigquery_query(sql)\n count_book_product = len(records)\n\n sql = f\"SELECT COUNT(*) from {self.gcp_project_id}.{oaebu_elastic_dataset_id}.{self.gcp_project_id.replace('-', '_')}_book_product_list{release_suffix}\"\n records = run_bigquery_query(sql)\n count_book_product_list = len(records)\n\n self.assertEqual(count_book_product, count_book_product_list)\n\n # Ensure there are no duplicates\n sql = f\"\"\" SELECT\n count\n FROM(SELECT \n COUNT(*) as count\n FROM {self.gcp_project_id}.{oaebu_elastic_dataset_id}.{self.gcp_project_id.replace('-', '_')}_book_product_metrics{release_suffix} \n GROUP BY product_id, month)\n WHERE count > 1\"\"\"\n records = run_bigquery_query(sql)\n self.assertEqual(len(records), 0)\n\n # Cleanup\n env.run_task(workflow.cleanup.__name__)\n","sub_path":"oaebu_workflows/workflows/tests/test_oapen_workflow.py","file_name":"test_oapen_workflow.py","file_ext":"py","file_size_in_byte":13987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"553303128","text":"# Imports from 3rd party libraries\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n\n# Imports from this application\nfrom app import app\n\n# HEADER -> Add to Layout to include in the app\nheader = dbc.Col(\n [\n dcc.Markdown(\n \"\"\"\n ![header](/assets/dance_hoorah.gif)\n \"\"\"\n ),\n ],\n md=15,\n style={\n 'textAlign': 'center',\n }\n)\n\n\n# 2 column layout. 1st column width = 4/12\n# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout\ncolumn1 = dbc.Col(\n [\n dcc.Markdown(\n \"\"\"\n\n ## Dishify MyDish\n\n With Mydish, save and create recipes with the magic of Data Science. Have a handwritten recipe in a Notebook?\n No problem, just take a photo and feed it to our app. 
Found a website with an interesting recipe? Copy and paste the URL into our app and the\n recipe will be saved instantly. Want it to be saved in Spanish, French, or English? That can be done. MyDish will\n save you time and reliably save your recipes.\n\n \"\"\"\n ),\n dcc.Link(dbc.Button('Make A Recipe!',\n color='primary'), href='/UrlGetter')\n ],\n md=4,\n)\n\n\ncolumn2 = dbc.Col(\n [\n dcc.Markdown(\n \"\"\"\n ![header](/assets/food_anime.gif)\n \"\"\"\n\n ),\n ],\n md=50,\n style={\n 'textAlign': 'center',\n }\n)\n\n# dbc.Row([header]) <- Add to layout for a header.\nlayout = dbc.Row([column1, column2]), dbc.Row([header])\n","sub_path":"DishDash/pages/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"146735457","text":"import sublime_plugin\n\nfrom .server.ServerManager import get_a_new_server_manager\nfrom .server.ServerManager import get_server_manager\n\nfrom .client.ClientManager import get_a_new_client_manager\nfrom .client.ClientManager import get_client_manager\n\nclass ServerControllerCommand(sublime_plugin.TextCommand):\n\n\tdef __init__(self, view):\n\t\tsuper(ServerControllerCommand, self).__init__(view)\n\t\tself.server = get_server_manager()\n\t\tself.client = get_client_manager()\n\n\tdef run(self, edit):\n\t\t\n\t\tif self.client.is_client_closable:\n\t\t\tdel self.server\n\t\t\tdel self.client\n\t\t\tself.server = get_a_new_server_manager()\n\t\t\tself.client = get_a_new_client_manager()\n\n\t\tself.client.init_config_file()\n\t\tself.client.startclient()\n\t\tself.server.run()\n\n","sub_path":"ServerControllerCommand.py","file_name":"ServerControllerCommand.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"416128560","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import _LRScheduler\nimport numpy as np\n\nclass CrossEntropyLossMaybeSmooth(nn.CrossEntropyLoss):\n ''' Calculate cross entropy loss, apply label smoothing if needed. '''\n\n def __init__(self, smooth_eps=0.0):\n super(CrossEntropyLossMaybeSmooth, self).__init__()\n self.smooth_eps = smooth_eps\n\n def forward(self, output, target, smooth=False):\n if not smooth:\n return F.cross_entropy(output, target)\n\n target = target.contiguous().view(-1)\n n_class = output.size(1)\n one_hot = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)\n smooth_one_hot = one_hot * (1 - self.smooth_eps) + (1 - one_hot) * self.smooth_eps / (n_class - 1)\n log_prb = F.log_softmax(output, dim=1)\n loss = -(smooth_one_hot * log_prb).sum(dim=1).mean()\n return loss\n\n\ndef mixup_data(x, y, alpha=1.0):\n\n '''Compute the mixup data. 
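It draws lam from Beta(alpha, alpha) (or uses lam = 1.0 when alpha <= 0) and mixes pairs as lam * x + (1 - lam) * x[index] over a shuffled batch index. 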
Return mixed inputs, pairs of targets, and lambda'''\n if alpha > 0.0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1.0\n\n batch_size = x.size()[0]\n index = torch.randperm(batch_size).cuda()\n\n mixed_x = lam * x + (1 - lam) * x[index,:]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef mixup_criterion(criterion, pred, y_a, y_b, lam, smooth):\n return lam * criterion(pred, y_a, smooth=smooth) + \\\n (1 - lam) * criterion(pred, y_b, smooth=smooth)\n\n\nclass GradualWarmupScheduler(_LRScheduler):\n \"\"\" Gradually warm-up(increasing) learning rate in optimizer.\n Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n multiplier: target learning rate = base lr * multiplier\n total_iter: target learning rate is reached at total_iter, gradually\n after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)\n \"\"\"\n\n def __init__(self, optimizer, multiplier, total_iter, after_scheduler=None):\n self.multiplier = multiplier\n if self.multiplier <= 1.:\n raise ValueError('multiplier should be greater than 1.')\n self.total_iter = total_iter\n self.after_scheduler = after_scheduler\n self.finished = False\n super().__init__(optimizer)\n\n def get_lr(self):\n if self.last_epoch > self.total_iter:\n if self.after_scheduler:\n if not self.finished:\n self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]\n self.finished = True\n return self.after_scheduler.get_lr()\n return [base_lr * self.multiplier for base_lr in self.base_lrs]\n\n return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_iter + 1.) for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n if self.finished and self.after_scheduler:\n return self.after_scheduler.step(epoch)\n else:\n return super(GradualWarmupScheduler, self).step(epoch)\n","sub_path":"prune_util.py","file_name":"prune_util.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"553979173","text":"# Copyright (c) 2015 OpenStack Foundation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport mock\nimport six\nfrom webob import exc\n\nfrom neutron.api.v2 import attributes\nfrom neutron import context\nfrom neutron.db import models_v2\nfrom neutron.extensions import external_net\nfrom neutron.extensions import extraroute\nfrom neutron.extensions import l3\nfrom neutron.extensions import l3_ext_gw_mode\nfrom neutron.extensions import portbindings\nfrom neutron.extensions import providernet as pnet\nfrom neutron.extensions import securitygroup as secgrp\nfrom neutron import manager\nfrom neutron.tests.unit import _test_extension_portbindings as test_bindings\nfrom neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin\nfrom neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts\nfrom neutron.tests.unit.extensions import test_extraroute as test_ext_route\nfrom 
neutron.tests.unit.extensions import test_l3 as test_l3_plugin\nfrom neutron.tests.unit.extensions \\\n import test_l3_ext_gw_mode as test_ext_gw_mode\nfrom neutron.tests.unit.scheduler \\\n import test_dhcp_agent_scheduler as test_dhcpagent\nfrom neutron import version\n\nfrom neutron_lib import constants\nfrom neutron_lib import exceptions as n_exc\nfrom oslo_config import cfg\nfrom oslo_serialization import jsonutils\nfrom oslo_utils import uuidutils\n\nfrom vmware_nsx.common import utils\nfrom vmware_nsx.nsxlib.v3 import client as nsx_client\nfrom vmware_nsx.nsxlib.v3 import cluster as nsx_cluster\nfrom vmware_nsx.plugins.nsx_v3 import plugin as nsx_plugin\nfrom vmware_nsx.tests import unit as vmware\nfrom vmware_nsx.tests.unit.extensions import test_metadata\nfrom vmware_nsx.tests.unit.nsx_v3 import mocks as nsx_v3_mocks\nfrom vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase\n\n\nPLUGIN_NAME = 'vmware_nsx.plugin.NsxV3Plugin'\n\n\nclass NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase,\n nsxlib_testcase.NsxClientTestCase):\n\n def setUp(self, plugin=PLUGIN_NAME,\n ext_mgr=None,\n service_plugins=None):\n\n self._patchers = []\n\n self.mock_api = nsx_v3_mocks.MockRequestSessionApi()\n nsxlib_testcase.NsxClientTestCase.setup_conf_overrides()\n self.cluster = nsx_cluster.NSXClusteredAPI(\n http_provider=nsxlib_testcase.MemoryMockAPIProvider(self.mock_api))\n\n def _patch_object(*args, **kwargs):\n patcher = mock.patch.object(*args, **kwargs)\n patcher.start()\n self._patchers.append(patcher)\n\n def _new_cluster(*args, **kwargs):\n return self.cluster\n\n self.mocked_rest_fns(\n nsx_plugin.security.firewall, 'nsxclient',\n mock_cluster=self.cluster)\n self.mocked_rest_fns(\n nsx_plugin.router.nsxlib, 'client', mock_cluster=self.cluster)\n\n mock_client_module = mock.Mock()\n mock_cluster_module = mock.Mock()\n mocked_client = self.new_mocked_client(\n nsx_client.NSX3Client, mock_cluster=self.cluster)\n mock_cluster_module.NSXClusteredAPI.return_value = self.cluster\n mock_client_module.NSX3Client.return_value = mocked_client\n _patch_object(nsx_plugin, 'nsx_client', new=mock_client_module)\n _patch_object(nsx_plugin, 'nsx_cluster', new=mock_cluster_module)\n\n # populate pre-existing mock resources\n cluster_id = uuidutils.generate_uuid()\n self.mock_api.post(\n 'api/v1/logical-routers',\n data=jsonutils.dumps({\n 'display_name': nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID,\n 'router_type': \"TIER0\",\n 'id': nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID,\n 'edge_cluster_id': cluster_id}),\n headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)\n\n self.mock_api.post(\n 'api/v1/edge-clusters',\n data=jsonutils.dumps({\n 'id': cluster_id,\n 'members': [\n {'member_index': 0},\n {'member_index': 1}\n ]}),\n headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)\n\n self.mock_api.post(\n 'api/v1/switching-profiles',\n data=jsonutils.dumps({\n 'id': uuidutils.generate_uuid(),\n 'display_name': nsx_plugin.NSX_V3_NO_PSEC_PROFILE_NAME\n }), headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)\n\n self.mock_api.post(\n 'api/v1/transport-zones',\n data=jsonutils.dumps({\n 'id': uuidutils.generate_uuid(),\n 'display_name': nsxlib_testcase.NSX_TZ_NAME\n }), headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)\n\n self.mock_api.post(\n 'api/v1/bridge-clusters',\n data=jsonutils.dumps({\n 'id': uuidutils.generate_uuid(),\n 'display_name': nsx_v3_mocks.NSX_BRIDGE_CLUSTER_NAME\n }), headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)\n\n super(NsxV3PluginTestCaseMixin, self).setUp(plugin=plugin,\n 
ext_mgr=ext_mgr)\n\n self.maxDiff = None\n\n def tearDown(self):\n for patcher in self._patchers:\n patcher.stop()\n super(NsxV3PluginTestCaseMixin, self).tearDown()\n\n def _create_network(self, fmt, name, admin_state_up,\n arg_list=None, providernet_args=None,\n set_context=False, tenant_id=None,\n **kwargs):\n tenant_id = tenant_id or self._tenant_id\n data = {'network': {'name': name,\n 'admin_state_up': admin_state_up,\n 'tenant_id': tenant_id}}\n # Fix to allow the router:external attribute and any other\n # attributes containing a colon to be passed with\n # a double underscore instead\n kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items())\n if external_net.EXTERNAL in kwargs:\n arg_list = (external_net.EXTERNAL, ) + (arg_list or ())\n\n if providernet_args:\n kwargs.update(providernet_args)\n for arg in (('admin_state_up', 'tenant_id', 'shared',\n 'availability_zone_hints') + (arg_list or ())):\n # Arg must be present\n if arg in kwargs:\n data['network'][arg] = kwargs[arg]\n network_req = self.new_create_request('networks', data, fmt)\n if set_context and tenant_id:\n # create a specific auth context for this request\n network_req.environ['neutron.context'] = context.Context(\n '', tenant_id)\n return network_req.get_response(self.api)\n\n def _create_l3_ext_network(\n self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID):\n name = 'l3_ext_net'\n net_type = utils.NetworkTypes.L3_EXT\n providernet_args = {pnet.NETWORK_TYPE: net_type,\n pnet.PHYSICAL_NETWORK: physical_network}\n return self.network(name=name,\n router__external=True,\n providernet_args=providernet_args,\n arg_list=(pnet.NETWORK_TYPE,\n pnet.PHYSICAL_NETWORK))\n\n def _save_networks(self, networks):\n ctx = context.get_admin_context()\n for network_id in networks:\n with ctx.session.begin(subtransactions=True):\n ctx.session.add(models_v2.Network(id=network_id))\n\n\nclass TestNetworksV2(test_plugin.TestNetworksV2, NsxV3PluginTestCaseMixin):\n\n @mock.patch.object(nsx_plugin.NsxV3Plugin, 'validate_availability_zones')\n def test_create_network_with_availability_zone(self, mock_validate_az):\n name = 'net-with-zone'\n zone = ['zone1']\n\n mock_validate_az.return_value = None\n with self.network(name=name, availability_zone_hints=zone) as net:\n az_hints = net['network']['availability_zone_hints']\n self.assertListEqual(az_hints, zone)\n\n\nclass TestPortsV2(test_plugin.TestPortsV2, NsxV3PluginTestCaseMixin,\n test_bindings.PortBindingsTestCase,\n test_bindings.PortBindingsHostTestCaseMixin,\n test_bindings.PortBindingsVnicTestCaseMixin):\n\n VIF_TYPE = portbindings.VIF_TYPE_OVS\n HAS_PORT_FILTER = True\n\n def setUp(self):\n super(TestPortsV2, self).setUp()\n self.plugin = manager.NeutronManager.get_plugin()\n self.ctx = context.get_admin_context()\n\n def test_update_port_delete_ip(self):\n # This test case overrides the default because the nsx plugin\n # implements port_security/security groups and it is not allowed\n # to remove an ip address from a port unless the security group\n # is first removed.\n with self.subnet() as subnet:\n with self.port(subnet=subnet) as port:\n data = {'port': {'admin_state_up': False,\n 'fixed_ips': [],\n secgrp.SECURITYGROUPS: []}}\n req = self.new_update_request('ports',\n data, port['port']['id'])\n res = self.deserialize('json', req.get_response(self.api))\n self.assertEqual(res['port']['admin_state_up'],\n data['port']['admin_state_up'])\n self.assertEqual(res['port']['fixed_ips'],\n data['port']['fixed_ips'])\n\n def 
test_fail_create_port_with_ext_net(self):\n expected_error = 'InvalidInput'\n with self._create_l3_ext_network() as network:\n with self.subnet(network=network, cidr='10.0.0.0/24'):\n device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'\n res = self._create_port(self.fmt,\n network['network']['id'],\n exc.HTTPBadRequest.code,\n device_owner=device_owner)\n data = self.deserialize(self.fmt, res)\n self.assertEqual(expected_error, data['NeutronError']['type'])\n\n def test_fail_update_port_with_ext_net(self):\n with self._create_l3_ext_network() as network:\n with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:\n with self.port(subnet=subnet) as port:\n device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'\n data = {'port': {'device_owner': device_owner}}\n req = self.new_update_request('ports',\n data, port['port']['id'])\n res = req.get_response(self.api)\n self.assertEqual(exc.HTTPBadRequest.code,\n res.status_int)\n\n def test_create_port_with_qos(self):\n with self.network() as network:\n policy_id = uuidutils.generate_uuid()\n data = {'port': {\n 'network_id': network['network']['id'],\n 'tenant_id': self._tenant_id,\n 'qos_policy_id': policy_id,\n 'name': 'qos_port',\n 'admin_state_up': True,\n 'device_id': 'fake_device',\n 'device_owner': 'fake_owner',\n 'fixed_ips': [],\n 'mac_address': '00:00:00:00:00:01'}\n }\n with mock.patch.object(self.plugin, '_get_qos_profile_id'):\n port = self.plugin.create_port(self.ctx, data)\n self.assertEqual(policy_id, port['qos_policy_id'])\n # Get port should also return the qos policy id\n with mock.patch('vmware_nsx.services.qos.nsx_v3.utils.'\n 'get_port_policy_id',\n return_value=policy_id):\n port = self.plugin.get_port(self.ctx, port['id'])\n self.assertEqual(policy_id, port['qos_policy_id'])\n\n def test_update_port_with_qos(self):\n with self.network() as network:\n data = {'port': {\n 'network_id': network['network']['id'],\n 'tenant_id': self._tenant_id,\n 'name': 'qos_port',\n 'admin_state_up': True,\n 'device_id': 'fake_device',\n 'device_owner': 'fake_owner',\n 'fixed_ips': [],\n 'mac_address': '00:00:00:00:00:01'}\n }\n port = self.plugin.create_port(self.ctx, data)\n policy_id = uuidutils.generate_uuid()\n data['port']['qos_policy_id'] = policy_id\n with mock.patch.object(self.plugin, '_get_qos_profile_id'):\n res = self.plugin.update_port(self.ctx, port['id'], data)\n self.assertEqual(policy_id, res['qos_policy_id'])\n # Get port should also return the qos policy id\n with mock.patch('vmware_nsx.services.qos.nsx_v3.utils.'\n 'get_port_policy_id',\n return_value=policy_id):\n res = self.plugin.get_port(self.ctx, port['id'])\n self.assertEqual(policy_id, res['qos_policy_id'])\n\n def test_create_ext_port_with_qos_fail(self):\n with self._create_l3_ext_network() as network:\n with self.subnet(network=network, cidr='10.0.0.0/24'):\n policy_id = uuidutils.generate_uuid()\n data = {'port': {'network_id': network['network']['id'],\n 'tenant_id': self._tenant_id,\n 'qos_policy_id': policy_id}}\n # Cannot add qos policy to a port on ext network\n self.assertRaises(n_exc.InvalidInput,\n self.plugin.create_port, self.ctx, data)\n\n def test_create_port_with_qos_on_net(self):\n with self.network() as network:\n policy_id = uuidutils.generate_uuid()\n device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'\n data = {'port': {\n 'network_id': network['network']['id'],\n 'tenant_id': self._tenant_id,\n 'name': 'qos_port',\n 'admin_state_up': True,\n 'device_id': 'fake_device',\n 'device_owner': device_owner,\n 'fixed_ips': 
[],\n 'mac_address': '00:00:00:00:00:01'}\n }\n with mock.patch.object(self.plugin,\n '_get_qos_profile_id') as get_profile:\n with mock.patch('vmware_nsx.services.qos.nsx_v3.utils.'\n 'get_network_policy_id', return_value=policy_id):\n self.plugin.create_port(self.ctx, data)\n get_profile.assert_called_once_with(self.ctx, policy_id)\n\n def test_update_port_with_qos_on_net(self):\n with self.network() as network:\n data = {'port': {\n 'network_id': network['network']['id'],\n 'tenant_id': self._tenant_id,\n 'name': 'qos_port',\n 'admin_state_up': True,\n 'device_id': 'fake_device',\n 'device_owner': 'fake_owner',\n 'fixed_ips': [],\n 'mac_address': '00:00:00:00:00:01'}\n }\n port = self.plugin.create_port(self.ctx, data)\n policy_id = uuidutils.generate_uuid()\n device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'\n data['port']['device_owner'] = device_owner\n with mock.patch.object(self.plugin,\n '_get_qos_profile_id') as get_profile:\n with mock.patch('vmware_nsx.services.qos.nsx_v3.utils.'\n 'get_network_policy_id', return_value=policy_id):\n self.plugin.update_port(self.ctx, port['id'], data)\n get_profile.assert_called_once_with(self.ctx, policy_id)\n\n\nclass DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt,\n NsxV3PluginTestCaseMixin):\n\n def setUp(self, plugin=None):\n super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp(\n plugin=PLUGIN_NAME)\n\n\nclass NSXv3DHCPAgentAZAwareWeightSchedulerTestCase(\n test_dhcpagent.DHCPAgentAZAwareWeightSchedulerTestCase,\n NsxV3PluginTestCaseMixin):\n\n def setUp(self):\n super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp()\n self.plugin = manager.NeutronManager.get_plugin()\n self.ctx = context.get_admin_context()\n\n def setup_coreplugin(self, core_plugin=None):\n super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase,\n self).setup_coreplugin(core_plugin=PLUGIN_NAME)\n\n\nclass TestL3ExtensionManager(object):\n\n def get_resources(self):\n # Simulate extension of L3 attribute map\n # First apply attribute extensions\n for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():\n l3.RESOURCE_ATTRIBUTE_MAP[key].update(\n l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))\n l3.RESOURCE_ATTRIBUTE_MAP[key].update(\n extraroute.EXTENDED_ATTRIBUTES_2_0.get(key, {}))\n # Finally add l3 resources to the global attribute map\n attributes.RESOURCE_ATTRIBUTE_MAP.update(\n l3.RESOURCE_ATTRIBUTE_MAP)\n return l3.L3.get_resources()\n\n def get_actions(self):\n return []\n\n def get_request_extensions(self):\n return []\n\n\ndef backup_l3_attribute_map():\n \"\"\"Return a backup of the original l3 attribute map.\"\"\"\n return dict((res, attrs.copy()) for\n (res, attrs) in six.iteritems(l3.RESOURCE_ATTRIBUTE_MAP))\n\n\ndef restore_l3_attribute_map(map_to_restore):\n \"\"\"Ensure changes made by fake ext mgrs are reverted.\"\"\"\n l3.RESOURCE_ATTRIBUTE_MAP = map_to_restore\n\n\nclass L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxV3PluginTestCaseMixin):\n\n def _restore_l3_attribute_map(self):\n l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk\n\n def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None,\n service_plugins=None):\n self._l3_attribute_map_bk = backup_l3_attribute_map()\n cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)\n cfg.CONF.set_default('max_routes', 3)\n self.addCleanup(restore_l3_attribute_map, self._l3_attribute_map_bk)\n ext_mgr = ext_mgr or TestL3ExtensionManager()\n super(L3NatTest, self).setUp(\n plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)\n self.plugin_instance = 
manager.NeutronManager.get_plugin()\n self._plugin_name = \"%s.%s\" % (\n self.plugin_instance.__module__,\n self.plugin_instance.__class__.__name__)\n self._plugin_class = self.plugin_instance.__class__\n\n\nclass TestL3NatTestCase(L3NatTest,\n test_l3_plugin.L3NatDBIntTestCase,\n test_ext_route.ExtraRouteDBTestCaseBase,\n test_metadata.MetaDataTestCase):\n\n def setUp(self, plugin=PLUGIN_NAME,\n ext_mgr=None,\n service_plugins=None):\n super(TestL3NatTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)\n cfg.CONF.set_override('metadata_mode', None, 'nsx_v3')\n cfg.CONF.set_override('metadata_on_demand', False, 'nsx_v3')\n\n def _test_create_l3_ext_network(\n self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID):\n name = 'l3_ext_net'\n net_type = utils.NetworkTypes.L3_EXT\n expected = [('subnets', []), ('name', name), ('admin_state_up', True),\n ('status', 'ACTIVE'), ('shared', False),\n (external_net.EXTERNAL, True),\n (pnet.NETWORK_TYPE, net_type),\n (pnet.PHYSICAL_NETWORK, physical_network)]\n with self._create_l3_ext_network(physical_network) as net:\n for k, v in expected:\n self.assertEqual(net['network'][k], v)\n\n def test_create_l3_ext_network_with_default_tier0(self):\n self._test_create_l3_ext_network()\n\n def test_floatingip_update(self):\n super(TestL3NatTestCase, self).test_floatingip_update(\n expected_status=constants.FLOATINGIP_STATUS_DOWN)\n\n def test_floatingip_with_invalid_create_port(self):\n self._test_floatingip_with_invalid_create_port(self._plugin_name)\n\n def test_routes_update_for_multiple_routers(self):\n self.skipTest('not supported')\n\n def test_floatingip_multi_external_one_internal(self):\n self.skipTest('not supported')\n\n def test_floatingip_same_external_and_internal(self):\n self.skipTest('not supported')\n\n def test_route_update_with_external_route(self):\n self.skipTest('not supported')\n\n def test_multiple_subnets_on_different_routers(self):\n with self.network() as network:\n with self.subnet(network=network) as s1,\\\n self.subnet(network=network,\n cidr='11.0.0.0/24') as s2,\\\n self.router() as r1,\\\n self.router() as r2:\n self._router_interface_action('add', r1['router']['id'],\n s1['subnet']['id'], None)\n self.assertRaises(n_exc.InvalidInput,\n self.plugin_instance.add_router_interface,\n context.get_admin_context(),\n r2['router']['id'],\n {'subnet_id': s2['subnet']['id']})\n self._router_interface_action('remove', r1['router']['id'],\n s1['subnet']['id'], None)\n self._router_interface_action('add', r2['router']['id'],\n s2['subnet']['id'], None)\n self._router_interface_action('remove', r2['router']['id'],\n s2['subnet']['id'], None)\n\n def test_multiple_subnets_on_same_router(self):\n with self.network() as network:\n with self.subnet(network=network) as s1,\\\n self.subnet(network=network,\n cidr='11.0.0.0/24') as s2,\\\n self.router() as r1:\n self._router_interface_action('add', r1['router']['id'],\n s1['subnet']['id'], None)\n self.assertRaises(n_exc.InvalidInput,\n self.plugin_instance.add_router_interface,\n context.get_admin_context(),\n r1['router']['id'],\n {'subnet_id': s2['subnet']['id']})\n self._router_interface_action('remove', r1['router']['id'],\n s1['subnet']['id'], None)\n\n def test_router_remove_interface_inuse_return_409(self):\n with self.router() as r1,\\\n self.subnet() as ext_subnet,\\\n self.subnet(cidr='11.0.0.0/24') as s1:\n self._set_net_external(ext_subnet['subnet']['network_id'])\n self._router_interface_action(\n 'add', r1['router']['id'],\n s1['subnet']['id'], None)\n 
self._add_external_gateway_to_router(\n r1['router']['id'],\n ext_subnet['subnet']['network_id'])\n with self.port(subnet=s1,) as p:\n fip_res = self._create_floatingip(\n self.fmt,\n ext_subnet['subnet']['network_id'],\n subnet_id=ext_subnet['subnet']['id'],\n port_id=p['port']['id'])\n fip = self.deserialize(self.fmt, fip_res)\n self._router_interface_action(\n 'remove',\n r1['router']['id'],\n s1['subnet']['id'],\n None,\n expected_code=exc.HTTPConflict.code)\n self._delete('floatingips', fip['floatingip']['id'])\n self._remove_external_gateway_from_router(\n r1['router']['id'],\n ext_subnet['subnet']['network_id'])\n self._router_interface_action('remove',\n r1['router']['id'],\n s1['subnet']['id'],\n None)\n\n def test_router_update_on_external_port(self):\n with self.router() as r:\n with self.subnet(cidr='10.0.1.0/24') as s:\n self._set_net_external(s['subnet']['network_id'])\n self._add_external_gateway_to_router(\n r['router']['id'],\n s['subnet']['network_id'])\n body = self._show('routers', r['router']['id'])\n net_id = body['router']['external_gateway_info']['network_id']\n self.assertEqual(net_id, s['subnet']['network_id'])\n port_res = self._list_ports(\n 'json',\n 200,\n s['subnet']['network_id'],\n tenant_id=r['router']['tenant_id'],\n device_owner=constants.DEVICE_OWNER_ROUTER_GW)\n port_list = self.deserialize('json', port_res)\n self.assertEqual(len(port_list['ports']), 1)\n\n routes = [{'destination': '135.207.0.0/16',\n 'nexthop': '10.0.1.3'}]\n\n self.assertRaises(n_exc.InvalidInput,\n self.plugin_instance.update_router,\n context.get_admin_context(),\n r['router']['id'],\n {'router': {'routes':\n routes}})\n self._remove_external_gateway_from_router(\n r['router']['id'],\n s['subnet']['network_id'])\n body = self._show('routers', r['router']['id'])\n gw_info = body['router']['external_gateway_info']\n self.assertIsNone(gw_info)\n\n def test_create_router_gateway_fails(self):\n self.skipTest('not supported')\n\n def test_router_remove_ipv6_subnet_from_interface(self):\n self.skipTest('not supported')\n\n def test_router_add_interface_multiple_ipv6_subnets_same_net(self):\n self.skipTest('not supported')\n\n def test_router_add_interface_multiple_ipv4_subnets(self):\n self.skipTest('not supported')\n\n\nclass ExtGwModeTestCase(L3NatTest,\n test_ext_gw_mode.ExtGwModeIntTestCase):\n pass\n\n\nclass TestNsxV3Utils(NsxV3PluginTestCaseMixin):\n\n def test_build_v3_tags_payload(self):\n result = utils.build_v3_tags_payload(\n {'id': 'fake_id',\n 'tenant_id': 'fake_tenant_id'},\n resource_type='os-neutron-net-id',\n project_name='fake_tenant_name')\n expected = [{'scope': 'os-neutron-net-id', 'tag': 'fake_id'},\n {'scope': 'os-project-id', 'tag': 'fake_tenant_id'},\n {'scope': 'os-project-name', 'tag': 'fake_tenant_name'},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n self.assertEqual(expected, result)\n\n def test_build_v3_tags_payload_internal(self):\n result = utils.build_v3_tags_payload(\n {'id': 'fake_id',\n 'tenant_id': 'fake_tenant_id'},\n resource_type='os-neutron-net-id',\n project_name=None)\n expected = [{'scope': 'os-neutron-net-id', 'tag': 'fake_id'},\n {'scope': 'os-project-id', 'tag': 'fake_tenant_id'},\n {'scope': 'os-project-name', 'tag': 'NSX Neutron plugin'},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n self.assertEqual(expected, result)\n\n def test_build_v3_tags_payload_invalid_length(self):\n self.assertRaises(n_exc.InvalidInput,\n utils.build_v3_tags_payload,\n {'id': 'fake_id',\n 
'tenant_id': 'fake_tenant_id'},\n resource_type='os-neutron-maldini-rocks-id',\n project_name='fake')\n\n def test_build_v3_api_version_tag(self):\n result = utils.build_v3_api_version_tag()\n expected = [{'scope': 'os-neutron-id',\n 'tag': 'NSX Neutron plugin'},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n self.assertEqual(expected, result)\n\n def test_is_internal_resource(self):\n project_tag = utils.build_v3_tags_payload(\n {'id': 'fake_id',\n 'tenant_id': 'fake_tenant_id'},\n resource_type='os-neutron-net-id',\n project_name=None)\n internal_tag = utils.build_v3_api_version_tag()\n\n expect_false = utils.is_internal_resource({'tags': project_tag})\n self.assertFalse(expect_false)\n\n expect_true = utils.is_internal_resource({'tags': internal_tag})\n self.assertTrue(expect_true)\n\n def test_get_name_and_uuid(self):\n uuid = 'afc40f8a-4967-477e-a17a-9d560d1786c7'\n suffix = '_afc40...786c7'\n expected = 'maldini%s' % suffix\n short_name = utils.get_name_and_uuid('maldini', uuid)\n self.assertEqual(expected, short_name)\n\n name = 'X' * 255\n expected = '%s%s' % ('X' * (80 - len(suffix)), suffix)\n short_name = utils.get_name_and_uuid(name, uuid)\n self.assertEqual(expected, short_name)\n\n def test_build_v3_tags_max_length_payload(self):\n result = utils.build_v3_tags_payload(\n {'id': 'X' * 255,\n 'tenant_id': 'X' * 255},\n resource_type='os-neutron-net-id',\n project_name='X' * 255)\n expected = [{'scope': 'os-neutron-net-id', 'tag': 'X' * 40},\n {'scope': 'os-project-id', 'tag': 'X' * 40},\n {'scope': 'os-project-name', 'tag': 'X' * 40},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n self.assertEqual(expected, result)\n\n def test_add_v3_tag(self):\n result = utils.add_v3_tag([], 'fake-scope', 'fake-tag')\n expected = [{'scope': 'fake-scope', 'tag': 'fake-tag'}]\n self.assertEqual(expected, result)\n\n def test_add_v3_tag_max_length_payload(self):\n result = utils.add_v3_tag([], 'fake-scope', 'X' * 255)\n expected = [{'scope': 'fake-scope', 'tag': 'X' * 40}]\n self.assertEqual(expected, result)\n\n def test_add_v3_tag_invalid_scope_length(self):\n self.assertRaises(n_exc.InvalidInput,\n utils.add_v3_tag,\n [],\n 'fake-scope-name-is-far-too-long',\n 'fake-tag')\n\n def test_update_v3_tags_addition(self):\n tags = [{'scope': 'os-neutron-net-id', 'tag': 'X' * 40},\n {'scope': 'os-project-id', 'tag': 'Y' * 40},\n {'scope': 'os-project-name', 'tag': 'Z' * 40},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n resources = [{'resource_type': 'os-instance-uuid',\n 'tag': 'A' * 40}]\n tags = utils.update_v3_tags(tags, resources)\n expected = [{'scope': 'os-neutron-net-id', 'tag': 'X' * 40},\n {'scope': 'os-project-id', 'tag': 'Y' * 40},\n {'scope': 'os-project-name', 'tag': 'Z' * 40},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()},\n {'scope': 'os-instance-uuid',\n 'tag': 'A' * 40}]\n self.assertEqual(sorted(expected), sorted(tags))\n\n def test_update_v3_tags_removal(self):\n tags = [{'scope': 'os-neutron-net-id', 'tag': 'X' * 40},\n {'scope': 'os-project-id', 'tag': 'Y' * 40},\n {'scope': 'os-project-name', 'tag': 'Z' * 40},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n resources = [{'resource_type': 'os-neutron-net-id',\n 'tag': ''}]\n tags = utils.update_v3_tags(tags, resources)\n expected = [{'scope': 'os-project-id', 'tag': 'Y' * 40},\n {'scope': 'os-project-name', 'tag': 'Z' * 40},\n {'scope': 'os-api-version',\n 'tag': 
version.version_info.release_string()}]\n self.assertEqual(sorted(expected), sorted(tags))\n\n def test_update_v3_tags_update(self):\n tags = [{'scope': 'os-neutron-net-id', 'tag': 'X' * 40},\n {'scope': 'os-project-id', 'tag': 'Y' * 40},\n {'scope': 'os-project-name', 'tag': 'Z' * 40},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n resources = [{'resource_type': 'os-project-id',\n 'tag': 'A' * 40}]\n tags = utils.update_v3_tags(tags, resources)\n expected = [{'scope': 'os-neutron-net-id', 'tag': 'X' * 40},\n {'scope': 'os-project-id', 'tag': 'A' * 40},\n {'scope': 'os-project-name', 'tag': 'Z' * 40},\n {'scope': 'os-api-version',\n 'tag': version.version_info.release_string()}]\n self.assertEqual(sorted(expected), sorted(tags))\n","sub_path":"vmware_nsx/tests/unit/nsx_v3/test_plugin.py","file_name":"test_plugin.py","file_ext":"py","file_size_in_byte":34571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"211789582","text":"data = open('19chal.txt').readlines()\nn_lines = data.index('\\n')\nrule_data = [i.replace('\\n','') for i in data[:n_lines] if i!='\\n']\nstr_to_check = [i.replace('\\n','') for i in data[(n_lines+1):] if i!='\\n']\n\ndef line_parser(line):\n key, remainder = line.split(': ')\n chars = remainder.split(' ')\n return key, chars\nclass rule:\n def __init__(self, remainder):\n self.ran_before=False\n self.chars = remainder\n nquot = self.chars[0].count('\"')\n self.multirule=False\n if nquot==2:\n self.final_type = True\n self.char_desired = self.chars[0].replace('\\\"','')\n else:\n self.final_type=False\n if '|' in self.chars:\n i = self.chars.index('|')\n self.multirule = True\n self.rules = [rule(self.chars[:i]), rule(self.chars[(i+1):])]\n def checkrule(self, list_of_char, rules_dict):\n ### Returns a number from 0 to N where N is the length of the str\n n = len(list_of_char)\n if self.multirule:\n idxs = [rule.checkrule(list_of_char, rules_dict) for rule in self.rules]\n ret = min(idxs)\n elif self.final_type:\n if self.char_desired in list_of_char:\n ret = list_of_char.index(self.char_desired)+1\n else:\n ret = n+1\n else:\n min_idx = 0\n for k in range(len(self.chars)):\n sub_string_occurance = rules_dict[self.chars[k]].checkrule(list_of_char[(min_idx):], rules_dict)\n if (min_idx+sub_string_occurance)>len(list_of_char):\n min_idx = len(list_of_char)+1\n break\n else:\n min_idx = min_idx+sub_string_occurance\n ret = min_idx\n return(ret)\n\n\n\n\nparsed_lines = [line_parser(i) for i in rule_data]\n\ndef solution(parsed_lines):\n rules = {key: rule(remainder) for key, remainder in parsed_lines}\n lens_of_strings = [len(l) for l in str_to_check]\n res = [rules['0'].checkrule(_str, rules) for _str in str_to_check]\n matched = [last_idx==n for n,last_idx in zip(lens_of_strings, res)]\n return matched\nmatched = solution(parsed_lines)\ndef remap(key, remainder,rep = [0,0]):\n if key == '8':\n remainder = ['42']+['42']*rep[0]\n if key == '11':\n remainder = ['42'] +['42']*rep[1]+ ['31']*rep[1]+ ['31']\n return key, remainder\nall_masks = []\nfor k in range(20):\n for k2 in range(20):\n remapped_lines = [remap(key, remainder, rep=[k,k2]) for key, remainder in parsed_lines]\n maskie = solution(remapped_lines)\n all_masks.append(maskie)\n\nn = len(all_masks[0])\nnrow = len(all_masks)\nans = sum([any([all_masks[i][j] for i in range(nrow)]) for j in range(n)])\nprint(ans)\n###>230\n### >303\n### 357\n### Not 325\n### 
<416","sub_path":"code/19chal.py","file_name":"19chal.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"50031190","text":"class Solution(object):\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n visited = set()\n\n def valid_pos(p):\n return 0 <= p[0] < len(grid) and 0 <= p[1] < len(grid[0])\n\n def on_island(p):\n return grid[p[0]][p[1]] == '1'\n\n def marker(p):\n visited.add(p)\n generate = lambda x: [(x[0] - 1, x[1],), (x[0] + 1, x[1],), (x[0], x[1] - 1,), (x[0], x[1] + 1,), ]\n trace = generate(p)\n while trace:\n t = trace.pop(0)\n if t in visited or not valid_pos(t) or not on_island(t):\n continue\n visited.add(t)\n trace.extend(generate(t))\n\n counter = 0\n\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n p = (i, j,)\n if grid[i][j] == '1' and p not in visited:\n counter += 1\n marker(p)\n\n return counter\n","sub_path":"codes/200.py","file_name":"200.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"333392092","text":"import numpy as np\nimport pandas as pd\nimport matplotlib\n#import matplotlib.pyplot as plt\n#from matplotlib import pyplot\n\n\nimport scipy\nfrom scipy.optimize import curve_fit\n\ndef func(x, m, c):\n return m*x +c\n\n\nmy_dict = {}\n\nf = open(\"data.txt\", \"r\")\n\nfor x in f:\n\n if x == '\\n':\n pass\n\n else:\n\n splitlist = x.split()\n\n if splitlist[0] in my_dict:\n my_dict[splitlist[0]].append(splitlist[1])\n else:\n my_dict[splitlist[0]] = [splitlist[1]]\n\n\nprint(my_dict)\nf.close()\ndf=pd.DataFrame.from_dict(my_dict,orient='index').transpose()\nprint(df)\ndf = df.reindex(sorted(df.columns), axis=1)\n\n\ndf.to_csv(\"Output1.csv\")\n\ncolumn_list = df.columns.values.tolist()\nnp_voltage = np.asarray(column_list, dtype=np.float32)\n\nfinal_result=[]\n\nfor index, rows in df.iterrows():\n\n current_values_list =[]\n for x in column_list:\n current_values_list.append(float(rows[x]))\n\n np_current = np.asarray(current_values_list, dtype=np.float32)\n\n popt, pcov = scipy.optimize.curve_fit(func, np_voltage, np_current) # your data x, y to fit\n\n final_result.append(1/popt[0])\nprint(final_result)\n\n#hist, bin_edges = np.histogram(final_result)\n#print (hist)\n#print (bin_edges)\n#\n\"\"\"\nn, bins, patches = matplotlib.pyplot.hist(x=final_result, bins='auto', color='#0504aa',\n alpha=0.7, rwidth=0.85)\nmatplotlib.pyplot.grid(axis='y', alpha=0.75)\nmatplotlib.pyplot.xlabel('Value')\nmatplotlib.pyplot.ylabel('Frequency')\nmatplotlib.pyplot.title('My Very Own Histogram')\nmatplotlib.pyplot.text(23, 45, r'$\\mu=15, b=3$')\nmaxfreq = n.max()\n# Set a clean upper y-axis limit.\nmatplotlib.pyplot.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)\n\"\"\"\ndf_final = pd.DataFrame(final_result , columns =['Resistance'])\ndf_final.to_csv(\"FinalResults.csv\")","sub_path":"Main2.py","file_name":"Main2.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"120194900","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.views.generic import TemplateView\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n 
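# NOTE (added comment): patterns() with dotted-string view paths is the legacy pre-1.10 Django URLconf style, kept here as-is.\n 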
#url(r'^$', TemplateView.as_view(template_name='base.html')),\n\n url(r'', include('social_auth.urls')),\n url(r'^$', 'social.views.home', name='home'),\n\n url(r'^my-profile/', 'social.views.my_profile', name='my-profile'),\n url(r'^edit-profile/', 'social.views.edit_profile', name='edit-profile'),\n url(r'^edit-account/', 'social.views.edit_account', name='edit-account'),\n url(r'^view-user/', 'social.views.view_user', name='view-user'),\n\n url(r'^logout/', 'social.views.logout', name='logout'),\n url(r'^admin/', include(admin.site.urls)),\n)\n\n# Uncomment the next line to serve media files in dev.\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += patterns('',\n url(r'^__debug__/', include(debug_toolbar.urls)),\n )\n","sub_path":"getbizy_project/getbizy_project/getbizy_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"58995163","text":"n, m = map(int, input().split())\nPy = []\nfor i in range(m):\n py = list(map(int, input().split()))\n Py.append(py+[i])\nPy = sorted(sorted(Py, key = lambda x: x[1]))\ntmp = -1\nans = []\nfor i in range(m):\n out = str(Py[i][0]).zfill(6)\n cnt = cnt + 1 if Py[i][0] == tmp else 1\n out += str(cnt).zfill(6)\n tmp = Py[i][0]\n ans.append([Py[i][2], out])\nans.sort()\nfor i in range(m):\n print(ans[i][1])","sub_path":"113/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"502639994","text":"class AutoSaveModelFormMixin(object):\n \"\"\"\n Auto save content from selected fields with prefix to local storage.\n If changed data available, ask user to restore.\n \"\"\"\n autosave_prefix = ''\n autosave_fields = []\n\n def __init__(self, *args, **kwargs):\n super(AutoSaveModelFormMixin, self).__init__(*args, **kwargs)\n\n instance = kwargs.get('instance')\n if instance:\n instance_identifier = instance.id\n else:\n instance_identifier = 'add'\n\n for field_name in self.autosave_fields:\n field = self.fields[field_name]\n field.widget.attrs = field.widget.attrs or {}\n field.widget.attrs.update({\n 'data-autosave': 'autosave_%s_%s:%s' % (self.autosave_prefix, instance_identifier, field),\n })\n\n @property\n def media(self):\n media = super(AutoSaveModelFormMixin, self).media\n media.add_js(['admin/js/admin-models-autosave.js'])\n return media\n","sub_path":"iwg_blog/utils/forms/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"636346531","text":"from transiter.services import routeservice\nfrom transiter.services import stopservice\nfrom transiter.services import systemservice\nfrom transiter.services import exceptions\n\nfrom transiter.scheduler import client\n#print(jsonify(systemservice.get('nycsubway')))\n\n\"\"\"\n#print(jsonify(stopservice.get('L03')))\n#print(jsonify(routeservice.get('D')))\nif(__name__=='__main__'):\n # print(jsonutil.convert_for_cli(routeservice.get_by_id(None, 'L')))\n #print(jsonutil.convert_for_cli(stopservice.get_by_id(None, 'L03')))\n\n try:\n systemservice.delete_by_id('nycsubway')\n except exceptions.IdNotFoundError:\n pass\n\n systemservice.install('nycsubway')\n\n\"\"\"\n\n\nimport requests\nimport json\n\nclient.refresh_jobs()\n\n\nexit()\n\ndef 
compare_responses(rtr_s, transiter_s):\n right = 0\n wrong = 0\n rtr = json.loads(rtr_s)\n trn = json.loads(transiter_s)\n trn_stop_event = {}\n for stop_event in trn['stop_events']:\n trn_stop_event[stop_event['trip']['trip_id']] = stop_event\n\n for direction in rtr['directions']:\n for rtr_trip in direction['trips']:\n if rtr_trip['trip_uid'] not in trn_stop_event:\n print('Trip {} in RTR; not in Transitor'.format(rtr_trip['trip_uid']))\n wrong += 1\n continue\n\n trn_trip = trn_stop_event[rtr_trip['trip_uid']]\n del trn_stop_event[rtr_trip['trip_uid']]\n #print(trip['arrival_time'])\n #print(trn_stop_event[trip['trip_uid']]['arrival_time'])\n if rtr_trip['arrival_time'] == trn_trip['arrival_time']:\n right += 1\n else:\n if trn_trip['trip']['feed_update_time'] != rtr_trip['feed_last_updated']:\n continue\n print('Trip {} mismatching details'.format(rtr_trip['trip_uid']))\n print(' Arrival time: RTR: {}; Transitor: {}'.format(\n rtr_trip['arrival_time'],\n trn_trip['arrival_time']))\n #print(jsonutil.convert_for_http(trn_trip))\n #trn_trip['trip']'['feed_update_time']\n wrong += 1\n\n for trip_id in trn_stop_event.keys():\n print('Trip {} not in RTR; in Transitor'.format(trip_id))\n wrong += len(trn_stop_event)\n if right+wrong == 0:\n return 1\n print('Trip matching: {}%'.format(right*100/(right+wrong)))\n return right/(right+wrong)\n #print('Trips in Transitor not in RTR: {}'.format(len(trn_stop_event)))\n\n\n\"\"\"\nfrom .database.accessobjects import StopDao\nstop_dao = StopDao()\n\n\nstops = stop_dao.list_all_in_system('nycsubway')\nstops = reversed(sorted(stops, key = lambda stop: stop.stop_id))\nnorth = None\nsouth = None\nlines = []\nfor stop in stops:\n if len(stop.direction_names) >= 2:\n for direction_name in stop.direction_names:\n if direction_name.track is not None:\n continue\n if direction_name.direction == 'N':\n north = direction_name.name\n else:\n south = direction_name.name\n lines.append((stop.stop_id, north, south))\n\nlines.append(('stop_id', 'north_direction_name', 'south_direction_name'))\nlines.reverse()\n\n\nwith open('direction_names.csv', 'w') as f:\n for line in lines:\n f.write('{},{},{}\\n'.format(*line))\nexit()\n\n\"\"\"\n\nstop_ids = set()\n\nroutes = routeservice.list_all_in_system('nycsubway')\nroute_ids = [route['route_id'] for route in routes]\nprint(route_ids)\n\n#route_ids = ['R'] #, '2', '3', '4', '5', '6', 'L', 'A', 'B', 'C', 'D', 'E', 'F']\nfor route_id in route_ids:\n route = routeservice.get_in_system_by_id('nycsubway', route_id)\n for stop in route['stops']:\n stop_ids.add(stop['stop_id'])\n\nprint(sorted(stop_ids))\n\nstop_id_to_rtr_response = {}\n\ncomparisons = 0\nsum_of_proportions = 0\n\n\n#stop_ids = ['112']*20\n\n\nfor index, stop_id in enumerate(sorted(stop_ids)):\n\n if index%10 == 0:\n print('Updating Transitor')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/123456/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/L/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/G/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/ACE/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/BDFM/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/NQRW/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/7/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/SIR/')\n requests.post('http://localhost:5000/systems/nycsubway/feeds/JZ/')\n\n print('Retrieving RTR response for stop_id={}'.format(stop_id))\n rtr_response = \\\n 
requests.get('https://www.realtimerail.nyc/json/stops/{}.json'.format(\n stop_id)).content\n print('Retrieving Transitor response for stop_id={}'.format(stop_id))\n transitor_response = \\\n requests.get('http://localhost:5000/systems/nycsubway/stops/{}'.format(\n stop_id)).content\n print('Comparing responses')\n comparisons += 1\n sum_of_proportions += compare_responses(rtr_response, transitor_response)\n\n #if index == 19:\n # break\n\nprint('Total success: {}%'.format(sum_of_proportions*100/comparisons))\n\n\"\"\"\nexit()\nprint('Updating')\nrequests.post('http://localhost:5000/systems/nycsubway/feeds/123456/')\n\nstop_id_to_transitor_response = {}\n\nprint('Downloading RTR')\nrtr_s = requests.get('https://www.realtimerail.nyc/json/stops/635.json').content\n\nprint('Reading Transiter')\ntransiter_s = requests.get('http://localhost:5000/systems/nycsubway/stops/635').content\n\"\"\"\n\n\n\n\n\n","sub_path":"tests/rtr_consistency.py","file_name":"rtr_consistency.py","file_ext":"py","file_size_in_byte":5635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"349788775","text":"# todo: tests\n\nimport sys\nimport socket\nimport re\nimport contracts\nimport pathlib\nimport requests\n\n# [+]------------------------------------------------------\n# 0=INITIAL DATA\nmodels_used_list = [\"TD-1\", ]\ndev_found_mac_dict = {}\npath_to_save_results = pathlib.Path.cwd() / \"RESULTS\" # will change from script argv!\n\n# [+]------------------------------------------------------\n# 1=PATH TO SAVE\nif len(sys.argv) == 3:\n argv_name = sys.argv[1]\n argv_value = sys.argv[2]\n if argv_name == \"--dir\":\n try:\n path_to_save_results = pathlib.Path.cwd() / argv_value\n except:\n pass\n print(sys.argv)\npath_to_save_results.mkdir(exist_ok=True)\n\n\n# [+?]-----------------------------------------------------\n# 2=DEV TEST=json_rpc\n@contracts.contract(ip=str, dev_id=int, returns=\"int|None\")\ndef dev_test_start(ip, dev_id):\n dev_url = f\"http://{ip}/api\"\n request_json_rpc = {\"jsonrpc\": \"2.0\", \"method\": \"do_test\", \"id\": dev_id}\n try:\n response_http = requests.post(url=dev_url, json=request_json_rpc)\n except:\n return None\n\n try:\n # response_http_json = response_http.json() #del by corrector\n # response_json_rpc = response_http_json.get(\"data\", {\"result\": -1}) #del by corrector\n response_json_rpc = response_http.json() # add by corrector\n\n if response_json_rpc[\"jsonrpc\"] == \"2.0\" and response_json_rpc[\"id\"] == dev_id:\n response_dev_result = response_json_rpc[\"result\"]\n return response_dev_result\n except:\n return -1\n\n\n# [+]------------------------------------------------------\n# 3=SAVE RESULT\n@contracts.contract(mac=str)\ndef dev_test_save_result(mac):\n dev_dict = dev_found_mac_dict[mac]\n\n dev_test_result = dev_dict.get(\"result\", None)\n if dev_test_result == 0:\n file_dev_test_result = \"пройден успешно!\"\n elif type(dev_test_result) == int and 0 < dev_test_result <= 100:\n file_dev_test_result = f\"провален: ошибка {dev_test_result}.\"\n elif dev_test_result is None:\n file_dev_test_result = f\"провален: НЕТ ОТВЕТА.\"\n else:\n file_dev_test_result = \"провален: некорректный ответ.\"\n\n file_dev_data = f\"Тест {dev_dict['model']} с SN {dev_dict.get('sn', 'ERROR')} {file_dev_test_result}\"\n with open(file=path_to_save_results / f\"{dev_dict.get('sn', mac)}.txt\", mode=\"w\", encoding=\"utf-8\") as file_obj:\n file_obj.write(file_dev_data)\n return\n\n\n# 
[+]------------------------------------------------------\n# 4=UDP LISTEN\ndef udp_listen():\n json_rpc_dev_id = 0\n\n udp_multicast = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n udp_multicast.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n udp_multicast.bind(('', 12345))\n\n while True:\n udp_get_msg_b, (dev_ip, _) = udp_multicast.recvfrom(1024)\n udp_get_msg = udp_get_msg_b.decode(\"utf-8\")\n # print(udp_get_msg)\n\n mask_udp = r\"(.+),([0-9a-fA-F]{2}(?:[:-][0-9a-fA-F]{2}){5}),(\\d{0,7})\"\n match = re.fullmatch(mask_udp, udp_get_msg)\n if match:\n json_rpc_dev_id += 1\n\n dev_model = match[1]\n dev_mac = match[2]\n dev_sn = match[3]\n\n if dev_model in models_used_list:\n print(\"Get UDP: \", dev_ip, dev_mac, dev_sn)\n\n if dev_mac not in dev_found_mac_dict:\n if dev_ip is None:\n continue\n\n dev_found_mac_dict.update({dev_mac: {\"model\": dev_model, \"sn\": dev_sn, \"ip\": dev_ip}})\n dev_test_result = dev_test_start(dev_ip, json_rpc_dev_id)\n dev_found_mac_dict[dev_mac].update({\"result\": dev_test_result})\n\n dev_test_save_result(dev_mac)\n\n\nudp_listen()\n","sub_path":"test-util.py","file_name":"test-util.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"551970029","text":"#coding=utf-8\nimport scrapy\nfrom urlparse import urlparse \nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom priceremider.items import PriceremiderItem\nfrom scrapy.shell import inspect_response\nfrom scrapy import log\n\n\nclass Siba_autoSpider(CrawlSpider):\n name = \"siba_auto\"\n allowed = ['siba.se']\n start_urls = [\"http://www.siba.se\"]\n rules = (\n Rule(LinkExtractor(allow=(\"\")), callback='parse_item', follow=True),)\n \n def parse_item(self, response):\n product = PriceremiderItem()\n for product_detail in response.xpath('//div[@id=\"product-wrapper\"]/div[@class=\"product-info\"]'):\n title1 = product_detail.xpath('div[@class=\"product-title\"]/h1/text()').extract()[0].strip()\n title2 = product_detail.xpath('div[@class=\"product-title\"]/h2/text()').extract()[0].strip()\n# description = product_detail.xpath('div/div[@id=\"product-data\"]/div/div/div[@class=\"product-description\"]').extract()[0].strip()\n description = product_detail.xpath('div[@class=\"product-info-extra\"]//ul/li/text()').extract()\n if description:\n pass\n else:\n description = title1+title2\n price = product_detail.xpath('div[@class=\"buy-area-outer\"]/div[@class=\"buy-area\"]/div[@class=\"price\"]/text()').extract()[0].strip()\n if price:\n currency = product_detail.xpath('div[@class=\"buy-area-outer\"]/div[@class=\"buy-area\"]/div[@class=\"price\"]/span[@class=\"currency\"]/text()').extract()[0].strip()\n else:\n price = product_detail.xpath('//div[@class=\"price\"]/text()').extract()[0].strip()\n currency = product_detail.xpath('//div[@class=\"price\"]/span[@class=\"currency\"]/text()').extract()[0].strip()\n image = response.xpath('//div[@class=\"product-images\"]/div[@class=\"product-main-image\"]/a/img/@data-src').extract()[0].strip()\n status = response.xpath('//div[@class=\"buy-button\"]/input/@value').extract()[0].strip()\n if status:\n pass\n else:\n status = response.xpath('//div[@class=\"buy-area\"]/div[@class=\"buy-msg gray-buy-msg\"]/h3/text()').extract()[0].strip() \n \n# url = product_detail.xpath('link[@itemprop=\"url\"]/@href').extract()[0].strip()\n url = response.url\n category = 
response.xpath('//div[@class=\"breadcrumb-holder\"]/div/ul[@class=\"breadcrumb\"]/li[3]/a/text()').extract()[0].strip()\n if isinstance(description, (str,unicode)):\n description = description\n else:\n description = \" \".join(description)\n product['description'] = description.encode('utf-8')\n for i, u, p, c, s, t1, t2, cat in zip([image], [url], [price], [currency], [status], [title1], [title2], [category]):\n product['image'] = i.encode('utf-8') + \"&width=450\"\n product['url'] = u.encode('utf-8')\n p = p.encode('utf-8')\n c = c.encode('utf-8')\n product['status'] = s.encode('utf-8')\n product['title'] = t1.encode('utf-8') + t2.encode('utf-8')\n product['category'] = cat.encode('utf-8')\n ch = ''.join(it for it in p if '0' <= it <= '9')\n product['price'] = ch + c\n product['company'] = 'Siba'\n \n yield product","sub_path":"priceremider/build/lib/priceremider/spiders/siba_auto.py","file_name":"siba_auto.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"643114715","text":"#!/usr/bin/env python\nimport rospy\nfrom rosplane_msgs.msg import State\nfrom rosplane_msgs.msg import Waypoint\nfrom rosplane_msgs.msg import Current_Path\nfrom rosflight_msgs.msg import Status\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\n\nclass gps_writer:\n def __init__(self):\n self.recieved_path = False;\n self.path_type = 0 # 0 is orbit, 1 is line\n self.r_p = []\n self.q_p = []\n self.c_p = []\n self.r_p.append(0)\n self.r_p.append(0)\n self.r_p.append(0)\n self.q_p.append(0)\n self.q_p.append(0)\n self.q_p.append(0)\n self.c_p.append(0)\n self.c_p.append(0)\n self.c_p.append(0)\n self.new_path = False\n self.old_type = -1\n self.old_r_n = 99999999.0\n self.old_rho = -1.0\n self.old_c_n = 999999999.0\n self.rho_p = 1\n self.wps = [[], []]\n self.last_index = 0\n self.num_wps = 0\n self.plotted_wps = 0\n self.R = 10.0\n self.next_wp = 0\n self.Es = []\n self.Ns = []\n self.wpn = []\n self.wpe = []\n self.theta = np.linspace(0,2.0*np.pi,200)\n self.RC = True\n self.initial = True\n self.fig = plt.figure()\n self.gps_sub_ = rospy.Subscriber(\"state\" , State , self.stateCallback, queue_size=1)\n self.rc_sub_ = rospy.Subscriber(\"status\", Status, self.rcCallback , queue_size=1)\n # self.wp_sub_ = rospy.Subscriber(\"waypoint_path\", Waypoint, self.waypointCallback, queue_size=1)\n self.path_sub_ = rospy.Subscriber(\"current_path\", Current_Path, self.currentPathCallback, queue_size=1)\n self.anim = animation.FuncAnimation(self.fig, self.plot_path, interval=1.0)\n\n # WAYPOINT DATA\n plt.scatter([0],[0])\n plt.axis('equal')\n plt.draw()\n plt.pause(0.0000001)\n # style.use('fivethirtyeight')\n # ROS_INFO(\"POSITION PLOTTER INITIALIZED\")\n\n while not rospy.is_shutdown():\n rospy.spin()\n\n def plot_path(self, i):\n if self.initial:\n # n = 200.0\n # e = 200.0\n # plt.fill(e + self.R*np.cos(self.theta),n + self.R*np.sin(self.theta),\"r\")\n # self.wps[0].append(n)\n # self.wps[1].append(e)\n # plt.text(e, n, str(1), fontsize=12)\n\n self.initial = False\n if self.num_wps > self.plotted_wps:\n for i in range(self.plotted_wps,self.num_wps):\n plt.fill(self.wps[1][i] + self.R*np.cos(self.theta), self.wps[0][i] + self.R*np.sin(self.theta),\"r\")\n if len(self.Ns) > 0:\n var = len(self.Es) - 1\n if self.RC:\n plt.plot(self.Es[self.last_index:],self.Ns[self.last_index:],'b',linewidth=4)\n else:\n 
plt.plot(self.Es[self.last_index:],self.Ns[self.last_index:],'r',linewidth=4)\n self.last_index = var\n plt.draw()\n plt.pause(0.000000001)\n if self.recieved_path and self.new_path == True:\n self.new_path = False\n if self.path_type == 0:\n # plot an orbit\n plt.plot(self.c_p[1] + self.rho_p*np.sin(self.theta), self.c_p[0] + self.rho_p*np.cos(self.theta),\"k\")\n if self.path_type == 1:\n length = 800.0;\n pe_n = self.r_p[0] + self.q_p[0]*length\n pe_e = self.r_p[1] + self.q_p[1]*length\n plt.plot([self.r_p[1], pe_e], [self.r_p[0], pe_n],\"k\")\n # plot a line\n\n def stateCallback(self, msg):\n self.Ns.append(msg.position[0])\n self.Es.append(msg.position[1])\n\n def rcCallback(self, msg):\n \tself.RC = msg.rc_override\n\n # def waypointCallback(self, msg):\n # self.num_wps = self.num_wps + 1\n # self.wps[0].append(msg.w[0])\n # self.wps[1].append(msg.w[1])\n def currentPathCallback(self, msg):\n self.recieved_path = True;\n if (self.old_type != msg.path_type or self.old_r_n != msg.r[0] or self.old_rho != msg.rho or self.old_c_n != msg.c[0]):\n self.new_path = True\n self.old_type = msg.path_type\n self.old_r_n = msg.r[0]\n self.old_rho = msg.rho\n self.old_c_n = msg.c[0]\n self.path_type = msg.path_type\n self.r_p[0] = msg.r[0]\n self.r_p[1] = msg.r[1]\n self.r_p[2] = msg.r[2]\n self.q_p[0] = msg.q[0]\n self.q_p[1] = msg.q[1]\n self.q_p[2] = msg.q[2]\n self.c_p[0] = msg.c[0]\n self.c_p[1] = msg.c[1]\n self.c_p[2] = msg.c[2]\n self.rho_p = msg.rho\n\nif __name__ == '__main__':\n rospy.init_node('gps_writer_py', anonymous=True)\n gp = gps_writer()\n","sub_path":"rosplane/src/position_plotter.py","file_name":"position_plotter.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"114049906","text":"from django.apps import AppConfig\n\nimport os\n\nimport jinja2\nfrom jinja2 import loaders\n\nimport codegen.jinja2py as jinja2py\n\nTEMPALTES_PATH = os.path.join(os.path.dirname(__file__), 'templates')\n\nJINJA_RENDERER = jinja2py.JinjaRenderer(None)\n\nBANED_NAMES = (\n 'neurogen_parsers.py.jinja2',\n)\nGENERATE_NAMES = list()\nCODES = dict()\n\n\nclass CodegenConfig(AppConfig):\n name = 'codegen'\n\n def ready(self):\n global JINJA_RENDERER\n jinja_env = jinja2.Environment(\n loader=loaders.FileSystemLoader(TEMPALTES_PATH),\n trim_blocks=True,\n lstrip_blocks=True,\n extensions=['jinja2.ext.do']\n )\n JINJA_RENDERER = jinja2py.JinjaRenderer(jinja_env)\n\n generate_names = list()\n codes = dict()\n for f in os.listdir(TEMPALTES_PATH):\n if f in BANED_NAMES:\n continue\n if os.path.isfile(os.path.join(TEMPALTES_PATH, f)):\n if f.endswith('.py.jinja2'):\n generate_names.append(f[:-len('.jinja2')])\n elif f.endswith('.py'):\n with open(os.path.join(TEMPALTES_PATH, f)) as c:\n codes[f] = c.read()\n\n global GENERATE_NAMES\n GENERATE_NAMES = generate_names\n global CODES\n CODES = codes\n","sub_path":"codegen/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"175219769","text":"class Point:\n \"\"\"Represents a point on the plane.\n\n attributes: x, y.\n \"\"\"\n\nclass Circle:\n \"\"\"Represents a circle.\n\n attributes: center (a Point object), radius.\n \"\"\"\n\nc1 = Circle()\nc1.center = Point()\nc1.center.x = 150\nc1.center.y = 100\nc1.radius = 75\n\ndef point_in_circle(p, circ):\n\n \"\"\"Determine wether a given point lies in or on\n a given cicle.\n\n p: the point\n 
circ: the circle\n\n returns: boolean value\n \"\"\"\n\n a = circ.center.x\n b = circ.center.y\n r = circ.radius\n return (p.x - a)^2 + (p.y - b)^2 <= r^2\n\np1 = Point()\np1.x = 170\np1.y = 110\n\nprint(point_in_circle(p1,c1))\n\nclass Rect:\n \"\"\"Represents a rectangle.\n\n attributes: width, height, corner (a Point)\n \"\"\"\n\nr1 = Rect()\nr1.corner = Point()\nr1.corner.x = 140\nr1.corner.y = 105\nr1.width = 10\nr1.height = 11\n\ndef rect_in_circle(rect, circ):\n a = rect.corner.x\n b = rect.corner.y\n corners = [\n (a,b),\n (a + rect.width, b),\n (a, b + rect.height),\n (a + rect.width, b + rect.height)\n ]\n inside = True\n for corner in corners:\n p = Point()\n p.x, p.y = corner\n inside = inside and point_in_circle(p, circ)\n return inside\n\nprint(rect_in_circle(r1, c1))\n\ndef point_inside_circ(p, circ):\n a = circ.center.x\n b = circ.center.y\n r = circ.radius\n return (p.x - a)^2 + (p.y - b)^2 < r^2\n\n\ndef rect_circle_overlap(rect, circ):\n a = rect.corner.x\n b = rect.corner.y\n corners = [\n (a,b),\n (a + rect.width, b),\n (a, b + rect.height),\n (a+ rect.width, b + rect.height)\n ]\n overlap = False\n for corner in corners:\n p = Point()\n p.x, p.y = corner\n overlap = overlap or point_inside_circ(p, circ)\n return overlap\n\nr2 = Rect()\nr2.corner = Point()\nr2.corner.x = 160\nr2.corner.y = 110\nr2.width = 100\nr2.height = 100\n\nprint(rect_circle_overlap(r2, c1))\n\n","sub_path":"Homer_Code/Chapter_15/Exercise_15-01.py","file_name":"Exercise_15-01.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"183312727","text":"#!/usr/bin/python3\n\nimport os\nimport json\nimport random\nimport json\nfrom flask import jsonify\n\n\ndir_path = os.path.abspath(\"../../resources\")\npath_json = os.path.join(dir_path, \"data.json\")\n\n\nclass FileManager:\n \"\"\"Handle local file system IO.\"\"\"\n \n @staticmethod\n def get_extension(path):\n \"\"\"Get file extension from file path.\"\"\"\n return os.path.splitext(path)[-1][1:]\n\n @staticmethod\n def read_json(path, mode='r', *args, **kwargs):\n with open(path, mode=mode, *args, **kwargs) as handle:\n return json.load(handle)\n\nclass Vocabulary:\n \"\"\"Standardize vocabulary representation from multiple sources.\"\"\"\n\n files = FileManager()\n\n @classmethod\n def from_file(cls, path, *args, **kwargs):\n extension = cls.files.get_extension(path)\n representation = cls.strategies(extension)(path, *args, **kwargs)\n return representation\n\n @classmethod\n def from_json(cls, path, fields=True, *args, **kwargs):\n data = cls.files.read_json(path, *args, **kwargs)\n if fields:\n representation = (data, data.keys())\n else:\n representation = data\n return representation\n\n @classmethod\n def strategies(cls, file_extension, intent='read'):\n input_strategies = {'json': cls.from_json}\n if intent is 'read':\n return input_strategies[file_extension]\n\n\nclass EpithetGenerator(object):\n vocab = Vocabulary()\n files = FileManager()\n epithet_list = []\n\n def random_word(self, column):\n \"\"\"randomn number generator\"\"\"\n random_item = random.choice(column) \n return random_item \n\n def epithet_gen(self, path_json, num):\n \"\"\"Returns a random epithet from the three columns of vocab\"\"\"\n self.epithet_list = []\n\n for i in range(num): \n data = self.files.read_json(path_json)\n column1 = data['Column 1']\n column2 = data['Column 2']\n column3 = data['Column 3']\n \n vocab1 = self.random_word(column1)\n vocab2 = 
self.random_word(column2)\n vocab3 = self.random_word(column3)\n epithet = f'Thou {vocab1} {vocab2} {vocab3}'\n self.epithet_list.append(epithet)\n \n return self.epithet_list\n\n def vocab_data(self, path_json):\n \"\"\"Returns the vocab used for the epithet\"\"\"\n\n vocab_data = self.files.read_json(path_json)\n return vocab_data\n\n def random_num(self):\n return random.randint(1, 50)\n \n ","sub_path":"submissions/sprint_a/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"614925303","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Stop',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('stop_id', models.CharField(max_length=5, verbose_name=b'Stop ID')),\n ('stop_code', models.CharField(max_length=5, null=True, verbose_name=b'Code', blank=True)),\n ('stop_name', models.CharField(max_length=200, verbose_name=b'Name')),\n ('stop_desc', models.CharField(max_length=200, null=True, verbose_name=b'Description', blank=True)),\n ('stop_lat', models.DecimalField(verbose_name=b'Latitude', max_digits=8, decimal_places=3)),\n ('stop_lon', models.DecimalField(verbose_name=b'Longitude', max_digits=8, decimal_places=3)),\n ('zone_id', models.CharField(max_length=5, null=True, verbose_name=b'Fare Zone ID', blank=True)),\n ('stop_url', models.URLField(max_length=100, null=True, verbose_name=b'Web Page', blank=True)),\n ('location_type', models.IntegerField(blank=True, null=True, verbose_name=b'Type', choices=[(0, b'Stop'), (1, b'Station')])),\n ('parent_station', models.IntegerField(null=True, verbose_name=b'Parent Station', blank=True)),\n ('stop_timezone', models.CharField(max_length=50, null=True, verbose_name=b'Timezone', blank=True)),\n ('wheelchair_boarding', models.IntegerField(blank=True, null=True, verbose_name=b'Wheelchair Boarding', choices=[(0, b'No Accessibility Information'), (1, b'Can Be Boarded'), (2, b'Not Possible')])),\n ],\n ),\n ]\n","sub_path":"stops/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"538883613","text":"\"\"\"\nImplementation of Depth First Search\n\"\"\"\n\nBLACK = \"Black\"\nWHITE = \"White\"\nGRAY = \"Gray\"\n\nglobal time\ntime = 0\n\n\nclass Node:\n \"\"\"\n Node class to represent graph vertices\n \"\"\"\n\n def __init__(self, value):\n self.value = value\n self.color = WHITE\n self.predecessor = None\n self.d = None\n self.f = None\n self.visited = False\n self.neighbors = list()\n self.reversedNeighbors = list()\n\n def __eq__(self, other):\n if other is None:\n return False\n return self.value == other.value and \\\n self.color == other.color and \\\n self.d == other.distance\n\n def __str__(self):\n return \"Value: \" + str(self.value) + \\\n \"\\t Distance: \" + str(self.d) + \\\n \"\\t f:\" + str(self.f) + \\\n \"\\t predecessor: \" + str(self.get_pred())\n\n def get_pred(self):\n if self.predecessor is None:\n return None\n return self.predecessor.value\n\n def add_neighbor(self, node):\n self.neighbors.append(node)\n\n def remove_neighbor(self, node):\n self.neighbors.remove(node)\n\n\ndef dfs(adjList):\n \"\"\"\n Runs 
DFS and checks if each node is visited\n @param adjList: Adjacency List of the graph\n \"\"\"\n global time\n time = 0\n for node in adjList:\n if node.color == WHITE:\n dfs_visit(node)\n\n\ndef dfs_visit(node):\n \"\"\"\n Recursively visits each neighbor of the node\n @param node: the current node to explore\n \"\"\"\n global time\n time += 1\n node.d = time\n node.color = GRAY\n for neighbor in node.neighbors:\n if neighbor.color == WHITE:\n neighbor.predecessor = node\n dfs_visit(neighbor)\n node.color = BLACK\n time += 1\n node.f = time\n\n\ndef topological_sort(adjList):\n \"\"\"\n Sorts the vertex in the graph\n @param adjList: Adjacency List of the graph\n \"\"\"\n global time\n time = 0\n result = list()\n for node in adjList:\n if node.color == WHITE:\n topological_visit(node, result)\n for item in result:\n print(item)\n\n\ndef topological_visit(node, result):\n \"\"\"\n Recursively visits each neighbor of the node\n @param node: the current node to explore\n @param result: list with the nodes ordered\n \"\"\"\n global time\n time += 1\n node.d = time\n node.color = GRAY\n for neighbor in node.neighbors:\n if neighbor.color == WHITE:\n neighbor.predecessor = node\n topological_visit(neighbor, result)\n node.color = BLACK\n time += 1\n node.f = time\n result.insert(0, node)\n\n\ndef strongly_connected_components(adjList):\n \"\"\"\n Finds the strongly connected components of a graph\n @param adjList: Adjacency List of the graph\n \"\"\"\n dfs(adjList)\n for node in adjList:\n for neighbor in node.neighbors:\n neighbor.reversedNeighbors.append(node)\n for node in adjList:\n node.neighbors = node.reversedNeighbors\n adjList = sorted(adjList, key=lambda x: x.f, reverse=True)\n for node in adjList:\n if not node.visited:\n result = list()\n scc_helper(node, result)\n print(result)\n\n\ndef scc_helper(node, result):\n \"\"\"\n Helper function for strongly connected components\n @param node: node to start exploring\n @param result: list of the connected components to current node\n \"\"\"\n node.visited = True\n for neighbor in node.neighbors:\n if not neighbor.visited:\n scc_helper(neighbor, result)\n result.append(node.value)\n\n\ndef main():\n \"\"\"\n Creates a graph and calls DFS\n @return:\n \"\"\"\n print(\"####### DFS #######\")\n node_0 = Node(0)\n node_1 = Node(1)\n node_2 = Node(2)\n node_3 = Node(3)\n node_4 = Node(4)\n node_5 = Node(5)\n node_0.neighbors = [node_1, node_3, node_4]\n node_1.neighbors = [node_0, node_3]\n node_2.neighbors = [node_3, node_4, node_5]\n node_3.neighbors = [node_0, node_1, node_2]\n node_4.neighbors = [node_0, node_2]\n node_5.neighbors = [node_2]\n adjList = [node_0, node_1, node_2, node_3, node_4, node_5]\n dfs(adjList)\n for node in adjList:\n print(node)\n print(\"\\n\\n####### Topological Sort #######\")\n undershorts = Node(\"undershorts\")\n pants = Node(\"pants\")\n belt = Node(\"belt\")\n shirt = Node(\"shirt\")\n tie = Node(\"tie\")\n jacket = Node(\"jacket\")\n shoes = Node(\"shoes\")\n socks = Node(\"socks\")\n watch = Node(\"watch\")\n undershorts.neighbors = [pants, shoes]\n pants.neighbors = [belt, shoes]\n belt.neighbors = [jacket]\n shirt.neighbors = [belt, tie]\n tie.neighbors = [jacket]\n socks.neighbors = [shoes]\n adjList = [shirt, tie, jacket, belt, watch, undershorts, pants, shoes, socks]\n topological_sort(adjList)\n print(\"\\n\\n####### Strongly Connected Components #######\")\n node_a = Node(\"a\")\n node_b = Node(\"b\")\n node_c = Node(\"c\")\n node_d = Node(\"d\")\n node_e = Node(\"e\")\n node_f = Node(\"f\")\n node_g 
= Node(\"g\")\n node_h = Node(\"h\")\n node_a.neighbors = [node_b]\n node_b.neighbors = [node_c, node_e, node_f]\n node_c.neighbors = [node_d, node_g]\n node_d.neighbors = [node_c, node_h]\n node_e.neighbors = [node_a, node_f]\n node_f.neighbors = [node_g]\n node_g.neighbors = [node_f, node_h]\n node_h.neighbors = [node_h]\n adjList = [node_a, node_b, node_c, node_d, node_e, node_f, node_g, node_h]\n strongly_connected_components(adjList)\n\n\nmain()\n","sub_path":"Algorithms/depth_first_search.py","file_name":"depth_first_search.py","file_ext":"py","file_size_in_byte":5493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"631694771","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt5.QtWidgets import QAction, QLabel, QToolBar, QProgressBar, QVBoxLayout, QScrollArea, QMainWindow, QDesktopWidget, QMessageBox, QApplication, QWidget, QToolTip, QPushButton\nfrom PyQt5.QtGui import QIcon, QFont\nfrom PyQt5.QtCore import QCoreApplication, QTimer, QThread, Qt\nimport os\nfrom yaourtqt5 import UpdaterBackend\n\n\nclass UpdateThread(QThread):\n\n def __init__(self):\n QThread.__init__(self)\n\n def __del__(self):\n self.wait()\n\n def run(self):\n UpdaterBackend.Update()\n\n\nclass CenterWidget(QWidget):\n def __init__(self):\n super(CenterWidget, self).__init__()\n self.initUI()\n def initUI(self):\n\n self.pbr = QProgressBar()\n\n self.lbl = QLabel(self)\n self.scrl = QScrollArea()\n self.scrl.setWidget(self.lbl)\n self.lbl.setText('')\n self.lbl.adjustSize()\n\n self.pbrlabel = QLabel(self)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.scrl)\n vbox.addWidget(self.pbrlabel)\n vbox.addWidget(self.pbr)\n\n self.setLayout(vbox)\n self.pbr.setValue(1)\n\n self.show()\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.initUI()\n\n def initUI(self):\n\n QToolTip.setFont(QFont('SansSerif', 9))\n\n # self.pbr = QProgressBar()\n # self.pbrd = QDockWidget()\n # self.pbrd.setWidget(self.pbr)\n # self.addDockWidget(Qt.BottomDockWidgetArea, self.pbrd)\n\n self.cwidget = CenterWidget()\n self.setCentralWidget(self.cwidget)\n\n self.setToolTip('This is a QWidget widget')\n\n if not os.path.exists('guitemp.txt'):\n self.cwidget.pbrlabel.hide()\n self.cwidget.pbr.hide()\n\n self.statusBar().showMessage('Ready')\n\n # btn = QPushButton('Quit', self)\n # btn.clicked.connect(QCoreApplication.instance().quit)\n # btn.setToolTip('This is a QPushButton widget')\n # btn.resize(btn.sizeHint())\n # btn.move(210, 160)\n\n UpdateAction = QAction(QIcon('UpdateIcon.png'), '&Start Update', self)\n UpdateAction.setShortcut('Ctrl+U')\n UpdateAction.setStatusTip('Update System')\n UpdateAction.triggered.connect(self.startUpdate)\n\n exitAction = QAction(QIcon('Arch-linux-logo.png'), '&Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.triggered.connect(self.quitApp)\n\n self.toolbar = QToolBar()\n self.addToolBar(Qt.LeftToolBarArea, self.toolbar)\n self.toolbar.addAction(UpdateAction)\n\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(exitAction)\n\n self.setGeometry(600, 600, 600, 400)\n self.center()\n self.setWindowTitle('Arch Updater (yaourt)')\n self.setWindowIcon(QIcon('Arch-linux-logo.png'))\n # if not SystemTrayIcon(QIcon('Arch-linux-logo.png')).isVisible():\n # self.tray_icon = SystemTrayIcon(QIcon('Arch-linux-logo.png'), self)\n # self.tray_icon.show()\n\n self.labeltimer = QTimer(self)\n self.labeltimer.timeout.connect(self.onChanged)\n 
self.labeltimer.start(100)\n\n self.pbrtimer = QTimer(self)\n self.pbrtimer.timeout.connect(self.pbrUpdate)\n\n self.show()\n\n def startUpdate(self):\n self.updateThread = UpdateThread()\n self.updateThread.start()\n self.cwidget.pbr.show()\n self.cwidget.pbrlabel.show()\n self.pbrtimer.start(100)\n\n def onChanged(self):\n if os.path.exists('guitemp.txt'):\n guitemp = open('guitemp.txt')\n self.cwidget.lbl.setText(guitemp.read())\n self.cwidget.lbl.adjustSize()\n if self.updateThread.isFinished():\n self.cwidget.pbr.hide()\n self.cwidget.pbrlabel.hide()\n self.pbrtimer.stop()\n\n def pbrUpdate(self):\n if self.updateThread.isRunning():\n self.processLabel()\n if 'downloading' in self.pbrlblvalue:\n if '.db' in self.pbrlblvalue:\n self.cwidget.pbr.setValue(0)\n else:\n dtempf = open('downtemp.txt')\n dvalues = dtempf.read()\n dcurrent = int(dvalues.split('/')[0])\n dtotal = int(dvalues.split('/')[1])\n dpercent = int((dcurrent/dtotal)*100)\n self.cwidget.pbr.setValue(dpercent)\n if dpercent >= 100:\n self.cwidget.pbr.setValue(0)\n if 'upgrading' in self.pbrlblvalue:\n utempf = open('upgrtemp.txt')\n uvalues = utempf.read()\n ucurrent = int(uvalues.split('/')[0])\n utotal = int(uvalues.split('/')[1])\n upercent = int((ucurrent/utotal)*100)\n self.cwidget.pbr.setValue(upercent)\n\n def processLabel(self):\n if os.path.exists('temp.txt'):\n tmp = open('temp.txt')\n self.pbrlblvalue = tmp.read().replace(\"b'\", '').replace(\"'\", '').replace('\\\\n', '')\n if 'Foreign packages:' in self.pbrlblvalue:\n pass\n else:\n self.cwidget.pbrlabel.setText(self.pbrlblvalue)\n self.cwidget.pbrlabel.adjustSize()\n\n\n def center(self):\n\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', 'Are you sure you want to quit?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n if os.path.exists('guitemp.txt'):\n os.remove('guitemp.txt')\n event.ignore()\n self.hide()\n # event.accept()\n else:\n event.ignore()\n\n def quitApp(self):\n if os.path.exists('guitemp.txt'):\n os.remove('guitemp.txt')\n QCoreApplication.instance().quit()\n\n\ndef MainWindowStart():\n if os.path.exists('guitemp.txt'):\n os.remove('guitemp.txt')\n if os.path.exists('downtemp.txt'):\n os.remove('downtemp.txt')\n if os.path.exists('upgrtemp.txt'):\n os.remove('upgrtemp.txt')\n if os.path.exists('temp.txt'):\n os.remove('temp.txt')\n if UpdaterBackend.processVerification('ArchUpdaterGUI.py') == 'running':\n mbapp = QApplication(sys.argv)\n mb = QMessageBox()\n mb.setIcon(QMessageBox.Information)\n mb.setWindowTitle('Error')\n mb.setText('The process is already running.')\n mb.setStandardButtons(QMessageBox.Ok)\n mb.show()\n sys.exit(mbapp.exec_())\n else:\n app = QApplication(sys.argv)\n mw = MainWindow()\n sys.exit(app.exec_())\n\n# def startBoth():\n# app = QApplication(sys.argv)\n# mw = MainWindow()\n# tray_icon = SystemTrayIcon(QIcon('Arch-linux-logo.png'))\n# tray_icon.show()\n# sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n MainWindowStart()","sub_path":"yaourtqt5/ArchUpdaterGUI.py","file_name":"ArchUpdaterGUI.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"246611913","text":"\nimport tensorflow as tf\nimport time\nimport os\nimport fcn\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n\nfcn.num_classes = 21\nfcn.mask_out_class_label = 
255\nfcn.resized_height = 384\nfcn.resized_width = 384\nfcn.ckpt_path = 'fcn8s.ckpt'\nfcn.vgg16_ckpt_path='vgg_16.ckpt'\nfcn.dataset.resized_height = 384\nfcn.dataset.resized_width = 384\n\n\nckpt_dir = '' # dir which ckpt_path is in\nimage_tfrecords_file_train = 'images_train.tfrecords'\nimage_tfrecords_file_eval = 'images_eval.tfrecords'\nuse_fcn_paper=False\nbatch_size = 4\n\ncur_iou=0.0\ncur_acc=0.0\ni =0\t\nwhile True:\n\tif not os.listdir(ckpt_dir):\n\t\tretrain=False\n\t\tepochs = 2\n\telse:\n\t\tretrain=True\n\t\tepochs = 1\n\t\t\n\tfcn.train(image_tfrecords_file_train, epochs=epochs, batch_size=batch_size, retrain=retrain, restore_step=None, use_fcn_paper=use_fcn_paper)\n\t\n\ttf.reset_default_graph()\n\tiou, acc = fcn.eval(image_tfrecords_file_eval, batch_size=batch_size, use_fcn_paper=use_fcn_paper)\n\ttf.reset_default_graph()\n\t\n\tfile=open('result.txt','a') \n\tcur_time = time.strftime('%H:%M:%S', time.localtime())\n\tfile.write(\"{}, {}, {}, {}, {}, {}\\n\".format(i, cur_time, 'light', epochs, iou, acc))\n\tfile.close()\n\t\n\tif iou>j&1:\n t += ABCD[j+1]\n else:\n t -= ABCD[j+1]\n \n if t == 7:\n print(\"{}{}{}{}{}{}{}=7\".format(\n ABCD[0], \"+\" if i>>0&1 else \"-\",\n ABCD[1], \"+\" if i >> 1 & 1 else \"-\",\n ABCD[2], \"+\" if i >> 2 & 1 else \"-\",\n ABCD[3], \n ))\n break\n \n","sub_path":"abc/abc079/abc079_c.py","file_name":"abc079_c.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"20749","text":"# -*- coding: UTF-8 -*- \r\n#!/usr/bin/env python\r\n# \r\n# Copyright 2010- Hui Zhang\r\n# E-mail: hui.zh012@gmail.com\r\n\r\n__all__ = ['Setting', 'Settings',\r\n 'SettingTuple', 'SettingParseError',\r\n 'ValueSerializer',]\r\n\r\nimport io, json\r\nfrom collections import OrderedDict\r\nfrom .utils import Dir\r\n\r\nclass SettingParseError(Exception): pass\r\n\r\nclass Undefined(object):\r\n def __bool__(self): return False\r\n def __len__(self): return 0\r\n def __str__(self): return ''\r\n def __int__(self): return 0\r\n def __float__(self): return 0.0\r\n\r\nclass SettingTuple(tuple): pass\r\n\r\nclass Setting(object):\r\n '''\r\n settings format:\r\n \r\n #comments\r\n #another comments line\r\n key.prop1.normal = value\r\n #this is a tuple value\r\n #a tuple (item1, item2, time3) will be returned\r\n #no empty tuple could be defined\r\n key.prop2.array = | item1\r\n | item2\r\n | item3\r\n \r\n *** the fields in a key should not start with _\r\n *** all the blank line will be removed.\r\n *** the blank chars at the begin and end of a line\r\n will be striped.\r\n '''\r\n \r\n undefined = Undefined()\r\n \r\n def __init__(self, source=None):\r\n self._data = OrderedDict()\r\n if source is not None:\r\n self.load(source)\r\n \r\n def load(self, source):\r\n close_source = False\r\n if not hasattr(source, \"read\"):\r\n source = open(source, \"r\")\r\n close_source = True \r\n self._parse(source)\r\n if close_source:\r\n source.close()\r\n \r\n def save(self, filename):\r\n #TODO: unify the encoding to utf-8\r\n with open(filename, 'w') as fp:\r\n fp.write(self._gentext())\r\n \r\n def _parse(self, lines):\r\n comments = []\r\n key = value = ''\r\n lineno = 0\r\n for l in lines:\r\n lineno += 1\r\n l = l.strip()\r\n if l.startswith('#'):\r\n comments.append(l[1:])\r\n elif l.startswith('| '):\r\n value = value + (l[2:],)\r\n elif '=' in l:\r\n if key:\r\n if isinstance(value, tuple):\r\n value = SettingTuple(value)\r\n self._data[key] = [value, '\\n'.join(comments)]\r\n 
comments = []\r\n key = value = ''\r\n key, value = map(str.strip, l.split('=', 1))\r\n if value.startswith('| '): value = (value[2:], )\r\n elif value=='||': value=()\r\n elif not l:\r\n continue\r\n else:\r\n raise SettingParseError('{} : {}'.format(lineno, l))\r\n if key:\r\n if isinstance(value, tuple):\r\n value = SettingTuple(value)\r\n self._data[key] = [value, '\\n'.join(comments)]\r\n \r\n \r\n def _gentext(self):\r\n text = ''\r\n for key, setting in self._data.items():\r\n value, comments = setting\r\n if comments:\r\n t = io.StringIO()\r\n for l in io.StringIO(comments):\r\n t.write('#'+l)\r\n text += '\\n' + t.getvalue()\r\n if isinstance(value, str):\r\n text += '\\n' + ' = '.join([key, value])\r\n else:\r\n if not value:\r\n text += '\\n' + ' = '.join([key, '||'])\r\n else:\r\n pad = len(key)+3\r\n text += '\\n{} = | {}'.format(key, '\\n{}| '.format(' '*pad).join(value))\r\n return text\r\n \r\n def has(self, key):\r\n return key in self._data\r\n \r\n def get(self, *args):\r\n if len(args) == 1:\r\n return self._data[args[0]][0]\r\n \r\n key, fallback = args\r\n return fallback if key not in self._data else self._data[key][0]\r\n \r\n def getComment(self, key):\r\n return self._data[key][1]\r\n \r\n def xget(self, key, objer=eval):\r\n v = self.get(key)\r\n if isinstance(v, str):\r\n return objer(self.get(key))\r\n return tuple(map(objer, v))\r\n\r\n def set(self, key, value):\r\n assert isinstance(value, (str, SettingTuple))\r\n self._data.setdefault(key, ['', ''])[0] = value\r\n\r\n def setCommnet(self, key, comment):\r\n self._data[key][1] = comment\r\n\r\n def xset(self, key, val, strer=repr):\r\n if isinstance(val, tuple):\r\n value = SettingTuple(map(strer, val))\r\n else:\r\n value = strer(val)\r\n self.set(key, value)\r\n \r\n def __str__(self):\r\n return self._gentext()\r\n \r\n def __getitem__(self, key):\r\n return SettingProxy(self, key)\r\n \r\ndef _bool(s):\r\n if s.lower() in ['on', 'true', 'yes', '1']:\r\n return True\r\n if s.lower() in ['', 'off', 'false', 'no', '0']:\r\n return False\r\n raise ValueError('cannot convert {} to a bool variant'.format(s))\r\n \r\nValueSerializer = {\r\n \"_int\" : (int, repr),\r\n \"_float\" : (float, repr),\r\n \"_eval\" : (eval, repr),\r\n \"_bool\" : (_bool, repr),\r\n \"_bytearray\": (eval, str),\r\n \"_json\" : (json.loads, json.dumps),\r\n}\r\n \r\nclass SettingProxy(object):\r\n def __init__(self, setting, key):\r\n self._setting = setting\r\n self._key = key\r\n \r\n def __getattr__(self, attr):\r\n if attr in ValueSerializer:\r\n return self._setting.xget(self._key, ValueSerializer[attr][0])\r\n assert not attr.startswith('_')\r\n return self(attr)\r\n \r\n def __setattr__(self, attr, value):\r\n if attr in ValueSerializer:\r\n self._setting.xset(self._key, value, ValueSerializer[attr][1])\r\n else:\r\n super(SettingProxy, self).__setattr__(attr, value)\r\n \r\n def __call__(self, key):\r\n return SettingProxy(self._setting, '.'.join(filter(None, [self._key, key]))) \r\n \r\n def __eq__(self, other):\r\n return isinstance(other, SettingProxy) and (self._setting, self._key) == (other._setting, other._key)\r\n \r\n @property\r\n def _defined(self):\r\n return self._setting.has(self._key)\r\n \r\n @property\r\n def _val(self):\r\n return self._setting.get(self._key)\r\n \r\n @_val.setter\r\n def _val(self, value):\r\n self._setting.set(self._key, value)\r\n\r\n @property \r\n def _comment(self):\r\n return self._setting.get_comment(self._key)\r\n \r\n @_comment.setter\r\n def _comment(self, comment):\r\n 
self._setting.set_comment(self._key, comment)\r\n \r\nclass Settings(object):\r\n def __init__(self, folder, setting_factory=Setting):\r\n self._dir = Dir(folder)\r\n self._setting_fac = setting_factory\r\n self._settings = {}\r\n \r\n def __getattr__(self, attr):\r\n if attr not in self._settings:\r\n path = self._dir.file(attr).touch().path\r\n self._settings[attr] = self._setting_fac(path)\r\n return self._settings[attr]\r\n \r\n def save(self):\r\n for name, setting in self._settings.items():\r\n setting.save(self._dir.file(name).path)\r\n \r\n def __getitem__(self, key):\r\n if '.' in key:\r\n s, k = key.split('.', 1)\r\n else:\r\n s, k = key, ''\r\n return self.__getattr__(s)[k]","sub_path":"full_moon/src/moon/core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"535554630","text":"\n\nfrom xai.brain.wordbase.nouns._fetish import _FETISH\n\n#calss header\nclass _FETISHES(_FETISH, ):\n\tdef __init__(self,): \n\t\t_FETISH.__init__(self)\n\t\tself.name = \"FETISHES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"fetish\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_fetishes.py","file_name":"_fetishes.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"136635141","text":"__author__ = 'fengpeng'\n\n\n# Definition for a undirected graph node\nclass UndirectedGraphNode(object):\n def __init__(self, x):\n self.label = x\n self.neighbors = []\n\n\nclass Solution(object):\n def cloneGraph(self, node):\n \"\"\"\n :type node: UndirectedGraphNode\n :rtype: UndirectedGraphNode\n \"\"\"\n if not node:\n return None\n d = {}\n copy = UndirectedGraphNode(node.label)\n d[node] = copy\n que = [node]\n\n while que:\n cur = que.pop(0)\n neighbors = cur.neighbors\n for neighbor in neighbors:\n if neighbor not in d:\n clone = UndirectedGraphNode(neighbor.label)\n d[neighbor] = clone\n que.append(neighbor)\n # d[cur].neighbors.append(clone)\n # else:\n # d[cur].neighbors.append(d[neighbor])\n d[cur].neighbors.append(d[neighbor])\n return copy\n\n\n def cloneGraph(self, node):\n \"\"\"\n :type node: UndirectedGraphNode\n :rtype: UndirectedGraphNode\n \"\"\"\n if not node:\n return None\n dict = {}\n copy = UndirectedGraphNode(node.label)\n dict[node] = copy\n self.dfs(node, dict)\n return copy\n\n def dfs(self, node, dict):\n if not node:\n return\n neighbors = node.neighbors\n for neighbor in neighbors:\n if neighbor not in dict:\n clone = UndirectedGraphNode(neighbor.label)\n dict[neighbor] = clone\n dict[node].neighbors.append(clone)\n self.dfs(neighbor, dict)\n else:\n dict[node].neighbors.append(dict[neighbor])\n\n def cloneGraph(self, node):\n \"\"\"\n :type node: UndirectedGraphNode\n :rtype: UndirectedGraphNode\n \"\"\"\n if not node:\n return None\n dict = {}\n return self.clone(node, dict)\n\n def clone(self, node, dict):\n if not node:\n return None\n if node in dict:\n return dict[node]\n copy = UndirectedGraphNode(node.label)\n dict[node] = copy\n neighbors = node.neighbors\n for neighbor in neighbors:\n copy.neighbors.append(self.clone(neighbor, dict))\n return copy\n\n def cloneGraph(self, node):\n \"\"\"\n :type node: UndirectedGraphNode\n :rtype: UndirectedGraphNode\n \"\"\"\n if not node:\n return None\n stk=[]\n visited={}\n copy =UndirectedGraphNode(node.label)\n stk.append(node)\n visited[node]=copy\n\n while stk:\n top = stk.pop()\n\n 
for neighbor in top.neighbors:\n if neighbor not in visited:\n stk.append(neighbor)\n visited[neighbor]=UndirectedGraphNode(neighbor.label)\n visited[top].neighbors.append(visited[neighbor])\n\n return copy\n\n","sub_path":"Clone_Graph/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"494838127","text":"import math\r\n\r\ndef sqRt(c):\r\n t = c\r\n epsilon = float(1 * math.pow(10, -15))\r\n while abs(t - c / t) > epsilon * t:\r\n t = ((c / t) + t) / 2\r\n return t\r\n\r\n\r\nvalue = int(input(\"Enter Value :\"))\r\nval = sqRt(value)\r\nprint(val)","sub_path":"squareroot.py","file_name":"squareroot.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"339639412","text":"MAX = 4\n\n#zakładam że elementy tablicy różne od 0\ndef longest_substring(tab):\n i = MAX-3\n max_sub = 2\n for i in range(MAX-2):\n q = tab[1][i+1] / tab[0][i]\n current_sub = 2\n j = i+2\n while j < MAX:\n if tab[j-i][j] / tab[j-i-1][j-1] != q:\n break\n current_sub += 1\n j += 1\n\n max_sub = max(current_sub, max_sub)\n\n q = tab[i+1][1] / tab[i][0]\n current_sub = 2\n j = i+2\n while j < MAX:\n if tab[j][j-i] / tab[j-1][j-i-1] != q:\n break\n current_sub += 1\n j += 1\n\n max_sub = max(current_sub, max_sub)\n\n return max_sub\n\nt = [[2, 2, 3, 4],\n [1, 1, 4, 4],\n [1, 2, 1, 8],\n [2, 2, 3, 2]]\nprint(longest_substring(t))\n\n\n","sub_path":"Ćwiczenia_4/Zadanie_8.py","file_name":"Zadanie_8.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"337516225","text":"from __future__ import division\n\nfrom PlotUtils import *\n\n##\n## BASICS\n##\n\nYEAR_STR = \"2012\"\nEOS_PATH = \"root://cmseos.fnal.gov//store/user/jrainbol\"\nHOME_PATH = \"/uscms/home/jrainbol/nobackup\"\n\n\n\n##\n## SYSTEMATICS\n##\n\nF_NR = 0.04\nBF_LL = 0.033658\nGAMMA_Z = 2.4952\n\nmu_id = { \"4l\":0.0137, \"4m\":0.0195, \"2m2e\":0.0094, \"4e\":0 }\nel_id = { \"4l\":0.0154, \"4m\":0, \"2m2e\":0.0213, \"4e\":0.0512 }\nel_reco = { \"4l\":0, \"4m\":0, \"2m2e\":0, \"4e\":0 }\necal = { \"4l\":0, \"4m\":0, \"2m2e\":0, \"4e\":0 }\nqcd = 0.008592\npdf = 0.001185\npileup = 0.009704\n# Systematic uncertainty for nonprompt\nDELTA_LAMBDA = 0.3\n\n# Systematic uncertainties for MC\nUNC_DIBOSON, UNC_TTBAR, UNC_TAUTAU, UNC_OTHER = 0.1, 0.1, 0.05, 0.2\n\n\n\n##\n## SAMPLE INFO\n##\n\nELEC_TRIG_SF = 1\nINT_LUMI, LUMI_UNC = 19.712, .026\nSQRT_S = 8\nMU_SUFF, EL_SUFF = \"muon_\" + YEAR_STR, \"electron_\" + YEAR_STR\n\nNGEN_ZZ_4L = 1499064 + 1499093 + 1497445 + 823922 + 823911 + 824466\nXSEC_ZZ_4L = 3 * 0.1767 + 3 * 0.07691\nNGEN_ZJETS_M50, XSEC_ZJETS_M50 = 30459503, 3531.9\nNGEN_TTBAR, XSEC_TTBAR = 12011428, 25.81\nNGEN_TTZ_2L2NU, XSEC_TTZ_2L2NU = 210160, 0.2057\nNGEN_WW_2L2NU, XSEC_WW_2L2NU = 10000431, 57.25\nNGEN_WZ_2L2Q, XSEC_WZ_2L2Q = 3215990, 5.09\nNGEN_WZ_3LNU, XSEC_WZ_3LNU = 2017979, 1.086\nNGEN_ZZ_2L2Q, XSEC_ZZ_2L2Q = 1936727, 2.47\nNGEN_ZZ_2L2NU, XSEC_ZZ_2L2NU = 954911, 0.71\n\nN_MC = 9\nN_DY = 3\n\nMC_SUFF = [ \"zz_4l\", \"zjets_m-50\", \"ttbar\", \"ww_2l2nu\",\n \"wz_2l2q\", \"wz_3lnu\", \"zz_2l2nu\", \"zz_2l2q\",\n \"ttz_2l2nu\",\n ]\n\nNGEN_ = [ NGEN_ZZ_4L, NGEN_ZJETS_M50, NGEN_TTBAR, NGEN_WW_2L2NU,\n NGEN_WZ_2L2Q, NGEN_WZ_3LNU, NGEN_ZZ_2L2NU, NGEN_ZZ_2L2Q,\n NGEN_TTZ_2L2NU,\n ]\n\nXSEC_ = [ XSEC_ZZ_4L, XSEC_ZJETS_M50, XSEC_TTBAR, 
XSEC_WW_2L2NU,\n XSEC_WZ_2L2Q, XSEC_WZ_3LNU, XSEC_ZZ_2L2NU, XSEC_ZZ_2L2Q,\n XSEC_TTZ_2L2NU,\n ]\n\nCOLOR_ = [ lLightBlue, lYellow, lGreen, lOrange,\n lOrange, lOrange, lOrange, lOrange,\n lGreen, \n ]\n\nMC_TEX_ = [ r\"$\\ZZtofL$\", r\"$\\ZtoTT$\", r\"$\\ttbar$\", r\"$\\WWtoLLNuNu$\",\n r\"$\\WZtoLLQQ$\", r\"$\\WZtoLLLNu$\", r\"$\\ZZtoLLNuNu$\", r\"$\\ZZtoLLQQ$\",\n r\"$\\TTZtoLLNuNu$\",\n ]\n\nMC_UNC_ = [ UNC_DIBOSON, UNC_TAUTAU, UNC_TTBAR, UNC_DIBOSON,\n UNC_DIBOSON, UNC_DIBOSON, UNC_DIBOSON, UNC_DIBOSON,\n UNC_OTHER,\n ]\n\nNGEN = dict(zip(MC_SUFF, NGEN_))\nXSEC = dict(zip(MC_SUFF, XSEC_))\nCOLOR = dict(zip(MC_SUFF, COLOR_))\nMC_TEX = dict(zip(MC_SUFF, MC_TEX_))\nMC_UNC = dict(zip(MC_SUFF, MC_UNC_))\n\nMC_SUFF_4L = MC_SUFF\nMC_SUFF_2L = list(MC_SUFF)\nMC_SUFF_2L[0], MC_SUFF_2L[1] = MC_SUFF[1], MC_SUFF[0]\n","sub_path":"python/Cuts2012.py","file_name":"Cuts2012.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"460924136","text":"from ... qualitative_population import QuantitativePopulation\nfrom ... quantitative_sample import QuantitativeSample\n\nclass UnsupervisedBinning:\n\n @staticmethod\n def equal_width_binning(dataset: list, k: int):\n from math import ceil\n ''' The algorithm divides the data into k intervals of equal size. '''\n N = len(dataset)\n BIN_WIDTH = ceil(N / k)\n NUMBER_OF_BINS = ceil(N / BIN_WIDTH)\n\n current_bin = 0\n\n while current_bin < NUMBER_OF_BINS:\n slice_start = current_bin * BIN_WIDTH \n slice_end = (current_bin + 1) * BIN_WIDTH\n\n yield QuantitativeSample(\n dataset[slice_start:slice_end]\n )\n\n current_bin += 1","sub_path":"statistics/algorithms/binning/unsupervised.py","file_name":"unsupervised.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"603926588","text":"import re\nfrom deoplete.base.filter import Base\n\n\nclass Filter(Base):\n def __init__(self, vim):\n super().__init__(vim)\n\n self.name = \"converter_auto_paren_lsp\"\n self.description = \"auto add parentheses converter for lsp\"\n\n def filter(self, context):\n p = re.compile(r\"\\(.*\\)\")\n for candidate in [\n x for x in context[\"candidates\"] if (\"kind\" in x and p.search(x[\"kind\"]))\n ]:\n candidate[\"word\"] += \"(\"\n\n return context[\"candidates\"]\n","sub_path":"rplugin/python3/deoplete/filter/converter_auto_paren_lsp.py","file_name":"converter_auto_paren_lsp.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"95690969","text":"import torch\nfrom content_cost import ContentCost\nfrom style_cost import LayerStyleCost\nfrom style_cost import STYLE_LAYERS\nfrom model import model\nfrom model import load_image\nfrom model import tensor_to_image\nfrom model import ITERATIONS\nfrom model import features_l\nfrom model import MOST_INFLUENTIAL_CONV_LAYER\nfrom style_cost import gram_matrix\nfrom model import compute_total_loss\nfrom model import generated_image_update\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.optim as optim\nimport imageio\n\nCOLOR_HISTOGRAM_MATCHING = True\nLUMINANCE_ONLY_TRANSFER = False\n\nALPHA = 1\nBETA = 10 ** 6\n\nratio = ALPHA / BETA\n\nPATH = \"../savedModels/initial/initialmodel.pth\"\n\nCONTENT_IMAGE_NAME = \"blagaj.jpg\"\nSTYLE_IMAGE_NAME = \"StarryNightOverTheRoneVanGogh.jpg\"\n\ncontent_cost = ContentCost()\nstyle_cost = 
LayerStyleCost()\n\n\"\"\"\nWe need to freeze all the network except the final layer. We need to set requires_grad == False to freeze \nthe parameters so that the gradients are not computed in backward().\nIn this case, we only want to keep the convolution's part of VGG-19.\nWe are optimizing pixels of generated (G) image.\n\"\"\"\nfor param in model.parameters():\n param.requires_grad = False\n\nprint(\"VGG-19 architecture:\\n\\n\" + str(model))\n\ndevice = torch.device(\"cuda\")\nmodel.to(device)\n\n# CONV and pooling layers\ntorch_layers = model._modules.items()\n\n# inputs\ncontent_image = load_image(\"../images/content/\" + CONTENT_IMAGE_NAME).to(device)\nstyle_image = load_image(\"../images/style/\" + STYLE_IMAGE_NAME, shape=content_image.shape[-2:]).to(device)\n\nif COLOR_HISTOGRAM_MATCHING:\n style_image = load_image(\"../images/results/CHM_\" + STYLE_IMAGE_NAME, shape=content_image.shape[-2:]).to(device)\n\nif LUMINANCE_ONLY_TRANSFER:\n style_image = load_image(\"../images/results/LOT_\" + STYLE_IMAGE_NAME, shape=content_image.shape[-2:]).to(device)\n\nassert content_image.size() == style_image.size(), \\\n \"content and style images need to be of the same size\"\n\nplot_image = np.concatenate((tensor_to_image(content_image), tensor_to_image(style_image)), axis=1)\n\nplt.imshow(plot_image)\nplt.show()\n\ncontent_features = features_l(content_image, torch_layers)\nstyle_features = features_l(style_image, torch_layers)\n\n# gram matrices for each layer\ngram_matrices = LayerStyleCost.calculate_gram_matrices(style_features)\n\ngenerated_image = content_image.clone().requires_grad_(True).to(device)\n\n\ndef run():\n \"\"\"\n cost function that minimizes both the style and the content cost\n J(G) = ALPHA * Jcontent(C, G) + BETA * Jstyle(S, G)\n :return:\n \"\"\"\n save_image_steps = 1000\n\n optimizer = optim.Adam([generated_image], lr=0.003)\n losses = []\n\n for step in range(1, ITERATIONS + 1):\n generated_image_features = features_l(generated_image, torch_layers)\n\n J_style = 0\n J_content = ContentCost.compute(generated_image_features, content_features,\n MOST_INFLUENTIAL_CONV_LAYER)\n\n for layer in STYLE_LAYERS:\n generated_image_feature_l = generated_image_features[layer]\n generated_image_gram_matrix_l = gram_matrix(generated_image_feature_l)\n\n batch_size, depth, height, width = generated_image_feature_l.shape\n\n style_image_gram_matrix = gram_matrices[layer]\n\n J_style_layer = LayerStyleCost.compute_layer_style_cost(layer, generated_image_gram_matrix_l,\n style_image_gram_matrix)\n\n J_style += LayerStyleCost.compute(J_style_layer, depth, height, width)\n\n # alpha, content_loss, beta, style_loss\n Jg = compute_total_loss(ALPHA, J_content, BETA, J_style)\n\n generated_image_update(optimizer, Jg)\n\n if step % save_image_steps == 0:\n print(\"Jg = \" + str(Jg.item()))\n imageio.imwrite(\"../images/results/blagajvangogh/\" + str(step) + \".jpg\",\n tensor_to_image(generated_image))\n losses.append(\"Jg = \" + str(Jg.item()))\n\n with open(\"../images/results/blagajvangogh/losses.txt\", \"w\") as f:\n for loss in losses:\n f.write(str(loss) + \"\\n\")\n\n torch.save(model, PATH)\n\n\nrun()\n","sub_path":"initialModel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"154663150","text":"import scxx as sc\nimport scxx.preprocessing as pp\nimport scxx.plotting as pl\nimport sys\nfrom keras import 
optimizers\n\nmode=str(sys.argv[1])\nlambda_regulizer=float(sys.argv[2])\nbatch_input_size=int(sys.argv[3])\ntest_input_file=\"./data/panc8/panc8.h5ad\"\ntest_result_path=\"./results/pancreas/scxx_%s/%s_%s_%s/\" % (str(sys.argv[1]).lower(),str(sys.argv[1]).lower(),str(sys.argv[2]),str(sys.argv[3]))\n\nadata = pp.read_sc_data(test_input_file)\ndatasets=pp.split_object(adata,by=\"dataset\")\n\n# Run scxx. Here, rawdata is optional. It will be generated from datasets if not provided.\nhandle=sc.SCXX(\n\t\t\t\t\t\t\trawdata=adata,\n\t\t\t\t\t\t\tdatasets=datasets,\n\t\t\t\t\t\t\tres_path=test_result_path,\n\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\tsplit_by=\"dataset\",\n\t\t\t\t\t\t\tpatience_es=50,\n\t\t\t\t\t\t\tpatience_lr=20,\n\t\t\t\t\t\t\tmt_ratio=0.8,\n\t\t\t\t\t\t\tlambda_regulizer=lambda_regulizer,\n\t\t\t\t\t\t\tbatch_input_size=batch_input_size,\n\t\t\t\t\t\t\tkeep_order=True,\n\t\t\t\t\t\t\t#model_file=\"model.h5\"\n\t\t\t\t\t\t\t)\nadata_transform=handle.fit_transform()\n# \npl.plotPrediction2(adata_transform.raw.X,adata_transform.X,result_path=test_result_path,lim=20)\npl.run_embedding(adata_transform,path=test_result_path,method=\"umap\")\npl.plotEmbedding(adata_transform, path=test_result_path, method='umap', group_by=\"dataset\",legend_loc=\"right margin\")\npl.plotEmbedding(adata_transform, path=test_result_path, method='umap', group_by=\"celltype\",legend_loc=\"on data\")\n","sub_path":"code/pancreas_scripts/run_cvae2_on_pancreas.py","file_name":"run_cvae2_on_pancreas.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"191227925","text":"# encoding: utf-8\n'''\n@version: v1.0\n@author: Jason\n@time: 2018/8/19 18:40\n@email: xinchaocheng.jason@gmail.com\n'''\n\nimport os\nimport numpy as np\nimport nibabel as nib\nimport glob\nimport torch\nimport torch.nn.functional as f\n\n\n###################################\nfrom dtfcn.model2dunet4dtfcn import MultidirectionUnet\nmodel_out_path = 'D:/jason/pytorchlog/checkpoint/model_2d_dtfcn_epoch_best_231.pth'\n###################################\n\n\nimport time\nprint(f'===> Using cuda')\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = MultidirectionUnet(n_channels=2,n_classes=4).to(device)\n\n\ndef read_image4test(path):\n print(f'path is {path}')\n t1 = nib.load(glob.glob(os.path.join(path, '*_t1_corrected.nii.gz'))[0]).get_data().astype(np.float32)\n t1ce = nib.load(glob.glob(os.path.join(path, '*_t1ce_corrected.nii.gz'))[0]).get_data().astype(np.float32)\n t2 = nib.load(glob.glob(os.path.join(path, '*_t2_corrected.nii.gz'))[0]).get_data().astype(np.float32)\n flair = nib.load(glob.glob(os.path.join(path, '*_flair_corrected.nii.gz'))[0]).get_data().astype(np.float32)\n assert t1.shape == t1ce.shape == t2.shape == flair.shape\n\n image = np.empty((4, t1.shape[0], t1.shape[1], t1.shape[2]), dtype=np.float32)\n print(f't1 shape is {t1.shape}')\n image[0,...] = t1\n image[1,...] = t1ce\n image[2,...] = t2\n image[3,...] 
= flair\n\n image = np.transpose(image, [0, 3, 2, 1])\n print(f'image shape is {image.shape}')\n return image\n\ndef generate_anchor_locations(patch_size, stride, im_size):\n stride_size_z = patch_size[0] / stride\n stride_size_y = patch_size[1] / stride\n stride_size_x = patch_size[2] / stride\n pad_x = (int(patch_size[2] / 2),\n int(np.ceil(im_size[2] / stride_size_x) * stride_size_x - im_size[2] + patch_size[2] / 2))\n pad_y = (int(patch_size[1] / 2),\n int(np.ceil(im_size[1] / stride_size_y) * stride_size_y - im_size[1] + patch_size[1] / 2))\n pad_z = (int(patch_size[0] / 2),\n int(np.ceil(im_size[0] / stride_size_z) * stride_size_z - im_size[0] + patch_size[0] / 2))\n x = np.arange(patch_size[0] / 2, im_size[2] + pad_x[0] + pad_x[1] - patch_size[2] / 2 + 1, stride_size_x)\n y = np.arange(patch_size[1] / 2, im_size[1] + pad_y[0] + pad_y[1] - patch_size[1] / 2 + 1, stride_size_y)\n z = np.arange(patch_size[2] / 2, im_size[0] + pad_z[0] + pad_z[1] - patch_size[0] / 2 + 1, stride_size_z)\n return (z, y, x), (pad_z, pad_y, pad_x)\n\n\ndef deploy(path, output_path, idx, nclass):\n\n print(f'deploy idx is {idx}')\n\n image_raw = read_image4test(path)\n image = np.empty((2, image_raw.shape[1], image_raw.shape[2], image_raw.shape[3]), dtype=np.float32)\n image[0,...] = image_raw[1,...]\n image[1,...] = image_raw[2,...]\n # print(f'image shape is {image.shape}')\n # print(f'image shape[:-1] is {image.shape[:-1]}')\n\n patch_size = [32, 32, 32]\n patch_stride = 2\n\n locations, padding = generate_anchor_locations(patch_size, patch_stride, image.shape[1:])\n\n pad_image = np.pad(image, ((0, 0),) + padding, 'constant')\n pad_result = np.zeros((pad_image.shape[1:]), dtype=np.float32)\n\n print(f'padding is {padding}')\n print(f'pad image shape is {pad_image.shape}')\n print(f'pad result shape is {pad_result.shape}')\n\n print(f'time start')\n start = time.clock()\n for z in locations[0]:\n for y in locations[1]:\n for x in locations[2]:\n\n inputpatch = pad_image[:, int(z - patch_size[0] / 2): int(z + patch_size[0] / 2),\n int(y - patch_size[1] / 2): int(y + patch_size[1] / 2),\n int(x - patch_size[2] / 2): int(x + patch_size[2] / 2)]\n\n inputpatch = np.expand_dims(inputpatch, axis=0)\n # print(f'inputpatch shape is {inputpatch.shape}')\n\n inputpatch = torch.from_numpy(inputpatch).float().to(device)\n\n\n probs = model(inputpatch)\n # probs = f.softmax(probs, dim=1)\n\n\n probs = probs.cpu().data.numpy()\n probs = np.transpose(probs, [0, 2, 1])\n probs = np.reshape(probs, (32, 32, 32, nclass))\n probs = np.asarray(np.argmax(probs, axis=3), np.uint16)\n\n if probs[0,0,0] > 0:\n print(f'zyx is {z, y, x}')\n print(f'probs is {probs[0,0,0]}')\n\n pad_result[int(z - patch_size[0] / 2): int(z + patch_size[0] / 2),\n int(y - patch_size[1] / 2): int(y + patch_size[1] / 2),\n int(x - patch_size[2] / 2): int(x + patch_size[2] / 2)] += probs\n\n segresult = pad_result[padding[0][0] : padding[0][0] + image.shape[1],\n padding[1][0]: padding[1][0] + image.shape[2],\n padding[2][0]: padding[2][0] + image.shape[3]]\n end = time.clock()\n print(f'run time is {end-start}')\n return segresult\n\n\nif __name__ == '__main__':\n print(\"big data n4 bias model test is training\")\n\n # cascade = 1, 2, 4, 5\n # idx_val = [1, 2, 4, 5]\n num_classes = 4\n idx = 5\n print(f'num class is {num_classes}')\n print(f'idx class is {idx}')\n\n input_path = 'D:/jason/BraTS-2018/test/check'\n output_path = 'D:/jason/BraTS-2018/test/output'\n\n\n all_paths = []\n for dirpath, dirnames, files in os.walk(input_path):\n if 
os.path.basename(dirpath)[0:7] == 'Brats18':\n all_paths.append(dirpath)\n\n print(f'all paths is {all_paths}')\n\n for path in all_paths[:1]:\n starttime = time.time()\n\n model.load_state_dict(torch.load(model_out_path))\n\n segmentation = deploy(path, output_path, idx, num_classes)\n\n segmentation[np.where(segmentation == 3)] = 4\n segmentation = np.transpose(segmentation, [2, 1, 0])\n\n filename = os.path.join(output_path, os.path.basename(path) + '.nii.gz')\n print(f'{filename} save is start')\n img = nib.Nifti1Image(segmentation, np.eye(4))\n nib.save(img, filename)\n print(f'save is done')\n stoptime = time.time()\n print(f'file {filename} time is {stoptime-starttime}')\n\n","sub_path":"dtfcn/submit324dtfcn.py","file_name":"submit324dtfcn.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"373012058","text":"from flask import (\n Blueprint,\n jsonify,\n request,\n)\nfrom utils import (\n json_response,\n log,\n)\nfrom models.book import Book\n\nmain = Blueprint('books', __name__)\n\n\n@main.route('/all')\ndef books_all():\n bs = Book.all()\n return json_response([b.json() for b in bs])\n\n\n@main.route('/<int:id>')\ndef book_id(id):\n b = Book.get(id)\n return json_response(b.json())\n\n\n@main.route(\"/add\", methods=[\"POST\"])\ndef add():\n data = request.get_json()\n # log(data)\n b = Book.isbn(data[\"isbn\"])\n return json_response(b.json())\n\n\n@main.route('/delete/<int:id>')\ndef delete(id):\n b = Book.get(id)\n b.delete()\n return json_response(Book.get(id).json())\n\n\n@main.route('/update/<int:id>', methods=['POST'])\ndef update(id):\n b = Book.get(id)\n data = request.json\n b.update(data)\n return json_response(Book.get(id).json())\n\n\n@main.route('/get')\ndef get():\n r = {\n \"status\": \"no\",\n }\n title = request.args.get('title')\n books = Book.find(title=title)\n if books:\n r['status'] = 'ok'\n r['books'] = [b.json() for b in books]\n return json_response(r)\n
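\n# A hedged usage sketch (added, not part of the original file; the app module\n# and the url_prefix are assumptions): one way this blueprint could be mounted.\n# from flask import Flask\n# app = Flask(__name__)\n# app.register_blueprint(main, url_prefix='/books')\n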
{}\".format(img_savename))","sub_path":"day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"170150615","text":"# -*- encoding:utf-8 -*-\nimport random\nimport re\nfrom urllib import request, parse\nfrom bs4 import BeautifulSoup\n\n\ndef hello_bot(msg):\n trigger = '안녕 봇아'\n if msg.startswith(trigger):\n return '니 영혼에 고통을 안겨주마!'\n\n return False\n\n\ndef choice(msg):\n trigger = '!선택 '\n if msg.startswith(trigger):\n choices = msg.split()[1:]\n idx = random.randrange(0, len(choices))\n return '제 선택은 ' + choices[idx]\n\n return False\n\n\ndef percentage(msg):\n trigger = '!확률 '\n if msg.startswith(trigger):\n event = msg[len(trigger):]\n percent = random.random() * 100\n\n return '%s의 확률은 %.1f%%입니다.' % (event, percent)\n\n return False\n\n\ndef weather(msg):\n trigger = '!날씨 '\n if msg.startswith(trigger):\n place = msg[len(trigger):]\n query = parse.quote(place + ' 날씨')\n #page = request.urlopen(\"https://www.google.co.kr/#q=%s\" % query)\n page = request.urlopen(\"https://search.naver.com/search.naver?query=%s\" % query)\n page = page.read()\n soup = BeautifulSoup(page, 'html.parser')\n\n location = None\n temperature = None\n sky = None\n for header in soup.find_all('div'):\n if header.get('class') is not None and header.get('class')[0] == 'fl':\n children = header.findChildren()\n child = [c for c in children[2]]\n temperature = str(child[0])\n sky = str(child[2].text)\n\n # print(temperature, sky)\n\n if header.get('class') is not None and header.get('class')[0] == 'noti_area':\n children = header.findChildren()\n location = str(children[1].text).replace('\\'', '')\n\n if location is None or temperature is None or sky is None:\n return '해당하신 지역을 조회할 수 없습니다'\n\n return location + '의 날씨는 ' + temperature + '도 ' + sky + '입니다'\n\n return False\n\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"323319891","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom flask import render_template, request, json\nfrom flask import Response\nfrom flask_restful import reqparse, abort, Api, Resource, fields, marshal_with\nfrom bson.son import SON\nfrom pymongo import ASCENDING, DESCENDING\nimport time\n\nfrom flask import current_app as app\n\napi = Api(app)\n\ncollection = app.config['data_collection']\n\nparser = reqparse.RequestParser()\nparser.add_argument('keywords', action='append', help='document keyword list')\nparser.add_argument('offset', type=int)\nparser.add_argument('limit', type=int)\nparser.add_argument('time_start', type=int, required=True)\nparser.add_argument('time_end', type=int, required=True)\n\ndata_fields = {\n 'url': fields.String,\n \"title\": fields.String,\n \"text\": fields.String,\n \"authors\": fields.List(fields.String),\n \"publish_date\": fields.DateTime(dt_format='rfc822'),\n 'keywords': fields.String,\n 'tags': fields.String,\n 'updatetime': fields.Float,\n}\n\ndata_list_fields = fields.List(data_fields)\n\nclass SpiderData(Resource):\n @marshal_with(data_list_fields)\n def get(self):\n args = parser.parse_args()\n keywords = args['keywords']\n tags = args['tags']\n offset = args['offset']\n limit = args['limit']\n time_start = args['time_start']\n time_end = args['time_end']\n\n pipeline = []\n match = {}\n match_or = []\n match_and = []\n\n match['updatetime'] = {'$gte': time_start, 
'$lte': time_end}\n\n if keywords:\n match_or.append({'keywords':{'$in': keywords }})\n if tags:\n match_or.append({'tags':{'$in': tags }})\n\n if len(match_or) :\n match['$or'] = match_or\n \n if len(match_and) :\n match['$and'] = match_and\n \n pipeline.append({'$match':match})\n \n if offset:\n pipeline.append({'$offset': offset})\n if limit:\n pipeline.append({'$limit': limit})\n\n logger.debug('construct aggregate pipeline: %s' % pipeline)\n\n return collection.aggregate(pipeline).get('result')\n\n\n\napi.add_resource(Data, 'spider/data')","sub_path":"crawler_data_api/resources/spider_data.py","file_name":"spider_data.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"564363225","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\nclass DataSelectionStrategy(object):\n \"\"\" \n Implementation of Data Selection Strategy class which serves as base class for other \n dataselectionstrategies for supervised learning frameworks.\n \"\"\"\n\n def __init__(self, trainloader, valloader,model):\n \"\"\"\n Constructer method\n \"\"\"\n \n self.trainloader = trainloader # assume its a sequential loader.\n self.valloader = valloader\n self.model = model\n self.N_trn = len(trainloader.dataset)\n self.N_val = len(valloader.dataset)\n self.grads_per_elem = None\n self.val_grads_per_elem = None\n self.numSelected = 0\n \n\n def select(self, budget, model_params):\n pass\n\n def compute_gradients(self, valid=False):\n\n self.grads_per_elem = self.model.module.compute_gradients(self.trainloader,\\\n self.model.device_ids)\n self.val_grads_per_elem = self.model.module.compute_gradients(self.valloader,\\\n self.model.device_ids)\n \n def update_model(self, model_params):\n \"\"\"\n Update the models parameters\n\n Parameters\n ----------\n model_params: OrderedDict\n Python dictionary object containing models parameters\n \"\"\"\n\n self.model.load_state_dict(model_params)\n","sub_path":"subset_selection/ssl/dataselectionstrategy.py","file_name":"dataselectionstrategy.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"365292001","text":"# https://www.youtube.com/watch?v=GmpyAMpjpUY 这个视频讲的可以\n\n# Definition for an interval.\n# class Interval:\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\nclass Solution:\n def minMeetingRooms(self, intervals: List[Interval]) -> int:\n if not intervals:\n return 0\n free_rooms = [] # 这个堆存的是,每个房间最后活动的结束时间\n # intervals按照开始时间从小到大排序\n intervals.sort(key=lambda x: x.start)\n # 把第一个开始时间的活动结束时间 push到 堆里\n heapq.heappush(free_rooms, intervals[0].end)\n for i in intervals[1:]:\n if free_rooms[0] <= i.start:\n heapq.heappop(free_rooms) # pop掉堆的最小值\n heapq.heappush(free_rooms, i.end)\n return len(free_rooms)\n\n\n","sub_path":"code/253. Meeting Rooms II.py","file_name":"253. 
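\n # Illustration (added; the values are hypothetical): for keywords=['gpu'],\n # offset=10 and limit=20, the pipeline assembled above is roughly\n # [{'$match': {'updatetime': {'$gte': time_start, '$lte': time_end},\n # '$or': [{'keywords': {'$in': ['gpu']}}]}},\n # {'$skip': 10}, {'$limit': 20}]\n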
Meeting Rooms II.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"389039068","text":"import logging\nimport os\nimport sys\n\n# Configure Logger\nLog = None\n\ndef pprint(msg, level=logging.DEBUG):\n global Log\n if Log is None:\n return\n if isinstance(level, str):\n if level.count('info'):\n level = logging.INFO\n elif level.count('debug'):\n level = logging.DEBUG\n Log.log(level, msg)\n\ndef configure(taskoutpath, doSaveToDisk=0, doWriteStdOut=0):\n global Log\n Log = logging.getLogger('deletemove')\n\n Log.setLevel(logging.DEBUG)\n Log.handlers = [] # remove pre-existing handlers!\n formatter = logging.Formatter('%(message)s')\n # Config logger to save transcript of log messages to plain-text file\n if doSaveToDisk:\n # birth-vtranscript.txt logs everything\n fh = logging.FileHandler(\n os.path.join(\n taskoutpath,\n \"delete-transcript-verbose.txt\"))\n fh.setLevel(0)\n fh.setFormatter(formatter)\n Log.addHandler(fh)\n\n # birth-transcript.txt logs high-level messages\n fh = logging.FileHandler(\n os.path.join(\n taskoutpath,\n \"delete-transcript-summary.txt\"))\n fh.setLevel(logging.DEBUG + 1)\n fh.setFormatter(formatter)\n Log.addHandler(fh)\n\n # Config logger that can write to stdout\n if doWriteStdOut:\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n Log.addHandler(ch)\n # Config null logger, avoids error messages about no handler existing\n if not doSaveToDisk and not doWriteStdOut:\n Log.addHandler(logging.NullHandler())\n","sub_path":"bnpy/deletemove/DLogger.py","file_name":"DLogger.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"149384356","text":"\"\"\" Detect people wearing masks in videos\n\"\"\"\nimport torch\nfrom skvideo.io import FFmpegWriter, vreader\nfrom torchvision.transforms import Compose, Resize, ToPILImage, ToTensor\n\nfrom .Common.Face_Detector import FaceDetector\nfrom .Train import MaskDetector\n\n# Various Trained Models\nmodelpath_v1 = \"Covid_Mask_Detector/Tensorboard/Mask_Detector/version_0/checkpoints/epoch=8-val_loss=0.08-val_acc=98.95.ckpt\"\nmodelpath_v2 = \"Covid_Mask_Detector/Tensorboard/Mask_Detector/version_0/checkpoints/epoch=8-val_loss=0.08-val_acc=99.09.ckpt\"\nmodelpath_v3 = \"Covid_Mask_Detector/Tensorboard/Mask_Detector/version_1/checkpoints/epoch=8-val_loss=0.08-val_acc=98.91.ckpt\"\n\n# @torch.no_grad()\n\ndef detect_face_mask(frame):\n\n model = MaskDetector()\n\n # Use GPU for processing if Available. Else use CPU.\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model.load_state_dict(torch.load(modelpath_v3, map_location=device)['state_dict'], strict=False)\n\n model = model.to(device)\n model.eval()\n \n faceDetector = FaceDetector(\n prototype = 'Covid_Mask_Detector/Models/deploy.prototxt.txt',\n model = 'Covid_Mask_Detector/Models/res10_300x300_ssd_iter_140000.caffemodel',\n )\n \n transformations = Compose([\n ToPILImage(),\n Resize((100, 100)),\n ToTensor(),\n ])\n\n labels = ['No Mask', 'Mask']\n\n faces = faceDetector.detect(frame)\n for face in faces:\n xStart, yStart, width, height = face\n \n xStart, yStart = max(xStart, 0), max(yStart, 0)\n \n # Predict mask label on extracted face\n faceImg = frame[yStart:yStart+height, xStart:xStart+width]\n output = model(transformations(faceImg).unsqueeze(0).to(device))\n\n # We only need the Predicted Data. 
\n","sub_path":"bnpy/deletemove/DLogger.py","file_name":"DLogger.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"149384356","text":"\"\"\" Detect people wearing masks in videos\n\"\"\"\nimport torch\nfrom skvideo.io import FFmpegWriter, vreader\nfrom torchvision.transforms import Compose, Resize, ToPILImage, ToTensor\n\nfrom .Common.Face_Detector import FaceDetector\nfrom .Train import MaskDetector\n\n# Various Trained Models\nmodelpath_v1 = \"Covid_Mask_Detector/Tensorboard/Mask_Detector/version_0/checkpoints/epoch=8-val_loss=0.08-val_acc=98.95.ckpt\"\nmodelpath_v2 = \"Covid_Mask_Detector/Tensorboard/Mask_Detector/version_0/checkpoints/epoch=8-val_loss=0.08-val_acc=99.09.ckpt\"\nmodelpath_v3 = \"Covid_Mask_Detector/Tensorboard/Mask_Detector/version_1/checkpoints/epoch=8-val_loss=0.08-val_acc=98.91.ckpt\"\n\n# @torch.no_grad()\n\ndef detect_face_mask(frame):\n\n model = MaskDetector()\n\n # Use GPU for processing if Available. Else use CPU.\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model.load_state_dict(torch.load(modelpath_v3, map_location=device)['state_dict'], strict=False)\n\n model = model.to(device)\n model.eval()\n \n faceDetector = FaceDetector(\n prototype = 'Covid_Mask_Detector/Models/deploy.prototxt.txt',\n model = 'Covid_Mask_Detector/Models/res10_300x300_ssd_iter_140000.caffemodel',\n )\n \n transformations = Compose([\n ToPILImage(),\n Resize((100, 100)),\n ToTensor(),\n ])\n\n labels = ['No Mask', 'Mask']\n\n faces = faceDetector.detect(frame)\n for face in faces:\n xStart, yStart, width, height = face\n \n xStart, yStart = max(xStart, 0), max(yStart, 0)\n \n # Predict mask label on extracted face\n faceImg = frame[yStart:yStart+height, xStart:xStart+width]\n output = model(transformations(faceImg).unsqueeze(0).to(device))\n\n # We only need the predicted label. Discard the first returned value.\n _, predicted = torch.max(output.data, 1)\n\n return labels[predicted]\n\nif __name__ == '__main__':\n # NOTE: detect_face_mask expects an image frame; calling it with no\n # argument raises a TypeError.\n detect_face_mask()","sub_path":"Covid_Mask_Detector/Frame_Face_Recognition.py","file_name":"Frame_Face_Recognition.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"427200730","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nnp.random.seed(123)\n\nclass NodeMinibatchIterator(object):\n \n \"\"\" \n This minibatch iterator iterates over nodes for supervised learning.\n\n G -- networkx graph\n placeholders -- standard tensorflow placeholders object for feeding\n batch_size -- size of the minibatches\n max_degree -- maximum size of the downsampled adjacency lists\n \"\"\"\n def __init__(self, G, placeholders, max_degree=25,\n **kwargs):\n\n self.G = G\n self.nodes = G.nodes()\n self.placeholders = placeholders\n self.max_degree = max_degree\n self.train_nodes = self.nodes\n self.batch_num = 0\n\n def batch_feed_dict(self, batch_nodes, val=False):\n feed_dict = dict()\n feed_dict.update({self.placeholders['batch_size'] : len(batch_nodes)})\n feed_dict.update({self.placeholders['batch']: batch_nodes})\n\n return feed_dict\n\n def next_minibatch_feed_dict(self):\n #feed all at a time for RL training\n start_idx = 0 \n end_idx = len(self.train_nodes)\n batch_nodes = self.train_nodes[start_idx : end_idx]\n return self.batch_feed_dict(batch_nodes)\n\n def shuffle(self):\n \"\"\" Re-shuffle the training set.\n Also reset the batch number.\n \"\"\"\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0\n\n\nclass GraphMinibatchIterator(object):\n \n \"\"\" \n This minibatch iterator iterates over graphs for reinforcement learning.\n\n G -- json graph\n placeholders -- standard tensorflow placeholders object for feeding\n batch_size -- size of the graph minibatches\n max_degree -- maximum size of the downsampled adjacency lists\n \"\"\"\n def __init__(self, graphs, placeholders, seed = 1, upstream_devices_num=5, train_ratio = 0.8, batch_size =\n 32, max_degree=25, **kwargs):\n\n self.graphs = graphs\n \n self.placeholders = placeholders\n \n self.upstream_devices_num = upstream_devices_num\n\n self.batch_size = batch_size\n self.max_degree = max_degree\n self.graph_num = 0\n self.eval_graph_num = 0\n\n self.adj_ins, self.adj_outs, self.deg = self.construct_adj()\n \n self.construct_dep_sources()\n\n np.random.seed(seed)\n\n num_trains = (int)(train_ratio * len(self.graphs))\n self.train_graphs = list(np.random.choice(self.graphs, num_trains, replace=False))\n train_sets = set([g.real_idx for g in self.train_graphs])\n self.eval_graphs = []\n for g in self.graphs:\n if g.real_idx not in train_sets:\n self.eval_graphs.append(g)\n\n def end(self):\n return self.graph_num >= len(self.train_graphs)\n \n def eval_end(self):\n if self.eval_graph_num == len(self.eval_graphs):\n self.eval_graph_num = 0\n return True\n else:\n return False\n \n def next_batch_size(self):\n remaining = len(self.train_graphs) - self.graph_num\n return min(remaining, self.batch_size)\n\n def construct_adj(self):\n max_graph_size = max([len(G.nodes()) for G in self.graphs])\n \n adj_ins = max_graph_size*np.ones((len(self.graphs), max_graph_size+1, self.max_degree), dtype = np.int32)\n adj_outs = max_graph_size*np.ones((len(self.graphs), max_graph_size+1, self.max_degree), dtype = np.int32)\n \n deg = np.zeros((len(self.graphs), max_graph_size,), dtype = 
np.int32)\n \n for G in self.graphs:\n for nodeid in G.nodes():\n neighbors = np.array([_ for _ in G.neighbors(nodeid)])\n deg[G.g_id, nodeid] = len(neighbors)\n if len(neighbors) == 0:\n continue\n \n ins = np.array([_ for _ in G.ins(nodeid)])\n outs = np.array([_ for _ in G.outs(nodeid)])\n \n if len(ins) != 0:\n if len(ins) >= self.max_degree:\n ins = np.random.choice(ins, self.max_degree, replace=False)\n elif len(ins) < self.max_degree:\n ins = np.random.choice(ins, self.max_degree, replace=True)\n adj_ins[G.g_id, nodeid, :] = ins\n \n if len(outs) != 0:\n if len(outs) >= self.max_degree:\n outs = np.random.choice(outs, self.max_degree, replace=False)\n elif len(outs) < self.max_degree:\n outs = np.random.choice(outs, self.max_degree, replace=True)\n adj_outs[G.g_id, nodeid, :] = outs\n \n return adj_ins, adj_outs, deg\n \n def construct_dep_sources(self):\n deps = []\n source_counts = []\n source_weights = []\n for idx, G in enumerate(self.graphs):\n print(\"processing graph {} id {}\".format(idx, G.g_id))\n nodes = G.nodes()\n node_sources = np.zeros((len(nodes), self.upstream_devices_num), dtype=int)\n node_source_weights = np.zeros((len(nodes), self.upstream_devices_num), dtype=float)\n node_num_sources = np.zeros((len(nodes)), dtype=int)\n #should iterate through the ordered list\n print(\"iterating graph {} order {}\".format(G.g_id, G.order))\n for nidx in nodes:\n nodeid = G.order[nidx]\n\n neighs = G.neighbors(nodeid)\n \n ins = []\n for n in neighs:\n order = G.reverse_order[n]\n if order < nidx:\n ins.append(n)\n\n print(\"{} source {}\".format(nodeid, ins))\n \n node_num_sources[nodeid] = min(len(ins), self.upstream_devices_num)\n\n if len(ins) > self.upstream_devices_num:\n ins = np.random.choice(np.array(ins), self.upstream_devices_num, replace=False)\n node_sources[nodeid, :] = np.array([G.reverse_order[i] for i in ins.tolist()])\n node_source_weights[nodeid, :] = np.array(G.node[nodeid].get_weight(ins.tolist()))\n else:\n node_sources[nodeid, :len(ins)] = np.array([G.reverse_order[i] for i in ins])\n node_source_weights[nodeid, :len(ins)] = np.array(G.node[nodeid].get_weight(ins), dtype = np.float32)\n \n deps.append(node_sources)\n source_weights.append(node_source_weights)\n source_counts.append(node_num_sources)\n \n self.deps = deps\n self.source_counts = source_counts\n self.source_weights = source_weights\n \n def batch_feed_dict(self, batch_nodes, cpu_weights, sources, source_weights, num_sources, graph_idx, real_idx, max_throughput, val=False):\n feed_dict = dict()\n feed_dict.update({self.placeholders['batch_size'] : len(batch_nodes)})\n feed_dict.update({self.placeholders['batch'] : batch_nodes})\n feed_dict.update({self.placeholders['batch_sources'] : sources})\n feed_dict.update({self.placeholders['batch_source_weights'] : source_weights})\n feed_dict.update({self.placeholders['batch_num_sources'] : num_sources})\n feed_dict.update({self.placeholders['graph_idx']: graph_idx})\n self.graph_num += 1\n return feed_dict, batch_nodes, cpu_weights, len(batch_nodes), sources, source_weights, num_sources, graph_idx, real_idx, max_throughput\n\n def next_minibatch_feed_dict(self):\n #feed all at a time for RL training\n G = self.train_graphs[self.graph_num]\n #nodes = G.nodes()\n nodes = G.order\n cpu_weights = []\n for n in nodes:\n cpu_weights.append(G.node[n].weighted_load)\n return self.batch_feed_dict(nodes, cpu_weights, self.deps[G.g_id], self.source_weights[G.g_id], self.source_counts[G.g_id], G.g_id, G.real_idx, G.max_throughput)\n\n def shuffle(self):\n 
\"\"\" Re-shuffle the training set.\n Also reset the batch number.\n \"\"\"\n self.train_graphs = np.random.permutation(self.train_graphs)\n self.graph_num = 0\n \n def eval_batch_feed_dict(self, batch_nodes, cpu_weights, sources, source_weights, num_sources, graph_idx, real_idx, max_throughput, val=False):\n feed_dict = dict()\n feed_dict.update({self.placeholders['batch_size'] : len(batch_nodes)})\n feed_dict.update({self.placeholders['batch_sources'] : sources})\n feed_dict.update({self.placeholders['batch_source_weights'] : source_weights})\n feed_dict.update({self.placeholders['batch_num_sources'] : num_sources})\n feed_dict.update({self.placeholders['batch'] : batch_nodes})\n feed_dict.update({self.placeholders['graph_idx']: graph_idx})\n self.eval_graph_num += 1\n return feed_dict, batch_nodes, cpu_weights, len(batch_nodes), sources, source_weights, num_sources, graph_idx, real_idx, max_throughput\n\n def next_eval_minibatch_feed_dict(self):\n #feed all at a time for RL training\n G = self.eval_graphs[self.eval_graph_num]\n #nodes = G.nodes()\n #cpu_weights = [n.weighted_load for n in G.node]\n nodes = G.order\n cpu_weights = []\n for n in nodes:\n cpu_weights.append(G.node[n].weighted_load)\n return self.eval_batch_feed_dict(nodes, cpu_weights, self.deps[G.g_id], self.source_weights[G.g_id], self.source_counts[G.g_id], G.g_id, G.real_idx, G.max_throughput)\n","sub_path":"graphsage/minibatch_order.py","file_name":"minibatch_order.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"34492131","text":"from django.conf.urls import patterns, include, url\nimport settings\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'toasted.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\turl(r'^$', 'posts.views.index', name='home'),\n\turl(r'^about/', 'posts.views.about', name='about'),\n\turl(r'^gallery/', 'images.views.galleries', name='galleries'),\n\turl(r'^new_comment/', 'comments.views.addNewComment'),\n url(r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n\turlpatterns += patterns('',\t(r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}))\n","sub_path":"toasted/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"208765096","text":"# -*- coding: gbk -*-\r\n\"\"\"\r\nCreated on Thu Mar 20 20:53:01 2014\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\n#coding=utf8\r\n\r\nimport urllib2\r\nimport re\r\nimport codecs\r\n\r\n\"\"\"\r\nLogin to Sina Weibo with cookie\r\n\"\"\"\r\n\r\nf=open('d:\\\\cookie.txt','r')\r\ndata=f.readlines()\r\nd={}\r\nfor line in data:\r\n s=line.split('\\t')\r\n d[s[0]]=s[1]\r\n\r\n\r\nCOOKIE = d\r\n#fill with your weibo.com cookie\r\nHEADERS = {\r\n 'cookie': d,\r\n 'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'\r\n \r\n \r\n}\r\n\r\ndef test_login():\r\n url = 'http://weibo.com/u/1663112274/home'\r\n req = urllib2.Request(url, headers=HEADERS)\r\n s = urllib2.urlopen(req).read()\r\n \r\n return s\r\n\r\n\r\nif __name__ == '__main__':\r\n s=test_login()\r\n u=s.decode('utf-8')\r\n 
\r\n","sub_path":"python01/others/untitled9.py","file_name":"untitled9.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"204900989","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 18 10:07:21 2017\n\n@author: claire\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n#from sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegression\n\nimport mne\nfrom mne.decoding import (SlidingEstimator, GeneralizingEstimator,\n cross_val_multiscore, LinearModel, get_coef)\nimport os\nfrom mne.parallel import parallel_func\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import permutation_test_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import SVC\n\n\n\ncond = ['stim', 'imag']\n\nexclude = [7]\nall_epochs=list()\n# We start by exploring the frequence content of our epochs.\nfor subject_id in range(1,12):#,12):\n if subject_id in exclude:\n continue\n subject = 'S%02d' %subject_id\n data_path = os.path.join('/home/claire/DATA/Data_Face_House/' + subject +'/EEG/Evoked_Lowpass')\n fname_in = os.path.join(data_path, '%s-epo.fif' %subject)\n epochs=mne.read_epochs(fname_in)\n epochs.interpolate_bads()\n all_epochs.append(epochs)\n \nepochs = mne.concatenate_epochs(all_epochs)\n\nmne.epochs.combine_event_ids(epochs, ['stim/face', 'stim/house'], {'stim':100}, copy=False) \nmne.epochs.combine_event_ids(epochs, ['imag/face', 'imag/house'], {'imag':200}, copy=False)\n\n #only look at occipital channels\n# select_chans = [u'Iz', u'Oz', u'O1', u'O2', u'O3', u'PO7', u'PO8', u'POz', u'PO1', u'PO3', u'PO2', u'PO4']\n #select_chans = [ u'PO7', u'PO8']\n #select_chans = [ u'Cz', u'FPz']\n\n #ch_names=[ch_name.replace('', '') for ch_name in select_chans]\n # epochs.pick_types(eeg=True).pick_channels(ch_names)\n \n # average group of 4 trials\n#data_cond1 = epochs['imag/face'].get_data()\n#data_cond2 = epochs['imag/house'].get_data()\n#\n#mean_cond1=[]\n#ind_trial = 0\n#while ind_trial<= len(data_cond1)-5:\n# mean_cond1.append(mean(data_cond1[ind_trial:(ind_trial+4)], 0))\n# print ind_trial\n# ind_trial+=5\n#\n#mean_cond2=[]\n#ind_trial = 0\n#while ind_trial<= len(data_cond2)-5:\n# mean_cond2.append(mean(data_cond2[ind_trial:(ind_trial+4)], 0))\n# print ind_trial\n# ind_trial+=5\n#\n#X=[]\n## create variable for decoding\n#X = mean_cond1 + mean_cond2\n#X=np.array(X)\n#y = np.array([0] * len(mean_cond1) + [1] * len(mean_cond2)) \n\n\n#----------------------------------#\n# Time decoding\n#----------------------------------#\n\nepochs=epochs['stim/face', 'stim/house']\n\n# fit and time decoder\nX = epochs.get_data() # MEG signals: n_epochs, n_channels, n_times\nle = LabelEncoder()\ny = le.fit_transform(epochs.events[:, 2])\n\nclf = make_pipeline(StandardScaler(), LogisticRegression())\n\ntime_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')\n\nscores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)\n# Mean scores across cross-validation splits\nscores = np.mean(scores, axis=0)\n\n# Plot\nfig, ax = plt.subplots()\nax.plot(epochs.times, scores, label='score')\nax.axhline(.5, color='k', linestyle='--', label='chance')\nax.set_xlabel('Times')\nax.set_ylabel('AUC') # Area Under the Curve\nax.legend()\nax.axvline(.0, color='k', linestyle='-')\nax.set_title('Sensor space decoding')\nplt.show()\n\n# You 
can retrieve the spatial filters and spatial patterns if you explicitly\n# use a LinearModel\nclf = make_pipeline(StandardScaler(), LinearModel(LogisticRegression()))\ntime_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')\ntime_decod.fit(X, y)\n\ncoef = get_coef(time_decod, 'patterns_', inverse_transform=True)\nevoked = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])\nevoked.plot_joint(times=np.arange(0., .500, .100), title='patterns')\n\n#----------------------#\n# With statistics\n#----------------------#\n\n\n\n\n\n#cv=StratifiedKFold(n_splits=3, shuffle=False)\n#cv.get_n_splits(X, y)\n\n\nclf = make_pipeline(StandardScaler(), LogisticRegression())\n\ntime_decod = SlidingEstimator(clf, n_jobs=1, scoring='accuracy')\n\n#from sklearn.svm import SVC\nsvc = SVC(C=1, kernel='linear')\n\ncv = StratifiedKFold(3)\n\n\n\nscore, permutation_scores, pvalue = permutation_test_score(\n svc, X, y, scoring=\"accuracy\", cv=3, n_permutations=100, n_jobs=1)\n\n\n\n\n\n\n\n\n\n\n\n\n#scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)\n\nscores = cross_val_multiscore(time_decod, X, y, cv=cv, n_jobs=1)\n# Mean scores across cross-validation splits\nscores = np.mean(scores, axis=0)\n\nclass_balance = np.mean(y == y[0])\nclass_balance = max(class_balance, 1. - class_balance)\n\n # Plot\nfig, ax = plt.subplots()\nax.plot(epochs.times, scores, label='score')\nax.axhline(class_balance, color='k', linestyle='--', label='chance')\nax.set_xlabel('Times')\nax.set_ylabel('AUC') # Area Under the Curve\nax.legend()\nax.axvline(.0, color='k', linestyle='-')\nax.set_title('Sensor space decoding')\nplt.show()\n\n \nfrom mne.stats import permutation_t_test\nn_permutations = 50000\nT0, p_values, H0 = permutation_t_test(scores, n_permutations, n_jobs=1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Old/run_time_decoding_average_trials_all_subj.py","file_name":"run_time_decoding_average_trials_all_subj.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"32780860","text":"\"\"\"\nModule provides functions for the handling of concept glosses in linguistic datasets.\n\"\"\"\nimport re\nfrom collections import defaultdict\n\nfrom clldutils.misc import lazyproperty\n\nimport attr\n\n__all__ = ['parse_gloss', 'Gloss', 'concept_map']\n\n\n@attr.s\nclass Gloss(object):\n main = attr.ib(default='')\n # the start character indicating a potential comment:\n comment_start = attr.ib(default='')\n # the comment (everything occurring in brackets in the input string:\n comment = attr.ib(default='')\n # the end character indicating the end of a potential comment:\n comment_end = attr.ib(default='')\n # the part of speech, in case this was specificied by a preceding \"the\" or a\n # preceding \"to\" in the mainpart of the string:\n pos = attr.ib(default='')\n # the prefix, that is, words, like, eg. 
\"be\", \"in\", which may precede the main\n # gloss in concept lists, as in \"be quiet\":\n prefix = attr.ib(default='')\n # the longest constituent, which is identical with the main part if there's no\n # whitespace in the main part, otherwise the longest part part of the main gloss\n # split by whitespace:\n longest_part = attr.ib(default='')\n # the original gloss (for the purpose of testing):\n gloss = attr.ib(default='', converter=lambda s: s.lower().replace('*', ''))\n\n frequency = attr.ib(default=0)\n\n @lazyproperty\n def tokens(self):\n return ' '.join(s for s in self.gloss.split() if s not in ['or'])\n\n def similarity(self, other):\n # first-order-match: identical glosses\n if self.gloss == other.gloss:\n if self.pos and self.pos == other.pos:\n return 1\n return 2\n # second-order match: identical main-parts\n if self.main == other.gloss or self.gloss == other.main or\\\n self.main == other.main:\n # best match if pos matches\n return 3 if self.pos and self.pos == other.pos else 4\n if self.longest_part == other.longest_part:\n return 5 if self.pos and self.pos == other.pos else 6\n if other.longest_part in self.main.split():\n return 7\n if self.longest_part in other.main.split():\n return 8\n return 100\n\n @classmethod\n def from_string(cls, s, language='en'):\n return parse_gloss(s, language=language)[0]\n\n\ndef parse_gloss(gloss, language='en'):\n \"\"\"\n Parse a gloss into its constituents by applying some general logic.\n\n Parameters\n ----------\n gloss : str\n The gloss as found in various sources (we assume that we are dealing\n with English glosses here.\n\n Returns\n -------\n A list of `Gloss` instances.\n\n Notes\n -----\n\n The basic purpose of this function is to provide a means to make it easier\n to compare meanings across different resources. Often, linguists will\n annotate their resources quite differently, and for one and the same\n concept, we may find very different glosses. The concept \"kill [verb]\", for\n example may be glossed as \"to kill\", \"kill\", \"kill (v.)\", \"kill\n (somebody)\", etc. In order to guarantee comparability, this function tries\n to use basic knowledge of glossing tendencies to disentangle the variety of\n glossing styles which can be found in the literature. Thus, in the case of\n \"kill [verb]\", the function will analyze the different strings as follows::\n\n >>> glosses = [\"to kill\", \"kill\", \"kill (v.)\", \"kill (somebody)\"]\n >>> for gloss in glosses:\n ... parsed_gloss = parse_gloss(gloss)[0]\n ... 
print(parsed_gloss.main, parsed_gloss.pos)\n kill verb\n kill\n kill verb\n kill\n\n As can be seen: it seeks to extract the most important part of the gloss\n and may thus help to compare different glosses across different resources.\n \"\"\"\n if not gloss:\n print(gloss)\n raise ValueError(\"Your gloss is empty\")\n G = []\n gpos = ''\n if language == 'en':\n pos_markers = {'the': 'noun', 'a': 'noun', 'to': 'verb'}\n prefixes = ['be', 'in', 'at']\n elif language == 'de':\n pos_markers = {'der': 'noun', 'die': 'noun', 'das': 'noun'}\n prefixes = []\n elif language == 'fr':\n pos_markers = {\n 'le': 'noun',\n 'la': 'noun',\n 'les': 'noun',\n 'du': 'noun',\n 'des': 'noun',\n 'de': 'noun',\n 'un': 'noun',\n 'une': 'noun'}\n prefixes = ['il', 'est']\n elif language == \"es\":\n pos_markers = {\n \"el\": \"noun\",\n \"la\": \"noun\",\n \"los\": \"noun\",\n \"mi\": \"noun\",\n \"un\": \"noun\",\n \"una\": \"noun\",\n \"unos\": \"noun\",\n \"las\": \"noun\",\n \"su\": \"noun\",\n }\n prefixes = [\"lo\", \"les\", \"le\"]\n\n else:\n pos_markers = {}\n prefixes = []\n\n abbreviations = [\n ('vb', 'verb'),\n ('v.', 'verb'),\n ('v', 'verb'),\n ('adj', 'adjective'),\n ('nn', 'noun'),\n ('n.', 'noun'),\n ('adv', 'adverb'),\n ('noun', 'noun'),\n ('verb', 'verb'),\n ('adjective', 'adjective'),\n ('cls', 'classifier')\n ]\n\n # we use /// as our internal marker for glosses preceded by concepticon\n # gloss information and followed by literal readings\n if '///' in gloss:\n gloss = gloss.split('///')[1]\n\n # if the gloss consists of multiple parts, we store both the separate part\n # and a normalized form of the full gloss\n constituents = [x.strip() for x in re.split(',|;|:|/| or | OR ', gloss) if x.strip()]\n if len(constituents) > 1:\n constituents += [' / '.join(sorted([c.strip() for c in constituents]))]\n\n for constituent in constituents:\n if constituent.strip():\n res = Gloss(gloss=gloss)\n mainpart = ''\n in_comment = False\n for char in constituent:\n if char in '([{(<':\n in_comment = True\n res.comment_start += char\n elif char in ')]})>':\n in_comment = False\n res.comment_end += char\n else:\n if in_comment:\n res.comment += char\n else:\n mainpart += char\n\n mainpart = ''.join(m for m in mainpart if m not in '?!\"¨:;,»«´“”*+-')\\\n .strip().lower().split()\n\n # search for pos-markers\n if gpos:\n res.pos = gpos\n else:\n if len(mainpart) > 1 and mainpart[0] in pos_markers:\n gpos = res.pos = pos_markers[mainpart.pop(0)]\n\n # search for strip-off-prefixes\n if len(mainpart) > 1 and mainpart[0] in prefixes:\n res.prefix = mainpart.pop(0)\n\n if mainpart:\n # check for a \"first part\" in case we encounter white space in the\n # data (and return only the largest string of them)\n res.longest_part = sorted(mainpart, key=lambda x: len(x))[-1]\n\n # search for pos in comment\n if not res.pos:\n cparts = res.comment.split()\n for p, t in sorted(\n abbreviations, key=lambda x: len(x[0]), reverse=True):\n if p in cparts or p in mainpart or t in cparts or t in mainpart:\n res.pos = t\n break\n\n res.main = ' '.join(mainpart)\n G.append(res)\n\n return G\n\n\ndef concept_map2(from_, to, freqs=None, language='en', **_):\n # get frequencies\n freqs = freqs or defaultdict(int)\n\n # extract glossing information from the data\n glosses = {'from': defaultdict(list), 'to': defaultdict(list)}\n mapped = defaultdict(lambda: defaultdict(list))\n for l_, key in [(from_, 'from'), (to, 'to')]:\n for i, concept in enumerate(l_):\n for gloss in parse_gloss(concept, language=language):\n glosses[key][i] += 
[gloss]\n mapped[gloss.main][key] += [i]\n mapping = {}\n sims = {}\n for k, v in mapped.items():\n if 'from' in v and 'to' in v:\n for i in v['from']:\n current_sim = sims.get(i, 10)\n best = mapping.get(i, set())\n for j in v['to']:\n for glossA in glosses['from'][i]:\n for glossB in glosses['to'][j]:\n sim = glossA.similarity(glossB) or 10\n if sim < current_sim:\n best = {j}\n current_sim = sim\n elif sim == current_sim:\n best.add(j)\n mapping[i] = best\n sims[i] = current_sim\n for i in mapping:\n mapping[i] = (\n sorted(\n mapping[i], key=lambda x: freqs.get(to[x].split('///')[0], 0),\n reverse=True),\n sims[i])\n return mapping\n\n\ndef concept_map(from_, to, similarity_level=5, language='en', **kw):\n \"\"\"\n Function compares two concept lists and outputs suggestions for mapping.\n\n Notes\n -----\n Idea is to take one conceptlist as the basic list and then to search for a plausible\n mapping of concepts in the second list to the first list. All suggestions can then be\n output in various forms, both with multiple matches excluded or included, and in\n textform or in other forms.\n\n What is important, regarding the output here, is, that the output contains all\n matches, including non-matched items which occur **in the second list but not in the\n first list**. Non-matched items which occur in the first list but not in the second\n list are ignored.\n \"\"\"\n # extract glossing information from the data\n glosses = {'from': {}, 'to': {}}\n for l_, key in [(from_, 'from'), (to, 'to')]:\n for i, concept in enumerate(l_):\n if isinstance(concept, tuple):\n concept, pos, frequency = concept\n else:\n pos, frequency = None, 0\n glosses[key][i] = parse_gloss(concept, language=language)\n if pos or frequency:\n for gloss in glosses[key][i]:\n gloss.pos = pos\n gloss.frequency = frequency\n # now that we have prepared all the glossed list as planned, we compare them item by\n # item and check for similarity\n sims = []\n for i, fglosses in glosses['from'].items():\n for fgloss in fglosses:\n for j, tglosses in glosses['to'].items():\n for tgloss in tglosses:\n sim = fgloss.similarity(tgloss)\n if sim and sim <= similarity_level:\n sims.append((i, j, sim, tgloss.frequency))\n\n # we keep track of which target concepts have already been chosen as best matches:\n best, consumed, alternatives = {}, set(), defaultdict(list)\n\n # go through *all* matches from best to worst:\n for i, j, sim, frequency in sorted(sims, key=lambda x: (x[2], -x[3])):\n if i not in best and j not in consumed:\n best[i] = ([j], sim)\n consumed.add(j)\n elif j not in alternatives[i]:\n alternatives[i].append(j)\n\n return best\n","sub_path":"src/pyconcepticon/glosses.py","file_name":"glosses.py","file_ext":"py","file_size_in_byte":11432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"326447562","text":"import io\nimport sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\nimport sklearn.cluster\nimport sklearn.metrics\nimport sklearn.mixture\n\nimport scipy\nimport scipy.cluster\nfrom scipy.cluster.hierarchy import dendrogram, linkage, fcluster\n\nimport itertools\n\n###################\n#### Locations ####\n###################\n\nproject_location = \"../../\"\nfunction_location = project_location + \"code/functions/\"\ndata_location = project_location + \"data/\"\ndata_edited = data_location + \"edited/\"\ndata_created = data_location + \"created/\"\nimages = project_location + 
\"images/\"\n\nsys.path.append(data_edited)\nsys.path.append(function_location)\n\n\nfrom visuals_functions import three_d_scatter_rotation_gen,three_d_cluster_rotation_gen\nfrom visuals_functions import pair_plot_funct, three_d_plot_funct\nfrom outlier_and_normalization_functions import leverage_make,normalize_matrix\nfrom kappa_functions import kappa_function, max_kappa\n\n\n################\n# Loading Data #\n################\n\nX_full = np.load(data_created+\"X_full.npy\")\nnames_full = np.load(data_created+\"names_full.npy\")\nX_full_scaled = np.load(data_created+\"X_full_scaled.npy\")\nX_wo = np.load(data_created+\"X_wo.npy\")\nX_wo_scaled = np.load(data_created+\"X_wo_scaled.npy\")\nnames_wo = np.load(data_created+\"names_wo.npy\")\n\ndata_names = [\"X_full\",\"X_full_scaled\",\"X_wo\",\"X_wo_scaled\"]\ndata = [ X_full , X_full_scaled , X_wo , X_wo_scaled ]\n\n\n\nwo_grey = [np.arange(X_full.shape[-1]),np.array([0,2,3,4])]\n\n\n###################################################\n# Comments on standard to hold prediction vectors #\n###################################################\n\n# We will hold prediction vectors in specific structure, since the only change \n# in shape comes for the data, we're going ot create a list of np arrays for \n# each data set seperately. In our parition algorithms we'll have \n# (num_observations x num_clusters x 2) np array, and for our hierarchical \n# algorithms we'll just have (num_observations x 2). Where the final dimension \n# for both accounts for potential inclusion of the \"grey\" feature.\n\n\n# To compare later we'll need to address the change in number of observations\nkept_truth=np.array([True if x in names_wo else False for x in names_full])\nassert(np.all(names_full[kept_truth]==names_wo))\n\n\n\n\n###############\n#### Kmeans ###\n###############\n\ncluster_choices = [2,3]\nkmean_predictions= [np.zeros((X_full.shape[0],len(cluster_choices),2)) for x in np.arange(4)]\n\n# progress bar\n####\ntoolbar_width = len(data)*4\nsys.stdout.write(\"Kmeans: \")\nsys.stdout.write(\"[%s]\" % (\" \" * toolbar_width))\nsys.stdout.flush()\nsys.stdout.write(\"\\b\" * (toolbar_width + 1))\n####\n\n\nfor num_data, index_clusters,grey_option in itertools.product(np.arange(len(data)),np.arange(len(cluster_choices)),wo_grey):\n\t# removing grey name\n\tgrey_output, kk = \"wo_grey\", 1\n\tif len(grey_option) == X_full.shape[-1]:\n\t\tgrey_output, kk = \"w_grey\",0\n\n\t# choosing the correct number of clusters\n\tnum_clusters=cluster_choices[index_clusters]\n\n\n\tX = data[num_data][:,grey_option]\n\n\tkmean = sklearn.cluster.KMeans(num_clusters)\n\tprediction = kmean.fit_predict(X)\n\n\tpairplotsX = pd.concat([pd.DataFrame(X),pd.DataFrame(prediction)],axis=1)\n\tpairplotsX.columns = [\"x\"+str(i) for i in range(len(grey_option))]+[\"prediction\"]\n\n\t# storage of prediction vector (dealing with different num of observations)\n\tstorage_prediction = prediction\n\tif X.shape[0]!=X_full.shape[0]:\n\t\tstorage_prediction = np.ones((X_full.shape[0]))*-1\n\t\t# for the outliers\n\t\tstorage_prediction[kept_truth] = prediction \n\n\tkmean_predictions[num_data][:,index_clusters,kk]=storage_prediction\n\n\t# imaging \n\timage_extension = \"kmeans\"+\"(\"+str(num_clusters)+\")_\"+data_names[num_data]+\"_\"+grey_output+\".png\"\n\n\t# pairs plots\n\tpair_plot_funct(pairplotsX,image_extension=image_extension)\n\t\n\t# 3d 
plots\n\tthree_d_plot_funct(X,prediction,np.arange(X.shape[-1])[-3:],\n\t\timage_extension=image_extension)\n\n\n\tsys.stdout.write(\"-\")\n\tsys.stdout.flush()\n\nsys.stdout.write(\"\\n\")\n\n\n###################\n#### Dirichlet ####\n###################\n\ncluster_choices = [2,3]\ndirichlet_predictions= [np.zeros((X_full.shape[0],len(cluster_choices),2)) for x in np.arange(4)]\n\n# progress bar\n####\ntoolbar_width = len(data)*4\nsys.stdout.write(\"Dirichlet: \")\nsys.stdout.write(\"[%s]\" % (\" \" * toolbar_width))\nsys.stdout.flush()\nsys.stdout.write(\"\\b\" * (toolbar_width + 1))\n####\n\n\nfor num_data, index_clusters,grey_option in itertools.product(np.arange(len(data)),np.arange(len(cluster_choices)),wo_grey):\n\t# removing grey name\n\tgrey_output, kk = \"wo_grey\", 1\n\tif len(grey_option) == X_full.shape[-1]:\n\t\tgrey_output, kk = \"w_grey\",0\n\n\t# choosing the correct number of clusters\n\tnum_clusters=cluster_choices[index_clusters]\n\n\n\tX = data[num_data][:,grey_option]\n\tdirichlet = sklearn.mixture.DPGMM(num_clusters)\n\tprediction = dirichlet.fit_predict(X)\n\n\tpairplotsX = pd.concat([pd.DataFrame(X),pd.DataFrame(prediction)],axis=1)\n\tpairplotsX.columns = [\"x\"+str(i) for i in range(len(grey_option))]+[\"prediction\"]\n\n\t# storage of prediction vector (dealing with different num of observations)\n\tstorage_prediction = prediction\n\tif X.shape[0]!=X_full.shape[0]:\n\t\tstorage_prediction = np.ones((X_full.shape[0]))*-1\n\t\t# for the outliers\n\t\tstorage_prediction[kept_truth] = prediction \n\n\tdirichlet_predictions[num_data][:,index_clusters,kk]=storage_prediction\n\n\t# imaging \n\timage_extension = \"dirichlet\"+\"(\"+str(num_clusters)+\")_\"+data_names[num_data]+\"_\"+grey_output+\".png\"\n\n\t# pairs plots\n\tpair_plot_funct(pairplotsX,image_extension=image_extension)\n\t\n\t# 3d plots\n\tthree_d_plot_funct(X,prediction,np.arange(X.shape[-1])[-3:],\n\t\timage_extension=image_extension)\n\n\n\tsys.stdout.write(\"-\")\n\tsys.stdout.flush()\n\nsys.stdout.write(\"\\n\")\n\n\n\nnp.save(data_created+\"kmean_predictions.npy\",kmean_predictions)\nnp.save(data_created+\"dirichlet_predictions.npy\",dirichlet_predictions)\n\n\n\n\n# # single hierarchial clustering\n\n# distance = scipy.spatial.distance.pdist(X)\n# single_hierarchy = scipy.cluster.hierarchy.single(distance)\n\n# max_d = 1.05\n# prediction = fcluster(single_hierarchy_minus2, max_d, criterion='distance')\n# prediction_single_hier = prediction\n\n# pairplotsX = pd.concat([pd.DataFrame(data[2]),pd.DataFrame(prediction)],axis=1)\n# pairplotsX.columns = [\"x\"+str(i) for i in range(5)]+[\"prediction\"]\n\n# sns.set_style(\"white\")\n# g = sns.PairGrid(pairplotsX,hue=\"prediction\",vars=pairplotsX.columns[:-1],size=1.5)\n# g.map_diag(plt.hist)\n# g.map_upper(plt.scatter)\n# g.map_lower(plt.scatter)\n# #g.add_legend()\n\n\n# d_k = sklearn.metrics.confusion_matrix(prediction_dirichlet,prediction_kmean)\n# s_k = sklearn.metrics.confusion_matrix(prediction_single_hier,prediction_kmean)\n# d_s = sklearn.metrics.confusion_matrix(prediction_dirichlet,prediction_single_hier)\n\n\n\n\n# amount_single_1_k_s = np.zeros(prediction_single_hier.shape[0])\n# amount_single_0_k_s = np.zeros(prediction_single_hier.shape[0])\n\n# cluster_num = set(prediction_single_hier)\n\n# for i in cluster_num:\n# \tamount_single_1_k_s[i-1] = sum(prediction_kmean[prediction_single_hier==i])\n# \tamount_single_0_k_s[i-1] = sum(prediction_single_hier==i) - sum(prediction_kmean[prediction_single_hier==i])\n\n# 
plt.scatter(prediction_single_hier,prediction_kmean+np.arange(len(prediction_kmean))/len(prediction_kmean))\n# plt.plot([0,42],[1,1])\n\n# for x,y,lower,upper in zip(cluster_num,np.zeros(len(cluster_num))-.1,[str(x) for x in amount_single_0_k_s],\n# \t[str(x) for x in amount_single_1_k_s]):\n# \t# print(x,y,item)\n# \tplt.text(x,y,lower,fontsize=8,rotation=90)\n# \tplt.text(x,y+2.2,upper,fontsize=8,rotation=90)\n\n\n\n# fig = plt.figure()\n# ax1 = fig.add_subplot(211)\n# ax1.hist(prediction_single_hier[prediction_kmean==0],bins=len(cluster_num))\n# ax1.set_xlim(0,42)\n# ax1.grid(True)\n# ax1.set_ylim(0,80)\n# ax1.set_title(\"Kmean = 0\")\n# ax2 = fig.add_subplot(212)\n# ax2.grid(True)\n# ax2.hist(prediction_single_hier[prediction_kmean==1],bins=len(cluster_num))\n# ax2.set_xlim(0,42)\n# ax2.set_ylim(0,40)\n# ax2.set_title(\"Kmean = 1\")\n# plt.show()\n\n\n\n\n","sub_path":"code/scripts/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"299990577","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom travel import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'travel.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n \n url(r'^ckeditor/', include('ckeditor.urls')),\n \n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'first.views.index'),\n url(r'^login$', 'first.views.login'), \n url(r'^logout$', 'first.views.logout'),\n url(r'^register$', 'first.views.register'),\n \n url(r'^line/(\\d+$)','first.views.line'), \n url(r'^linegroup/(\\d+$)','first.views.line_group'),\n \n url(r'^lineorder/(\\d+$)','first.views.order'),\n \n url(r'^membercenter$', 'first.views.membercenter'),\n url(r'^myinfo$', 'first.views.myinfo'),\n url(r'^changepass$', 'first.views.changepass'),\n \n url(r'^cjline$', 'first.views.cjline'),\n url(r'^longline$', 'first.views.longline'),\n url(r'^shortline$', 'first.views.shortline'),\n \n url(r'^about$', 'first.views.about'),\n url(r'^linelist$', 'first.views.linelist'),\n \n url(r'^docment$', 'first.views.docment'),\n url(r'^docmentshow/(\\d+$)', 'first.views.docmentshow'),\n url(r'^picshow$', 'first.views.picshow'),\n url(r'^suggest$', 'first.views.suggest'),\n \n) \n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nurlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT )\n\n \n ","sub_path":"travel/travel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"388210013","text":"\"\"\"This module contains PlayerScreen class\"\"\"\n\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\n\nclass PlayerScreen(Screen):\n \"\"\"No ideas\"\"\"\n def __init__(self, **kwargs):\n super(PlayerScreen, self).__init__(**kwargs)\n root_box = BoxLayout(orientation=\"vertical\")\n root_box.add_widget(self.config_player_add_interface_())\n player_grid = GridLayout(cols=2, size_hint=(1, .85))\n root_box.add_widget(player_grid)\n root_box.add_widget(self.config_status_bar_())\n self.add_widget(root_box)\n\n def config_player_add_interface_(self):\n 
\"\"\"Method to configure player add interface\"\"\"\n add_user_box = BoxLayout(orientation=\"horizontal\", size_hint=(1, .05))\n name_input = TextInput(text='', size_hint=(.8, 1))\n add_button = Button(text='add', size_hint=(.2, 1),\n on_press=self.add_callback)\n add_user_box.add_widget(name_input)\n add_user_box.add_widget(add_button)\n return add_user_box\n\n @staticmethod\n def config_status_bar_():\n \"\"\"Method to configure status bar add interface\"\"\"\n res_box = BoxLayout(orientation='horizontal', size_hint=(1, .1))\n res_box.add_widget(Button(text='Готово', size_hint=(.2, 1)))\n res_box.add_widget(Button(text='Отмена', size_hint=(.2, 1)))\n return res_box\n\n @staticmethod\n def add_callback(instance):\n \"\"\"callback method to add player\"\"\"\n player_name = instance.parent.children[1].text\n instance.parent.children[1].text = ''\n player_grid = instance.parent.parent.children[1]\n player_grid.add_widget(Label(text=player_name))\n","sub_path":"players_screen.py","file_name":"players_screen.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"152472087","text":"# The goal of this skeleton is helping you start with the credential.\n# Following this API is not mandatory and you can change it as you see fit.\n# This skeleton only provides major classes/functionality that you need. You \n# should define more classes and functions.\n\n# Hint: for a clean code, you should create classes for messages that you want\n# to pass between user and issuer. The serialization helps you with (de)serializing them\n# (network API expects byte[] as input).\n\nfrom serialization import jsonpickle\nfrom petrelic.multiplicative.pairing import G1, G2\nfrom petrelic.bn import Bn\nimport hashlib\n\n\nclass PSSignature(object):\n \"\"\"PS's Multi-message signature from section 4.2\n \n **Important** This class has no direct use in the project.\n\n Implementing this class allows you to get familiar with coding crypto schemes\n and its simplicity in comparison with the ABC scheme allows you to realize\n misunderstandings/problems early on.\n \"\"\"\n @classmethod\n def generate_key(cls):\n gen = G2.generator()\n sk = [G1.order().random(),G1.order().random()]\n pk = [gen] + [gen ** i for i in sk]\n return sk, pk\n\n @classmethod\n def sign(cls, sk, messages):\n m = Bn.from_binary(hashlib.sha256(messages).digest())\n h = G1.generator() ** G1.order().random() \n while h == G1.neutral_element():\n h = G1.generator() ** G1.order().random()\n sig = [h, h ** (sk[0] + sk[1] * m)]\n return sig\n \n @classmethod\n def verify(cls, pk, messages, signature):\n m = Bn.from_binary(hashlib.sha256(messages).digest())\n is_gen = signature[0] == G1.neutral_element()\n is_valid = signature[0].pair(pk[1] * pk[2] ** m) == signature[1].pair(pk[0])\n return is_valid and not is_gen\n\n\nclass Issuer(object):\n \"\"\"Allows the server to issue credentials\"\"\"\n\n @staticmethod\n def issue(sk, request, username, attributes):\n \"\"\"Issues a credential for a new user. \n\n This function should receive a issuance request from the user\n (AnonCredential.create_issue_request), and a list of known attributes of the\n user (e.g. 
the server received bank notes for subscriptions x, y, and z).\n\n You should design the issue_request as you see fit.\n \"\"\"\n #extract public and secret key\n secret_key = sk[0]\n public_key = sk[1]\n\n #Derive challenge\n challenge = hashlib.sha256(jsonpickle.encode(request.C).encode())\n challenge.update(jsonpickle.encode(request.commitment).encode())\n challenge.update(jsonpickle.encode(public_key).encode())\n challenge = Bn.from_binary(challenge.digest())\n\n #Compare the derived challenge to the received challenge\n challenge_valid = challenge == request.challenge\n\n #Compute the zkp\n candidate = request.C ** challenge\n for e in zip(public_key, request.response):\n candidate = candidate * e[0] ** e[1]\n \n\n proof_valid = request.commitment == candidate\n\n #If the proof and the derived challenge is valid, sig the credential\n if proof_valid and challenge_valid:\n u = G1.order().random()\n sig = (public_key[0] ** u,(secret_key * request.C) ** u)\n return sig\n else :\n raise ValueError\n\n\nclass AnonCredential(object):\n \"\"\"An AnonCredential\"\"\"\n\n def __init__(self, server_pk, credential, attributes):\n self.server_pk = server_pk\n self.credential = credential\n self.attributes = attributes\n \n @staticmethod\n def create_issue_request(server_pk, attributes):\n \"\"\"Gets all known attributes (subscription) of a user and creates an issuance request.\n You are allowed to add extra attributes to the issuance.\n\n You should design the issue_request as you see fit.\n \"\"\"\n attributes = [Bn.from_binary(hashlib.sha256(attr.encode()).digest()) for attr in attributes]\n gen_g1 = server_pk[0]\n t = G1.order().random()\n\n #Gen C\n C = gen_g1 ** t\n for e in zip(server_pk[1:], attributes):\n C = C * e[0] ** e[1]\n \n #Gen commitment\n comm_values = [G1.order().random() for _ in range(len(attributes) + 1)]\n comm = gen_g1 ** comm_values[0]\n for e in zip(server_pk[1:], comm_values[1:]):\n comm = comm * e[0] ** e[1]\n \n #Gen challenge\n challenge = hashlib.sha256(jsonpickle.encode(C).encode())\n challenge.update(jsonpickle.encode(comm).encode())\n challenge.update(jsonpickle.encode(server_pk).encode())\n challenge = Bn.from_binary(challenge.digest())\n\n #Generate response\n response = [e[0].mod_sub(challenge * e[1],G1.order()) for e in zip(comm_values, [t] + attributes)]\n\n\n return IssuanceRequest(C, comm, challenge, response),t\n\n\n @staticmethod\n def receive_issue_response(server_pk, response, private_state):\n \"\"\"This function finishes the credential based on the response of issue.\n\n Hint: you need both secret values from the create_issue_request and response\n from issue to build the credential.\n\n You should design the issue_request as you see fit.\n \"\"\"\n t = private_state[0]\n attributes = private_state[1]\n return AnonCredential(server_pk,(response[0], response[1] / (response[0] ** t)),attributes)\n\n def sign(self, message, revealed_attr):\n \"\"\"Signs the message.\n\n Args:\n message (byte []): message\n revealed_attr (string []): a list of revealed attributes\n\n Return:\n Signature: signature\n \"\"\"\n #public_key separation\n nb_attr_public_key = (len(self.server_pk) - 3) // 2\n gen_g1_pk = self.server_pk[0]\n public_key1 = self.server_pk[1:nb_attr_public_key + 1]\n gen_g2_pk = self.server_pk[nb_attr_public_key + 1]\n x_g2_pk = self.server_pk[nb_attr_public_key + 2]\n public_key2 = self.server_pk[nb_attr_public_key + 3:]\n\n #Gen signature\n r = G1.order().random()\n t = G1.order().random()\n signature = (self.credential[0] ** r, 
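# blind the PS credential with fresh r and fold in t: (sigma1^r, (sigma2 * sigma1^t)^r) keeps each showing unlinkable\n            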
(self.credential[1] * self.credential[0]**t)**r)\n\n #attributes work\n revealed_attributes_idx = [self.attributes.index(attr) for attr in self.attributes if attr in revealed_attr]\n revealed_attributes_bn = [Bn.from_binary(hashlib.sha256(attr.encode()).digest()) for attr in revealed_attr]\n hidden_attributes_idx = [self.attributes.index(attr) for attr in self.attributes if attr not in revealed_attr]\n hidden_attributes_bn = [Bn.from_binary(hashlib.sha256(attr.encode()).digest()) for attr in self.attributes if attr not in revealed_attr]\n\n\n #Gen C (left-hand side)\n C = signature[1].pair(gen_g2_pk) / signature[0].pair(x_g2_pk)\n for i in range(len(revealed_attr)):\n C = C * signature[0].pair(public_key2[revealed_attributes_idx[i]]) ** (-revealed_attributes_bn[i] % G1.order())\n \n\n #Gen commitment (to prove right-hand side)\n comm_values = [G1.order().random() for _ in range(len(hidden_attributes_idx) + 1)]\n comm = signature[0].pair(gen_g2_pk) ** comm_values[0]\n for e in zip(hidden_attributes_idx, comm_values[1:]):\n comm = comm * signature[0].pair(public_key2[e[0]])**e[1]\n\n\n #Gen Challenge\n challenge = hashlib.sha256(jsonpickle.encode(C).encode())\n challenge.update(jsonpickle.encode(comm).encode())\n challenge.update(jsonpickle.encode(self.server_pk).encode())\n challenge.update(message)\n challenge = Bn.from_binary(challenge.digest())\n\n #Gen Responses\n response = [e[0].mod_sub(challenge * e[1],G1.order()) for e in zip(comm_values, [t] + hidden_attributes_bn)]\n\n\n return Signature(signature, comm, challenge, response, revealed_attributes_idx)\n\n\nclass Signature(object):\n \"\"\"A Signature\"\"\"\n\n def __init__(self, signature, commitment, challenge, response, attributes_idx):\n self.signature = signature\n self.commitment = commitment\n self.challenge = challenge\n self.response = response\n self.attributes_idx = attributes_idx\n\n def verify(self, issuer_public_info, public_attrs, message):\n \"\"\"Verifies a signature.\n\n Args:\n issuer_public_info (): output of issuer's 'get_serialized_public_key' method\n public_attrs (dict): public attributes\n message (byte []): list of messages\n\n returns:\n valid (boolean): is signature valid\n \"\"\"\n #public_key separation\n nb_attr_public_key = (len(issuer_public_info) - 3) // 2\n gen_g1_pk = issuer_public_info[0]\n public_key1 = issuer_public_info[1:nb_attr_public_key + 1]\n gen_g2_pk = issuer_public_info[nb_attr_public_key + 1]\n x_g2_pk = issuer_public_info[nb_attr_public_key + 2]\n public_key2 = issuer_public_info[nb_attr_public_key + 3:]\n\n #attributes work\n nb_attr = len(self.response) - 1 + len(public_attrs)\n public_attributes_idx = self.attributes_idx\n public_attributes_bn = [Bn.from_binary(hashlib.sha256(attr.encode()).digest()) for attr in public_attrs]\n hidden_attributes_idx = [i for i in range(nb_attr) if i not in public_attributes_idx]\n\n\n #Gen C (left-hand side)\n C = self.signature[1].pair(gen_g2_pk) / self.signature[0].pair(x_g2_pk)\n for i in range(len(public_attrs)):\n C = C * self.signature[0].pair(public_key2[public_attributes_idx[i]]) ** (-public_attributes_bn[i] % G1.order())\n\n #Gen Challenge\n challenge = hashlib.sha256(jsonpickle.encode(C).encode())\n challenge.update(jsonpickle.encode(self.commitment).encode())\n challenge.update(jsonpickle.encode(issuer_public_info).encode())\n challenge.update(message)\n challenge = Bn.from_binary(challenge.digest())\n\n #check challenge\n challenge_valid = challenge == self.challenge\n\n #Compute zkp\n candidate = C ** challenge * 
self.signature[0].pair(gen_g2_pk) ** self.response[0]\n for e in zip(hidden_attributes_idx, self.response[1:]):\n candidate = candidate * self.signature[0].pair(public_key2[e[0]]) ** e[1]\n\n proof_valid = candidate == self.commitment\n\n\n return challenge_valid and proof_valid \n\n def serialize(self):\n \"\"\"Serialize the object to a byte array.\n\n Returns: \n byte[]: a byte array \n \"\"\"\n data = jsonpickle.encode(self)\n return data.encode()\n\n @staticmethod\n def deserialize(data):\n \"\"\"Deserializes the object from a byte array.\n\n Args: \n data (byte[]): a byte array \n\n Returns:\n Signature\n \"\"\"\n return jsonpickle.decode(data)\n\n\nclass IssuanceRequest(object):\n \"\"\"An Issuance Request\"\"\"\n def __init__(self, C, commitment, challenge, response):\n self.C = C\n self.commitment = commitment\n self.challenge = challenge\n self.response = response\n \n def serialize(self):\n data = jsonpickle.encode(self)\n return data.encode()\n\n @staticmethod\n def deserialize(data):\n return jsonpickle.decode(data)","sub_path":"part1/credential.py","file_name":"credential.py","file_ext":"py","file_size_in_byte":11289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"162215079","text":"import util, traceback, json\n\n\nclass logger:\n \"\"\" This class is responsible for most of the communication to the student and logging for\n debugging purposes\n \"\"\"\n\n def __init__(self, manager):\n \"\"\" Initialization function, not much to see here\n \"\"\"\n self.manager = manager\n self.name = \"logger\"\n\n def print_formatted_tests(self, received, possible, percent_int, tests, ungraded_tests):\n \"\"\" This is the basic 'pretty print' function, will return color coded output\n on student test submission, but strips that out for final submission as it makes\n the text come through in a weird way\n \"\"\"\n\n # Initial notification block\n msg = \"Beginning formatted test printing\"\n self.log(msg)\n msg = \"{}\\n\\n\\n=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=\\n\".format(\n util.bcolors.OKBLUE if not self.manager.is_secure else \"\")\n msg += \"=*=*=*=*=*=*=*=*=*=*= {}YOUR AUTOGRADING RESULTS BELOW {}*=*=*=*=*=*=*=*=*=*=*=*=*=*=\\n\".format(\n util.bcolors.OKGREEN if not self.manager.is_secure else \"\",\n util.bcolors.OKBLUE if not self.manager.is_secure else \"\")\n msg += \"=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*={}\".format(\n util.bcolors.ENDC if not self.manager.is_secure else \"\")\n msg += \"\\n\\nFor additional details about any error(s), check the output above containing the full stack trace\"\n print(msg)\n\n # Jump to the helper\n self.print_formatted_tests_helper(tests, True)\n self.print_formatted_tests_helper(ungraded_tests, False)\n\n # If this is the final submission or a MOOC, print out a grade\n if self.manager.is_secure or self.manager.is_mooc:\n self.print_percent_grade(received, possible, percent_int)\n\n def print_formatted_tests_helper(self, tests, graded):\n \"\"\" Instead of writing two functions to do the same thing, this one will print both graded and\n ungraded test results\n \"\"\"\n if tests is None:\n return\n # Print each test in each language\n for lang in tests:\n if len(tests[lang]) == 0:\n continue\n msg = \"{}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{}\\n\".format(\n util.bcolors.OKBLUE, util.bcolors.ENDC)\n msg += \"Results for {}{}{}\\n\".format(\n 
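# green ANSI colour around the language name\n                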
util.bcolors.OKGREEN, lang, util.bcolors.ENDC)\n self.log(msg)\n print(\"\\n\\n{}\".format(msg))\n\n # List out the file in which the test appears\n for test_file in tests[lang]:\n if test_file == 'included':\n continue\n msg = \"\\n\\tTest file: {}{}{}\".format(\n util.bcolors.OKGREEN, test_file, util.bcolors.ENDC)\n self.log(msg)\n print(msg)\n\n # List the test cases below each file in the fashion of:\n # > +1/1 pts [PASS] test_case_1\n # > +0/1 pts [FAIL] test_case_2\n for test_case in tests[lang][test_file]:\n t = tests[lang][test_file][test_case]\n if not t['performed']:\n continue\n\n received = t['weight'] if t['passed'] else 0\n if 'partial' in t.keys():\n received = t['partial']\n msg = \"\\t[{}] > +{}/{} pts {}{}{}\".format(\n \"{}PASS{}\".format(\n util.bcolors.OKBLUE if not self.manager.is_secure else \"\",\n util.bcolors.ENDC if not self.manager.is_secure else \"\")\n if t['passed'] else \"{}FAIL{}\".format(\n util.bcolors.FAIL if not self.manager.is_secure else \"\",\n util.bcolors.ENDC if not self.manager.is_secure else \"\"),\n received,\n t['weight'],\n util.bcolors.OKGREEN if not self.manager.is_secure else \"\",\n test_case,\n util.bcolors.ENDC if not self.manager.is_secure else \"\")\n if not t['passed']:\n if t['message'] is not None:\n msg += ' => {}'.format(t['message'][:min(len(t['message']), 100)])\n else:\n msg += ' => n/a'\n\n self.log(msg)\n print(msg)\n\n def print_percent_grade(self, received, possible, percent_int):\n \"\"\" Print out the grade as a %, with test failures shown as fraction.\n This should be used if the script has run in secure mode and all tests\n are included. It will be confusing if this is shown to students when they\n run their available tests, but hidden tests run after the fact as the\n percentage won't match\n \"\"\"\n msg = \"{}\\n\\n=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*={}\\n\".format(\n util.bcolors.OKBLUE if not self.manager.is_secure else \"\",\n util.bcolors.ENDC if not self.manager.is_secure else \"\")\n msg += \"Final score: {}{}% ({}/{} pts){}\\n\\n\".format(\n util.bcolors.OKGREEN if not self.manager.is_secure else \"\",\n percent_int,\n received,\n possible,\n util.bcolors.ENDC if not self.manager.is_secure else \"\")\n self.log(msg)\n print(msg)\n\n def log(self, message, error=None, raising_class=None):\n \"\"\" Simple logging function\n \"\"\"\n f = open(self.manager.manager_logger, 'a+')\n\n if raising_class is None:\n raising_class = self\n\n to_write = '{} > {}:\\t{}\\n'.format(\n util.get_datetime(),\n raising_class.name,\n message)\n f.write(to_write)\n if self.manager.debug_mode:\n print(to_write, end='')\n if error is not None:\n to_write = '\\t{}\\n'.format(error)\n to_write += '\\t{}\\n'.format(traceback.format_exc())\n f.write(to_write)\n if self.manager.debug_mode:\n print(to_write, end='')\n f.close()\n\n\n\n","sub_path":"Inhertiance & Data Structures in Java_HW 3/.guides/test/control/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"302160122","text":"import json\nimport pandas as pd\nimport argparse\nfrom pathlib import Path\n\npath_group = '/deep/group/aihc-bootcamp-spring2020/localize'\n\ndef clean_label(dataset = 'valid'):\n \"\"\"\n Rewrite md.ai labels to an organized format\n \"\"\" \n label_file = f'{path_group}/annotations/val_test_mdai_stanford_project_7.json'\n \n name_mapping_file = 
f'{path_group}/annotations/mdai_stanford_project_7_mapping_{dataset}.csv'\n write_file = f'{path_group}/annotations/{dataset}_annotations.json'\n \n if not Path(label_file).exists():\n print(\"Annotations file not found\")\n return\n \n if not Path(name_mapping_file).exists():\n print(\"Annotations file not found\")\n return\n \n print(f'Reading annotations from {label_file}')\n with open(label_file) as f:\n chexpert_valid = json.load(f)\n \n # dictionary for pathology labels\n label_dict = {}\n for label in chexpert_valid['labelGroups'][0]['labels']:\n label_id = label['id']\n name = label['name']\n name = 'Support Devices' if 'Support Device' in name else name\n name = 'Airspace Opacity' if 'Lung Opacity' in name else name\n label_dict[label_id] = name\n \n # all annotations\n if dataset == 'valid':\n annotations = chexpert_valid['datasets'][0]['annotations']\n elif dataset == 'test':\n annotations = chexpert_valid['datasets'][1]['annotations']\n \n print('Transform names')\n # map from md ai id to image names \n name_mapping = pd.read_csv(name_mapping_file, sep=\",\")\n if dataset == 'valid':\n name_mapping['name'] = name_mapping['original_filename'].apply(lambda x: x.split('/')[1].replace(\".jpg\",'').replace('localize_','').replace('view','_view'))\n else:\n name_mapping['name'] = name_mapping['original_filename'].apply(lambda x: x.split('/')[1].replace(\".jpg\",'').replace('tlocalize_','').replace('view','_view'))\n name_dict = dict([(Id,name) for name,Id in zip(name_mapping.name, name_mapping.StudyInstanceUID)])\n\n ground_truth = {}\n \n print(f'Write to cleaned format at {write_file}')\n # write to new format\n not_found = 0\n for item in annotations:\n \n if item['StudyInstanceUID'] not in name_dict:\n not_found += 1\n continue\n \n name = name_dict[item['StudyInstanceUID']]\n\n label = label_dict[item['labelId']]\n polygon = item['data']['vertices']\n\n if name not in ground_truth:\n ground_truth[name] = {}\n \n # add image size\n ground_truth[name]['img_size'] = (item['height'],item['width'])\n \n # add pathology contour coordinates\n if label in ground_truth[name]:\n ground_truth[name][label].append(polygon)\n else:\n ground_truth[name][label]= [polygon]\n \n \n# for study_id in name_dict:\n# name = name_dict[study_id]\n# if name not in ground_truth:\n# ground_truth[name] = {}\n print(f\"Writing cleaned annotations to {write_file}\")\n with open(write_file, \"w\") as outfile: \n json.dump(ground_truth, outfile) \n \n return write_file\n\n\ndef clean_vietnam():\n \"\"\"\n Clean vietnam annotations and export to json\n \n The output json is organized such that:\n \n patientid:\n tasks1:\n polygons coordinates \n task2:\n ...\n \n Only the positive labels appear in the annotation data\n \"\"\"\n vietnam_file = f'{path_group}/annotations/test_vietnam.json'\n output_path = f'{path_group}/annotations/vietnam_annotations.json'\n \n print(f\"Loading annotation file from {vietnam_file}\")\n with open(vietnam_file) as f:\n chexpert_vietnam = json.load(f)\n \n print(\"Reading labels\")\n label_dict = {}\n for idx,label in enumerate(chexpert_vietnam['labelGroups']['0']['labels']):\n label_id = idx\n name = label['name']\n name = 'Support Devices' if 'Support Device' in name else name\n name = 'Airspace Opacity' if 'Lung Opacity' in name else name\n label_dict[label_id] = name \n \n annotations = chexpert_vietnam['datasets']['0']['annotations']\n \n print(\"Create clean annotation file\")\n vietnam_ann = {}\n \n for item in annotations:\n \n name = 
'_'.join(item['fileId'].replace('.jpg','').split('/')[1:]) \n label = label_dict[item['labelId']]\n img_size = [item['height'],item['width']]\n\n if name not in vietnam_ann:\n vietnam_ann[name] = {}\n\n if 'img_size' not in vietnam_ann[name]:\n vietnam_ann[name]['img_size'] = img_size\n\n for instance in item['data']:\n\n polygon = instance['vertices']\n\n if label in vietnam_ann[name]:\n vietnam_ann[name][label].append(polygon)\n else:\n vietnam_ann[name][label]= [polygon]\n \n print(f'Writing to new json at {output_path}')\n with open(output_path, \"w\") as outfile: \n json.dump(vietnam_ann, outfile)\n \n return output_path\n\n\ndef merge_ann(ann_file):\n \"\"\"\n Merge Cardiomegaly and Enlarged Cardiomediastinum annotations for the label Cardiomediastinum\n \n Args:\n ann_file(str): input annotation json file\n Returns:\n None\n (Save merged file to original location)\n \"\"\"\n print(f'Read original annotations from {ann_file}')\n with open(ann_file) as f:\n ann = json.load(f)\n \n print('Merge labels')\n for img_id in ann.keys():\n if 'Cardiomegaly' in ann[img_id]:\n \n if 'Enlarged Cardiomediastinum' in ann[img_id]:\n ann[img_id]['Enlarged Cardiomediastinum'] += ann[img_id]['Cardiomegaly']\n else:\n ann[img_id]['Enlarged Cardiomediastinum'] = ann[img_id]['Cardiomegaly']\n \n write_file = ann_file.replace('.json','_merged.json')\n print(f'Write to new file at {write_file}')\n with open(write_file, \"w\") as outfile: \n json.dump(ann, outfile) \n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default = 'valid' , help = \"valid, test or vietnam\")\n args = parser.parse_args()\n \n if args.dataset == \"vietnam\":\n output_path = clean_vietnam()\n else:\n output_path = clean_label(args.dataset)\n \n merge_ann(output_path)\n \n ","sub_path":"chexpert-model/localization_eval/clean_annotations.py","file_name":"clean_annotations.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"443771101","text":"# from kivy.utils import rgba\nfrom kivy.utils import get_color_from_hex\nfrom kivy.compat import string_types\n\n\ndef rgba(s, *args):\n '''Return a Kivy color (4 value from 0-1 range) from either a hex string or\n a list of 0-255 values.\n\n .. 
versionadded:: 1.9.2\n '''\n if isinstance(s, string_types):\n return get_color_from_hex(s)\n elif isinstance(s, (list, tuple)):\n s = list(map(lambda x: x / 255., s))\n return s\n elif isinstance(s, (int, float)):\n s = list(map(lambda x: x / 255., [s] + list(args)))\n return s\n raise Exception('Invalid value (not a string / list / tuple)')\n\n\nWHITE = rgba(255, 255, 255, 255)\nGUARDSMAN_RED = rgba(211, 1, 2, 255)\nCERISE = rgba(211, 54, 130, 255)\nCUTTY_SARK = rgba(88, 110, 117, 255)\nCURIOUS_BLUE = rgba(38, 141, 210, 255)\nLAUREL_GREEN = rgba(10, 130, 0, 255)\nAQUA_GREEN = rgba(42, 161, 152, 255)\nOLD_LACE = rgba(253, 246, 229, 255)\nBLACK = rgba(0, 0, 0, 255)\nGREY = rgba(184, 184, 184, 255)\n","sub_path":"colorscheme.py","file_name":"colorscheme.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"629297735","text":"def main():\r\n\r\n while True:\r\n\r\n num_nodes = int(input())\r\n if num_nodes == 0:\r\n break\r\n lines = []\r\n for _ in range(num_nodes):\r\n lines.append(input().split())\r\n graph = construct_graph(lines)\r\n print(graph)\r\n result = traverse_graph(graph, num_nodes)\r\n if result:\r\n print(\"Yes\")\r\n else:\r\n print(\"No\")\r\n\r\nclass Node(object):\r\n def __init__(this, typ, val, adjs):\r\n if typ == \"T\":\r\n val = -val\r\n this.typ = typ\r\n this.val = val\r\n this.adjs = adjs\r\n def __repr__(this):\r\n return \"({} {} {})\".format(this.typ, this.val, this.adjs)\r\n\r\ndef construct_graph(lines):\r\n graph = dict()\r\n for index in range(len(lines)):\r\n typ, val, *adjs, _ = lines[index]\r\n graph[index + 1] = Node(typ, int(val), [int(adj) for adj in adjs])\r\n return graph\r\n\r\ndef traverse_graph(graph, target):\r\n visited = dict()\r\n candidates = [(1, 0)]\r\n while len(candidates) > 0:\r\n node, money = candidates.pop()\r\n print(\"node: {}, money: {}\".format(node, money))\r\n if node == target:\r\n return True\r\n visited[node] = money\r\n\r\n # changing money amounts\r\n if graph[node].val > money:\r\n money = graph[node].val\r\n elif graph[node].val < 0:\r\n money += graph[node].val\r\n\r\n # adding candidates\r\n for adj in graph[node].adjs:\r\n if graph[adj].val + money >= 0:\r\n if adj not in visited:\r\n candidates.append((adj, money))\r\n else:\r\n if visited[adj] < money:\r\n candidates.append((adj, money))\r\n return False\r\n\r\nmain()\r\n","sub_path":"leprechauns_and_trolls_graph_100_point.py","file_name":"leprechauns_and_trolls_graph_100_point.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"539576919","text":"from pyengine.lib.error import *\nfrom pyengine.lib.command import Command\n\nDEFAULT_LIMIT = 50\n\nclass ListEvents(Command):\n\n # Request Parameter Info\n req_params = {\n 'user_id': ('o', 'str'),\n 'group_id': ('o', 'str'),\n 'limit': ('o', 'str'),\n }\n \n def __init__(self, api_request):\n super(self.__class__, self).__init__(api_request)\n\n def execute(self):\n search = self.makeSearch('user_id', 'group_id') \n search_or = self.params.get('search_or', [])\n sort = self.params.get('sort', {'key': 'created', 'desc':True})\n if self.params.has_key('limit'):\n page = self.params.get('page', {'limit':int(self.params['limit'])})\n else:\n page = self.params.get('page', {'limit':DEFAULT_LIMIT})\n res_params = self.params.get('res_params', [])\n\n mgr = self.locator.getManager('EventManager')\n\n (infos, total_count) = 
mgr.listEvents(search, search_or, sort, page, res_params)\n\n        response = {}\n        response['total_count'] = total_count\n        response['results'] = []\n\n        for info in infos:\n            response['results'].append(info.result(self.user_meta['timezone']))\n\n        return response\n","sub_path":"pyengine/api/v1/ListEvents.py","file_name":"ListEvents.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"135632143","text":"import random\r\n\r\nprint(\"\"\"Rock-Paper-Scissors game.\r\nIt is played five times; whoever wins more is the winner.\"\"\")\r\n\r\nTAS = 1\r\nKAGIT = 2\r\nMAKAS = 3\r\n\r\nsayac = 0\r\noyuncu = 0\r\nbilgisayar = 0\r\n\r\nwhile sayac < 5:\r\n\r\n    girdi = input(\"Rock-Paper-Scissors?\").lower().strip()\r\n    tahmin = random.randint(1,3)\r\n    \r\n    sayac += 1\r\n\r\n    if girdi == \"rock\":\r\n        \r\n        if tahmin == TAS:\r\n            print(\"Draw.\")\r\n        elif tahmin == KAGIT:\r\n            print(\"You lost.\")\r\n            bilgisayar += 1\r\n        else:\r\n            print(\"You won.\")\r\n            oyuncu += 1\r\n\r\n    elif girdi == \"paper\":\r\n        \r\n        if tahmin == TAS:\r\n            print(\"You won.\")\r\n            oyuncu += 1\r\n        elif tahmin == KAGIT:\r\n            print(\"Draw.\")\r\n        else:\r\n            print(\"You lost.\")\r\n            bilgisayar += 1\r\n\r\n    elif girdi == \"scissors\":\r\n        \r\n        if tahmin == TAS:\r\n            print(\"You lost.\")\r\n            bilgisayar += 1\r\n        elif tahmin == KAGIT:\r\n            print(\"You won.\")\r\n            oyuncu += 1\r\n        else:\r\n            print(\"Draw.\")\r\n    \r\n    else:\r\n        print(\"Please enter again.\")\r\n\r\n\r\nif oyuncu > bilgisayar:\r\n    print(\"\\nYou:\",oyuncu,\"\\nComputer:\",bilgisayar,\"\\nYou won.\")\r\n\r\nelif oyuncu < bilgisayar:\r\n    print(\"\\nYou:\",oyuncu,\"\\nComputer:\",bilgisayar,\"\\nYou lost.\")\r\n\r\nelse:\r\n    print(\"\\nYou:\",oyuncu,\"\\nComputer:\",bilgisayar,\"\\nDraw\")\r\n","sub_path":"Döngüler/TasKagitMakas.py","file_name":"TasKagitMakas.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"589914737","text":"import sys\nsys.path.append(\"/afs/ipp-garching.mpg.de/home/m/mwillens/Programme/python/lib/\")\nsys.path.append(\"/afs/ipp-garching.mpg.de/aug/ads-diags/common/python/lib\")\nimport matplotlib.pylab as plt\nimport numpy as np\nimport distutils.dir_util\nimport My_classes\nreload(My_classes)\nfrom My_classes import ECEI_osam\nimport modules.my_func as my_func \nreload(my_func)\n#from modules.my_func import find_nearest_idx\nimport modules.my_func as my_func\nimport dd\n\n\npath = \"/afs/ipp/home/o/osam/Sawtooth_crash/\"\nopen_file = \"shots_ECEI_analyzed2.txt\"\npath_to_save = \"/afs/ipp-garching.mpg.de/home/o/osam/Sawtooth_crash/Pictures/Shot_list_CEC_IDA\"\n\nshot_list, checkIDA, checkCEC, tECEI_B, tECEI_E =\\\nnp.loadtxt(path+open_file, delimiter=' ', skiprows = 1, usecols=(0, 2, 3, 4, 5), unpack=True)\nshot_list ,checkIDA, checkCEC = shot_list.astype(int), checkIDA.astype(int), checkCEC.astype(int)\n\n\n\n\nimport ECE\n# import kk_mwillens as kk # for the q profile\nimport EQU\nimport IDA\n\nfor i in xrange(32,len(shot_list)):\n\n    print(\"i = %g/%g\" %(i, len(shot_list)))\n    Shot = shot_list[i]\n    tB = tECEI_B[i]\n    tE = tECEI_E[i]\n    my_dpi = 96\n    \n    if (checkCEC[i] == 1):\n        try:\n            CEC_check = dd.shotfile('CEC', Shot)\n            t_CEC = CEC_check.getObjectData('time-A')\n            Trad_CEC = CEC_check.getObjectData('Trad-A')\n            idx_B = my_func.find_nearest_idx(t_CEC, tB)\n            idx_E = my_func.find_nearest_idx(t_CEC, tE)\n            \n            plt.close()\n            plt.figure(num=None, figsize=(19, 
11), dpi=my_dpi, facecolor='w', edgecolor='k')\n for i in xrange(Trad_CEC.shape[0]):\n plt.plot(t_CEC[idx_B:idx_E], Trad_CEC[i, idx_B:idx_E])\n plt.xlabel(\"t [s]\")\n plt.ylabel(\"Trad [eV]\")\n plt.title(\"#%g ECE CEC\"%(Shot))\n # plt.show() \n plt.savefig(path_to_save+'/p%05d.png' %(Shot), dpi=my_dpi, bbox_inches='tight') \n except:\n print(\"%g - No ECE, checkECE=%g, checkIDA=%g\"%(Shot, checkCEC[i], checkIDA[i]))\n \n \n elif (checkIDA[i] == 1):\n try:\n IDA = IDA.IDA()\n IDA.Load(Shot, tBegin = tB, tEnd = tE)\n \n plt.close()\n plt.figure(num=None, figsize=(19, 11), dpi=my_dpi, facecolor='w', edgecolor='k')\n plt.plot(IDA.time,IDA.Te)\n plt.xlabel(\"t [s]\")\n plt.ylabel(\"Trad [eV]\")\n plt.title(\"#%g IDA\"%(Shot))\n #plt.show() \n plt.savefig(path_to_save+'/p%05d.png' %(Shot), dpi=my_dpi, bbox_inches='tight') \n except:\n print(\"%g - No IDA, No ECE, checkECE=%g, checkIDA=%g\"%(Shot, checkCEC[i], checkIDA[i]))\n \n\n\n\n\nprint(\"the script is over.\")","sub_path":"project0/ECEI_from_list_save_CEC_png.py","file_name":"ECEI_from_list_save_CEC_png.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"247974445","text":"from csv import reader\nimport numpy as np\nimport copy\nfrom matplotlib import pyplot as plt\nimport random\nimport matplotlib.patches as mpatches\n\n\ndef calc_sigmoid(val):\n '''\n logistic function, computes values between 0 and 1\n :param val: Solution of line function\n :return: sigmoid(val)\n '''\n return (np.exp(val)/(1+np.exp(val)))\n\ndef calc_line_funtio(value_x, weights):\n '''\n solve line equation (y=mx+c)\n :param value_x: numpy array contianing values of attributes\n :param weights: numpy array conatining coeff. for respective attributes\n :return: soln of line equation(y_cap)\n '''\n\n return (np.sum(value_x*weights))\n\n\ndef logistic_regression(data,actual_class):\n \"\"\"\n Trains regressor on the give input data\n :param data: input data\n :param weights: initial weights corresponding to respective attributes(all zero)\n :param actual_class: original class of the data(0 or 1)\n :return: Final weights(coeff), SSE(sum of squared error) for each epoch\n \"\"\"\n #learning rate\n weight_total=[]\n weight_input_layer=[]\n weight_hidden_layer=[]\n\n for _ in range(5):\n weights=np.zeros((0))\n for _ in range(len(data[0])):\n val =random.uniform(-1,1)\n\n weights=np.append(weights,val)\n weight_input_layer.append(weights)\n\n weight_total.append(weight_input_layer)\n\n for _ in range(4):\n weights=np.zeros((0))\n for _ in range(6):\n val = random.uniform(-1,1)\n\n weights=np.append(weights,val)\n weight_hidden_layer.append(weights)\n\n weight_total.append(weight_hidden_layer)\n\n\n learning_constant = 0.1\n SSE_epoch=[]\n weights_array=[]\n #number of iteration on the training data\n for epoch in range(10001):\n SSE=0\n\n for r_index in range(len(data)):\n #computing y_cap and sigmoid function for each sample\n #hiddenlayer calculation\n hidden_layer_output=np.array((1))\n for node in range(5):\n y_cap = calc_line_funtio(data[r_index],weight_input_layer[node])\n y_sigmoid = calc_sigmoid(y_cap)\n hidden_layer_output= np.append(hidden_layer_output,y_sigmoid)\n\n\n #output layer calculation\n output_layer_output=np.zeros((0))\n for node in range(4):\n y_cap = calc_line_funtio(hidden_layer_output,weight_hidden_layer[node])\n y_sigmoid = calc_sigmoid(y_cap)\n output_layer_output= np.append(output_layer_output,y_sigmoid)\n\n\n\n #output layer delta\n\n 
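# = (target - y) * y * (1 - y), the squared-error gradient pushed through the sigmoid, elementwise\n            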
delta_output=(actual_class[r_index]-output_layer_output) * (output_layer_output*(1-output_layer_output))\n #calculating SSE\n SSE +=np.sum((actual_class[r_index]-output_layer_output)**2)\n\n\n\n #print('delta output',delta_output)\n #hidden layer delta\n delta_hidden_temp=np.zeros((0))\n\n for weight in range(6):\n delta_node=0\n for hidden_node in range(4):\n delta_node+= weight_hidden_layer[hidden_node][weight] * delta_output[hidden_node]\n delta_hidden_temp=np.append(delta_hidden_temp,delta_node)\n\n delta_hidden= (hidden_layer_output*(1- hidden_layer_output)) * delta_hidden_temp\n\n\n #updating hidden_layer weight\n for weight in range(6):\n for output_node in range(4):\n\n weight_hidden_layer[output_node][weight]+=learning_constant *(hidden_layer_output[weight] * delta_output[output_node])\n\n\n #updating input layer weight\n for weight in range(3):\n for hidden_node in range(5):\n\n weight_input_layer[hidden_node][weight]+=learning_constant *(data[r_index][weight] * delta_hidden[hidden_node+1])\n SSE_epoch.append(SSE)\n if(epoch==0):\n weights_array.append(copy.deepcopy(weight_total))\n elif(epoch==10):\n weights_array.append(copy.deepcopy(weight_total))\n elif(epoch==100):\n weights_array.append(copy.deepcopy(weight_total))\n elif(epoch==1000):\n weights_array.append(copy.deepcopy(weight_total))\n elif(epoch==10000):\n weights_array.append(copy.deepcopy(weight_total))\n\n return weights_array,SSE_epoch\n\n\ndef prediction(data,weight_array):\n '''\n Predticion function\n :param data: test data\n :param weights: final weights after training\n :return: prediction of class(0 or 1)\n '''\n result=[]\n\n for r_index in range(len(data)):\n first_layer=np.array((1))\n output_layer=[]\n for weights in weight_array[0]:\n #computing y_cap and sigmoid function for each sample\n y_cap = calc_line_funtio(data[r_index],weights)\n y_sigmoid = calc_sigmoid(y_cap)\n #roundong sigmoid value\n # if(y_sigmoid<0.5):\n # y_sigmoid=0\n # else:\n # y_sigmoid=1\n #result list\n first_layer=np.append(first_layer,y_sigmoid)\n for index in range(4):\n y_cap = calc_line_funtio(first_layer,weight_array[1][index])\n y_sigmoid = calc_sigmoid(y_cap)\n output_layer.append(y_sigmoid)\n\n result.append(output_layer)\n return result\n\ndef plot_SSE(SSE):\n '''\n plotting SSE vs Epoch graph\n :param SSE: List SSE for each epoch\n :return:\n '''\n\n plt.plot([x for x in range(1,len(SSE)+1)],SSE),plt.title(\"SSE vs Epoch\"),plt.xlabel(\"Epoch\"),plt.ylabel(\"SSE\")\n plt.show()\n\ndef plot_data(data,weights):\n '''\n Plot data distribution and Decision boundary across data\n :param data: Input data\n :param weights: Final weights\n :return:None\n '''\n\n col=('blue','green','red','black')\n #Plotting data points onto the graph\n fig= plt.subplot()\n\n for index in range(len(data[0])):\n fig.scatter(data[0][index], data[1][index],c=col[int(data[2][index])-1],marker='x')\n plt.xlabel=(\"Attribute1\"),plt.ylabel(\"Attribute2\"),plt.title(\"Attribute and Class Distribution\")\n blue_patch = mpatches.Patch(color='blue', label='Class 1')\n green_patch = mpatches.Patch(color='green', label='Class 2')\n red_patch = mpatches.Patch(color='red', label='Class 3')\n black_patch = mpatches.Patch(color='black', label='Class 4')\n plt.legend(handles=[blue_patch,green_patch,red_patch,black_patch],loc=\"upper left\")\n\n plt.show()\n\n\n\n\n\ndef main():\n '''\n MAin function\n takes input file from user\n Store the data into a list of numpy array(for each row)\n Call all the other function\n :return:\n '''\n\n filename=input(\"Enter the 
filename\")\n #filename='test2'\n file = open(filename, \"r\")\n attr_list = list(reader(file))\n\n data_list=attr_list #create the final list for passing\n data=[np.zeros((0)) for _ in range(len(data_list))]\n actual_class=np.zeros((0))\n\n data2=[np.zeros((0)) for _ in range(len(data_list[0]))]\n #Stroing data into numpy array\n for r_index in range(len(data_list)):\n data[r_index]=np.append(data[r_index],1)\n\n for c_index in range(len(data_list[r_index])):\n data2[c_index]=np.append(data2[c_index],np.float(data_list[r_index][c_index]))\n if(c_index==len(data_list[r_index])-1):\n #Storing class in separate list\n actual_class=np.append(actual_class,np.float(data_list[r_index][c_index]))\n else:\n #Storing data row wise\n data[r_index]=np.append(data[r_index],np.float(data_list[r_index][c_index]))\n\n training_class=[]\n for classes in actual_class:\n if classes == 1:\n training_class.append(np.array((1,0,0,0)))\n elif classes ==2:\n training_class.append(np.array((0,1,0,0)))\n elif classes ==3:\n training_class.append(np.array((0,0,1,0)))\n elif classes ==4:\n training_class.append(np.array((0,0,0,1)))\n\n\n #Intializing weight vector\n\n #Calling training function\n weights_array,SSE = logistic_regression(data,training_class)\n #print(weights)\n #print(weights_array)\n ind=0\n for weight in weights_array:\n target = open(\"weights_\"+str(ind)+\".csv\",\"w\")\n ind+=1\n result=\"\"\n for wt in weight:\n for w in wt:\n for v in range(len(w)):\n result+=str(w[v])\n if v+1==len(w):\n pass\n else:\n result+=\",\"\n result+=\"\\n\"\n #print(\"R\",result)\n target.write(result)\n\n print(\"Weights files are created................\")\n plot_SSE(SSE)\n plot_data(data2,weights_array)\n\n\nmain()","sub_path":"Neural Net/trainMLP.py","file_name":"trainMLP.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"284020707","text":"import requests\nimport logging\nimport time\nimport ratelim\n\n# Local imports\nfrom utils.common.db_utils import read_all_results\nfrom utils.common.datapipeline import DataPipeline\n\n\ndef get_meetup_data(url, api_key, offset=0, max_results=200):\n # Set the offset parameter and make the request\n params = dict(offset=offset, page=max_results, key=api_key)\n r = requests.get(url, params=params)\n r.raise_for_status()\n # If no response is found\n if len(r.text) == 0:\n time.sleep(5)\n print(\"Got a bad response, so retrying page\", offset)\n return get_meetup_data(url, api_key, offset=offset)\n # Extract results in the country of interest (bonus countries\n # can enter the fold because of the radius parameter)\n data = r.json()\n return [row['id'] for row in data]\n\n\ndef get_all_meetup_data(url, api_key, max_results=200):\n results = []\n offset = 0\n while True:\n _results = get_meetup_data(url, api_key, offset, max_results)\n print(offset, len(_results))\n results += _results\n if len(_results) < max_results:\n break\n offset += 1\n return results\n\n\ndef run(config):\n groups = read_all_results(config, 'input_db', 'input_table')\n # Filter groups matching target country/category\n groups = [row for row in groups\n if row['country_name'] == config['parameters']['country']]\n groups = [row for row in groups\n if row['category_id'] == int(config['parameters']['category'])]\n\n # Collect group info\n groups = set(row['urlname'] for row in groups)\n logging.info(\"Got %s distinct groups from database\", len(groups))\n\n api_key = config[\"Meetup\"][\"api-key\"]\n max_results 
= 200\n output = []\n for urlname in groups:\n\n url = \"https://api.meetup.com/{}/events\".format(urlname)\n event_ids = get_all_meetup_data(url,api_key,max_results) \n print(\"Got\", len(event_ids))\n\n \n url = \"https://api.meetup.com/{}/events/\".format(urlname)\n event_ids = get_all_meetup_data(url,api_key,max_results) \n print(\"Got\", len(event_ids))\n\n \n for event_id in set(event_ids):\n row = dict(member_id=member_id, group_urlname=urlname)\n output.append(row)\n break\n\n logging.info(\"Got %s rows of data\", len(output))\n with DataPipeline(config) as dp:\n for row in output:\n dp.insert(row)\n","sub_path":"collect_data/utils/meetup/old/meetup_get_group_events.py","file_name":"meetup_get_group_events.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"496460544","text":"#不对比原始文件,直接对比trustdata.py生成的两个结果文件\nimport csv\nimport sys\nfrom datetime import date\n\n\ndef main(pre_file, curr_file):\n last_month_file = open(pre_file, 'r')\n this_month_file = open(curr_file, 'r')\n file_name = '../../result/trustdata/changes/trustdata_changes'+str(date.today())+'.csv'\n final_file = csv.writer(open(file_name, 'w'))\n final_file.writerow(['App ID','App Name','Date','Old MAU(mln)', 'New MAU(mln)','%', 'Old DAU(mln)', 'New DAU(mln)','%','Old - Time Spent (000 Hr)', 'New - Time Spent (000 Hr)','%', 'Old - Startups (000)', 'New - Startups (000)','%'])\n this_month_dict = convert(this_month_file)\n last_month_dict = convert(last_month_file)\n for key in last_month_dict:\n if key in this_month_dict:\n if this_month_dict[key] != last_month_dict[key]:\n new_data = this_month_dict[key]\n old_data = last_month_dict[key]\n per1 = new_data[2]/old_data[2]-1\n per2 = new_data[3]/old_data[3]-1\n per3 = new_data[4]/old_data[4]-1\n per4 = new_data[5]/old_data[5]-1\n final_file.writerow([new_data[0], new_data[1],new_data[6],old_data[2], new_data[2], per1, old_data[3], new_data[3],per2, old_data[4], new_data[4], per3, old_data[5], new_data[5],per4])\n else:\n print('[Warn] deleted app' + str(last_month_dict[key]))\ndef convert(file):\n file_dict = {}\n for row in file:\n data = row.split('\\t')\n app_name = data[0]\n app_id = data[1]\n mau = data[2]\n total_time = data[3]\n dau = data[4]\n total_counts = data[5]\n date = data[6]\n key = str(app_id)+str(date)\n if key not in file_dict:\n file_dict[key] = [app_id, app_name, mau, dau, total_time, total_counts, date]\n else:\n print('[Warn] duplicate data: '+row)\n return file_dict\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print('[Error] python3 trustdata.changes.py ')\n exit()\n main(sys.argv[1], sys.argv[2])\n","sub_path":"src/trustdata/trustdata.changes.py","file_name":"trustdata.changes.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"270891100","text":"from PIL import Image\nimport cv2\nimport numpy as np\n\ncardPath = \"カードデータ/\"\nconcat_path = \"jpg/\"\n\n\n#輪郭抽出\ndef edge(word):\n img = cv2.imread(cardPath + word + \".jpg\")\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #グレースケール変換\n ret,thresh = cv2.threshold(gray,180,255,0)\n canny_img = cv2.Canny(thresh, 50, 110)\n contours, hierarchy = cv2.findContours(canny_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n cnt = contours[0]\n img_Contour = cv2.drawContours(img, [cnt], 0, (0,255,0), 2)\n return cnt\n\n#余白トリミング\ndef trim(img,word,cnt):\n x_list = []\n y_list = []\n for i in range(len(cnt)):\n 
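# cnt is an OpenCV contour of shape (N, 1, 2); gather every x and y to find the card's bounding box\n        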
x_list.append(cnt[i][0][0])\n y_list.append(cnt[i][0][1])\n x_min = min(x_list)\n y_min = min(y_list)\n x_max = max(x_list)\n y_max = max(y_list)\n\n if len(cnt) >= 50:\n img.crop((x_min,y_min,x_max,y_max)).resize((400,560)).save(cardPath + word + \".jpg\")\n else:\n img.resize((400,560)).save(cardPath + word + \".jpg\")\n img.close()\n\n#画像結合\ndef concat(DeckList):\n dst = Image.new('RGB', (3 * 400 + 140, 3 * 560 + 200),(255, 255, 255))\n x = 0\n y = 0\n num = 0\n count = []\n cardList = []\n for i,elem in enumerate(DeckList):\n if (i % 3) == 0:\n count.append(int(elem))\n elif (i % 3) == 2:\n cardList.append(elem)\n print(\"【印刷リスト】\")\n for n,card in enumerate(cardList):\n print(card)\n for i in range(count[n]):\n img = Image.open(cardPath + card + \".jpg\")\n dst.paste(img, (x % 3 * img.width + 70, int(x / 3) * img.height + 100))\n if (x % 3 == 2 & int(x / 3) == 2):#9枚区切りで保存\n dst.save(concat_path + \"test\" + str(num) + \".jpg\")\n dst = Image.new('RGB', (3 * 400 + 140, 3 * 560 + 200),(255, 255, 255))\n x = 0\n num += 1\n elif (n == len(cardList) - 1) & (i == int(count[n] - 1)):#9枚区切りで終了しないときも保存\n dst.save(concat_path + \"test\" + str(num) + \".jpg\")\n else:\n x += 1\n","sub_path":"cardGenerate.py","file_name":"cardGenerate.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"475494017","text":"import cv2\nimport threading\nimport numpy\nfrom app_thermometer.moduls.Seek_termal import Thermal\nfrom app_thermometer.moduls.lcd_i2c_rus import lcd_rus\nimport time\nimport os\n\nclass VideoCamera(threading.Thread):\n def __init__(self, path_CascadeClassifier, irCamera, termo):\n super().__init__()\n self.path_CascadeClassifier = path_CascadeClassifier\n self.irCamera = irCamera\n self.status_run = True\n self.frame = None\n self.bbox = None\n self.status_ir_camera = False\n self.old_time_run_ir_camera = time.time()\n self.termo = termo\n\n # self.irCamera.initialize()\n # self.irCamera.start()\n self.temp = -1\n self.tempC_WEB = None\n self.imageRGB_web = None\n self.slagFace = False\n self.text_lcd = ''\n self.tempPir = None\n self.tima_pir = time.time()\n\n self.slagFace =False\n def init(self):\n self.face_detector = cv2.CascadeClassifier(self.path_CascadeClassifier)\n self.video = cv2.VideoCapture(0)\n\n\n def __del__(self):\n self.video.release()\n\n def get_frame(self):\n return self.frame\n\n def getBbox(self):\n return self.bbox\n\n def runIrCamera(self):\n\n time.sleep(5)\n self.irCamera.status_get_frame = True\n time.sleep(1)\n\n def get_temp(self):\n T_text_path = r'/home/pi/ProjectMain/tMAXT.txt'\n T_image_path = r'/home/pi/ProjectMain/Thermal_image.png'\n count = 0\n temp, T_img = None, None\n while count <= 3:\n temp = -100\n try:\n if os.path.exists(T_image_path):\n if os.path.exists(T_text_path):\n f = open(T_text_path, 'r')\n temp = float(f.read())\n f.close()\n\n if temp != -100:\n if temp <= 32.5:\n temp = temp + 4.3 + abs(32.5 - temp) / 1.23076923\n else:\n temp = temp + 4.3\n print(\"на тепловизоре макс.: {}\".format(temp))\n else:\n continue\n\n T_img = cv2.imread(T_image_path, cv2.IMREAD_GRAYSCALE)\n T_img = cv2.rotate(T_img, cv2.ROTATE_90_CLOCKWISE) # rotateImage(Tt_img, 270)\n T_img = T_img.astype(numpy.uint8)\n T_img = cv2.cvtColor(T_img, cv2.COLOR_GRAY2RGB)\n T_img = cv2.applyColorMap(T_img, cv2.COLORMAP_JET)\n\n\n return temp, T_img\n\n else:\n count += 1\n continue\n except BaseException as e:\n count += 1\n continue\n\n def stopIrCamera(self):\n 
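# signal the Seek thermal thread to stop grabbing frames\n        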
self.irCamera.status_get_frame = False\n\n def get_temp_pir(self):\n return self.termo.get_object_1()\n\n def Temperature_measurements(self):\n\n prev_temp = self.get_temp_pir()\n i = 0\n is_begining = True\n\n cur_temp = self.get_temp_pir()\n while True:\n cur_temp = self.get_temp_pir()\n if cur_temp - prev_temp > 0.6 and cur_temp > 24.00:\n is_begining = False\n lcd_rus.clear()\n time.sleep(0.1)\n lcd_rus.pull_lcd_text(1, \"Начинаю измерение\")\n self.text_lcd = \"Начинаю измерение\"\n i = 0\n start_temp = prev_temp\n time.sleep(0.4)\n max_t = cur_temp\n cur_temp = self.get_temp_pir()\n while (abs(cur_temp - prev_temp) > 0.1):\n time.sleep(0.4)\n cur_temp = self.get_temp_pir()\n prev_temp = cur_temp\n max_t = max(cur_temp, max_t)\n if abs(cur_temp - start_temp) > 1:\n exact_t = max_t\n if max_t <= 32.5:\n max_t = max_t + 4.3 + abs(32.5 - max_t) / 1.23076923\n else:\n max_t = max_t + 4.3\n\n if max_t >= 37.0:\n pass\n # text_with_warning(\"СТОП!\", \" \" + f\"темпер.: {max_t:.1f}\" + \"\\x99C\")\n lcd_rus.pull_lcd_r(round(max_t, 1))\n lcd_rus.pull_lcd_text(1, \"СТОП!\")\n self.text_lcd = 'СТОП!'\n return round(max_t, 1)\n else:\n pass\n # text_with_signal(\"Ваша температура\", \" \" + f\"{max_t:.1f}\" + \"\\x99C\")\n lcd_rus.pull_lcd_r(round(max_t, 1))\n round(max_t, 1)\n\n # time.sleep(1)\n # lcd_rus.clear()\n # # sleep(0.1)\n # # lcd_rus.pull_lcd_l(round(max_t, 1))\n else:\n lcd_rus.clear()\n time.sleep(0.1)\n lcd_rus.pull_lcd_text(1, \"Готов измерять\")\n self.text_lcd = \"Готов измерять\"\n prev_temp = cur_temp\n time.sleep(0.2)\n i += 1\n if i == 5 and not is_begining:\n lcd_rus.clear()\n lcd_rus.pull_lcd_text(1, \"Готов измерять\")\n self.text_lcd = \"Готов измерять\"\n\n def getFace(self):\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\n faces = self.face_detector.detectMultiScale(gray, 1.3, 5)\n flag = True\n lcd_rus.pull_lcd_l('')\n\n for (x, y, w, h) in faces:\n cv2.rectangle(self.frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n self.oldTime = time.time()\n self.timeUpdate_temp = time.time()\n\n if flag:\n if self.text_lcd != \"Найдено лицо\" :\n #lcd_rus.pull_lcd_text(1, \"Найдено лицо\")\n self.text_lcd = \"Найдено лицо\"\n temp, T_img = self.get_temp()\n\n self.tempC_WEB = temp\n self.imageRGB_web = T_img\n\n lcd_rus.pull_lcd_l(round(temp, 1))\n time.sleep(0.2)\n self.slagFace = True\n flag = False\n #Пирометр\n\n\n def get_frame_web(self):\n '''\n Для отображения на страници\n :return:\n '''\n ret, jpeg = cv2.imencode('.jpg', self.frame)\n return jpeg.tobytes()\n\n def get_IR_web(self):\n if not self.imageRGB_web is None:\n ret, jpeg = cv2.imencode('.jpg', self.imageRGB_web)\n return jpeg.tobytes()\n return None\n\n def get_temp_web(self):\n\n mode = self.slagFace\n if mode:\n t1 = round(self.tempC_WEB, 1)\n\n t2 = round(self.termo.get_object_1(), 1)\n\n return round(t1, 1), round(t2, 1), mode\n else:\n return 0, 0, 0\n\n\n def run(self):\n self.oldTime = time.time()\n lcd_rus.pull_lcd_text(1, \"Готов измерять\")\n while self.status_run:\n success, frame = self.video.read()\n if not success is None:\n self.frame = numpy.copy(frame)\n self.getFace()\n\n if self.slagFace:\n if time.time() - self.oldTime > 5:\n self.slagFace = False\n # lcd_rus.clear()\n # time.sleep(0.1)\n # lcd_rus.pull_lcd_text(1, \"Готов измерять\")\n # self.text_lcd = \"Готов измерять\"\n\n\n\n","sub_path":"app_thermometer/moduls/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":7810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"173220060","text":"# QUADRATIC EQUATION\r\n\r\nimport math\r\n\r\na = int(input(\"Enter the value of a: \"))\r\nb = int(input(\"Enter the value of b: \"))\r\nc = int(input(\"Enter the value of c: \"))\r\n\r\ndis = (b**2)-(4*a*c) #dis = discriminant\r\n\r\nif dis > 0:\r\n root1 = (-b + math.sqrt(dis)/(2*a))\r\n root2 = (-b - math.sqrt(dis)/(2*a))\r\n print(\"Two distinct real roots are %.2f and %.2f\" %(root1, root2))\r\n\r\nelif dis == 0:\r\n root1 = root2 = -b / (2*a)\r\n print(\"Two equal and real roots are %.2f and %.2f\" %(root1, root2))\r\n\r\nelif dis < 0:\r\n root1 = root2 = -b / (2*a)\r\n i = math.sqrt(-dis) / (2*a) #i = imaginary\r\n print(\"Two distinct complex roots are %.2f + %.2f and %.2f - %.2f\" %(root1,i,root2,i))\r\n\r\nelse:\r\n print(\"Sorry! I didn't understand.\")\r\n","sub_path":"LakshiDemo_2.py","file_name":"LakshiDemo_2.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"279306012","text":"from mapObjects.mapObject import MapObject\nfrom mapObjects.hideable import Hideable\n\n\nclass Button(MapObject, Hideable):\n def __init__(self, button):\n self.text = button.text\n self.command = button.callback\n super().__init__(\n button.xRatio,\n button.yRatio\n )\n","sub_path":"mapObjects/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"449456226","text":"\nimport pwd\nimport os\nUSER='baisong'\nuserpwd = pwd.getpwnam(USER)\nos.setgid(userpwd.pw_gid)\nos.setuid(userpwd.pw_uid)\n\nf=open(\"./b.txt\",'a+')\nprint >> f,'ni hao'\nf.close()","sub_path":"test_python/set_user_of_file/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"218781821","text":"\"\"\"Representation of a WeMo Dimmer device.\"\"\"\nfrom .api.long_press import LongPressMixin\nfrom .switch import Switch\n\n\nclass Dimmer(Switch, LongPressMixin):\n \"\"\"Representation of a WeMo Dimmer device.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a WeMo Dimmer device.\"\"\"\n Switch.__init__(self, *args, **kwargs)\n self._brightness = None\n\n def get_brightness(self, force_update=False):\n \"\"\"Get brightness from device.\"\"\"\n if force_update or self._brightness is None:\n try:\n brightness = self.basicevent.GetBinaryState().get('brightness')\n except ValueError:\n brightness = 0\n self._brightness = brightness\n\n return self._brightness\n\n def set_brightness(self, brightness):\n \"\"\"\n Set the brightness of this device to an integer between 1-100.\n\n Setting the brightness does not turn the light on, so we need\n to check the state of the switch.\n \"\"\"\n if brightness == 0:\n if self.get_state() != 0:\n self.off()\n else:\n if self.get_state() == 0:\n self.on()\n\n self.basicevent.SetBinaryState(brightness=int(brightness))\n self._brightness = int(brightness)\n\n def subscription_update(self, _type, _param):\n \"\"\"Update the dimmer attributes due to a subscription update event.\"\"\"\n if _type == \"Brightness\" and self._state:\n self._brightness = int(_param)\n return True\n return super().subscription_update(_type, _param)\n\n def __repr__(self):\n \"\"\"Return a string representation of the device.\"\"\"\n return ''.format(name=self.name)\n\n @property\n def device_type(self):\n \"\"\"Return what kind of WeMo this device 
is.\"\"\"\n return \"Dimmer\"\n","sub_path":"pywemo/ouimeaux_device/dimmer.py","file_name":"dimmer.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"120731594","text":"\"\"\"\nImplementation of \"Spatially aware clustering\"\nBy Alexandrov et al. (2011)\n\"\"\"\n\nimport os\nimport argparse\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport esmraldi.segmentation as seg\nimport esmraldi.imzmlio as imzmlio\nimport esmraldi.fusion as fusion\nimport scipy.ndimage as ndimage\nimport SimpleITK as sitk\n\nfrom esmraldi.fastmap import FastMap\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.cluster import KMeans\n\ndef mapping_neighbors(image, radius, weights):\n r = radius\n size = 2*r+1\n img_padded = np.pad(image, (r,r), 'constant')\n mapping_matrix = np.zeros(shape=(image.shape[0], image.shape[1], size, size, image.shape[-1]))\n for index in np.ndindex(image.shape[:-1]):\n i, j = index\n neighbors = image[i-r:i+r+1, j-r:j+r+1]\n if neighbors.shape[0] != size or neighbors.shape[1] != size:\n continue\n mapping_matrix[index] = neighbors * weights[..., None]\n return mapping_matrix\n\ndef gaussian_weights(radius):\n size = 2*radius+1\n sigma = size/4\n return np.array([[np.exp((-i**2-j**2)/(2*sigma**2)) for i in range(-radius,radius+1)] for j in range(-radius,radius+1)])\n\ndef spatially_aware_clustering(image, k, n, radius):\n weights = gaussian_weights(radius)\n mapping_matrix = mapping_neighbors(image, radius, weights)\n old_shape = mapping_matrix.shape\n new_shape = (np.prod(old_shape[:-3]), np.prod(old_shape[-3:]))\n fastmap_matrix = mapping_matrix.reshape(new_shape)\n\n if n < new_shape[-1]:\n fastmap = FastMap(fastmap_matrix, n)\n proj = fastmap.compute_projections()\n pd_X = pairwise_distances(fastmap_matrix)**2\n pd_proj = pairwise_distances(proj)**2\n print(\"Sum abs. 
diff=\", np.sum(np.abs(pd_X - pd_proj)))\n else:\n proj = fastmap_matrix\n\n kmeans = KMeans(k, random_state=0).fit(proj)\n labels = kmeans.labels_\n image_labels = labels.reshape(old_shape[:-3])\n\n return image_labels\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help=\"Input MALDI image (imzML or nii)\")\nparser.add_argument(\"-o\", \"--output\", help=\"Output image (ITK format)\")\nparser.add_argument(\"-n\", \"--number\", help=\"Number of dimensions after dimension reduction (fastmap)\", default=50)\nparser.add_argument(\"-k\", \"--classes\", help=\"Number of clusters for kmeans\", default=7)\nparser.add_argument(\"-r\", \"--radius\", help=\"Radius for spatial features\", default=1)\nparser.add_argument(\"-g\", \"--threshold\", help=\"Mass to charge ratio threshold (optional)\", default=0)\nparser.add_argument(\"--normalize\", help=\"Normalize spectra by their norm\", action=\"store_true\")\n\nargs = parser.parse_args()\n\ninputname = args.input\noutname = args.output\nradius = int(args.radius)\nn = int(args.number)\nk = int(args.classes)\nthreshold = int(args.threshold)\nnormalize = args.normalize\n\n\nif inputname.lower().endswith(\".imzml\"):\n imzml = imzmlio.open_imzml(inputname)\n spectra = imzmlio.get_full_spectra(imzml)\n max_x = max(imzml.coordinates, key=lambda item:item[0])[0]\n max_y = max(imzml.coordinates, key=lambda item:item[1])[1]\n max_z = max(imzml.coordinates, key=lambda item:item[2])[2]\n image = imzmlio.get_images_from_spectra(spectra, (max_x, max_y, max_z))\n mzs, intensities = imzml.getspectrum(0)\nelse:\n image = sitk.GetArrayFromImage(sitk.ReadImage(inputname)).T\n mzs = [i for i in range(image.shape[2])]\n mzs = np.asarray(mzs)\n\nimage = image[..., mzs >= threshold]\nif normalize:\n for index in np.ndindex(image.shape[:-1]):\n spectrum = image[index]\n norm = np.linalg.norm(spectrum)\n if norm > 0:\n spectrum /= norm\n image[index] = spectrum\n\n\nmzs = mzs[mzs >= threshold]\nmzs = np.around(mzs, decimals=2)\nmzs = mzs.astype(str)\n\nnb_peaks = image.shape[-1]\nprint(\"Number of peaks=\", nb_peaks)\n\nshape = image.shape\n\nif len(shape) == 4:\n for i in range(shape[-2]):\n current_image = image[..., i, :]\n image_labels = spatially_aware_clustering(current_image, k, n, radius)\n plt.imshow(image_labels)\n plt.show()\nelse:\n image_labels = spatially_aware_clustering(image, k, n, radius)\n\nimage = imzmlio.normalize(image)\noutname_csv = os.path.splitext(outname)[0] + \".csv\"\nout_array = np.zeros(shape=(nb_peaks, k))\nfor i in range(k):\n indices = np.where(image_labels == i)\n not_indices = np.where(image_labels != i)\n median_spectrum = np.median(image[indices], axis=0)\n print(median_spectrum.shape)\n other_median_spectrum = np.median(image[not_indices], axis=0)\n median_spectrum -= other_median_spectrum\n top_indices = np.argsort(median_spectrum)[::-1]\n top_molecules = mzs[top_indices]\n out_array[:, i] = top_molecules\nnp.savetxt(outname_csv, out_array, delimiter=\";\", fmt=\"%s\")\n\nimage_labels_itk = sitk.GetImageFromArray(image_labels.astype(np.uint8))\nsitk.WriteImage(image_labels_itk, outname)\n\n#plt.imshow(image_labels.T)\n#plt.show()\n","sub_path":"examples/spatially_aware_clustering.py","file_name":"spatially_aware_clustering.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"144139278","text":"import argparse\nimport datetime\nimport os\nimport pickle\nimport time\nimport logging\nimport random\nimport 
numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom sklearn.metrics import matthews_corrcoef\nfrom tqdm import tqdm, trange\nfrom transformers import DistilBertTokenizer, DistilBertForSequenceClassification, AdamW,get_linear_schedule_with_warmup\n\n\ndef read_data(data_path, tokenizer, max_seq_length):\n\t\"\"\"\n\tRead in data from txt file and return TensorDataset with\n\tinput_ids, attention_masks, labels\n\t\"\"\"\n\n\t# Read data in dataframe format\n\twith open(data_path, encoding = 'utf-8') as f:\n\t\tdata = {}\n\t\theaders = f.readline().strip().split(\"\\t\")\n\t\tfor h in headers:\n\t\t\tdata[h] = []\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tpos = line.find(\"\\t\")\n\t\t\tdata[headers[0]].append(line[:pos])\n\t\t\tdata[headers[1]].append(line[pos + 1:-1])\n\t\t\tline = f.readline()\n\tdt = pd.DataFrame(data)\n\t# dt = pd.read_csv(data_path, sep = \"\\t\")\n\t# dt_lines = open(data_path).readlines()\n\n\t# Convert text and labels in lists\n\ttext = dt['text'].values\n\tclass_label = dt['class'].values\n\tlabels = []\n\tfor label in class_label:\n\t\tif label == 'machine':\n\t\t\tlabels.append(1)\n\t\telse:\n\t\t\tlabels.append(0)\n\n\tinput_ids = []\n\tattention_masks = []\n\n\t# For every sentence in data text\n\tfor sent in text:\n\t\tencoded_dict = tokenizer.encode_plus(\n\t\t\tsent,\n\t\t\tadd_special_tokens = True,\n\t\t\tmax_length = max_seq_length,\n\t\t\tpad_to_max_length = True,\n\t\t\treturn_attention_mask = True,\n\t\t\treturn_tensors = 'pt'\n\t\t)\n\n\t\t# Add the encoded sentence to the list.\n\t\tinput_ids.append(encoded_dict['input_ids'])\n\t\t# And its attention mask (simply differentiates padding from non-padding).\n\t\tattention_masks.append(encoded_dict['attention_mask'])\n\n\t# Convert input_ids, attention_masks and label into tensors\n\tinput_ids = torch.cat(input_ids, dim = 0)\n\tattention_masks = torch.cat(attention_masks, dim = 0)\n\tlabels = torch.tensor(labels)\n\t# print(labels.size())\n\n\t# Convert dataset into TensorDataset format\n\tdataset = TensorDataset(input_ids, attention_masks, labels)\n\n\treturn dataset\n\n\ndef read_test_data(data_path, tokenizer, max_seq_length):\n\t\"\"\"\n\tRead in data from txt file and return TensorDataset with\n\tinput_ids and attention_masks (test data has no labels)\n\t\"\"\"\n\n\t# Read data in dataframe format\n\twith open(data_path, encoding = 'utf-8') as f:\n\t\tdata = {}\n\t\theaders = f.readline().strip().split(\"\\t\")\n\t\tfor h in headers:\n\t\t\tdata[h] = []\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tpos = line.find(\"\\t\")\n\t\t\tdata[headers[0]].append(line[:pos])\n\t\t\tdata[headers[1]].append(line[pos + 1:-1])\n\t\t\tline = f.readline()\n\tdt = pd.DataFrame(data)\n\t# dt = pd.read_csv(data_path, sep = \"\\t\",encoding=\"utf-8\")\n\n\t# dt_lines = open(data_path).readlines()\n\n\t# Convert text and labels in lists\n\ttext = dt['Text'].values\n\t# print(len(text))\n\tinput_ids = []\n\tattention_masks = []\n\n\t# For every sentence in data text\n\tfor sent in text:\n\t\tencoded_dict = tokenizer.encode_plus(\n\t\t\tsent,\n\t\t\tadd_special_tokens = True,\n\t\t\tmax_length = max_seq_length,\n\t\t\tpad_to_max_length = True,\n\t\t\treturn_attention_mask = True,\n\t\t\treturn_tensors = 'pt'\n\t\t)\n\n\t\t# Add the encoded sentence to the list.\n\t\tinput_ids.append(encoded_dict['input_ids'])\n\t\t# And its attention mask (simply differentiates padding from 
non-padding).\n\t\tattention_masks.append(encoded_dict['attention_mask'])\n\n\t# Convert input_ids, attention_masks and label into tensors\n\tinput_ids = torch.cat(input_ids, dim = 0)\n\tattention_masks = torch.cat(attention_masks, dim = 0)\n\n\t# Convert dataset into TensorDataset format\n\tdataset = TensorDataset(input_ids, attention_masks)\n\n\treturn dataset\n\n\ndef flat_accuracy(preds, labels):\n\t\"\"\"\n\tFunction to calculate the accuracy of our predictions vs labels\n\t\"\"\"\n\tpred_flat = np.argmax(preds, axis = 1).flatten()\n\tlabels_flat = labels.flatten()\n\treturn np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n\ndef format_time(elapsed):\n\t\"\"\"\n\tTakes a time in seconds and returns a string hh:mm:ss\n\t\"\"\"\n\t# Round to the nearest second.\n\telapsed_rounded = int(round((elapsed)))\n\n\t# Format as hh:mm:ss\n\treturn str(datetime.timedelta(seconds = elapsed_rounded))\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\t# Required parameters\n\tparser.add_argument(\"--data_dir\",\n\t default = '/home/liyixi/hw5/data/classification/',\n\t type = str)\n\tparser.add_argument(\"--task_name\",\n\t default = None,\n\t type = str,\n\t required = True,\n\t help = \"The name of the task to train.\")\n\tparser.add_argument(\"--output_dir\",\n\t default = '/home/liyixi/hw5/output/classification/',\n\t type = str,\n\t help = \"The output directory where the model predictions and checkpoints will be written.\")\n\tparser.add_argument(\"--do_train\",\n\t action = 'store_true',\n\t help = \"Whether to run training.\")\n\tparser.add_argument(\"--do_eval\",\n\t action = 'store_true',\n\t help = \"Whether to run eval on the dev set.\")\n\tparser.add_argument(\"--do_test\",\n\t action = 'store_true',\n\t help = \"Whether to run test on the test set.\")\n\tparser.add_argument(\"--do_test_no_label\",\n\t action = 'store_true',\n\t help = \"Whether to run test on the test set.\")\n\tparser.add_argument(\"--do_test_generation\",\n\t action = 'store_true',\n\t help = \"Whether to test the generation.\")\n\tparser.add_argument(\"--train_batch_size\",\n\t default = 32,\n\t type = int,\n\t help = \"Total batch size for training.\")\n\tparser.add_argument(\"--eval_batch_size\",\n\t default = 32,\n\t type = int,\n\t help = \"Total batch size for eval.\")\n\tparser.add_argument(\"--test_batch_size\",\n\t default = 32,\n\t type = int,\n\t help = \"Total batch size for test.\")\n\tparser.add_argument(\"--learning_rate\", nargs = '+',\n\t default = [],\n\t type = str,\n\t help = \"The initial learning rate for optimizer.\")\n\tparser.add_argument(\"--num_train_epochs\",\n\t default = 5,\n\t type = float,\n\t help = \"Total number of training epochs to perform.\")\n\tparser.add_argument(\"--no_cuda\",\n\t action = 'store_true',\n\t help = \"Whether not to use CUDA when available\")\n\tparser.add_argument('--overwrite_output_dir',\n\t action = 'store_true',\n\t help = \"Overwrite the content of the output directory\")\n\tparser.add_argument(\"--local_rank\",\n\t type = int,\n\t default = -1,\n\t help = \"local_rank for distributed training on gpus\")\n\tparser.add_argument(\"--seed\", type = int, default = 9001, help = \"random seed for initialization\")\n\tparser.add_argument(\n\t\t\"--load_pretraining_model\",\n\t\thelp = \"Load the pre-training model\",\n\t\ttype = str,\n\t\tdefault = None)\n\tparser.add_argument(\n\t\t\"--load_model_name\",\n\t\thelp = \"Load the specified \" + \"saved model for testing\",\n\t\ttype = str,\n\t\tdefault = 
None)\n\tparser.add_argument(\n\t\t\"--generate_text_name\",\n\t\thelp = \"Load the specified \" + \"saved model for testing\",\n\t\ttype = str,\n\t\tdefault = None)\n\targs = parser.parse_args()\n\t# Step 1 args\n\t# args = parser.parse_args(['--task_name=bert_classification', '--learning_rate=5e-6 5e-5 5e-4',\n\t# '--load_pretraining_model=/Users/Loielaine/Desktop/umich-2020/SI630/hw/hw5/output/fine-tuning/pretraining-len32-lr5e-05',\n\t# '--do_train', '--do_eval'])\n\t# args = parser.parse_args(['--task_name=bert_classification', '--learning_rate=5e-5',\n\t# '--do_train', '--do_eval'])\n\t# # Step 2 args\n\t# args = parser.parse_args(['--task_name=bert_classification_test', '--do_test',\n\t# '--load_model_name=/home/liyixi/hw5/output/classification/bert_classification-len32-lr5e-05-epoch4'])\n\t# Set seed\n\tseed = args.seed\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\ttorch.manual_seed(seed)\n\ttorch.cuda.manual_seed_all(seed)\n\n\t# Set device\n\tif args.local_rank == -1 or args.no_cuda:\n\t\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n\t\tn_gpu = torch.cuda.device_count()\n\t\ttorch.manual_seed(args.seed)\n\telse:\n\t\ttorch.cuda.set_device(args.local_rank)\n\t\tdevice = torch.device(\"cuda\", args.local_rank)\n\t\tn_gpu = 1\n\t\ttorch.cuda.manual_seed_all(args.seed)\n\targs.device = device\n\n\t# Set logging\n\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\tlogging.basicConfig(filename = os.path.join(args.output_dir, 'log_{0}_{1}.log'.format(\n\t\tstr(args.task_name), timestr)),\n\t filemode = 'a', format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n\t datefmt = '%m/%d/%Y %H:%M:%S',\n\t level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n\tlogger = logging.getLogger(__name__)\n\tlogger.info(\"device: {} n_gpu: {}, distributed training: {}\".format(\n\t\tdevice, n_gpu, bool(args.local_rank != -1)))\n\n\tif args.do_train and args.do_eval:\n\t\t# Load pre-training model\n\t\tif args.load_pretraining_model is not None:\n\t\t\t# Set tokenizer\n\t\t\ttokenizer = DistilBertTokenizer.from_pretrained(args.load_pretraining_model, do_lower_case = True)\n\t\t\t# tokenizer = DistilBertTokenizer.from_pretrained('bert-base-uncased', do_lower_case = True)\n\t\t\t# Set model\n\t\t\tmodel = DistilBertForSequenceClassification.from_pretrained(args.load_pretraining_model, num_labels = 2,\n\t\t\t output_attentions = False,\n\t\t\t output_hidden_states = False)\n\t\telse:\n\t\t\ttokenizer = DistilBertTokenizer.from_pretrained('bert-base-uncased', do_lower_case = True)\n\t\t\tmodel = DistilBertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels = 2,\n\t\t\t output_attentions = False,\n\t\t\t output_hidden_states = False)\n\n\t\tmodel.to(device)\n\n\t\t# Parameters for validation\n\t\tlr_list = list(map(float, args.learning_rate[0].split(' ')))\n\t\tlength = 32\n\t\teps = 1e-8\n\n\t\tlogger.info(\"***** Load training data*****\")\n\t\t# Read and save training input_ids\n\t\tcached_input_file = os.path.join(args.data_dir, 'train_{0}_{1}'.format(\n\t\t\tstr(args.task_name), str(length)))\n\t\ttry:\n\t\t\twith open(cached_input_file, \"rb\") as reader:\n\t\t\t\ttrain_inputs = pickle.load(reader)\n\t\t\t\treader.close()\n\t\texcept:\n\t\t\ttrain_inputs = read_data(args.data_dir + 'train.tsv', tokenizer, length)\n\n\t\t\tif args.local_rank == -1:\n\t\t\t\tlogger.info(\" Saving train features into cached file %s\", cached_input_file)\n\t\t\t\twith open(cached_input_file, \"wb\") as 
writer:\n\t\t\t\t\tpickle.dump(train_inputs, writer)\n\t\t\t\t\twriter.close()\n\n\t\tlogger.info(\"***** Load eval data*****\")\n\t\t# Read and save eval input_ids\n\t\tcached_input_file = os.path.join(args.data_dir, 'dev_{0}_{1}'.format(\n\t\t\tstr(args.task_name), str(length)))\n\t\ttry:\n\t\t\twith open(cached_input_file, \"rb\") as reader:\n\t\t\t\teval_inputs = pickle.load(reader)\n\t\t\t\treader.close()\n\t\texcept:\n\t\t\teval_inputs = read_data(args.data_dir + 'dev.tsv', tokenizer, length)\n\n\t\t\tif args.local_rank == -1:\n\t\t\t\tlogger.info(\" Saving eval features into cached file %s\", cached_input_file)\n\t\t\t\twith open(cached_input_file, \"wb\") as writer:\n\t\t\t\t\tpickle.dump(eval_inputs, writer)\n\t\t\t\t\twriter.close()\n\n\t\t# Create DataLoader\n\t\ttrain_dataloader = DataLoader(\n\t\t\ttrain_inputs,\n\t\t\tsampler = RandomSampler(train_inputs),\n\t\t\tbatch_size = args.train_batch_size\n\t\t)\n\t\teval_dataloader = DataLoader(\n\t\t\teval_inputs,\n\t\t\tsampler = SequentialSampler(eval_inputs),\n\t\t\tbatch_size = args.eval_batch_size\n\t\t)\n\n\t\tfor learning_rate in lr_list:\n\t\t\tlogger.info(\"***** Validation parameters*****\")\n\t\t\tlogger.info(\" Learning rate = %f\" % learning_rate)\n\n\t\t\t# Train model\n\t\t\tlogger.info(\"***** Run training and evaluating *****\")\n\t\t\tlogger.info(\" Num of train examples = %d\", len(train_dataloader))\n\t\t\tlogger.info(\" Train batch size = %d\", args.train_batch_size)\n\t\t\tlogger.info(\" Num of eval examples = %d\", len(eval_dataloader))\n\t\t\tlogger.info(\" Eval batch size = %d\", args.eval_batch_size)\n\n\t\t\t# Set optimizer as AdamW\n\t\t\toptimizer = AdamW(model.parameters(),\n\t\t\t                  lr = learning_rate,\n\t\t\t                  eps = eps)\n\n\t\t\t# Total number of training steps is [number of batches] x [number of epochs].\n\t\t\t# (Note that this is not the same as the number of training samples).\n\t\t\ttotal_steps = len(train_dataloader) * args.num_train_epochs\n\n\t\t\t# Create the learning rate scheduler\n\t\t\tscheduler = get_linear_schedule_with_warmup(\n\t\t\t\toptimizer,\n\t\t\t\tnum_warmup_steps = int(0.1 * total_steps),\n\t\t\t\tnum_training_steps = total_steps\n\t\t\t)\n\n\t\t\t# ========================================\n\t\t\t#               Training\n\t\t\t# ========================================\n\n\t\t\t# Perform one full pass over the training set.\n\n\t\t\t# Store training statistics (loss)\n\t\t\ttraining_stats = []\n\t\t\t# Measure the total training time\n\t\t\ttotal_t0 = time.time()\n\t\t\tfor epoch in trange(int(args.num_train_epochs), desc = \"Epoch\", disable = args.local_rank not in [-1, 0]):\n\t\t\t\tlogger.info(\"epoch: %d\", epoch)\n\t\t\t\t# Reset the total loss for this epoch.\n\t\t\t\ttotal_train_loss = 0\n\t\t\t\tt0 = time.time()\n\t\t\t\tmodel.train()\n\n\t\t\t\tfor step, batch in enumerate(\n\t\t\t\t\t\ttqdm(train_dataloader, desc = \"Iteration\", disable = args.local_rank not in [-1, 0])):\n\t\t\t\t\tb_input_ids, b_input_mask, b_labels = batch\n\t\t\t\t\tb_input_ids = b_input_ids.to(device)\n\t\t\t\t\tb_input_mask = b_input_mask.to(device)\n\t\t\t\t\tb_labels = b_labels.to(device)\n\n\t\t\t\t\t# Always clear any previously calculated gradients before performing a\n\t\t\t\t\t# backward pass. 
PyTorch doesn't do this automatically because\n\t\t\t\t\t# accumulating the gradients is \"convenient while training RNNs\".\n\t\t\t\t\t# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)\n\t\t\t\t\tmodel.zero_grad()\n\n\t\t\t\t\t# Perform a forward pass (evaluate the model on this training batch).\n\t\t\t\t\t# The documentation for this `model` function is here:\n\t\t\t\t\t# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.DistilBertForSequenceClassification\n\t\t\t\t\t# It returns different numbers of parameters depending on what arguments\n\t\t\t\t\t# are given and what flags are set. For our usage here, it returns\n\t\t\t\t\t# the loss (because we provided labels) and the \"logits\"--the model\n\t\t\t\t\t# outputs prior to activation.\n\t\t\t\t\tloss, logits = model(b_input_ids,\n\t\t\t\t\t                     attention_mask = b_input_mask,\n\t\t\t\t\t                     labels = b_labels)\n\n\t\t\t\t\t# Accumulate the training loss over all of the batches so that we can\n\t\t\t\t\t# calculate the average loss at the end. `loss` is a Tensor containing a\n\t\t\t\t\t# single value; the `.item()` function just returns the Python value\n\t\t\t\t\t# from the tensor.\n\t\t\t\t\ttotal_train_loss += loss.item()\n\n\t\t\t\t\tloss.backward()\n\n\t\t\t\t\t# Clip the norm of the gradients to 1.0.\n\t\t\t\t\t# This is to help prevent the \"exploding gradients\" problem.\n\t\t\t\t\ttorch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n\t\t\t\t\t# Update parameters and take a step using the computed gradient.\n\t\t\t\t\t# The optimizer dictates the \"update rule\"--how the parameters are\n\t\t\t\t\t# modified based on their gradients, the learning rate, etc.\n\t\t\t\t\toptimizer.step()\n\n\t\t\t\t\t# Update the learning rate.\n\t\t\t\t\tscheduler.step()\n\n\t\t\t\t\toptimizer.zero_grad()\n\n\t\t\t\t# Calculate the average loss over all of the batches.\n\t\t\t\tavg_train_loss = total_train_loss / len(train_dataloader)\n\n\t\t\t\t# Measure how long this epoch took.\n\t\t\t\ttraining_time = format_time(time.time() - t0)\n\t\t\t\tlogger.info(\" Average training loss: {0:.2f}\".format(avg_train_loss))\n\t\t\t\tlogger.info(\" Training epoch took: {:}\".format(training_time))\n\n\t\t\t\t# ========================================\n\t\t\t\t#               Validation\n\t\t\t\t# ========================================\n\t\t\t\t# After the completion of each training epoch, measure our performance on\n\t\t\t\t# our validation set.\n\n\t\t\t\tt0 = time.time()\n\n\t\t\t\t# Put the model in evaluation mode--the dropout layers behave differently\n\t\t\t\t# during evaluation.\n\t\t\t\tmodel.eval()\n\n\t\t\t\t# Tracking variables\n\t\t\t\ttotal_eval_accuracy = 0.0\n\t\t\t\ttotal_eval_loss = 0.0\n\t\t\t\tnb_eval_steps = 0\n\n\t\t\t\t# Evaluate data for one epoch\n\t\t\t\tfor batch in tqdm(eval_dataloader, desc = \"Evaluating\"):\n\t\t\t\t\tb_input_ids = batch[0].to(device)\n\t\t\t\t\tb_input_mask = batch[1].to(device)\n\t\t\t\t\tb_labels = batch[2].to(device)\n\n\t\t\t\t\t# Tell pytorch not to bother with constructing the compute graph during\n\t\t\t\t\t# the forward pass, since this is only needed for backprop (training).\n\t\t\t\t\twith torch.no_grad():\n\t\t\t\t\t\t(loss, logits) = model(b_input_ids,\n\t\t\t\t\t\t                       attention_mask = b_input_mask,\n\t\t\t\t\t\t                       labels = b_labels)\n\n\t\t\t\t\t# Accumulate the validation loss.\n\t\t\t\t\ttotal_eval_loss += loss.item()\n\n\t\t\t\t\t# Move logits and labels to CPU\n\t\t\t\t\tlogits = 
logits.detach().cpu().numpy()\n\t\t\t\t\tlabel_ids = b_labels.to('cpu').numpy()\n\n\t\t\t\t\t# Calculate the accuracy for this batch of test sentences, and\n\t\t\t\t\t# accumulate it over all batches.\n\t\t\t\t\ttotal_eval_accuracy += flat_accuracy(logits, label_ids)\n\n\t\t\t\t# Report the final accuracy for this validation run.\n\t\t\t\tavg_val_accuracy = total_eval_accuracy / len(eval_dataloader)\n\t\t\t\tlogger.info(\" Accuracy: {0:.2f}\".format(avg_val_accuracy))\n\n\t\t\t\t# Calculate the average loss over all of the batches.\n\t\t\t\tavg_val_loss = total_eval_loss / len(eval_dataloader)\n\n\t\t\t\t# Measure how long the validation run took.\n\t\t\t\tvalidation_time = format_time(time.time() - t0)\n\n\t\t\t\tlogger.info(\" Validation Loss: {0:.2f}\".format(avg_val_loss))\n\t\t\t\tlogger.info(\" Validation took: {:}\".format(validation_time))\n\n\t\t\t\t# Record all statistics from this epoch.\n\t\t\t\ttraining_stats.append({\n\t\t\t\t\t'epoch': epoch,\n\t\t\t\t\t'Training Loss': avg_train_loss,\n\t\t\t\t\t'Valid. Loss': avg_val_loss,\n\t\t\t\t\t'Valid. Accuracy': avg_val_accuracy,\n\t\t\t\t\t'Training Time': training_time,\n\t\t\t\t\t'Validation Time': validation_time})\n\n\t\t\t# Save the final model to model_sav\n\t\t\tmodel_name = '%s-len%d-lr%s-epoch%d' % (\n\t\t\t\targs.task_name, length, str(learning_rate), args.num_train_epochs - 1)\n\t\t\toutput_model_file = os.path.join(args.output_dir, model_name)\n\t\t\tif not os.path.exists(output_model_file):\n\t\t\t\tos.makedirs(output_model_file)\n\n\t\t\tlogger.info('Saving model to %s' % output_model_file)\n\n\t\t\t# Save a trained model, configuration and tokenizer using `save_pretrained()`.\n\t\t\t# They can then be reloaded using `from_pretrained()`\n\t\t\tmodel_to_save = model.module if hasattr(model,\n\t\t\t 'module') else model # Take care of distributed/parallel training\n\t\t\tmodel_to_save.save_pretrained(output_model_file)\n\t\t\ttokenizer.save_pretrained(output_model_file)\n\n\t\t\t# Save results to file\n\t\t\toutput_result_file = os.path.join(args.output_dir, '%s_result.log' % args.task_name)\n\t\t\twith open(output_result_file, \"a\") as writer:\n\t\t\t\twriter.write(\"Sequence length: %d\\n\" % length)\n\t\t\t\twriter.write(\"Learning rate: %s\\n\" % str(learning_rate))\n\t\t\t\tfor i in range(int(args.num_train_epochs)):\n\t\t\t\t\twriter.write('Epoch: %d\\n' % i)\n\t\t\t\t\twriter.write(\"training loss: %1.3f, eval loss: %1.3f,eval accuracy: %1.3f\" \\\n\t\t\t\t\t % (training_stats[i]['Training Loss'], training_stats[i]['Valid. Loss'],\n\t\t\t\t\t training_stats[i]['Valid. 
Accuracy']))\n\t\t\t\t\twriter.write('\\n')\n\t\t\t\twriter.write('\\n')\n\n\tif args.do_test:\n\t\t# load model and tokenizer\n\t\tlogger.info(\"***** Load test model*****\")\n\t\tif args.load_model_name is not None:\n\t\t\t# Set tokenizer\n\t\t\ttokenizer = DistilBertTokenizer.from_pretrained(args.load_model_name, do_lower_case = True)\n\t\t\t# Set model\n\t\t\tmodel = DistilBertForSequenceClassification.from_pretrained(args.load_model_name, num_labels = 2)\n\t\t\tmodel.to(device)\n\t\t\tlogger.info(\"Model loaded: %s\" % args.load_model_name)\n\t\telse:\n\t\t\tlogger.error('No model loaded!')\n\n\t\tlearning_rate = float(args.load_model_name.split('-')[2][2:] + '-' + args.load_model_name.split('-')[3])\n\t\tlength = int(args.load_model_name.split('-')[1][3:])\n\t\tlogger.info(\" Sequence length = %d\" % length)\n\t\tlogger.info(\" Learning rate = %f\" % learning_rate)\n\n\t\tlogger.info(\"***** Load test data*****\")\n\t\t# Read and save training input_ids\n\t\tcached_input_file = os.path.join(args.data_dir, 'test_{0}_{1}'.format(\n\t\t\tstr(args.task_name), str(length)))\n\t\ttry:\n\t\t\twith open(cached_input_file, \"rb\") as reader:\n\t\t\t\ttest_inputs = pickle.load(reader)\n\t\t\t\treader.close()\n\t\texcept:\n\t\t\ttest_inputs = read_data(args.data_dir + 'test.tsv', tokenizer, length)\n\n\n\t\t\tif args.local_rank == -1:\n\t\t\t\tlogger.info(\" Saving eval features into cached file %s\", cached_input_file)\n\t\t\t\twith open(cached_input_file, \"wb\") as writer:\n\t\t\t\t\tpickle.dump(test_inputs, writer)\n\t\t\t\t\twriter.close()\n\n\t\ttest_dataloader = DataLoader(test_inputs, sampler = SequentialSampler(test_inputs),\n\t\t batch_size = args.test_batch_size)\n\n\t\tlogger.info(\"***** Predicting *****\")\n\t\t# Tracking variables\n\t\tpredictions, true_labels = [], []\n\t\ttotal_test_accuracy = 0.0\n\t\t# Predict\n\t\tfor batch in tqdm(test_dataloader, desc = \"Testing\"):\n\t\t\t# Add batch to GPU\n\t\t\tbatch = tuple(t.to(device) for t in batch)\n\n\t\t\t# Unpack the inputs from our dataloader\n\t\t\tb_input_ids, b_input_mask, b_labels = batch\n\t\t\tb_input_ids = b_input_ids.to(device)\n\t\t\tb_input_mask = b_input_mask.to(device)\n\t\t\tb_labels = b_labels.to(device)\n\n\t\t\t# Telling the model not to compute or store gradients, saving memory and\n\t\t\t# speeding up prediction\n\t\t\twith torch.no_grad():\n\t\t\t\t# Forward pass, calculate logit predictions\n\t\t\t\toutputs = model(b_input_ids, \n\t\t\t\t attention_mask = b_input_mask)\n\n\t\t\tlogits = outputs[0]\n\n\t\t\t# Move logits and labels to CPU\n\t\t\tlogits = logits.detach().cpu().numpy()\n\t\t\tlabel_ids = b_labels.to('cpu').numpy()\n\n\t\t\t# Calculate the accuracy for this batch of test sentences, and\n\t\t\t# accumulate it over all batches.\n\t\t\ttotal_test_accuracy += flat_accuracy(logits, label_ids)\n\n\t\t\t# Store predictions and true labels\n\t\t\tpredictions.append(logits)\n\t\t\ttrue_labels.append(label_ids)\n\n\t\t# Combine the results across all batches.\n\t\tflat_predictions = np.concatenate(predictions, axis = 0)\n\n\t\t# For each sample, pick the label (0 or 1) with the higher score.\n\t\tflat_predictions = np.argmax(flat_predictions, axis = 1).flatten()\n\n\t\t# Combine the correct labels for each batch into a single list.\n\t\tflat_true_labels = np.concatenate(true_labels, axis = 0)\n\n\t\tacc = (flat_predictions == flat_true_labels).mean()\n\t\tlogger.info(\"Testing accuracy from label comparison: %1.3f\" % acc)\n\n\t\t# Report the final accuracy for this validation 
run.\n\t\tavg_test_accuracy = total_test_accuracy / len(test_dataloader)\n\t\tlogger.info(\"Testing accuracy from average batch accuracy: %1.3f\" % avg_test_accuracy)\n\n\t\t# Calculate the MCC\n\t\tmcc = matthews_corrcoef(flat_true_labels, flat_predictions)\n\t\tlogger.info(\"Testing MCC: %1.3f\" % mcc)\n\n\tif args.do_test_no_label:\n\t\t# load model and tokenizer\n\t\tlogger.info(\"***** Load test model*****\")\n\t\tif args.load_model_name is not None:\n\t\t\t# Set tokenizer\n\t\t\ttokenizer = DistilBertTokenizer.from_pretrained(args.load_model_name, do_lower_case = True)\n\t\t\t# Set model\n\t\t\tmodel = DistilBertForSequenceClassification.from_pretrained(args.load_model_name, num_labels = 2)\n\t\t\tmodel.to(device)\n\t\t\tlogger.info(\"Model loaded: %s\" % args.load_model_name)\n\t\telse:\n\t\t\tlogger.error('No model loaded!')\n\n\t\ttest_inputs = read_test_data(args.data_dir + 'test_text.tsv', tokenizer, 32)\n\t\tprint(len(test_inputs))\n\t\ttest_dataloader = DataLoader(test_inputs, sampler = SequentialSampler(test_inputs),\n\t\t batch_size = args.test_batch_size)\n\n\t\tlogger.info(\"***** Predicting *****\")\n\t\t# Tracking variables\n\t\tpredictions = []\n\t\tfor batch in tqdm(test_dataloader, desc = \"Testing\"):\n\t\t\t# Add batch to GPU\n\t\t\tbatch = tuple(t.to(device) for t in batch)\n\n\t\t\t# Unpack the inputs from our dataloader\n\t\t\tb_input_ids, b_input_mask = batch\n\t\t\tb_input_ids = b_input_ids.to(device)\n\t\t\tb_input_mask = b_input_mask.to(device)\n\n\t\t\t# Telling the model not to compute or store gradients, saving memory and\n\t\t\t# speeding up prediction\n\t\t\twith torch.no_grad():\n\t\t\t\t# Forward pass, calculate logit predictions\n\t\t\t\toutputs = model(b_input_ids, \n\t\t\t\t attention_mask = b_input_mask)\n\n\t\t\tlogits = outputs[0]\n\n\t\t\t# Move logits and labels to CPU\n\t\t\tlogits = logits.detach().cpu().numpy()\n\t\t\t# Store predictions and true labels\n\t\t\tpredictions.append(logits)\n\n\t\t# Combine the results across all batches.\n\t\tflat_predictions = np.concatenate(predictions, axis = 0)\n\t\tprint(len(flat_predictions))\n\t\t# For each sample, pick the label (0 or 1) with the higher score.\n\t\tflat_predictions = list(np.argmax(flat_predictions, axis = 1).flatten())\n\n\t\tpred_labels = ['machine' if p == 1 else 'human' for p in flat_predictions]\n\t\tprint(len(pred_labels))\n\n\t\twith open(os.path.join(args.output_dir, 'test_predictions.csv'), 'w') as writer:\n\t\t\twriter.write('Id,Category\\n')\n\t\t\ti = 0\n\t\t\tfor p in pred_labels:\n\t\t\t\twriter.write('%d,%s\\n' % (i, p))\n\t\t\t\ti += 1\n\t\t\twriter.close()\n\n\tif args.do_test_generation:\n\n\t\t# load model\n\t\tlogger.info(\"***** Load test model*****\")\n\t\tif args.load_model_name is not None:\n\t\t\t# Set tokenizer\n\t\t\ttokenizer = DistilBertTokenizer.from_pretrained(args.load_model_name, do_lower_case = True)\n\t\t\t# Set model\n\t\t\tmodel = DistilBertForSequenceClassification.from_pretrained(args.load_model_name, num_labels = 2)\n\t\t\tmodel.to(device)\n\t\telse:\n\t\t\tlogger.error('No model loaded!')\n\n\t\tlearning_rate = float(args.load_model_name.split('-')[2][2:] + '-' + args.load_model_name.split('-')[3])\n\t\tlength = int(args.load_model_name.split('-')[1][3:])\n\t\tlogger.info(\" Sequence length = %d\" % length)\n\t\tlogger.info(\" Learning rate = %f\" % learning_rate)\n\n\t\tlogger.info(\"***** Load test data*****\")\n\t\t# Read and save training input_ids\n\t\ttext = []\n\t\twith open(os.path.join(\n\t\t\t\t'/home/liyixi/hw5/output/generation/%s' % 
args.generate_text_name)) as infile:\n\t\t\tfor _ in range(20):\n\t\t\t\tsent = infile.readline().lstrip().rstrip()\n\t\t\t\tif len(sent) != 0:\n\t\t\t\t\ttext.append(sent)\n\t\tinfile.close()\n\t\tlabels = [1] * len(text)\n\t\tprint(len(text))\n\t\tpreds = []\n\t\tfor sent in text:\n\t\t\tinput_ids = torch.tensor(tokenizer.encode(sent, add_special_tokens=True)).unsqueeze(0)\n\t\t\tlabel = torch.tensor([1]).unsqueeze(0)\n\t\t\tinput_ids = input_ids.to(device)\n\t\t\tlabel = label.to(device)\n\t\t\toutput = model(input_ids=input_ids, labels=label)\n\t\t\tlogits = output[1]\n\t\t\tif len(preds) == 0:\n\t\t\t\tpreds.append(logits.detach().cpu().numpy())\n\t\t\telse:\n\t\t\t\tpreds[0] = np.append(\n\t\t\t\t\tpreds[0], logits.detach().cpu().numpy(), axis = 0)\n\n\t\t# Compare the stacked logits against the all-ones labels of the generated text.\n\t\tacc = flat_accuracy(preds[0], np.array(labels))\n\n\t\tlogger.info(\"***** Test results *****\")\n\t\t# Report the final accuracy for this test run.\n\t\tlogger.info(\"average accuracy: %f\" % acc)\n\t\t# Measure how long the eval run took.\n\n\t\tpreds = preds[0]\n\t\tpreds = preds.argmax(1)\n\n\t\toutput_test_pred_file = os.path.join(args.output_dir, '%s-test_generation_pred.csv' % args.task_name)\n\t\twith open(output_test_pred_file, \"w\") as writer:\n\t\t\twriter.write(\"Id,Prediction,Generate_text\\n\")\n\t\t\tfor i in range(len(preds)):\n\t\t\t\twriter.write(\"%d,%d,%s\\n\" % (i, preds[i], text[i]))\n","sub_path":"hw5_deeplearning/script/classification/model_classification_distilbert.py","file_name":"model_classification_distilbert.py","file_ext":"py","file_size_in_byte":27356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"136425601","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 15 22:29:17 2019\n\n@author: arynchoong\nGame objects\n\"\"\"\nimport random\n\nSCREENWIDTH = 1024\nSTARTY = 20\nMARGINX = 6\nBOUNDY = 600\nFONTWIDTH = 9 # based on Consolas Bold 16 pt font size\n\nclass Word:\n    def __init__(self, word):\n        self.text = word\n        self.len = len(self.text)\n        self.x = 0\n        self.y = 6\n        self.typedidx = -1\n        self.surf = None\n        self.rect = None\n    \n    def get_x(self):\n        return self.x\n    def set_x(self, x):\n        self.x = x\n        return\n    def get_y(self):\n        return self.y\n    def __len__(self):\n        return self.len\n    def move(self, delta):\n        self.y += delta\n        return self.y\n    def typed(self):\n        self.typedidx += 1\n        if (self.typedidx >= (self.len-1)):\n            # completed typing word\n            self.typedidx = -2\n            return False\n        return True\n    def typed_reset(self):\n        self.typedidx = -1\n        return\n    def set_remove(self):\n        self.typedidx = -2\n        return\n    def set_display(self, surf, rect):\n        self.surf = surf\n        self.rect = rect\n        return\n    def get_surf(self):\n        return self.surf\n    def get_rect(self):\n        return self.rect\n\n\nclass Block:\n    def __init__(self, flag=True, height = 0):\n        self.flag = flag\n        self.height = height\n        return\n    def set_flag(self, flag):\n        self.flag = flag\n        return\n    def get_flag(self):\n        return self.flag\n    def set_height(self, height):\n        self.height = height\n        return\n    def get_height(self):\n        return self.height\n    \nclass Cityline:\n    def __init__(self):\n        self.len = int((SCREENWIDTH-MARGINX*2)/10)\n        # Create cityline\n        self.cityline = []\n        # with 10 px blocks, randomize building height\n        for i in range(self.len):\n            # each block is (flag,height) \n            self.cityline.append(Block(True, random.randint(1,20)))\n        # n blocks require n+1 coordinates. 
last block redundant\n self.cityline[-1].set_flag(False) \n return\n \n def __iter__(self):\n return CityIterator(self.cityline)\n \n def __len__(self):\n return self.len\n \n def check_gameover(self):\n if all(block.get_flag() == False for block in self.cityline):\n return True\n return False\n \n def del_blocks(self, startx, strlen):\n startidx = int((startx-MARGINX) / 10)\n endidx = (startidx + int((FONTWIDTH * strlen) / 10))\n if endidx >= self.len:\n endidx = self.len-1\n for i in range(startidx, endidx+1):\n self.cityline[i].set_flag(False)\n return\n \n def get_block(self, index):\n return self.cityline[index]\n \nclass CityIterator:\n def __init__(self, city):\n self.city = city\n self.index = 0\n \n def __next__(self):\n try:\n block = self.city[self.index]\n except IndexError:\n raise StopIteration()\n self.index += 1\n return block\n \n def __iter__(self):\n return self\n\n\nclass Objects:\n def __init__(self, wordlist=None, font_size=16):\n if wordlist == None:\n self.wordlist = ['a','b','c','d','e','f','g','h','i','j','k',\n 'l','m','n','o','p','q','r','s','t','u','v',\n 'x','y','z']\n random.shuffle(self.wordlist)\n else:\n self.wordlist = wordlist\n self.max_x = SCREENWIDTH - MARGINX\n self.font_size = font_size\n self.game_words = []\n self.cityline = Cityline()\n \n def add_word(self):\n if not self.wordlist:\n return False\n \n # calculate list of city blocks available\n avail_block = []\n for idx, block in enumerate(self.cityline):\n if block.get_flag() == True:\n avail_block.append(idx)\n # remove blocks with words already above\n if self.game_words:\n new_avail = avail_block.copy()\n for word in self.game_words:\n startidx = int((word.get_x()-(MARGINX))/10)\n endidx = startidx + int(len(word)/10)\n for idx in range(startidx,endidx+1):\n if idx in new_avail:\n new_avail.remove(idx)\n if new_avail:\n avail_block = new_avail\n \n word = Word(self.wordlist.pop(0))\n # calculate max x of word\n bound_x = self.max_x - (FONTWIDTH * len(word))\n if not avail_block:\n word.set_x(random.randint(MARGINX*2,bound_x))\n else:\n # randomly select block\n idx = random.choice(avail_block)\n # calculate start x of word\n if ((idx*10)+MARGINX) > bound_x:\n word.set_x(bound_x)\n else:\n word.set_x((idx*10)+MARGINX)\n self.game_words.append(word)\n return True\n \n def move(self, delta):\n # move all game words downwards\n if not self.game_words:\n return\n for word in self.game_words:\n word.move(delta)\n if word.get_y() >= BOUNDY:\n word.set_remove()\n self.cityline.del_blocks(word.get_x(),len(word))\n self.cleanup()\n return\n \n def isEmpty(self):\n if self.wordlist:\n return False\n if self.game_words:\n return False\n return True\n \n def cleanup(self):\n if not self.game_words:\n return\n # remove words from game_words with word.typedidx == -2\n self.game_words = [w for w in self.game_words if w.typedidx != -2]\n \n def is_gameover(self):\n return self.cityline.check_gameover()\n \n","sub_path":"python/src/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"26917570","text":"from opengever.dossier.behaviors.dossier import IDossierMarker\nfrom opengever.dossier.resolve import AfterResolveJobs\nfrom opengever.nightlyjobs.provider import NightlyJobProviderBase\nfrom plone import api\n\n\nMAX_CONVERSION_REQUESTS_PER_NIGHT = 10000\n\n# Track total number of conversion requests sent per nightly run\nsent_conversion_requests = 0\n\n\nclass 
ExecuteNightlyAfterResolveJobs(NightlyJobProviderBase):\n\n    def __init__(self, context, request, logger):\n        super(ExecuteNightlyAfterResolveJobs, self).__init__(context, request, logger)\n\n        self.catalog = api.portal.get_tool('portal_catalog')\n\n        # Get all dossiers that are resolved, but still have\n        # AfterResolveJobs pending\n        self.query = dict(\n            object_provides=IDossierMarker.__identifier__,\n            review_state='dossier-state-resolved',\n            after_resolve_jobs_pending=True,\n        )\n\n    def __iter__(self):\n        pending = self.catalog.unrestrictedSearchResults(self.query)\n        for brain in pending:\n            self.logger.info('sent_conversion_requests: {}'.format(\n                sent_conversion_requests))\n            if sent_conversion_requests >= MAX_CONVERSION_REQUESTS_PER_NIGHT:\n                self.logger.warn(\n                    \"Reached MAX_CONVERSION_REQUESTS_PER_NIGHT \"\n                    \"(%r) limit, stopping after resolve jobs for tonight.\" %\n                    MAX_CONVERSION_REQUESTS_PER_NIGHT)\n                # PEP 479: end the generator with a plain return; raising\n                # StopIteration here becomes a RuntimeError on Python 3.7+.\n                return\n            yield {'path': brain.getPath()}\n\n    def __len__(self):\n        resultset = self.catalog.unrestrictedSearchResults(self.query)\n        return resultset.actual_result_count\n\n    def run_job(self, job, interrupt_if_necessary):\n        global sent_conversion_requests\n        path = job['path']\n        dossier = self.context.unrestrictedTraverse(path)\n        self.logger.info(\"Running AfterResolve jobs for %r\" % dossier)\n        after_resolve_jobs = AfterResolveJobs(dossier)\n        after_resolve_jobs.execute(nightly_run=True)\n        sent_conversion_requests += after_resolve_jobs.num_pdf_conversions\n","sub_path":"opengever/dossier/nightly_after_resolve_job.py","file_name":"nightly_after_resolve_job.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"171030715","text":"import tornado.web\nimport requests\nimport settings\nimport simplejson as json\n\nfrom lib import userdb\n\nclass BaseHandler(tornado.web.RequestHandler):\n    def get_current_user(self):\n        return self.get_secure_cookie(\"username\")\n\n    def send_email(self, from_user, to_user, subject, text):\n        return requests.post(\n            \"https://sendgrid.com/api/mail.send.json\",\n            data={\n                \"api_user\":settings.get('sendgrid_user'),\n                \"api_key\":settings.get('sendgrid_secret'),\n                \"from\": from_user,\n                \"to\": to_user,\n                \"subject\": subject,\n                \"text\": text\n            },\n            verify=False\n        )\n\n    def is_blacklisted(self, screen_name):\n        u = userdb.get_user_by_screen_name(screen_name)\n        if u and 'user' in u.keys() and 'is_blacklisted' in u['user'].keys() and u['user']['is_blacklisted']:\n            return True\n        return False\n\n    def current_user_can(self, capability):\n        \"\"\"\n        Tests whether a user can do a certain thing.\n        \"\"\"\n        result = False\n        u = userdb.get_user_by_screen_name(self.current_user)\n        if u and 'role' in u.keys():\n            try:\n                if capability in settings.get('%s_capabilities' % u['role']):\n                    result = True\n            except:\n                result = False\n        return result\n\n    def api_response(self, data):\n        # return an api response in the proper output format with status_code == 200\n        self.write_api_response(data, 200, \"OK\")\n\n    def error(self, status_code, status_txt, data=None):\n        # return an api error in the proper output format\n        self.write_api_response(status_code=status_code, status_txt=status_txt, data=data)\n\n    def write_api_response(self, data, status_code, status_txt):\n        # return an api error based on the appropriate request format (ie: json)\n        format = self.get_argument('format', 'json')\n        callback = self.get_argument('callback', None)\n        if format not in [\"json\"]:\n            status_code = 500\n            status_txt = 
\"INVALID_ARG_FORMAT\"\n            data = None\n            format = \"json\"\n        response = {'status_code':status_code, 'status_txt':status_txt, 'data':data}\n\n        if format == \"json\":\n            data = json.dumps(response)\n            if callback:\n                self.set_header(\"Content-Type\", \"application/javascript; charset=utf-8\")\n                self.write('%s(%s)' % (callback, data))\n            else:\n                self.set_header(\"Content-Type\", \"application/json; charset=utf-8\")\n                self.write(data)\n        self.finish()\n","sub_path":"app/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"172247378","text":"from django.db import models\n\n\nclass Post(models.Model):\n\n    class Meta:\n        db_table = \"post\"\n        verbose_name = \"Новость\"\n        verbose_name_plural = \"Новости\"\n        ordering = [\"create\"]\n\n    title = models.CharField(\"Заголовок\", max_length=100)\n    text = models.TextField(\"Текст\", max_length=1000)\n    image = models.ImageField(\"Изображение\", upload_to=\"news/\", blank=True)\n    create = models.DateTimeField(\"Опубликована\", auto_now_add=True)\n    update = models.DateTimeField(\"Изменена\", auto_now=True)  # auto_now refreshes on every save; auto_now_add would never update\n    moder = models.BooleanField(\"Модерация\")\n\n    def __str__(self):\n        return self.title\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"149224784","text":"import argparse\nimport collections\nimport json\nimport os\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport ckiptagger\nimport tensorflow as tf\nfrom tqdm import tqdm\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('data_path', type=Path, help='Original data JSON file')\n    parser.add_argument('prediction_path', type=Path, help='Model prediction JSON file')\n    parser.add_argument('output_path', type=Path, help='Evaluation result save file')\n    parser.add_argument('ckip_model_dir', type=Path, help='CKIP model directory')\n    args = parser.parse_args()\n\n    return vars(args)\n\n\ndef load_json(json_path):\n    print(f'[*] Loading {json_path}...', end='', flush=True)\n    with open(json_path) as f:\n        result = json.load(f)\n    print('done')\n\n    return result\n\n\ndef save_json(data, json_path):\n    print(f'[*] Saving to {json_path}...', end='', flush=True)\n    with open(json_path, 'w') as f:\n        json.dump(data, f)\n    print('done')\n\n\ndef collect_answers(data):\n    answers = {}\n    for d in data['data']:\n        for p in d['paragraphs']:\n            for qa in p['qas']:\n                answers[qa['id']] = {\n                    'answerable': qa['answerable'],\n                    'answers': [a['text'] for a in qa['answers']]\n                }\n\n    return answers\n\n\nclass Tokenizer:\n    def __init__(self, model_dir):\n        print(f'[*] Creating CKIP tokenizer from {model_dir}...', end='', flush=True)\n        self._ws = ckiptagger.WS(model_dir)\n        self._pos = ckiptagger.POS(model_dir)\n        self._pos_punc_class_suffix = 'CATEGORY'\n        print('done')\n\n    def __call__(self, text, remove_punc=False):\n        tokens = self._ws([text])[0]\n        if not remove_punc:\n            return tokens\n\n        pos = self._pos([tokens])[0]\n        tokens = [t for t, p in zip(tokens, pos)\n                  if not p.endswith(self._pos_punc_class_suffix)]\n\n        return tokens\n\n\ndef compute_em(ans, pred):\n    def em(a, p):\n        return int(''.join(a) == ''.join(p))\n\n    return max([em(a, pred) for a in ans])\n\n\ndef compute_f1(ans, pred):\n    def f1(a, p):\n        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n        if len(a) == 0 or len(p) == 0:\n            return int(''.join(a) == 
''.join(p))\n\n common = collections.Counter(a) & collections.Counter(p)\n tp = sum(common.values())\n if tp == 0:\n return 0\n precision = tp / len(p)\n recall = tp / len(a)\n\n return (2 * precision * recall) / (precision + recall)\n\n return max([f1(a, pred) for a in ans])\n\n\ndef compute_metric(ans, pred, tokenizer):\n ans = [tokenizer(a, remove_punc=True) for a in ans]\n pred = tokenizer(pred, remove_punc=True)\n\n return {\n 'em': compute_em(ans, pred),\n 'f1': compute_f1(ans, pred)\n }\n\n\ndef compute_metrics(answers, predictions, tokenizer):\n metrics = []\n for id_ in tqdm(list(answers.keys()), desc='[*] Evaluating', dynamic_ncols=True):\n if id_ not in predictions:\n print(f'[!] Cannot find answer for id {id_} in model predictions')\n continue\n answerable = answers[id_]['answerable']\n prediction = predictions[id_]\n metric = compute_metric(answers[id_]['answers'], prediction, tokenizer)\n metrics.append({\n **metric,\n 'answerable': answerable,\n 'answerable_acc': int(answerable ^ (prediction == ''))\n })\n\n n_total = len(metrics)\n n_answerable = len([m for m in metrics if m['answerable']])\n n_unanswerable = n_total - n_answerable\n result = {\n 'overall': {\n 'count': n_total,\n 'em': sum([m['em'] for m in metrics]) / n_total,\n 'f1': sum([m['f1'] for m in metrics]) / n_total\n },\n 'answerable': {\n 'count': n_answerable,\n 'em': sum([m['em'] for m in metrics if m['answerable']]) / n_answerable,\n 'f1': sum([m['f1'] for m in metrics if m['answerable']]) / n_answerable\n },\n 'unanswerable': {\n 'count': n_unanswerable,\n 'em': sum([m['em'] for m in metrics if not m['answerable']]) / n_unanswerable,\n 'f1': sum([m['f1'] for m in metrics if not m['answerable']]) / n_unanswerable\n },\n 'answerable accuracy': sum(m['answerable_acc'] for m in metrics) / n_total\n }\n\n return result\n\n\ndef main(data_path, prediction_path, output_path, ckip_model_dir):\n # Surpress TensorFlow and OpenMP messages\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n os.environ[\"KMP_WARNINGS\"] = \"FALSE\"\n\n # Set TensorFlow random seed\n tf.compat.v1.set_random_seed(19)\n\n print(f'[-] Original data file: {data_path}')\n print(f'[-] Model prediction file: {prediction_path}')\n print(f'[-] Evaluation output path: {output_path}\\n')\n\n # Load gold answers\n data = load_json(data_path)\n answers = collect_answers(data)\n\n # Load model predictions\n predictions = load_json(prediction_path)\n\n # Create tokenizer\n tokenizer = Tokenizer(ckip_model_dir)\n\n # Compute metrics\n result = compute_metrics(answers, predictions, tokenizer)\n\n # Save evaluation result\n save_json(result, output_path)\n pprint(result)\n\n\nif __name__ == \"__main__\":\n kwargs = parse_args()\n main(**kwargs)","sub_path":"BertforQA/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"316973444","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport 
asyncio\nfrom typing import Optional\n\nfrom google.api_core.exceptions import GoogleAPICallError, FailedPrecondition\n\nfrom google.cloud.pubsublite.internal.wait_ignore_cancelled import wait_ignore_errors\nfrom google.cloud.pubsublite.internal.wire.connection import (\n Connection,\n ConnectionFactory,\n)\nfrom google.cloud.pubsublite.internal.wire.connection_reinitializer import (\n ConnectionReinitializer,\n)\nfrom google.cloud.pubsublite.internal.wire.flow_control_batcher import (\n FlowControlBatcher,\n)\nfrom google.cloud.pubsublite.internal.wire.retrying_connection import RetryingConnection\nfrom google.cloud.pubsublite.internal.wire.subscriber import Subscriber\nfrom google.cloud.pubsublite_v1 import (\n SubscribeRequest,\n SubscribeResponse,\n FlowControlRequest,\n SequencedMessage,\n InitialSubscribeRequest,\n SeekRequest,\n Cursor,\n)\n\n\nclass SubscriberImpl(\n Subscriber, ConnectionReinitializer[SubscribeRequest, SubscribeResponse]\n):\n _initial: InitialSubscribeRequest\n _token_flush_seconds: float\n _connection: RetryingConnection[SubscribeRequest, SubscribeResponse]\n\n _outstanding_flow_control: FlowControlBatcher\n\n _reinitializing: bool\n _last_received_offset: Optional[int]\n\n _message_queue: \"asyncio.Queue[SequencedMessage]\"\n\n _receiver: Optional[asyncio.Future]\n _flusher: Optional[asyncio.Future]\n\n def __init__(\n self,\n initial: InitialSubscribeRequest,\n token_flush_seconds: float,\n factory: ConnectionFactory[SubscribeRequest, SubscribeResponse],\n ):\n self._initial = initial\n self._token_flush_seconds = token_flush_seconds\n self._connection = RetryingConnection(factory, self)\n self._outstanding_flow_control = FlowControlBatcher()\n self._reinitializing = False\n self._last_received_offset = None\n self._message_queue = asyncio.Queue()\n self._receiver = None\n self._flusher = None\n\n async def __aenter__(self):\n await self._connection.__aenter__()\n return self\n\n def _start_loopers(self):\n assert self._receiver is None\n assert self._flusher is None\n self._receiver = asyncio.ensure_future(self._receive_loop())\n self._flusher = asyncio.ensure_future(self._flush_loop())\n\n async def _stop_loopers(self):\n if self._receiver:\n self._receiver.cancel()\n await wait_ignore_errors(self._receiver)\n self._receiver = None\n if self._flusher:\n self._flusher.cancel()\n await wait_ignore_errors(self._flusher)\n self._flusher = None\n\n def _handle_response(self, response: SubscribeResponse):\n if \"messages\" not in response:\n self._connection.fail(\n FailedPrecondition(\n \"Received an invalid subsequent response on the subscribe stream.\"\n )\n )\n return\n self._outstanding_flow_control.on_messages(response.messages.messages)\n for message in response.messages.messages:\n if (\n self._last_received_offset is not None\n and message.cursor.offset <= self._last_received_offset\n ):\n self._connection.fail(\n FailedPrecondition(\n \"Received an invalid out of order message from the server. 
Message is {}, previous last received is {}.\".format(\n message.cursor.offset, self._last_received_offset\n )\n )\n )\n return\n self._last_received_offset = message.cursor.offset\n for message in response.messages.messages:\n # queue is unbounded.\n self._message_queue.put_nowait(message)\n\n async def _receive_loop(self):\n while True:\n response = await self._connection.read()\n self._handle_response(response)\n\n async def _try_send_tokens(self):\n req = self._outstanding_flow_control.release_pending_request()\n if req is None:\n return\n try:\n await self._connection.write(SubscribeRequest(flow_control=req))\n except GoogleAPICallError:\n # May be transient, in which case these tokens will be resent.\n pass\n\n async def _flush_loop(self):\n while True:\n await asyncio.sleep(self._token_flush_seconds)\n await self._try_send_tokens()\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self._stop_loopers()\n await self._connection.__aexit__(exc_type, exc_val, exc_tb)\n\n async def reinitialize(\n self, connection: Connection[SubscribeRequest, SubscribeResponse]\n ):\n self._reinitializing = True\n await self._stop_loopers()\n await connection.write(SubscribeRequest(initial=self._initial))\n response = await connection.read()\n if \"initial\" not in response:\n self._connection.fail(\n FailedPrecondition(\n \"Received an invalid initial response on the subscribe stream.\"\n )\n )\n return\n if self._last_received_offset is not None:\n # Perform a seek to get the next message after the one we received.\n await connection.write(\n SubscribeRequest(\n seek=SeekRequest(\n cursor=Cursor(offset=self._last_received_offset + 1)\n )\n )\n )\n seek_response = await connection.read()\n if \"seek\" not in seek_response:\n self._connection.fail(\n FailedPrecondition(\n \"Received an invalid seek response on the subscribe stream.\"\n )\n )\n return\n tokens = self._outstanding_flow_control.request_for_restart()\n if tokens is not None:\n await connection.write(SubscribeRequest(flow_control=tokens))\n self._reinitializing = False\n self._start_loopers()\n\n async def read(self) -> SequencedMessage:\n return await self._connection.await_unless_failed(self._message_queue.get())\n\n async def allow_flow(self, request: FlowControlRequest):\n self._outstanding_flow_control.add(request)\n if (\n not self._reinitializing\n and self._outstanding_flow_control.should_expedite()\n ):\n await self._try_send_tokens()\n","sub_path":"google/cloud/pubsublite/internal/wire/subscriber_impl.py","file_name":"subscriber_impl.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"440669536","text":"\"\"\"Memory architecture for reinforcement learning.\"\"\"\n\nfrom collections import namedtuple, defaultdict\nfrom collections.abc import Hashable\nfrom uuid import uuid4 as uuid\n\nfrom networkx import MultiDiGraph\n\nfrom .rl_environments import State, Action, Environment\nfrom .data_structures import TreeMultiMap\n\n\ndef memory_architecture(cls):\n \"\"\"Decorate an Environment to become a memory architecture.\n\n Arguments:\n cls (class): The Environment superclass.\n\n Returns:\n class: A subclass with a memory architecture.\n \"\"\"\n # pylint: disable = too-many-statements\n assert issubclass(cls, Environment)\n\n # pylint: disable = invalid-name\n BufferProperties = namedtuple('BufferProperties', ['copyable', 'writable'])\n\n class MemoryArchitectureMetaEnvironment(cls):\n \"\"\"A subclass to add a long-term 
memory to an Environment.\"\"\"\n\n BUFFERS = {\n 'perceptual': BufferProperties(\n copyable=True,\n writable=False,\n ),\n 'query': BufferProperties(\n copyable=False,\n writable=True,\n ),\n 'retrieval': BufferProperties(\n copyable=True,\n writable=False,\n ),\n 'scratch': BufferProperties(\n copyable=True,\n writable=True,\n ),\n }\n\n def __init__(\n self,\n buf_ignore=None, internal_reward=-0.1, max_internal_actions=None,\n knowledge_store=None,\n *args, **kwargs,\n ): # noqa: D102\n \"\"\"Initialize a memory architecture.\n\n Arguments:\n buf_ignore (Iterable[str]): Buffers that should not be created.\n internal_reward (float): Reward for internal actions. Defaults to -0.1.\n max_internal_actions (int): Max number of consecutive internal actions. Defaults to None.\n knowledge_store (KnowledgeStore): The memory model to use.\n *args: Arbitrary positional arguments.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n # pylint: disable = keyword-arg-before-vararg\n # parameters\n if buf_ignore is None:\n buf_ignore = set()\n self.buf_ignore = set(buf_ignore)\n self.internal_reward = internal_reward\n self.max_internal_actions = max_internal_actions\n # infrastructure\n if knowledge_store is None:\n knowledge_store = NaiveDictKB()\n self.knowledge_store = knowledge_store\n # variables\n self.buffers = {}\n self.internal_action_count = 0\n # initialization\n self._clear_buffers()\n super().__init__(*args, **kwargs)\n\n @property\n def slots(self):\n \"\"\"Yield all values of all attributes in all buffers.\n\n Yields:\n Tuple[str, str, Any]: A tuple of buffer, attribute, and value.\n \"\"\"\n for buf, attrs in sorted(self.buffers.items()):\n for attr, val in attrs.items():\n yield buf, attr, val\n\n def to_dict(self):\n \"\"\"Convert the state into a dictionary.\"\"\"\n return {buf + '_' + attr: val for buf, attr, val in self.slots}\n\n def get_state(self): # noqa: D102\n # pylint: disable = missing-docstring\n return State(**self.to_dict())\n\n def get_observation(self): # noqa: D102\n # pylint: disable = missing-docstring\n return State(**self.to_dict())\n\n def reset(self): # noqa: D102\n # pylint: disable = missing-docstring\n super().reset()\n self._clear_buffers()\n\n def _clear_buffers(self):\n self.buffers = {}\n for buf, _ in self.BUFFERS.items():\n if buf in self.buf_ignore:\n continue\n self.buffers[buf] = TreeMultiMap()\n\n def _clear_ltm_buffers(self):\n self.buffers['query'].clear()\n self.buffers['retrieval'].clear()\n\n def start_new_episode(self): # noqa: D102\n # pylint: disable = missing-docstring\n super().start_new_episode()\n self._clear_buffers()\n self._sync_input_buffers()\n self.internal_action_count = 0\n\n def get_actions(self): # noqa: D102\n # pylint: disable = missing-docstring\n actions = super().get_actions()\n if actions == []:\n return actions\n actions = set(actions)\n allow_internal_actions = (\n self.max_internal_actions is None\n or self.internal_action_count < self.max_internal_actions\n )\n if allow_internal_actions:\n actions.update(self._generate_copy_actions())\n actions.update(self._generate_delete_actions())\n actions.update(self._generate_retrieve_actions())\n actions.update(self._generate_cursor_actions())\n return sorted(actions)\n\n def _generate_copy_actions(self):\n actions = []\n for src_buf, src_props in self.BUFFERS.items():\n if src_buf in self.buf_ignore or not src_props.copyable:\n continue\n for attr in self.buffers[src_buf]:\n for dst_buf, dst_prop in self.BUFFERS.items():\n copyable = (\n src_buf != dst_buf\n and dst_buf not in 
self.buf_ignore\n and dst_prop.writable\n and not (src_buf == 'perceptual' and dst_buf == 'scratch')\n and self.buffers[src_buf][attr] != self.buffers[dst_buf].get(attr, None)\n )\n if not copyable:\n continue\n actions.append(Action(\n 'copy',\n src_buf=src_buf,\n src_attr=attr,\n dst_buf=dst_buf,\n dst_attr=attr,\n ))\n return actions\n\n def _generate_delete_actions(self):\n actions = []\n for buf, prop in self.BUFFERS.items():\n if buf in self.buf_ignore or not prop.writable:\n continue\n for attr in self.buffers[buf]:\n actions.append(Action(\n 'delete',\n buf=buf,\n attr=attr,\n ))\n return actions\n\n def _generate_retrieve_actions(self):\n actions = []\n for buf, buf_props in self.BUFFERS.items():\n if buf in self.buf_ignore or not buf_props.copyable:\n continue\n for attr, value in self.buffers[buf].items():\n if self.knowledge_store.retrievable(value):\n actions.append(Action('retrieve', buf=buf, attr=attr))\n return actions\n\n def _generate_cursor_actions(self):\n actions = []\n if self.buffers['retrieval']:\n if self.knowledge_store.has_prev_result:\n actions.append(Action('prev-result'))\n if self.knowledge_store.has_next_result:\n actions.append(Action('next-result'))\n return actions\n\n def react(self, action): # noqa: D102\n # pylint: disable = missing-docstring\n # handle internal actions and update internal buffers\n assert action in self.get_actions(), f'{action} not in {self.get_actions()}'\n external_action = self._process_internal_actions(action)\n if external_action:\n reward = super().react(action)\n self.internal_action_count = 0\n else:\n reward = self.internal_reward\n self.internal_action_count += 1\n self._sync_input_buffers()\n return reward\n\n def _process_internal_actions(self, action):\n \"\"\"Process internal actions, if appropriate.\n\n Arguments:\n action (Action): The action, which may or may not be internal.\n\n Returns:\n bool: Whether the action was external.\n \"\"\"\n if action.name == 'copy':\n val = self.buffers[action.src_buf][action.src_attr]\n self.buffers[action.dst_buf][action.dst_attr] = val\n if action.dst_buf == 'query':\n self._query_ltm()\n elif action.name == 'delete':\n del self.buffers[action.buf][action.attr]\n if action.buf == 'query':\n self._query_ltm()\n elif action.name == 'retrieve':\n result = self.knowledge_store.retrieve(self.buffers[action.buf][action.attr])\n self.buffers['query'].clear()\n if result is None:\n self.buffers['retrieval'].clear()\n else:\n self.buffers['retrieval'] = result\n elif action.name == 'prev-result':\n self.buffers['retrieval'] = self.knowledge_store.prev_result()\n elif action.name == 'next-result':\n self.buffers['retrieval'] = self.knowledge_store.next_result()\n else:\n return True\n return False\n\n def _query_ltm(self):\n if not self.buffers['query']:\n self.buffers['retrieval'].clear()\n return\n result = self.knowledge_store.query(self.buffers['query'])\n if result is None:\n self.buffers['retrieval'].clear()\n else:\n self.buffers['retrieval'] = result\n\n def _sync_input_buffers(self):\n # update input buffers\n self.buffers['perceptual'] = super().get_observation()\n\n def add_to_ltm(self, **kwargs):\n \"\"\"Add a memory element to long-term memory.\n\n Arguments:\n **kwargs: The key-value pairs of the memory element.\n \"\"\"\n self.knowledge_store.store(**kwargs)\n\n return MemoryArchitectureMetaEnvironment\n\n\nclass KnowledgeStore:\n \"\"\"Generic interface to a knowledge base.\"\"\"\n\n def clear(self):\n \"\"\"Remove all knowledge from the KB.\"\"\"\n raise 
NotImplementedError()\n\n def store(self, act_on, time_stamp=None, mem_id=None, **kwargs):\n \"\"\"Add knowledge to the KB.\n\n Arguments:\n act_on (bool): Whether to trigger activation for the stored element.\n time_stamp (float): The time of the store, passed to activation. Defaults to None.\n mem_id (any): The ID of the element. Defaults to None.\n **kwargs: Attributes and values of the element to add.\n\n Returns:\n bool: True if the add was successful.\n \"\"\"\n raise NotImplementedError()\n\n def retrieve(self, mem_id):\n \"\"\"Retrieve the element with the given ID.\n\n Arguments:\n mem_id (any): The ID of the desired element.\n\n Returns:\n TreeMultiMap: The desired element, or None.\n \"\"\"\n raise NotImplementedError()\n\n def query(self, attr_vals):\n \"\"\"Search the KB for elements with the given attributes.\n\n Arguments:\n attr_vals (Mapping[str, Any]): Attributes and values of the desired element.\n\n Returns:\n TreeMultiMap: A search result, or None.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def has_prev_result(self):\n \"\"\"Determine if a previous query result is available.\n\n Returns:\n bool: True if there is a previous result.\n \"\"\"\n raise NotImplementedError()\n\n def prev_result(self):\n \"\"\"Get the previous element that matches the most recent search.\n\n Returns:\n TreeMultiMap: A search result, or None.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def has_next_result(self):\n \"\"\"Determine if a next query result is available.\n\n Returns:\n bool: True if there is a next result.\n \"\"\"\n raise NotImplementedError()\n\n def next_result(self):\n \"\"\"Get the next element that matches the most recent search.\n\n Returns:\n TreeMultiMap: A search result, or None.\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def retrievable(mem_id):\n \"\"\"Determine if an object is a retrievable memory ID.\n\n Arguments:\n mem_id (any): The object to check.\n\n Returns:\n bool: True if the object is a retrievable memory ID.\n \"\"\"\n raise NotImplementedError()\n\n\nclass NaiveDictKB(KnowledgeStore):\n \"\"\"A list-of-dictionary implementation of a knowledge store.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the NaiveDictKB.\"\"\"\n self.knowledge = []\n self.query_index = None\n self.query_matches = []\n\n def clear(self): # noqa: D102\n self.knowledge = []\n self.query_index = None\n self.query_matches = []\n\n def store(self, mem_id=None, **kwargs): # noqa: D102\n self.knowledge.append(TreeMultiMap(**kwargs))\n return True\n\n def retrieve(self, mem_id): # noqa: D102\n raise NotImplementedError()\n\n def query(self, attr_vals): # noqa: D102\n candidates = []\n for candidate in self.knowledge:\n match = all(\n attr in candidate and candidate[attr] == val\n for attr, val in attr_vals.items()\n )\n if match:\n candidates.append(candidate)\n if candidates:\n # if the current retrieved item still matches the new query\n # leave it there but update the cached matches and index\n if self.query_index is not None:\n curr_retrieved = self.query_matches[self.query_index]\n else:\n curr_retrieved = None\n self.query_matches = sorted(candidates)\n # use the ValueError from list.index() to determine if the query still matches\n try:\n self.query_index = self.query_matches.index(curr_retrieved)\n except ValueError:\n self.query_index = 0\n return self.query_matches[self.query_index]\n self.query_index = None\n self.query_matches = []\n return None\n\n @property\n def has_prev_result(self): # noqa: D102\n # results cycle circularly, but guard against an empty match list\n return bool(self.query_matches)\n\n def prev_result(self): # noqa: D102\n self.query_index = (self.query_index - 1) % len(self.query_matches)\n return self.query_matches[self.query_index]\n\n @property\n def 
has_next_result(self): # noqa: D102\n # results cycle circularly, but guard against an empty match list\n return bool(self.query_matches)\n\n def next_result(self): # noqa: D102\n self.query_index = (self.query_index + 1) % len(self.query_matches)\n return self.query_matches[self.query_index]\n\n @staticmethod\n def retrievable(mem_id): # noqa: D102\n return False\n\n\nclass NetworkXKB(KnowledgeStore):\n \"\"\"A NetworkX implementation of a knowledge store.\"\"\"\n\n def __init__(self, activation_class=None):\n \"\"\"Initialize the NetworkXKB.\"\"\"\n # parameters\n if activation_class is None:\n # no activation model given: fall back to a no-op with the same (graph, mem_id, time_stamp) signature\n self.activation_fn = (lambda graph, mem_id, time_stamp: None)\n else:\n self.activation_fn = activation_class.activate\n self.activation_class = activation_class\n # variables\n self.graph = MultiDiGraph()\n self.inverted_index = defaultdict(set)\n self.query_results = None\n self.result_index = None\n self.clear()\n\n def clear(self): # noqa: D102\n self.graph.clear()\n self.inverted_index.clear()\n self.query_results = None\n self.result_index = None\n\n def get_activation(self, mem_id, current_time, pre_query):\n # NOTE: this still expects activation_class to provide decay_rate\n if mem_id not in self.graph:\n return 0\n # if pre_query, set current time to current_time - 0.00001\n if pre_query:\n current_time -= 0.00001\n total_act = 0\n for time_stamp, scale_factor in self.graph.nodes[mem_id]['activation']:\n time_since = current_time - time_stamp\n if time_since < 0: # time travel: ignore this tuple\n continue\n elif time_since == 0: # zero elapsed time: substitute a tiny epsilon, since 0 cannot be raised to a negative decay exponent\n total_act = total_act + scale_factor * (0.000000000001**(self.activation_class.decay_rate))\n else:\n total_act = total_act + scale_factor * (time_since**(self.activation_class.decay_rate))\n # print(total_act)\n return total_act\n\n\n def store(self, time_stamp, backlinks, act_on, mem_id=None, **kwargs): # noqa: D102\n if mem_id is None:\n mem_id = uuid()\n if mem_id not in self.graph: # create node and attributes if needed\n self.graph.add_node(mem_id, activation=[])\n for attribute, value in kwargs.items():\n if value not in self.graph:\n self.graph.add_node(value, activation=[])\n edges = self.graph.out_edges(mem_id, data=True)\n for (src, dest, attribute_dict) in edges:\n if dest == value:\n for key in attribute_dict:\n if attribute_dict[key] == attribute:\n # edge already exists, do not create edge\n # activate node, spread\n self.activation_fn(self.graph, mem_id, time_stamp)\n return True\n # edge does not exist, so create it\n self.graph.add_edge(mem_id, value, attribute=attribute)\n if backlinks:\n self.graph.add_edge(value, mem_id, attribute='backlink_from_' + attribute + '_to_' + mem_id)\n # FIXED what does inverted_index mean (a technique for speeding up search)\n # if value in self.graph:\n # if attribute == self.graph.out_edges(mem_id, data=True)['attribute']:\n # self.activation_fn(self.graph, mem_id, time_stamp)\n # return True\n self.inverted_index[attribute].add(mem_id)\n # activate node, spread\n if act_on:\n self.activation_fn(self.graph, mem_id, time_stamp)\n return True\n\n def _activate_and_return(self, time_stamp, mem_id):\n self.activation_fn(self.graph, mem_id, time_stamp)\n result = TreeMultiMap()\n for _, value, data in self.graph.out_edges(mem_id, data=True):\n # print(mem_id + ' ' + data['attribute'])\n result.add(data['attribute'], value)\n result.add('node_id', mem_id)\n return result\n\n def retrieve(self, time_stamp, mem_id): # noqa: D102\n if mem_id not in self.graph:\n return None\n return self._activate_and_return(time_stamp, mem_id)\n\n def query(self, time_stamp, pre_query, attr_vals): # noqa: D102\n\n # first pass: get candidates with all the attributes\n 
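# inverted_index maps each attribute name to the set of mem_ids carrying it, so intersecting those sets keeps exactly the elements that have every queried attribute\n 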
candidates = set.intersection(*(\n self.inverted_index[attribute] for attribute in attr_vals.keys()\n ))\n # second pass: get candidates with the correct values\n candidates = set(\n candidate for candidate in candidates\n if all((\n (candidate, value) in self.graph.edges\n and any(\n attributes['attribute'] == attribute\n for attributes in self.graph.get_edge_data(candidate, value).values()\n )\n ) for attribute, value in attr_vals.items())\n )\n # quit early if there are no results\n if not candidates:\n self.query_results = None\n self.result_index = None\n return None\n # final pass: sort results by activation\n self.query_results = sorted(\n candidates,\n key=(lambda mem_id: self.get_activation(mem_id, time_stamp, pre_query)),\n reverse=True,\n )\n self.result_index = 0\n return self._activate_and_return(time_stamp, self.query_results[self.result_index])\n\n @property\n def has_prev_result(self): # noqa: D102\n return (\n self.query_results is not None\n and self.result_index > 0\n )\n\n def prev_result(self, time_stamp): # noqa: D102\n self.result_index -= 1\n return self._activate_and_return(time_stamp, self.query_results[self.result_index])\n\n @property\n def has_next_result(self): # noqa: D102\n return (\n self.query_results is not None\n and self.result_index < len(self.query_results) - 1\n )\n\n def next_result(self, time_stamp): # noqa: D102\n self.result_index += 1\n return self._activate_and_return(time_stamp, self.query_results[self.result_index])\n\n @staticmethod\n def retrievable(mem_id): # noqa: D102\n return isinstance(mem_id, Hashable)\n\n\nclass SparqlKB(KnowledgeStore):\n \"\"\"An adaptor for RL agents to use KnowledgeSources.\"\"\"\n\n # FIXME arguably this should be abstracted and moved to KnowledgeStore\n Augment = namedtuple('Augment', 'old_attrs, transform')\n\n BAD_VALUES = set([\n '\"NAN\"^^',\n '\"NAN\"^^',\n ])\n\n def __init__(self, knowledge_source, augments=None):\n \"\"\"Initialize a SparqlKB.\n\n Arguments:\n knowledge_source (KnowledgeSource): A SPARQL knowledge source.\n augments (Sequence[Augment]): Additional values to add to results.\n \"\"\"\n # parameters\n self.source = knowledge_source\n if augments is None:\n augments = []\n self.augments = list(augments)\n # variables\n self.prev_query = None\n self.query_offset = 0\n # cache\n self.retrieve_cache = {}\n self.query_cache = {}\n\n def clear(self): # noqa: D102\n raise NotImplementedError()\n\n def store(self, mem_id=None, **kwargs): # noqa: D102\n raise NotImplementedError()\n\n\n\n def retrieve(self, mem_id): # noqa: D102\n valid_mem_id = (\n isinstance(mem_id, str)\n and mem_id.startswith('')\n )\n if not valid_mem_id:\n raise ValueError(\n f'mem_id should be a str of the form \"\", '\n f'but got: {mem_id}'\n )\n if mem_id not in self.retrieve_cache:\n result = self._true_retrieve(mem_id)\n for augment in self.augments:\n if all(attr in result for attr in augment.old_attrs):\n new_prop_val = augment.transform(result)\n if new_prop_val is not None:\n new_prop, new_val = new_prop_val\n result[new_prop] = new_val\n self.retrieve_cache[mem_id] = TreeMultiMap.from_dict(result)\n result = self.retrieve_cache[mem_id]\n self.prev_query = None\n self.query_offset = 0\n return result\n\n def _true_retrieve(self, mem_id):\n query = f'''\n SELECT DISTINCT ?attr ?value WHERE {{\n {mem_id} ?attr ?value .\n }}\n '''\n results = self.source.query_sparql(query)\n # FIXME HACK to avoid dealing with multi-valued attributes,\n # we only return the \"largest\" value for each attribute\n result = 
defaultdict(set)\n for binding in results:\n val = binding['value'].rdf_format\n if val in self.BAD_VALUES:\n continue\n result[binding['attr'].rdf_format].add(val)\n return {attr: max(vals) for attr, vals in result.items()}\n\n def query(self, attr_vals): # noqa: D102\n query_terms = tuple((k, v) for k, v in sorted(attr_vals.items()))\n if query_terms not in self.query_cache:\n mem_id = self._true_query(attr_vals)\n self.query_cache[query_terms] = mem_id\n mem_id = self.query_cache[query_terms]\n self.query_offset = 0\n if mem_id is None:\n self.prev_query = None\n return TreeMultiMap()\n else:\n self.prev_query = attr_vals\n return self.retrieve(mem_id)\n\n def _true_query(self, attr_vals, offset=0):\n condition = ' ; '.join(\n f'{attr} {val}' for attr, val in attr_vals.items()\n )\n query = f'''\n SELECT DISTINCT ?concept WHERE {{\n ?concept {condition} ;\n ?__name__ .\n }} ORDER BY ?__name__ LIMIT 1 OFFSET {offset}\n '''\n results = self.source.query_sparql(query)\n try:\n return next(iter(results))['concept'].rdf_format\n except StopIteration:\n return None\n\n @property\n def has_prev_result(self): # noqa: D102\n return self.prev_query is not None and self.query_offset > 0\n\n def prev_result(self): # noqa: D102\n if not self.has_prev_result:\n return None\n self.query_offset -= 1\n return self._true_query(self.prev_query, offset=self.query_offset)\n\n @property\n def has_next_result(self): # noqa: D102\n return self.prev_query is not None\n\n def next_result(self): # noqa: D102\n if not self.has_next_result:\n return None\n self.query_offset += 1\n return self._true_query(self.prev_query, offset=self.query_offset)\n\n @staticmethod\n def retrievable(mem_id): # noqa: D102\n return isinstance(mem_id, str) and mem_id.startswith(' 0:\n sharing = 1/len(result)\n else:\n sharing = 1\n for i in range(len(result)):\n if result[i] != mem_id and result[i] not in visited:\n # print('successor of ' + mem_id + \" is \" + result[i])\n # is sharing * scale_factor / 2 valid? 
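(each hop halves the contribution, so the total spread stays bounded) 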
ruth and bryce say yes\n visited.add(result[i])\n self._activate(visited, graph, result[i], time_stamp, sharing * scale_factor / 2, max_steps - 1)\n\n def activate(self, graph, mem_id, time_stamp, scale_factor=None, max_steps=None):\n if max_steps == 0:\n return\n if scale_factor is None:\n scale_factor = self.scale_factor\n if max_steps is None:\n max_steps = self.max_steps\n visited = {mem_id}\n self._activate(visited, graph, mem_id, time_stamp, scale_factor, max_steps)\n\n\n","sub_path":"research/rl_memory.py","file_name":"rl_memory.py","file_ext":"py","file_size_in_byte":27073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"289456542","text":"import sys\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nimport json\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n cmd = sys.argv[0]\n print(\n f\"\"\"\n Usage: {cmd} knoedler_0.csv knoedler.csv\n \"\"\".lstrip()\n )\n sys.exit(1)\n\n headerPath = sys.argv[1]\n contentPath = sys.argv[2]\n header = pd.read_csv(headerPath, dtype=object)\n df = pd.read_csv(contentPath, dtype=object, names=header.columns)\n\n multipleRecords = df.groupby(\"knoedler_number\").filter(lambda x: len(x) > 1)\n multipleRecords = multipleRecords.sort_values(['knoedler_number', 'entry_date_year', 'entry_date_month', 'entry_date_day'],ascending=True).groupby('knoedler_number')\n\n inventoryingRecords = []\n inventoryingDict = defaultdict(list)\n for group_name, df_group in multipleRecords:\n lastPurchase = \"\"\n for row_index, row in df_group.iterrows():\n if lastPurchase == \"\":\n lastPurchase = row[\"pi_record_no\"]\n else:\n if not pd.isna(row[\"purchase_seller_auth_name_1\"]):\n inventoryingRecords.append(row[\"pi_record_no\"])\n inventoryingDict[group_name].append(row[\"pi_record_no\"])\n if row[\"transaction\"] == \"Sold\":\n lastPurchase = \"\"\n finalDf = df[df[\"pi_record_no\"].isin(inventoryingRecords)]\n finalDf.to_csv(\"final.csv\", index=False)\n\n finalDict = {\"pi_record_no\" : inventoryingRecords}\n outputJson = open(\"sellers_to_be_deleted.json\", \"w\", encoding=\"utf-8\")\n json.dump(finalDict, outputJson, indent=2)\n","sub_path":"scripts/remove_seller_name.py","file_name":"remove_seller_name.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"356350425","text":"import toml\n\nfrom judge.utils.log import logger\n\n\nclass LanguageType(object):\n language_id = 0\n source_name = ''\n compile_command = ''\n running_command = ''\n execute_name = ''\n compile_args = []\n running_args = []\n memory = 512\n\n def __init__(self):\n pass\n\n def get_running_args(self):\n return self.running_args\n\n def to_compile_info(self):\n return {\n \"command\": self.compile_command,\n \"args\": ' '.join(self.compile_args),\n \"memory\": self.memory,\n }\n\n def get_compile_args(self):\n return self.compile_args\n\n def full_compile_command(self):\n args = self.compile_args[:]\n args.insert(0, self.compile_command)\n return args\n\n\nclass LanguageNotExist(Exception):\n pass\n\n\nlanguage_manager = None\n\n\ndef load_languages():\n global language_manager\n if language_manager is None:\n languages = toml.load('languages.toml')\n language_manager = LanguageCentre(languages)\n\n\ndef get_language(language_id) -> LanguageType:\n load_languages()\n\n return language_manager.get_language(language_id)\n\n\nclass LanguageCentre(object):\n _languages = {}\n\n def __init__(self, cfg):\n 
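# parse the TOML-derived language list once so get_language() is a plain dict lookup\n 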
self.load(cfg)\n\n def load(self, languages):\n for lang in languages['language']:\n language_type = LanguageType()\n language_type.language_id = lang['language_id']\n language_type.source_name = lang['source_name']\n language_type.compile_command = lang['compile_command']\n language_type.execute_name = lang['execute_name']\n language_type.compile_args = lang['compile_args']\n language_type.running_command = lang['running_command']\n language_type.running_args = lang['running_args']\n if 'memory' in lang:\n language_type.memory = lang['memory']\n\n self._languages[lang['language_id']] = language_type\n\n def get_language(self, language_id) -> LanguageType:\n if language_id in self._languages:\n return self._languages[language_id]\n logger().info('Language id not exist: {id}'.format(id=language_id))\n raise LanguageNotExist()\n","sub_path":"judge/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"239516793","text":"# -*- coding: utf-8; -*-\nfrom Tkinter import *\nfrom bs import *\n\nclass Btns(object):\n def __init__(self,size,fr):\n self.btns = []\n for i in xrange(0,size):\n self.btns.append([])\n for j in xrange(0,size):\n btn = Button(fr, width = 4, height= 2, text = ' ')\n btn.grid(row = i, column = j)\n btn.config(state = 'disabled', relief=SUNKEN, borderwidth=1, font = '30')\n self.btns[i].append(btn)\n self.size = size\n\n def set(self, pram, value):\n for i in range(0,len(self.btns)):\n for j in range(0,len(self.btns[i])):\n self.btns[i][j][pram] = value\n\n def bind(self,foo):\n for i in range(0,len(self.btns)):\n for j in range(0,len(self.btns[i])):\n self.btns[i][j].bind(\"\", foo)\n\ndef btns_disable(btns):\n for i in range(0,len(btns)):\n for j in range(0,len(btns[i])):\n btns[i][j].config(state = 'disabled', relief=SUNKEN, borderwidth=1)\n\ndef btns_show(btns,field):\n for i in range(0,len(btns)):\n for j in range(0,len(btns[i])):\n if field.board[i][j] == 1:\n btns[i][j].config(text = 'Q')\n btns[i][j].config(state = 'disabled', relief=SUNKEN, borderwidth=1)\n\n\ndef newGame():\n global fl\n global my_f\n box2.grid_remove()\n lb2.grid_remove()\n my_f.n_ships = 0\n my_f.reset()\n fl.reset()\n my_btns.set('state','active')\n btns_disable(btns1.btns)\n my_btns.set('text',' ')\n my_btns.set('bg',COLOR)\n btns1.set('text','')\n my_btns.set('relief','raised')\n my_btns.bind(add_ship)\n i = num_of_ships\n while i>0:\n coord = get_rand_coord(size)\n x = coord['x']\n y = coord['y']\n if 1 != fl.board[x][y]:\n fl.board[x][y] = 1\n i-=1\n\n\ndef add_ship(event):\n global my_f\n grid_info = event.widget.grid_info()\n x = int(grid_info[\"row\"])\n y = int(grid_info[\"column\"])\n c = {'x':x, 'y':y}\n my_f.modif(c,1)\n my_btns.btns[x][y].config(state = 'disabled', relief=SUNKEN, borderwidth=1, text=u\"\\u2714\")\n if my_f.n_ships==num_of_ships:\n play()\n\n\ndef play():\n global my_btns\n global fl\n global box2\n global lb2\n box2.grid(row = 2, column = 2)\n lb2.grid(row = 1, column = 2)\n btns_disable(my_btns.btns)\n my_btns.bind(nothing)\n btns1.set('state','active')\n btns1.set('relief','raised')\n btns1.bind(fire)\n\ndef nothing(event):\n pass\n\ndef fire(event):\n global fl \n global btns1\n grid_info = event.widget.grid_info()\n x = int(grid_info[\"row\"])\n y = int(grid_info[\"column\"])\n c = {'x':x, 'y':y}\n res = fl.modif(c,2)\n btns1.btns[x][y].config(state = 'disabled', relief=SUNKEN, borderwidth=1)\n btns1.btns[x][y].bind(\"\",nothing)\n if 
res == 1:\n btns1.btns[x][y].config(text=\"X\")\n bot_fire()\n win = fl.is_empty()\n lose = my_f.is_empty()\n if win:\n pass\n if lose:\n pass\n if win or lose:\n btns_show(btns1.btns,fl)\n btns1.bind(nothing)\n\ndef bot_fire():\n c = get_rand_coord(size)\n my_f.modif(c,2)\n x = c['x']\n y = c['y']\n while my_f.board[x][y] == 2:\n c = get_rand_coord(size)\n x = c['x']\n y = c['y']\n my_f.modif(c,2)\n my_btns.btns[x][y]['bg'] = 'red'\n my_btns.btns[x][y]['text'] = u\"\\u2622\"\n\nroot = Tk()\nn=4\nnum_of_ships = 4\nsize = 5\n\nmenubar = Menu(root)\nmenubar.add_command(label=\"New Game!\", command = newGame)\nmenubar.add_command(label=\"Quit!\", command=root.quit)\nCOLOR = root['bg']\nroot['bg'] = \"grey\"\nbox1 = Frame(root,bg=\"grey\")\nbox2 = Frame(root,bg=\"grey\")\nbox1.config(borderwidth=5)\nbox2.config(borderwidth=5)\nlb1 = Label(root,text=\"Your board\",bg=\"grey\")\nlb2 = Label(root,text=\"Opponent's board\",bg=\"grey\")\n\n\nmy_btns = Btns(size,box1) #Add_btns(size,box1)\nbtns1 = Btns(size,box2) #Add_btns(size,box2)\n\n\nbox1.grid(row = 2, column = 1)\nlb1.grid(row = 1, column = 1)\n#box2.grid(row = 2, column = 2)\n#lb2.grid(row = 1, column = 2)\n\nfl = Field(size)\nmy_f = Field(size)\n\n\n# display the menu\nroot.config(menu=menubar)\nroot.mainloop()\n","sub_path":"interface.pyw","file_name":"interface.pyw","file_ext":"pyw","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"91269129","text":"import unittest\nimport mock\n\nimport telemetry_metrics.metrics as met\n\nclass mCmp(object):\n def __init__(self, name, value, type_, **kwargs):\n self.name = name\n self.value = value\n self.type = type_\n self._metric_name_components = kwargs.get('name_components',[])\n\n return\n\n def __eq__(self, other):\n return (self.name == other.name\n and self.value == other.value\n and self.type == other.type\n and self._metric_name_components == other._metric_name_components)\n\n def __repr__(self):\n return 'mCmp(%s, %s, %s, %s)' % (self.name, self.value, self.type, self._metric_name_components)\n\n\nclass testMetric(unittest.TestCase):\n def testMetric1(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n m = met.Metric('asdf', backend=b)\n m.emit()\n b.emit.assert_called_with(mCmp('asdf', 0, None))\n return\n\n def testCounter(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n m = met.Counter('asdf', backend=b)\n m.emit()\n b.emit.assert_called_with(mCmp('asdf', 1, met.COUNTER))\n return\n\n def testGauge(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n m = met.Gauge('asdf', backend=b)\n m.emit()\n b.emit.assert_called_with(mCmp('asdf', 0, met.GAUGE))\n return\n\n def testTimer(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n m = met.Timer('asdf', backend=b)\n m.emit()\n b.emit.assert_called_with(mCmp('asdf', 0, met.TIMER))\n return\n\n def testTimerFunction(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n\n with mock.patch('telemetry_metrics.metrics.Timer.duration', new_callable=mock.PropertyMock) as duration:\n duration.return_value = 5\n m = met.Timer('asdf', backend=b)\n m.finish()\n b.emit.assert_called_with(mCmp('asdf', 5, met.TIMER))\n return\n\n def testTiming(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n m = met.Timer('asdf', backend=b)\n m.finish()\n self.assertTrue(m.duration > 0)\n return\n\n def testRAII(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n\n with mock.patch('telemetry_metrics.metrics.Timer.duration', new_callable=mock.PropertyMock) as duration:\n with met.Timer('asdf', backend=b) as t:\n 
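# step the mocked duration while the context is open; on exit the timer should emit the last value it saw (9, asserted below)\n 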
for n in range(10):\n duration.return_value = n\n\n b.emit.assert_called_with(mCmp('asdf', 9, met.TIMER))\n return\n\nclass TestError(object):\n @classmethod\n def name_components(cls, **kwargs):\n return (kwargs['context'], 'foo','bar','baz', kwargs['name'])\n\n\nclass testFactory(unittest.TestCase):\n def test_raises_decorator(self):\n b = mock.Mock()\n b.emit = mock.Mock()\n\n metrics = met.MetricsFactory(backend=b)\n\n @metrics.raises(TestError, 'Hello world')\n @metrics.raises(TestError, 'Goodbye world')\n def foobar():\n pass\n\n b.emit.assert_has_calls([\n mock.call(mCmp('TestError', 0, met.COUNTER, name_components=('foobar', 'foo', 'bar', 'baz', 'Goodbye world'))),\n mock.call(mCmp('TestError', 0, met.COUNTER, name_components=('foobar', 'foo', 'bar', 'baz', 'Hello world'))),\n ])\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"telemetry_metrics/tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"366401798","text":"# calculates the average grade for each student,\n# and print out the student’s name along with their average grade.\n\ninnie = open(\"C:/Users/Roger/Documents/Roger/Python/studentdata.txt\", 'r')\n\nfor aline in innie:\n splitted = aline.split()\n scores = splitted[1:]\n scores = [int(i) for i in scores]\n print(scores)\n total = 0\n for i in scores:\n total += i\n average = total / (len(scores))\n print(splitted[0], average)\n\ninnie.close()\n","sub_path":"student average.py","file_name":"student average.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"348798467","text":"from flask import Flask, render_template\nfrom alpha_vantage.timeseries import TimeSeries\nfrom alpha_vantage.techindicators import TechIndicators\nfrom alpha_vantage.sectorperformance import SectorPerformances\nfrom alpha_vantage.cryptocurrencies import CryptoCurrencies\nfrom alpha_vantage.foreignexchange import ForeignExchange\nimport matplotlib.pyplot as plt\nfrom pprint import pprint\nimport requests\nimport json\n\n\n\nQUERY_URL = \"https://www.alphavantage.co/query?function={REQUEST_TYPE}&apikey={KEY}&symbol={SYMBOL}\"\nAPI_KEY = \"571I2OGDRCSK2904\"\n\n\n#-----------------------------------------------------\n# Data:\n\n\n# Time series\nts = TimeSeries(key=API_KEY, output_format='JSON')\ndata, meta_data = ts.get_intraday(symbol='MSFT', interval='1min', outputsize='full')\n\n\n# pprint(data)\n# print(data)\n\ni = 0\nj = 0\n\nfor i in data:\n print(i) # datetimes\n for j in data[i]:\n print(j) # headers for open, high, low, close\n print(data[i][j]) # values for open, high, low, close\n\n'''NOTE: unignore the Venv windows files for Git VCS'''\n\n#data ouput\n# pprint(meta_data)\n# pprint(data)\n# print(data['4. close'])\n\n#-----------------------------------------------------\n# Plots:\n\n# Time series\nts = TimeSeries(key=API_KEY, output_format='pandas')\ndata, meta_data = ts.get_intraday(symbol='MSFT', interval='1min', outputsize='full')\ndata['4. 
close'].plot()\nplt.title('Intraday Times Series for the MSFT stock (1 min)')\n# plt.savefig('templates/my_plot.png')\nplt.show()\n\n# Technical Indicators\nti = TechIndicators(key=API_KEY, output_format='pandas')\ndata2, meta_data2 = ti.get_bbands(symbol='MSFT', interval='60min', time_period=60)\ndata2.plot()\nplt.title('BBbands indicator for MSFT stock (60 min)')\nplt.show()\n\n# Sector Performance\nsp = SectorPerformances(key=API_KEY, output_format='pandas')\ndata3, meta_data3 = sp.get_sector()\ndata3['Rank A: Real-Time Performance'].plot(kind='bar')\nplt.title('Real Time Performance (%) per Sector')\nplt.tight_layout()\nplt.grid()\nplt.show()\n\n# # Crypto Currencies\n# cc = CryptoCurrencies(key=API_KEY, output_format='pandas')\n# data4, meta_data4 = cc.get_digital_currency_intraday(symbol='BTC', market='CNY')\n# data4['1b. price (USD)'].plot()\n# plt.tight_layout()\n# plt.title('Intraday value for bitcoin (BTC)')\n# plt.grid()\n# plt.show()\n\n# # Foreign Exchange\n# cc = ForeignExchange(key=API_KEY)\n# # There is no metadata in this call\n# data5, _ = cc.get_currency_exchange_rate(from_currency='BTC',to_currency='USD')\n# pprint(data5)\n\n#-----------------------------------------------------\n# Without API python wrapper:\n\n\n# API_URL = \"https://www.alphavantage.co/query\"\n# symbols = ['QCOM',\"INTC\",\"PDD\"]\n#\n# for symbol in symbols:\n# data = { \"function\": \"TIME_SERIES_INTRADAY\",\n# \"symbol\": symbol,\n# \"interval\" : \"60min\",\n# \"datatype\": \"json\",\n# \"apikey\": API_KEY }\n# response = requests.get(API_URL, data)\n# data = response.json()\n# print(symbol)\n# a = (data['Time Series (60min)'])\n# keys = (a.keys())\n# for key in keys:\n# print(a[key]['2. high'] + \" \" + a[key]['5. volume'])\n\n#-----------------------------------------------------\n# Flask rendering template for HTML\n\n\n# app = Flask(__name__)\n#\n# @app.route('/')\n# def _request():\n# ts = TimeSeries(key=API_KEY, output_format='JSON')\n# data, meta_data = ts.get_intraday(symbol='MSFT', interval='1min', outputsize='full')\n#\n# return render_template('index.html', data=data)\n#\n# if __name__ == '__main__':\n# app.run(debug=True)\n#\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"4205315","text":"import pandas as pd\nimport numpy as np\nimport json\nimport os\n\n\ndef transform_file(file_name=\"eyetribe_output_copy.txt\"):\n file = open(file_name, \"r+\")\n content = \"\"\n for line in file:\n content += line[:-1] + \", \\n\"\n content = content[:-3] + \"\\n]}\"\n file.seek(0,0)\n file.write('{\"all\": [\\n' + content)\n file.close()\n\n# Get data\ndef empty_list(prev_key, keys, final_output, val):\n prev_key_init = prev_key\n for k in keys:\n prev_key = prev_key_init\n vals = val[k]\n if type(vals) == type({}):\n new_keys = vals.keys()\n prev_key += k + '_'\n empty_list(prev_key, new_keys, final_output, vals)\n else: \n final_output[prev_key + k] = list()\n\ndef transform_vals(prev_key, keys, final_output, val):\n prev_key_init = prev_key\n for k in keys:\n prev_key = prev_key_init\n vals = val[k]\n if type(vals) == type({}):\n new_keys = vals.keys()\n prev_key += k + '_'\n transform_vals(prev_key, new_keys, final_output, vals)\n else: \n final_output[prev_key + k].append(vals)\n\n\ndef clean_data(file_name=\"eyetribe_output_test.txt\", raw=False):\n # if raw:\n # print('clean')\n # transform_file(file_name)\n print(\"clean\")\n files = 
open(file_name, \"r\")\n data = files.read()\n try:\n d2 = json.loads(data)\n except:\n print(\"a\")\n files.close()\n transform_file(file_name)\n files = open(file_name, \"r\")\n data = files.read()\n d2 = json.loads(data)\n files.close()\n\n output_list = d2['all']\n final_output = {}\n\n ## Preprocessing Data\n keys = list(output_list[0]['values']['frame'].keys())\n\n empty_list('', keys, final_output, output_list[0]['values']['frame'])\n\n \n for val in output_list:\n if 'values' in val.keys():\n transform_vals('', keys, final_output, val['values']['frame'])\n\n pd_data = pd.DataFrame.from_dict(final_output)\n features = [\"time\", \"avg_x\", \"avg_y\"]\n return pd_data[features]\n\n\n\n\nif __name__ == '__main__':\n # get data\n # data = clean_data(\"eyetribe_output_copy.txt\")\n # data = clean_data(\"test.txt\")\n x=2\n","sub_path":"Front_End/prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"458316843","text":"import asyncio\r\nimport os\r\nimport shutil\r\nimport subprocess\r\nimport sys\r\nimport tempfile\r\nimport unittest\r\n\r\nimport vanir\r\nimport vanir.tests\r\n\r\nVM_PREFIX = \"test-\"\r\n\r\n@unittest.skipUnless(os.path.exists('/usr/bin/rpmsign') and\r\n os.path.exists('/usr/bin/rpmbuild'),\r\n 'rpm-sign and/or rpm-build not installed')\r\nclass TC_00_Dom0UpgradeMixin(object):\r\n \"\"\"\r\n Tests for downloading dom0 updates using VMs based on different templates\r\n \"\"\"\r\n pkg_name = 'vanir-test-pkg'\r\n dom0_update_common_opts = ['--disablerepo=*', '--enablerepo=test']\r\n\r\n @classmethod\r\n def generate_key(cls, keydir):\r\n gpg_opts = ['gpg', '--quiet', '--no-default-keyring',\r\n '--homedir', keydir]\r\n p = subprocess.Popen(gpg_opts + ['--gen-key', '--batch'],\r\n stdin=subprocess.PIPE,\r\n stderr=open(os.devnull, 'w'))\r\n p.stdin.write('''\r\nKey-Type: RSA\r\nKey-Length: 1024\r\nKey-Usage: sign\r\nName-Real: Vanir test\r\nExpire-Date: 0\r\n%commit\r\n '''.format(keydir=keydir).encode())\r\n p.stdin.close()\r\n p.wait()\r\n\r\n subprocess.check_call(gpg_opts + ['-a', '--export',\r\n '--output', os.path.join(keydir, 'pubkey.asc')])\r\n p = subprocess.Popen(gpg_opts + ['--with-colons', '--list-keys'],\r\n stdout=subprocess.PIPE)\r\n for line in p.stdout.readlines():\r\n fields = line.decode().split(':')\r\n if fields[0] == 'pub':\r\n return fields[4][-8:].lower()\r\n raise RuntimeError\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n super(TC_00_Dom0UpgradeMixin, cls).setUpClass()\r\n\r\n cls.tmpdir = tempfile.mkdtemp()\r\n\r\n cls.keyid = cls.generate_key(cls.tmpdir)\r\n\r\n with open('/etc/yum.repos.d/test.repo', 'w') as repo_file:\r\n repo_file.write('''\r\n[test]\r\nname = Test\r\nbaseurl = http://localhost:8080/\r\nenabled = 1\r\n''')\r\n\r\n\r\n @classmethod\r\n def tearDownClass(cls):\r\n os.unlink('/etc/yum.repos.d/test.repo')\r\n\r\n shutil.rmtree(cls.tmpdir)\r\n\r\n def setUp(self):\r\n super(TC_00_Dom0UpgradeMixin, self).setUp()\r\n if self.template.startswith('whonix-'):\r\n # Whonix redirect all the traffic through tor, so repository\r\n # on http://localhost:8080/ is unavailable\r\n self.skipTest(\"Test not supported for this template\")\r\n self.init_default_template(self.template)\r\n self.updatevm = self.app.add_new_vm(\r\n vanir.vm.appvm.AppVM,\r\n name=self.make_vm_name(\"updatevm\"),\r\n label='red'\r\n )\r\n self.loop.run_until_complete(self.updatevm.create_on_disk())\r\n self.app.updatevm = 
self.updatevm\r\n self.app.save()\r\n subprocess.call(['rpm', '-e', self.pkg_name],\r\n stderr=subprocess.DEVNULL)\r\n subprocess.check_call(['rpm', '--import',\r\n os.path.join(self.tmpdir, 'pubkey.asc')])\r\n self.loop.run_until_complete(self.updatevm.start())\r\n self.repo_running = False\r\n self.repo_proc = None\r\n\r\n def tearDown(self):\r\n if self.repo_proc:\r\n self.repo_proc.terminate()\r\n self.loop.run_until_complete(self.repo_proc.wait())\r\n del self.repo_proc\r\n self.app.updatevm = None\r\n super(TC_00_Dom0UpgradeMixin, self).tearDown()\r\n\r\n subprocess.call(['rpm', '-e', self.pkg_name],\r\n stderr=subprocess.DEVNULL)\r\n subprocess.call(['rpm', '-e', 'gpg-pubkey-{}'.format(\r\n self.keyid)], stderr=subprocess.DEVNULL)\r\n\r\n for pkg in os.listdir(self.tmpdir):\r\n if pkg.endswith('.rpm'):\r\n os.unlink(pkg)\r\n\r\n def create_pkg(self, dir, name, version):\r\n spec_path = os.path.join(dir, name+'.spec')\r\n spec = open(spec_path, 'w')\r\n spec.write(\r\n '''\r\nName: {name}\r\nSummary: Test Package\r\nVersion: {version}\r\nRelease: 1\r\nVendor: Cybertrigo\r\nLicense: GPL\r\nGroup: Vanir\r\nURL: http://www.vos.org\r\n\r\n%description\r\nTest package\r\n\r\n%install\r\n\r\n%files\r\n '''.format(name=name, version=version)\r\n )\r\n spec.close()\r\n subprocess.check_call(\r\n ['rpmbuild', '--quiet', '-bb', '--define', '_rpmdir {}'.format(dir),\r\n spec_path])\r\n pkg_path = os.path.join(dir, 'x86_64',\r\n '{}-{}-1.x86_64.rpm'.format(name, version))\r\n subprocess.check_call(['chmod', 'go-rw', '/dev/tty'])\r\n subprocess.check_call(\r\n ['rpm', '--quiet', '--define=_gpg_path {}'.format(dir),\r\n '--define=_gpg_name {}'.format(\"Vanir test\"),\r\n '--addsign', pkg_path],\r\n stdin=subprocess.DEVNULL,\r\n stdout=subprocess.DEVNULL,\r\n stderr=subprocess.STDOUT)\r\n subprocess.check_call(['chmod', 'go+rw', '/dev/tty'])\r\n return pkg_path\r\n\r\n def send_pkg(self, filename):\r\n with open(filename, 'rb') as f_pkg:\r\n self.loop.run_until_complete(self.updatevm.run_for_stdio(\r\n 'mkdir -p /tmp/repo; cat > /tmp/repo/{}'.format(\r\n os.path.basename(filename)),\r\n input=f_pkg.read()))\r\n try:\r\n self.loop.run_until_complete(\r\n self.updatevm.run_for_stdio('cd /tmp/repo; createrepo .'))\r\n except subprocess.CalledProcessError as e:\r\n if e.returncode == 127:\r\n self.skipTest('createrepo not installed in template {}'.format(\r\n self.template))\r\n else:\r\n self.skipTest('createrepo failed with code {}, '\r\n 'cannot perform the test'.format(e.returncode))\r\n self.start_repo()\r\n\r\n def start_repo(self):\r\n if self.repo_running:\r\n return\r\n self.repo_proc = self.loop.run_until_complete(self.updatevm.run(\r\n 'cd /tmp/repo && python -m SimpleHTTPServer 8080',\r\n stdout=subprocess.DEVNULL,\r\n stderr=subprocess.STDOUT))\r\n self.repo_running = True\r\n\r\n def test_000_update(self):\r\n \"\"\"Dom0 update tests\r\n\r\n Check if package update is:\r\n - detected\r\n - installed\r\n - \"updates pending\" flag is cleared\r\n \"\"\"\r\n filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')\r\n subprocess.check_call(['rpm', '-i', filename])\r\n filename = self.create_pkg(self.tmpdir, self.pkg_name, '2.0')\r\n self.send_pkg(filename)\r\n self.app.domains[0].features['updates-available'] = True\r\n\r\n logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')\r\n with open(logpath, 'w') as f_log:\r\n proc = self.loop.run_until_complete(asyncio.create_subprocess_exec(\r\n 'vanir-dom0-update', '-y', *self.dom0_update_common_opts,\r\n stdout=f_log,\r\n 
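# fold stderr into the same log file so a failure can be reported with full output below\r\n 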
stderr=subprocess.STDOUT))\r\n self.loop.run_until_complete(proc.wait())\r\n if proc.returncode:\r\n del proc\r\n with open(logpath) as f_log:\r\n self.fail(\"vanir-dom0-update failed: \" + f_log.read())\r\n del proc\r\n\r\n retcode = subprocess.call(['rpm', '-q', '{}-1.0'.format(\r\n self.pkg_name)], stdout=subprocess.DEVNULL)\r\n self.assertEqual(retcode, 1, 'Package {}-1.0 still installed after '\r\n 'update'.format(self.pkg_name))\r\n retcode = subprocess.call(['rpm', '-q', '{}-2.0'.format(\r\n self.pkg_name)], stdout=subprocess.DEVNULL)\r\n self.assertEqual(retcode, 0, 'Package {}-2.0 not installed after '\r\n 'update'.format(self.pkg_name))\r\n self.assertFalse(\r\n self.app.domains[0].features.get('updates-available', False),\r\n \"'updates pending' flag not cleared\")\r\n\r\n def test_005_update_flag_clear(self):\r\n \"\"\"Check if 'updates pending' flag is cleared\"\"\"\r\n\r\n # create any pkg (but not install it) to initialize repo in the VM\r\n filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')\r\n self.send_pkg(filename)\r\n self.app.domains[0].features['updates-available'] = True\r\n\r\n logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')\r\n with open(logpath, 'w') as f_log:\r\n proc = self.loop.run_until_complete(asyncio.create_subprocess_exec(\r\n 'vanir-dom0-update', '-y', *self.dom0_update_common_opts,\r\n stdout=f_log,\r\n stderr=subprocess.STDOUT))\r\n self.loop.run_until_complete(proc.wait())\r\n if proc.returncode:\r\n del proc\r\n with open(logpath) as f_log:\r\n self.fail(\"vanir-dom0-update failed: \" + f_log.read())\r\n del proc\r\n\r\n with open(logpath) as f:\r\n dom0_update_output = f.read()\r\n self.assertFalse('Errno' in dom0_update_output or\r\n 'Couldn\\'t' in dom0_update_output,\r\n \"vanir-dom0-update reported an error: {}\".\r\n format(dom0_update_output))\r\n\r\n self.assertFalse(\r\n self.app.domains[0].features.get('updates-available', False),\r\n \"'updates pending' flag not cleared\")\r\n\r\n def test_006_update_flag_clear(self):\r\n \"\"\"Check if 'updates pending' flag is cleared, using --clean\"\"\"\r\n\r\n # create any pkg (but not install it) to initialize repo in the VM\r\n filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')\r\n self.send_pkg(filename)\r\n self.app.domains[0].features['updates-available'] = True\r\n\r\n # remove also repodata to test #1685\r\n if os.path.exists('/var/lib/vanir/updates/repodata'):\r\n shutil.rmtree('/var/lib/vanir/updates/repodata')\r\n logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')\r\n with open(logpath, 'w') as f_log:\r\n proc = self.loop.run_until_complete(asyncio.create_subprocess_exec(\r\n 'vanir-dom0-update', '-y', '--clean',\r\n *self.dom0_update_common_opts,\r\n stdout=f_log,\r\n stderr=subprocess.STDOUT))\r\n self.loop.run_until_complete(proc.wait())\r\n if proc.returncode:\r\n del proc\r\n with open(logpath) as f_log:\r\n self.fail(\"vanir-dom0-update failed: \" + f_log.read())\r\n del proc\r\n\r\n with open(logpath) as f:\r\n dom0_update_output = f.read()\r\n self.assertFalse('Errno' in dom0_update_output or\r\n 'Couldn\\'t' in dom0_update_output,\r\n \"vanir-dom0-update reported an error: {}\".\r\n format(dom0_update_output))\r\n\r\n self.assertFalse(\r\n self.app.domains[0].features.get('updates-available', False),\r\n \"'updates pending' flag not cleared\")\r\n\r\n def test_010_instal(self):\r\n filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')\r\n self.send_pkg(filename)\r\n\r\n logpath = os.path.join(self.tmpdir, 
'dom0-update-output.txt')\r\n with open(logpath, 'w') as f_log:\r\n proc = self.loop.run_until_complete(asyncio.create_subprocess_exec(\r\n 'vanir-dom0-update', '-y', *self.dom0_update_common_opts,\r\n self.pkg_name,\r\n stdout=f_log,\r\n stderr=subprocess.STDOUT))\r\n self.loop.run_until_complete(proc.wait())\r\n if proc.returncode:\r\n del proc\r\n with open(logpath) as f_log:\r\n self.fail(\"vanir-dom0-update failed: \" + f_log.read())\r\n del proc\r\n\r\n retcode = subprocess.call(['rpm', '-q', '{}-1.0'.format(\r\n self.pkg_name)], stdout=open('/dev/null', 'w'))\r\n self.assertEqual(retcode, 0, 'Package {}-1.0 not installed'.format(\r\n self.pkg_name))\r\n\r\n def test_020_install_wrong_sign(self):\r\n subprocess.call(['rpm', '-e', 'gpg-pubkey-{}'.format(\r\n self.keyid)])\r\n filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')\r\n self.send_pkg(filename)\r\n\r\n logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')\r\n with open(logpath, 'w') as f_log:\r\n proc = self.loop.run_until_complete(asyncio.create_subprocess_exec(\r\n 'vanir-dom0-update', '-y', *self.dom0_update_common_opts,\r\n self.pkg_name,\r\n stdout=f_log,\r\n stderr=subprocess.STDOUT))\r\n self.loop.run_until_complete(proc.wait())\r\n if not proc.returncode:\r\n del proc\r\n with open(logpath) as f_log:\r\n self.fail(\"vanir-dom0-update unexpectedly succeeded: \" +\r\n f_log.read())\r\n del proc\r\n\r\n retcode = subprocess.call(['rpm', '-q', '{}-1.0'.format(\r\n self.pkg_name)], stdout=subprocess.DEVNULL)\r\n self.assertEqual(retcode, 1,\r\n 'Package {}-1.0 installed although '\r\n 'signature is invalid'.format(self.pkg_name))\r\n\r\n def test_030_install_unsigned(self):\r\n filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')\r\n subprocess.check_call(['rpm', '--delsign', filename],\r\n stdout=subprocess.DEVNULL,\r\n stderr=subprocess.STDOUT)\r\n self.send_pkg(filename)\r\n\r\n logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')\r\n with open(logpath, 'w') as f_log:\r\n proc = self.loop.run_until_complete(asyncio.create_subprocess_exec(\r\n 'vanir-dom0-update', '-y', *self.dom0_update_common_opts,\r\n self.pkg_name,\r\n stdout=f_log,\r\n stderr=subprocess.STDOUT))\r\n self.loop.run_until_complete(proc.wait())\r\n if not proc.returncode:\r\n del proc\r\n with open(logpath) as f_log:\r\n self.fail(\"vanir-dom0-update unexpectedly succeeded: \" +\r\n f_log.read())\r\n del proc\r\n\r\n retcode = subprocess.call(['rpm', '-q', '{}-1.0'.format(\r\n self.pkg_name)], stdout=subprocess.DEVNULL)\r\n self.assertEqual(retcode, 1,\r\n 'UNSIGNED package {}-1.0 installed'.format(self.pkg_name))\r\n\r\n\r\ndef create_testcases_for_templates():\r\n return vanir.tests.create_testcases_for_templates('TC_00_Dom0Upgrade',\r\n TC_00_Dom0UpgradeMixin, vanir.tests.SystemTestCase,\r\n module=sys.modules[__name__])\r\n\r\ndef load_tests(loader, tests, pattern):\r\n tests.addTests(loader.loadTestsFromNames(\r\n create_testcases_for_templates()))\r\n return tests\r\n\r\nvanir.tests.maybe_create_testcases_on_import(create_testcases_for_templates)\r\n","sub_path":"vanir/tests/integ/dom0_update.py","file_name":"dom0_update.py","file_ext":"py","file_size_in_byte":14909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"219517334","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom keras.models import Sequential\r\nfrom keras.models import load_model\r\nfrom keras.layers import Dense, Dropout\r\nfrom 
keras.optimizers import Adam\r\nfrom keras.utils import to_categorical\r\nfrom keras.callbacks import EarlyStopping, CSVLogger\r\n#from keras.wrappers.scikit_learn import KerasClassifier\r\n#from sklearn.model_selection import GridSearchCV\r\nfrom sklearn import metrics\r\nfrom sklearn.utils.multiclass import unique_labels\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom inspect import signature\r\n\r\ninput_node_num = 30\r\nlr = 0.01\r\nbatch_size = 32\r\nepochs = 60\r\nnum_classes = 2\r\nvalidation_split = 0.2\r\n\r\ntraining_filename = 'Data.csv'\r\nhidden_test_set_filename = 'test_no_Class.csv'\r\noutput_filename = 'r07943097_answer.txt'\r\n\r\ndef load_data(file):\r\n df = pd.read_csv(file)\r\n if file == training_filename:\r\n x_train = np.array(df.iloc[:,0:input_node_num].copy())\r\n y_train = np.array(df.iloc[:,input_node_num].copy())\r\n return x_train,y_train\r\n else:\r\n x_hidden = np.array(df.iloc[:,0:input_node_num].copy())\r\n return x_hidden\r\n\r\ndef train_test_split(training_set, validation_split_ratio): \r\n training_set_num = int(np.size(training_set, 0)*(1.0-validation_split_ratio))\r\n training_set_new = training_set[0:training_set_num]\r\n validation_set = training_set[training_set_num:]\r\n return training_set_new, validation_set\r\n\r\ndef get_model():\r\n model = Sequential()\r\n model.add(Dense(512, activation='relu', input_shape=(input_node_num,)))\r\n model.add(Dropout(0.1))\r\n model.add(Dense(1024, activation='relu'))\r\n model.add(Dropout(0.1))\r\n model.add(Dense(num_classes, activation='softmax'))\r\n optimizer = Adam(lr=lr)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=optimizer,\r\n metrics=['accuracy'])\r\n return model\r\n\r\ndef plot_loss_accuracy(file):\r\n df = pd.read_csv(file)\r\n loss = np.array(df.loss)\r\n val_loss = np.array(df.val_loss)\r\n accuracy = np.array(df.accuracy)\r\n val_accuracy = np.array(df.val_accuracy)\r\n \r\n plt.figure()\r\n loss, = plt.plot(loss)\r\n val_loss, = plt.plot(val_loss)\r\n plt.legend([loss, val_loss], ['loss', 'val_loss'], loc='upper right')\r\n \r\n plt.figure()\r\n accuracy, = plt.plot(accuracy)\r\n val_accuracy, = plt.plot(val_accuracy)\r\n plt.legend([accuracy, val_accuracy], ['accuracy', 'val_accuracy'], loc='lower right')\r\n\r\ndef plot_confusion_matrix(y_true, y_pred, classes,\r\n normalize=False,\r\n title=None,\r\n cmap=plt.cm.Blues):\r\n '''\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n '''\r\n if not title:\r\n if normalize:\r\n title = 'Normalized confusion matrix'\r\n else:\r\n title = 'Confusion matrix, without normalization'\r\n\r\n # Compute confusion matrix\r\n cm = metrics.confusion_matrix(y_true, y_pred)\r\n \r\n # Only use the labels that appear in the data\r\n classes = classes[unique_labels(y_true, y_pred)]\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n fig, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... 
and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True',\r\n xlabel='Predict')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=0, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig.tight_layout()\r\n return ax\r\n\r\ndef plot_precision_recall_curve(y_true, y_pred): \r\n average_precision = metrics.average_precision_score(y_true, y_pred)\r\n precision, recall, thresholds = metrics.precision_recall_curve(y_true, y_pred)\r\n print('AUPRC:', metrics.auc(recall, precision)) # auc(x, y) expects the x-axis first; recall is the x-axis of the PR curve\r\n \r\n # In matplotlib < 1.5, plt.fill_between does not have a 'step' argument\r\n step_kwargs = ({'step': 'post'}\r\n if 'step' in signature(plt.fill_between).parameters\r\n else {})\r\n plt.figure()\r\n plt.step(recall, precision, color='b', alpha=0.2,\r\n where='post')\r\n plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)\r\n \r\n plt.xlabel('Recall')\r\n plt.ylabel('Precision')\r\n plt.ylim([0.0, 1.05])\r\n plt.xlim([0.0, 1.0])\r\n plt.title('test Precision-Recall curve: AP={0:0.2f}'.format(\r\n average_precision))\r\n\r\ndef plot_ROC(y_true, y_pred): \r\n fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred)\r\n print('AUROC:', metrics.auc(fpr, tpr))\r\n \r\n plt.figure()\r\n plt.plot(fpr, tpr, color='darkorange', label='ROC curve')\r\n plt.plot([0, 1], [0, 1], color='navy', linestyle='--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('Receiver operating characteristic')\r\n plt.legend(loc=\"lower right\")\r\n\r\nif __name__ == \"__main__\":\r\n ### load training file\r\n x_train, y_train = load_data(training_filename)\r\n y_train = to_categorical(y_train, num_classes)\r\n x_train, x_test = train_test_split(x_train, validation_split)\r\n y_train, y_test = train_test_split(y_train, validation_split)\r\n \r\n ### grid search\r\n '''\r\n model = KerasClassifier(build_fn=get_model, epochs=epochs, batch_size=batch_size, verbose=0)\r\n param_grid = dict()\r\n grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)\r\n grid_result = grid.fit(x_train, y_train)\r\n # summarize results\r\n print(\"Best: %f using %s\" % (grid_result.best_score_, grid_result.best_params_))\r\n means = grid_result.cv_results_['mean_test_score']\r\n stds = grid_result.cv_results_['std_test_score']\r\n params = grid_result.cv_results_['params']\r\n for mean, stdev, param in zip(means, stds, params):\r\n print(\"%f (%f) with: %r\" % (mean, stdev, param))\r\n '''\r\n if not os.path.exists('model_weight.h5'): \r\n ### build model\r\n model = get_model()\r\n earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=2)\r\n csv_logger = CSVLogger('training.log')\r\n record = model.fit(x_train, y_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=1,\r\n callbacks=[earlyStopping, csv_logger], \r\n validation_data=(x_test, y_test))\r\n ### plot loss and accuracy\r\n plt.figure()\r\n plt.plot(record.history['loss'],label='loss')\r\n plt.plot(record.history['val_loss'],label='val_loss')\r\n plt.legend(loc='upper right')\r\n \r\n plt.figure()\r\n 
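# second figure: training vs. validation accuracy per epoch\r\n 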
plt.plot(record.history['accuracy'],label='acc')\r\n plt.plot(record.history['val_accuracy'],label='val_acc')\r\n plt.legend(loc='lower right')\r\n plt.show()\r\n model.save('model_weight.h5')\r\n else:\r\n model = load_model('model_weight.h5')\r\n plot_loss_accuracy('training.log')\r\n \r\n \r\n ### evaluate loss and accuracy\r\n score = model.evaluate(x_train, y_train)\r\n print('Train loss:', score[0])\r\n print('Train accuracy:', score[1])\r\n \r\n score = model.evaluate(x_test, y_test)\r\n print('Test loss:', score[0])\r\n print('Test accuracy:', score[1])\r\n \r\n ### load hidden test set file\r\n x_hidden = load_data(hidden_test_set_filename)\r\n \r\n ### predict hidden test set\r\n y_pred_hidden = np.argmax(model.predict(x_hidden), axis=1)\r\n \r\n ### output my answer\r\n with open(output_filename, 'w') as fo: \r\n for predict in y_pred_hidden:\r\n fo.write(str(predict) + \"\\n\")\r\n \r\n ### plot_confusion_matrix and calculate precision, recall, f1_score of model\r\n #### training set\r\n y_true = np.argmax(y_train, axis=1)\r\n y_pred = np.argmax(model.predict(x_train), axis=1)\r\n \r\n print('Model:')\r\n plot_confusion_matrix(y_true, y_pred, classes=np.arange(num_classes),\r\n title='train_confusion_matrix')\r\n \r\n train_precision = metrics.precision_score(y_true, y_pred)\r\n print('train_precision:', train_precision)\r\n train_recall = metrics.recall_score(y_true, y_pred)\r\n print('train_recall:', train_recall)\r\n train_f1_score = metrics.f1_score(y_true, y_pred)\r\n print('train_f1_score:', train_f1_score)\r\n \r\n #### validation set\r\n y_true = np.argmax(y_test, axis=1)\r\n y_pred = np.argmax(model.predict(x_test), axis=1)\r\n plot_confusion_matrix(y_true, y_pred, classes=np.arange(num_classes),\r\n title='validation_confusion_matrix')\r\n \r\n test_precision = metrics.precision_score(y_true, y_pred)\r\n print('test_precision:', test_precision)\r\n test_recall = metrics.recall_score(y_true, y_pred)\r\n print('test_recall:', test_recall)\r\n test_f1_score = metrics.f1_score(y_true, y_pred)\r\n print('test_f1_score:', test_f1_score)\r\n \r\n plot_ROC(y_true, y_pred)\r\n plot_precision_recall_curve(y_true, y_pred)\r\n \r\n #### average score\r\n print('average_precision:', (train_precision+test_precision)/2)\r\n print('average_recall:', (train_recall+test_recall)/2)\r\n print('average_f1_score:', (train_f1_score+test_f1_score)/2)\r\n plt.show()\r\n \r\n ### decision tree\r\n clf = DecisionTreeClassifier()\r\n #### training set\r\n y_true = np.argmax(y_train, axis=1)\r\n clf = clf.fit(x_train, y_true)\r\n y_pred = clf.predict(x_train)\r\n \r\n print('Decision Tree:')\r\n train_accuracy = metrics.accuracy_score(y_true, y_pred)\r\n print('train_accuracy:', train_accuracy)\r\n train_precision = metrics.precision_score(y_true, y_pred)\r\n print('train_precision:', train_precision)\r\n train_recall = metrics.recall_score(y_true, y_pred)\r\n print('train_recall:', train_recall)\r\n train_f1_score = metrics.f1_score(y_true, y_pred)\r\n print('train_f1_score:', train_f1_score)\r\n \r\n #### validation set\r\n y_true = np.argmax(y_test, axis=1)\r\n y_pred = clf.predict(x_test)\r\n \r\n test_accuracy = metrics.accuracy_score(y_true, y_pred)\r\n print('test_accuracy:', test_accuracy)\r\n test_precision = metrics.precision_score(y_true, y_pred)\r\n print('test_precision:', test_precision)\r\n test_recall = metrics.recall_score(y_true, y_pred)\r\n print('test_recall:', test_recall)\r\n test_f1_score = metrics.f1_score(y_true, y_pred)\r\n print('test_f1_score:', test_f1_score)\r\n \r\n 
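# note: the averages below are a plain mean of the train and validation scores, a rough single-number summary rather than a standard metric\r\n 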
#### average score\r\n print('average_accuracy:', (train_accuracy+test_accuracy)/2)\r\n print('average_precision:', (train_precision+test_precision)/2)\r\n print('average_recall:', (train_recall+test_recall)/2)\r\n print('average_f1_score:', (train_f1_score+test_f1_score)/2)\r\n \r\n ### random forest\r\n clf = RandomForestClassifier(n_estimators=1000,\r\n random_state=0)\r\n #### training set\r\n y_true = np.argmax(y_train, axis=1)\r\n clf = clf.fit(x_train, y_true)\r\n y_pred = clf.predict(x_train)\r\n \r\n print('Random Forest:')\r\n train_accuracy = metrics.accuracy_score(y_true, y_pred)\r\n print('train_accuracy:', train_accuracy)\r\n train_precision = metrics.precision_score(y_true, y_pred)\r\n print('train_precision:', train_precision)\r\n train_recall = metrics.recall_score(y_true, y_pred)\r\n print('train_recall:', train_recall)\r\n train_f1_score = metrics.f1_score(y_true, y_pred)\r\n print('train_f1_score:', train_f1_score)\r\n \r\n #### validation set\r\n y_true = np.argmax(y_test, axis=1)\r\n y_pred = clf.predict(x_test)\r\n \r\n test_accuracy = metrics.accuracy_score(y_true, y_pred)\r\n print('test_accuracy:', test_accuracy)\r\n test_precision = metrics.precision_score(y_true, y_pred)\r\n print('test_precision:', test_precision)\r\n test_recall = metrics.recall_score(y_true, y_pred)\r\n print('test_recall:', test_recall)\r\n test_f1_score = metrics.f1_score(y_true, y_pred)\r\n print('test_f1_score:', test_f1_score)\r\n \r\n #### average score\r\n print('average_accuracy:', (train_accuracy+test_accuracy)/2)\r\n print('average_precision:', (train_precision+test_precision)/2)\r\n print('average_recall:', (train_recall+test_recall)/2)\r\n print('average_f1_score:', (train_f1_score+test_f1_score)/2)\r\n","sub_path":"hw2_r07943097.py","file_name":"hw2_r07943097.py","file_ext":"py","file_size_in_byte":13180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"496691143","text":"import logging as log\nimport sys\nimport os\nfrom typing import Tuple\nfrom networkmanager import NetworkManager\nfrom controllererror import ControllerError\nfrom controllers import MotorController, BatteryController, \\\n ClimateController, SensorController, BackupController\nfrom pathlib import Path\n\n\nclass Systems:\n \"\"\" The Systems class is the main object that encapsulates the program for\n the electric car central systems. It consists of a set of 'controller'\n modules that each contribute separate functions to control and manage\n different sub-systems in the car (battery, motor, climate control,\n etc). 
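A wrinkle worth noting at this point: __init__ ends by calling the private, never-returning __loop(), so constructing Systems() blocks forever and the shutdown() method defined below is unreachable. A minimal sketch of a separated lifecycle, assuming the loop were exposed as a public run() method (hypothetical rename of __loop):

    systems = Systems()        # construction only, returns immediately
    try:
        systems.run()          # blocking request loop (was __loop)
    except KeyboardInterrupt:
        pass
    finally:
        systems.shutdown()     # now reachable on exit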
In addition, this class owns an instance of NetworkManager,\n an object containing instances of all the necessary abstracted networking\n interfaces needed to communicate with the aforementioned sub-systems.\n The NetworkManager is passed to each controller upon initialization.\n \"\"\"\n\n def __init__(self):\n \"\"\" Creates and initializes the network manager and all of the\n controllers.\n \"\"\"\n log.info(\"Initializing systems...\")\n self.__controllers = []\n self.__network_manager = NetworkManager((\"localhost\", 4000), [])\n self.__controllers.append(MotorController(self.__network_manager))\n self.__controllers.append(BatteryController(self.__network_manager))\n self.__controllers.append(ClimateController(self.__network_manager))\n self.__controllers.append(SensorController(self.__network_manager))\n self.__controllers.append(BackupController(self.__network_manager))\n self.__loop() # Note there is currently no end condition.\n\n def __get_data(self, name: str) -> any:\n \"\"\" Finds and returns the value of the specified variable that belongs\n to one\tof the controllers.\n\n name - A string to identify the variable.\n \"\"\"\n for controller in self.__controllers:\n try:\n return controller.get_variable(name, False)\n except ControllerError:\n pass\n raise ControllerError(\"Data variable '{0}' does not \"\n \"exist.\".format(name))\n\n def __set_data(self, name: str, value: any) -> None:\n \"\"\" Finds and sets the value of the specified variable that belongs\n to one of the controllers.\n\n name - A string to identify the variable.\n value - New value for the specified variable.\n \"\"\"\n for controller in self.__controllers:\n try:\n controller.set_variable(name, value)\n break\n except ControllerError:\n pass\n raise ControllerError(\"Data variable '{0}' does not \"\n \"exist.\".format(name))\n\n def __send_action(self, name: str, args: Tuple[any]) -> any:\n \"\"\" Calls the action specified by a controller with the provided\n arguments.\n\n name - A string to identify the action.\n args - Tuple of arguments to pass to the function.\n \"\"\"\n for controller in self.__controllers:\n try:\n return controller.perform_action(name, args)\n except ControllerError:\n pass\n raise ControllerError(\"Action '{0}' does not exist.\".format(name))\n\n def __loop(self):\n \"\"\" The main program loop for the systems computer. Listens for external\n requests and services them accordingly with the actions of controllers.\n The\tformat of requests directed to the actions of controllers is a\n dictionary with the format {\"type\": [\"action\"|\"get\"|\"set\"],\n \"name\": [\"name\"], [\"args\": []], [\"value\": any]}.\n \"\"\"\n log.info(\"Starting main loop...\")\n while True:\n request = self.__network_manager.get_pinet().get_request()\n if request is not None:\n response = None\n if request[\"type\"] == \"action\":\n response = self.__send_action(request[\"name\"],\n tuple(request[\"args\"]))\n elif request[\"type\"] == \"get\":\n response = self.__get_data(request[\"name\"])\n elif request[\"type\"] == \"set\":\n try:\n self.__set_data(request[\"name\"], request[\"value\"])\n response = True\n except ControllerError:\n response = False\n self.__network_manager.get_pinet().send_response(\n request[\"requestKey\"],\n response, request[\"peer\"])\n\n def shutdown(self) -> None:\n \"\"\" Safely shuts the controllers down. 
\"\"\"\n log.info(\"Shutting systems down...\")\n for controller in self.__controllers:\n controller.shutdown()\n\n\n# Main module entry point.\nif __name__ == \"__main__\":\n cwdPath = Path(__file__).parent\n os.chdir(cwdPath)\n logPath = cwdPath / \"logs\"\n if not logPath.is_dir():\n logPath.mkdir()\n logPath = logPath / \"systems.log\"\n if not logPath.is_file():\n open(logPath, \"a+\").close()\n log.basicConfig(level=log.DEBUG,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n filename=logPath,\n filemode=\"w\")\n consoleLog = log.StreamHandler(sys.stdout)\n consoleLog.setLevel(log.INFO)\n consoleFormat = log.Formatter(\"%(levelname)-8s %(message)s\")\n consoleLog.setFormatter(consoleFormat)\n log.getLogger(\"\").addHandler(consoleLog)\n log.basicConfig()\n log.info(\"Current working directory -> \" + os.getcwd())\n systems = Systems()\n","sub_path":"src/systems.py","file_name":"systems.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"296769811","text":"import sys\n\nif(len(sys.argv)) < 2:\n\texit(0)\n\ns = sys.argv[1].replace(' ', '')\nl = len(s) - len(s) % 5\ns = s[:l]\n\ns2 = \"\"\nfor i in range(l):\n\tif s[i].islower():\n\t\ts2 = s2 + \"a\"\n\telse:\n\t\ts2 = s2 + \"b\"\n\nkey = 'aaaaabbbbbabbbaabbababbaaababaab'\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\n\nret = \"\"\nfor i in range(0,l/5):\n\ts3 = s2[(i * 5) : (i * 5) + 5]\n\tj = key.find(s3)\n\tret = ret + alphabet[j]\nprint(ret)","sub_path":"py/4.5.py","file_name":"4.5.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"36583356","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n==============================================================================\n all_svg2pdf\n\n A Python module to convert all .svg files in a root folder (and below) \n to .pdf according to the following inkscape options:\n --without-gui\n --export-area-drawing\n\n For more inkscape options see: https://inkscape.org/sk/doc/inkscape-man.html\n\n Usage: \n $python all_svg2pdf.py\n==============================================================================\n'''\nimport os\n\ndef getPaths():\n \"\"\"Get root directory and define extension\"\"\"\n\n cwd = os.getcwd()\n extension = '.svg'\n\n return cwd, extension\n\ndef getSVGList(root, extension):\n \"\"\"Get all svg files in root directory and below\"\"\"\n\n svg_list = []\n\n for path, subdirs, files in os.walk(root):\n for file in files:\n if file.endswith(extension):\n abspath_file = path + os.sep + file\n svg_list.append(abspath_file)\n print('Were found %i svg files under root path: %s' %(len(svg_list), root))\n return svg_list\n \ndef svg2pdf(list):\n \"\"\"Convert all svg files to pdf according to specified options\"\"\"\n\n options = '-d 300 --without-gui --export-area-page'\n print('\\nConverting, please wait...\\n')\n for file in list:\n print('Converting file: %s' %file)\n pdf_abspath = file.split('.')[0]\n os.system('inkscape %s \"%s\" --export-pdf=\"%s.pdf\"' %(options, file, pdf_abspath)) \n # os.system('inkscape %s \"%s\" --export-png=\"%s.png\"' %(options, file, pdf_abspath)) \n\nif __name__ == \"__main__\":\n print(__doc__)\n cwd, extension = getPaths()\n svg_files = getSVGList(cwd, extension)\n 
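Two fixes for the snippets above. First, in systems.py, __set_data breaks out of its loop on success but then falls through to the unconditional raise, so even a successful set reports failure; returning early, exactly as __send_action already does, is the minimal correction:

    def __set_data(self, name: str, value: any) -> None:
        for controller in self.__controllers:
            try:
                controller.set_variable(name, value)
                return                      # success: stop here, do not raise
            except ControllerError:
                pass
        raise ControllerError("Data variable '{0}' does not "
                              "exist.".format(name))

Second, 4.5.py (the Bacon-style decoder) only runs under Python 2: range(0, l/5) receives a float on Python 3 and raises TypeError. Floor division keeps the group count integral:

    for i in range(l // 5):                 # was range(0, l / 5)
        s3 = s2[i * 5 : i * 5 + 5]
        j = key.find(s3)
        ret = ret + alphabet[j]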
svg2pdf(svg_files)","sub_path":"Manuscript/all_svg2pdf.py","file_name":"all_svg2pdf.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"276572625","text":"from __future__ import print_function\nimport sys\nimport argparse\nimport ThirdParty.hersheydata as hd\nimport numpy as np\n\n\nparser=argparse.ArgumentParser()\nparser.add_argument(\"--xkern\",type=float, default=0.,help=\"added to xspacing, tweaks letter spacing\")\nparser.add_argument(\"--justification\",type=str, default=\"left\", help=\"left, right or centre\")\nparser.add_argument(\"--line_spacing\",type=float, default=1., help=\"Spacing between lines\")\nparser.add_argument(\"--pen\",type=int, default=0, help=\"Pen number to use\")\nparser.add_argument(\"font\",type=str,help=\"Font to use\")\nparser.add_argument(\"input\",type=str, help=\"Text to render\")\nparser.add_argument(\"outputfile\",type=str, help=\"Output\")\nargs=parser.parse_args()\n\ndef parse(pathString):\n lines=[]\n z=pathString.split()\n midpoint=float(z[0])\n offset=float(z[1])\n if len(z)<3:\n return lines,midpoint,offset\n assert(z[2]=='M')\n z=z[2:]\n assert(len(z)%3==0)\n idx=0\n line=[]\n while idx0:\n lines.append(line)\n line=[]\n line.append((x,y))\n else:\n assert (key=='L')\n line.append((x,y))\n if len(line)>0:\n lines.append(line)\n return lines,midpoint,offset\ntry:\n fs=eval('hd.'+args.font)\nexcept:\n print(\"Unknown font\",args.font)\n print(\"Valid fonts are:\")\n for f in dir(hd):\n if f[0]!='_':\n print(f)\n parser.print_help()\n exit()\n \n\nyspacing=40.*args.line_spacing\nwith open(args.input) as fd:\n paragraph=fd.read()\n\nparalines=paragraph.split(\"\\n\")\nypos=0\nplotlines=[]\nplotxpos=[]\nfor line in paralines:\n plotrow=[]\n xpos=0\n for character in line:\n fontlines,minx,maxx=parse(fs[ord(character)-32])\n charlines=[]\n for fontline in fontlines:\n xs,ys=zip(*fontline)\n plotrow.append((np.array(xs)-minx+xpos,-1.*np.array(ys)+ypos))\n \n \n xpos+=maxx-minx+args.xkern\n plotlines.append(plotrow)\n plotxpos.append(xpos-args.xkern)\n ypos-=yspacing\nif args.justification==\"left\":\n pass\nelif args.justification==\"center\" or args.justification == \"centre\":\n for i,xpos in enumerate(plotxpos):\n for xs,ys in plotlines[i]:\n xs-=xpos/2.\nelif args.justification==\"right\":\n for i,xpos in enumerate(plotxpos):\n for xs,ys in plotlines[i]:\n xs-=xpos\nelse:\n raise Exception(\"Unknown justification %s\"%justification)\n \nwith open(args.outputfile,'w') as fd:\n fd.write(\"PEN %d\\n\"%args.pen)\n for row in plotlines:\n for line in row:\n for xy in zip(*line):\n fd.write(\"%f %f \"%xy)\n fd.write(\"\\n\")\n\n","sub_path":"PlotterTests/text2lines.py","file_name":"text2lines.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"452127226","text":"import os\nimport sys\nimport json\nimport time\n\nif len(sys.argv) != 3:\n print(\"Wrong number arguments\")\n exit()\n\ntry:\n slots = json.loads(sys.argv[1]) #Set as python dictionary\nexcept:\n print(\"TOKEN EXPIRED REGENEATE NEW TOKEN\")\n print('\\a')\n exit()\n\n#Test for api call error\nif slots == {}:\n print(\"Wrong project notation\")\n exit()\n\n#Recursively call slot_find until slot is found\nif slots == []:\n time.sleep(60);#One minute wait\n os.system(\"./slot_find \" + sys.argv[2])\n\nprint('\\a') #Bell for found slot\n\nprint(\"DAY\" + \" \" + \"TIME\")\nday = \"-1\"\nfor slot in 
slots:\n if day != slot['begin_at'][8:-14]:\n print()\n day = slot['begin_at'][8:-14]\n hour = int(slot['begin_at'][11:-11])\n hour = 0 if hour == 23 else hour + 1; #Set to Brussels time zone -> Brussels = Z + 1\n min = slot['begin_at'][14:-8]\n print(day + \" \" + str(hour) + \":\" + min)\nprint()\n\nos.system('zenity --warning --text=\"SLOT FOUND\" --no-wrap') #Notify\n#Alarm when slots found\n# while 1:\n# print('\\a')\n\n\n\nclass slot_: #Not used slot class, could be used to check when new slots are found by api and thus potentially available\n def __init__(slot):\n self.id = slot['id']\n self.day = int(slot['begin_at'][8:-14])\n self.hour = int(slot['begin_at'][11:-11])\n self.min = int(slot['begin_at'][14:-8])\n\n def __eq__(slot):\n if self.id == slot.id:\n return True\n return False\n\n def show(first=False):\n if first == True:\n print(\"DAY\" + \" \" + \"TIME\")\n print(str(self.day) + \" \" + str(self.hour) + \":\" + str(self.min))\n","sub_path":"find_slot/src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"108656540","text":"from opengever.core.upgrade import SchemaMigration\n\n\nclass DropPreProtocol(SchemaMigration):\n\n profileid = 'opengever.meeting'\n upgradeid = 4600\n\n def migrate(self):\n self.drop_pre_protocol_column_from_meeting()\n\n def drop_pre_protocol_column_from_meeting(self):\n fk_name = self.get_foreign_key_name(\n 'meetings', 'pre_protocol_document_id')\n self.op.drop_constraint(fk_name, 'meetings', type_='foreignkey')\n self.op.drop_column('meetings', 'pre_protocol_document_id')\n","sub_path":"opengever/meeting/upgrades/to4600.py","file_name":"to4600.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"514980841","text":"\nclass Graph:\n \"\"\"My class for Lab04\"\"\"\n def __init__(self, gal_file):\n\n # get file name\n self.gal_file = gal_file\n\n\n # open and read gal file\n fp = open(gal_file)\n lines = fp.readlines()\n fp.close()\n self.lines = lines\n\n # build the dictionary\n line0 = lines[0]\n line0 = line0.strip().split()\n self.line0 = line0\n self.n_polygons = int(line0[1])\n neighbors = {}\n lines = lines[1:]\n processing = True \n i = 0\n while i < self.n_polygons * 2:\n header = lines[i].strip().split()\n i_neighbors = lines[i+1].strip().split()\n node_i = header[0]\n neighbors[node_i] = i_neighbors\n i += 2\n self.neighbors = neighbors\n\n\n # answer questions under 2\n\n def summary(self):\n n_neighbors = 0\n mc = 0\n for node in self.neighbors:\n n_neighbors += len(self.neighbors[node])\n n_node = len(self.neighbors[node])\n if n_node > mc:\n mc = n_node\n\n\n average = n_neighbors * 1. 
/ self.n_polygons\n results = {}\n results['average'] = average\n results['max card'] = mc\n\n return results\n\n\nif __name__ == '__main__':\n\n my_gal = Graph(\"Lab04-1.gal\")\n","sub_path":"Lab04/Lab04-2.py","file_name":"Lab04-2.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"411420068","text":"import os\n\nSplitChannels = True\n\n##### Constants #####\nDataAugDir = \"/data/data_aug_final_8\"\nImageChannels = int(4)\n\nif SplitChannels:\n ImageChannels = int(1)\n\nImageSide = int(64)\nMaximumPixelIntensity = 256.0 # Converted to log scale was 65553\n\nDataCache = \"/data/data_caches\" # RAM Memory HA :)\n\nAllColumns = 64\nColumnsResult = [15,31,47,63]\nImportantColumns = [9, 10, 11, 12, 13, 14, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16,\\\n 9+32, 10+32, 11+32, 12+32, 13+32, 14+32,\\\n 9+48, 10+48, 11+48, 12+48, 13+48, 14+48\\\n ]\nImportColumnCount = 24\nColumnsResultCoiunt = 4\n\nExtraColumns = ImportColumnCount\n\n#### Simple calculations\nrawdataset_files = [os.path.join(\"data\", f) for f in next(os.walk(\"data\"))[2] if f.endswith(\".raw\")]\nrawdataset_size = len(rawdataset_files)\n\nif SplitChannels:\n ImageChannels = int(1)\n ImportantColumns = [9,10,11,12,13,14]\n ImportColumnCount = 6\n ColumnsResultCoiunt = 1\n ColumnsResult = [15] \n ExtraColumns=ImportColumnCount\n\n\nos.system(\"mkdir \"+DataAugDir)\n\n","sub_path":"trainer/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"484222398","text":"\"\"\"\n Дан список плиток домино в определенном порядке и положении (переворачивать нельзя)\n Необходимо найти длину самой длинной правильной цепочки\n Цепочка считается правильной, если правое число левой плитки равно левому числу правой\n Например: [(1,2),(2,3),(3,4)] - это правильная цепочка (ответ 3)\n [(2,1),(2,3),(3,4)] - это неправильная цепочка, но внутри есть правильная (ответ 2)\n [(1,1),(2,3),(3,5),(5,6),(5,5),(7,3),(3,4),(4,5),(5,6),(1,1)] - правильный ответ 4, хотя есть и цепочка длины 3\n\"\"\"\nimport unittest\n\n\ndef longest_chain(chain):\n k = 1\n for i in range(len(chain)-1):\n if chain[i][1] == chain[i+1][0]:\n k += 1\n return k\n\n\nclass TestDominoMethods(unittest.TestCase):\n def test_domino(self):\n self.assertEqual(longest_chain([(1,2),(2,3),(3,4)]),3)\n self.assertEqual(longest_chain([(2,1),(2,3),(3,4)]),2)\n self.assertEqual(longest_chain([(1,1),(2,3),(3,5),(5,6),(5,5),(7,3),(3,4),(4,5),(5,6),(1,1)]), 4)\n self.assertEqual(longest_chain([(1,1),(2,3),(3,5),(5,6),(5,5),(7,3),(3,4),(4,5),(5,6),(1,1),(1,2)]), 4)\n self.assertEqual(longest_chain([(1,1),(2,3),(3,5),(5,6),(5,5),(8,8),(7,3),(3,4),(4,5),(5,6),(1,1)]), 4)\n self.assertEqual(longest_chain([(1,1),(2,3),(3,5),(5,6),(5,5),(7,3),(13,4),(4,5),(5,6),(1,1)]), 3)\n self.assertEqual(longest_chain([]),0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"domino.py","file_name":"domino.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"611005518","text":"import torch\nfrom torch.optim import Optimizer\nfrom torch.nn import Module\nfrom torch.utils.data import DataLoader\n\nfrom typing import Callable, List, Union\nimport os\nimport os.path as osp\nfrom tqdm import tqdm\nfrom collections import OrderedDict\n\nfrom models.callbacks import (\n ProgressBarLogger,\n 
CallbackList,\n Callback,\n EvaluateFewShot,\n CSVLogger\n)\n\nfrom models.few_shot.protonet import fit_handle as ProtoNet_fit_handle\nfrom models.few_shot.matchingnet import fit_handle as MatchingNet_fit_handle\nfrom models.few_shot.relationnet import fit_handle as RelationNet_fit_handle\nfrom models.few_shot.metaoptnet import fit_handle as MetaOptNet_fit_handle\nfrom models.few_shot.maml import fit_handle as Maml_fit_handle\nLinear_fit_handle = ProtoNet_fit_handle\nFEAT_fit_handle = ProtoNet_fit_handle\nRouge_fit_handle = ProtoNet_fit_handle\nfrom models.few_shot.protonet import laplacian_protonet_fit_handle as LaplacianProtoNet_fit_handle\n\nfrom models.few_shot.protonet_mm import fit_handle as MultiModalProtoNet_fit_handle\nfrom models.few_shot.tima import fit_handle as MultiModalTIMA_fit_handle\nfrom models.few_shot.tima_plus import fit_handle as MultiModalTIMAPlus_fit_handle\n\nfrom models.few_shot.protonet import protonet_pretrain_fit_handle as ProtoNetPretrainClassifier_fit_handle\nfrom models.few_shot.protonet import crg_pretrain_fit_handle as CRGPretrainClassifier_fit_handle\nRTPretrainClassifier_fit_handle = CRGPretrainClassifier_fit_handle\n\nfrom models.classical.linearclassifier import fit_handle as LinearClassifier_fit_handle\n\nfrom models.few_shot.helper import PrepareFunc\n\nfrom models.utils import get_lr, set_logger, pretrain_prepare_batch, multimodal_pretrain_prepare_batch, set_gpu, set_seeds, load_pickle, save_pickle\nfrom models.metrics import metrics_handle, ROOT_PATH\n\n# from models.ycy import RouGee\n\nclass Trainer(object):\n def __init__(self, args):\n self.gpu = args.gpu\n set_gpu(self.gpu)\n set_seeds(torch_seed=args.torch_seed, cuda_seed=args.cuda_seed, np_seed=args.np_seed, random_seed=args.random_seed)\n\n torch.autograd.set_detect_anomaly(True)\n\n self.logger_filename = ROOT_PATH + f'{args.logger_filename}/process/{args.params_str}.log'\n self.result_filename = ROOT_PATH + f'{args.logger_filename}/result/{args.params_str}.csv'\n\n self.logger = set_logger(self.logger_filename, 'train_logger')\n for k in sorted(vars(args).keys()):\n self.logger.info(k + ': %s' % str(vars(args)[k]))\n\n prepare_handle = PrepareFunc(args)\n # 前向传播所需:\n # ( model 有且仅有这一个, callbacks 基类派生类都是共享这一个model. 下面其他东西也是这样存储 )\n \"\"\"\n 准备 Dataloader\n \"\"\"\n (self.train_loader, train_num_classes), (self.val_loader, _), (self.test_loader, _) = \\\n prepare_handle.prepare_dataloader(option=args.paradigm)\n\n \"\"\"\n 准备 Model, Optimizer, loss_fn, callbacks\n \"\"\"\n self.model = prepare_handle.prepare_model(train_num_classes)\n\n self.optimizer, self.lr_scheduler, self.scaler = prepare_handle.prepare_optimizer(self.model)\n self.loss_fn = prepare_handle.prepare_loss_fn()\n self.fit_handle = eval(args.model_class + '_fit_handle')(\n model=self.model,\n optimizer=self.optimizer,\n scaler=self.scaler,\n loss_fn=self.loss_fn\n ) \n\n # 接下来要准备fit函数之前的所有东西, 包括callbacks.\n # 记录数据所需:\n # 这里的params统一传到基类成员, 所有派生类共享. 
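On the fit_handle dispatch above: eval(args.model_class + '_fit_handle') will evaluate whatever string arrives on the command line. A sketch of an explicit registry, assuming the handles imported at the top of this file are the full set; it fails loudly on unknown names instead of executing them:

    FIT_HANDLES = {
        'ProtoNet': ProtoNet_fit_handle,
        'MatchingNet': MatchingNet_fit_handle,
        'RelationNet': RelationNet_fit_handle,
        'MetaOptNet': MetaOptNet_fit_handle,
        'Maml': Maml_fit_handle,
        'Linear': Linear_fit_handle,
    }
    fit_handle_cls = FIT_HANDLES[args.model_class]   # KeyError beats eval()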
注意这里一定要精简.\n\n # # 目前pretrain的prepare_batch_func不支持多模态.\n # self.train_prepare_batch, val_prepare_batch, test_prepare_batch = prepare_handle.prepare_prepare_batch_func(\n # model_prepare_batch=self.model.multimodal_prepare_kshot_task if ',' in args.multimodal_option else self.model.prepare_kshot_task,\n # option=0b111 if not args.pretrain_mode else 0b011\n # )\n self.train_prepare_batch, val_prepare_batch, test_prepare_batch = prepare_handle.prepare_prepare_batch_func(\n model_prepare_batch=self.model.prepare_kshot_task,\n option=args.paradigm,\n query_is_label=True if args.model_class != 'Linear' else False\n )\n\n DEBUG_CLASSICAL_ML = False\n if DEBUG_CLASSICAL_ML:\n (self.train_loader, train_num_classes), (self.val_loader, _), (self.test_loader, _) = \\\n prepare_handle.prepare_dataloader(option=0b000)\n self.fit_handle = ProtoNet_fit_handle(\n model=self.model,\n optimizer=self.optimizer,\n scaler=self.scaler,\n loss_fn=self.loss_fn\n )\n test_prepare_batch = pretrain_prepare_batch\n\n self.verbose, self.epoch_verbose = args.verbose, args.epoch_verbose\n # self.params = {\n # 'max_epoch': args.max_epoch,\n # 'verbose': args.verbose,\n # 'metrics': (self.metrics or []),\n # 'prepare_batch': prepare_kshot_task(args.test_way, args.test_query),\n # 'loss_fn': self.loss_fn,\n # 'optimizer': self.optimizer,\n # 'lr_scheduler': self.lr_scheduler\n # }\n\n # args 是一个参数集合, 期望在这里分模块, 对每个类 对应特定的功能, 类的参数也要**具体化**, 这样才可以一层层分解, 较好.\n\n self.model_filepath = args.model_filepath\n self.train_monitor = args.train_monitor\n self.batch_metrics = metrics_handle(self.model, self.train_monitor)\n\n self.do_train = args.do_train\n self.do_test = args.do_test\n\n self.max_epoch = args.max_epoch\n\n self.evaluate_handle = EvaluateFewShot(\n val_loader=self.val_loader,\n test_loader=self.test_loader,\n val_prepare_batch=val_prepare_batch,\n test_prepare_batch=test_prepare_batch,\n batch_metrics=self.batch_metrics,\n eval_fn=self.fit_handle,\n test_interval=args.test_interval,\n max_epoch=self.max_epoch,\n model_filepath=args.model_filepath,\n model_filepath_test_best=args.model_filepath_test_best,\n monitor=self.train_monitor,\n save_best_only=True,\n mode='max',\n simulation_test=False,\n verbose=args.verbose,\n epoch_verbose=args.epoch_verbose\n )\n\n callbacks = [\n self.evaluate_handle,\n CSVLogger(\n self.result_filename,\n separator=',',\n append=False\n )\n ]\n\n if self.do_train:\n callbacks.append(\n ProgressBarLogger(\n length=len(self.train_loader),\n epoch_verbose=self.epoch_verbose\n )\n )\n\n # LearningRateScheduler 最好直接在fit函数里面传一个lr_scheduler, 直接step吧. 
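A related utility worth pulling out: the test() method further down un-wraps nn.DataParallel checkpoints by substring replacement, k.replace('.module', ''). A hedged generic helper for the common case of a single top-level wrapper, stripping only a true 'module.' prefix so parameter names that merely contain the substring are untouched:

    from collections import OrderedDict

    def strip_data_parallel(state_dict):
        # nn.DataParallel prefixes every key with 'module.'
        out = OrderedDict()
        for k, v in state_dict.items():
            out[k[len('module.'):] if k.startswith('module.') else k] = v
        return out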
看FEAT.\n self.callbacks = CallbackList((callbacks or []))\n self.callbacks.set_model_and_logger(self.model, self.logger)\n\n \"\"\"\n meta\n \"\"\"\n self.meta = args.meta\n\n # if args.init_weights is not None:\n # self.evaluate_handle.predict_log(1, 'test_')\n\n self.mm_train_list = args.mm_train_list\n PREPROCESS_FEATURE_MEAN_STD = False\n if PREPROCESS_FEATURE_MEAN_STD:\n (feat_train_loader, feat_num_classes), (_, _), (_, _) = prepare_handle.prepare_dataloader(option=0b100, is_only_one=True)\n from models.dataloader.lrw import LRW_VIDEO_DATA_PATH_NAME, LRW_AUDIO_DATA_PATH_NAME\n FEATURE_MEAN_CACHE_FILE_NAME_SUFFIX = '_base_features_mean.pkl'\n FEATURE_STD_CACHE_FILE_NAME_SUFFIX = '_base_features_std.pkl'\n\n data_root_path_video = osp.join(args.data_path, LRW_VIDEO_DATA_PATH_NAME)\n data_root_path_audio = osp.join(args.data_path, LRW_AUDIO_DATA_PATH_NAME)\n\n cache_filepath = {\n 'mean': {\n 'video': osp.join(data_root_path_video, '_'.join(args.backbone_class) + FEATURE_MEAN_CACHE_FILE_NAME_SUFFIX),\n 'audio': osp.join(data_root_path_audio, '_'.join(args.backbone_class) + FEATURE_MEAN_CACHE_FILE_NAME_SUFFIX)\n },\n 'std': {\n 'video': osp.join(data_root_path_video, '_'.join(args.backbone_class) + FEATURE_STD_CACHE_FILE_NAME_SUFFIX),\n 'audio': osp.join(data_root_path_audio, '_'.join(args.backbone_class) + FEATURE_STD_CACHE_FILE_NAME_SUFFIX)\n }\n }\n\n self.model.eval()\n with torch.no_grad():\n def compute(cur_modal, cache_mean_filepath, cache_std_filepath):\n if self.epoch_verbose:\n cur_iter = enumerate(tqdm(feat_train_loader))\n else:\n cur_iter = enumerate(feat_train_loader)\n\n data = {}\n for batch_idx, batch in cur_iter:\n x, y = multimodal_pretrain_prepare_batch(batch)\n x = self.model.forward_get_feature(x, modal=cur_modal)\n\n # 构造 类别 -> embedding(特征) 的字典, 为后续计算类中心打下基础.\n tmp_dict = {} # \n for i, v in enumerate(y):\n v = v.item()\n if v in tmp_dict.keys():\n tmp_dict[v].append(x[cur_modal][i])\n else:\n tmp_dict[v] = [x[cur_modal][i]]\n for i, v in tmp_dict.items():\n tmp_dict[i] = torch.stack(v, dim=0)\n\n # 该batch数据 加入data:\n for i, v, in tmp_dict.items():\n if i in data.keys():\n data[i] = torch.cat([data[i], v.cpu().detach()], dim=0)\n else:\n data[i] = v.cpu().detach()\n\n feat_train_feature = torch.cat([i for i in data.values()], dim=0)\n\n save_pickle(cache_mean_filepath, torch.mean(feat_train_feature, dim=0, keepdim=True))\n save_pickle(cache_std_filepath, torch.std(feat_train_feature, dim=0, unbiased=False, keepdim=True))\n\n compute('video', cache_filepath['mean']['video'], cache_filepath['std']['video'])\n compute('audio', cache_filepath['mean']['audio'], cache_filepath['std']['audio'])\n\n assert 0, 'PREPROCESS_FEATURE_MEAN_STD OK!'\n\n self.test_load_from_file = True\n self.init_weights = args.init_weights\n self.unimodal = args.unimodal\n\n def delete_logs(self):\n os.remove(self.logger_filename)\n os.remove(self.result_filename)\n\n def lr_handle(self, epoch, epoch_logs):\n if self.unimodal:\n self.logger.info(f'lr: {get_lr(self.optimizer)}.')\n self.lr_scheduler.step()\n else:\n for mdl in self.mm_train_list:\n self.logger.info(f'lr: {get_lr(self.optimizer[mdl])} ({mdl}).')\n self.lr_scheduler[mdl].step()\n\n def test(self):\n # check_model_changed_handle.check_model(self.model)\n if self.do_test:\n self.logger.info(f'Testing model: {self.model_filepath}')\n if self.verbose:\n print(f'Testing model: {self.model_filepath}')\n\n if self.test_load_from_file:\n state_dict = torch.load(self.model_filepath)\n\n if ',' not in self.gpu and 'module' in 
list(state_dict.keys())[0]:\n # for models using nn.DataParallel:\n print(\"WARNING: Loading test model in train.py test().\")\n\n tmp_state_dict = OrderedDict()\n for k, v in state_dict.items():\n tmp_state_dict[k.replace('.module', '')] = v # remove `.module`\n state_dict = tmp_state_dict\n elif ',' in self.gpu and 'module' not in list(state_dict.keys())[0]:\n raise Exception('Loading test model in train.py test(). The model may be training on a single GPU, but testing on multiple GPUs.')\n\n model_dict = self.model.state_dict()\n if len(state_dict.keys()) != len(model_dict.keys()):\n self.logger.info(f'Oops! Error loading model, not fully loaded: {[i for i in model_dict.keys() if i not in state_dict.keys()]}.')\n raise Exception(f'Oops! Error loading model, not fully loaded: {[i for i in model_dict.keys() if i not in state_dict.keys()]}.')\n else:\n self.model.load_state_dict(state_dict)\n\n # check_model_changed_handle.check_model(self.model)\n return self.evaluate_handle.predict_log(self.max_epoch, 'test_')\n return None\n\n def fit(self):\n print(f'Please check [do_train: {self.do_train}, do_test: {self.do_test}] again.')\n if not self.do_train:\n if not osp.isfile(self.init_weights):\n torch.save(self.model.state_dict(), self.model_filepath)\n else:\n self.test_load_from_file = False\n return\n\n self.logger.info('Begin training...')\n if self.verbose:\n print('Begin training...')\n\n self.callbacks.on_train_begin()\n\n # from torch.utils.tensorboard import SummaryWriter\n # writer = SummaryWriter('runs/model')\n\n for epoch in range(1, self.max_epoch + 1):\n self.callbacks.on_epoch_begin(epoch)\n epoch_logs, batch_logs = {}, {}\n for batch_index, batch in enumerate(self.train_loader):\n self.callbacks.on_batch_begin(batch_index, batch_logs)\n\n x, y = self.train_prepare_batch(batch)\n logits, reg_logits, loss = self.fit_handle(x=x, y=y, prefix='train_')\n\n batch_logs['loss'] = loss.item()\n\n batch_logs[self.train_monitor] = self.batch_metrics(logits, y)\n\n self.callbacks.on_batch_end(batch_index, batch_logs)\n\n # Run on epoch end\n # 注意这个 epoch_logs 是共享变量, 在callbacks里面的类传递的!\n self.lr_handle(epoch, epoch_logs)\n self.callbacks.on_epoch_end(epoch, epoch_logs)\n\n self.callbacks.on_train_end()\n\n # Run on train end\n self.logger.info('Finished')\n if self.verbose:\n print('Finished.')","sub_path":"models/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"245253504","text":"import pygame\n\npygame.init()\n\n# 创建游戏窗口\nresolution = (480,700) # 宽高\nscreen = pygame.display.set_mode(resolution)\n\n# 绘制背景图像\n# 1.加载图像数据\nbg = pygame.image.load('./images/background.png');\n\n# 2.绘制图像数据\nscreen.blit(bg,(0,0))\n\n# 3.update更新屏幕显示\n#pygame.display.update()\n\n#绘制英雄的飞机\nhero = pygame.image.load('./images/me1.png')\nscreen.blit(hero,(200,500))\npygame.display.update()\n\n# 创建时钟对象\nclock = pygame.time.Clock()\n\n#指定飞机的初始位置\nhero_rect = pygame.Rect(150,300,102,126)\n\nwhile True:\n\t# 可以指定循环体代码执行频率\n\tclock.tick(60)\n\n\t# 捕获事件\n\tevent_list = pygame.event.get()\n\tif len(event_list) > 0 :\n\t\tprint(event_list)\n\n\t# 修改飞机位置\n\thero_rect.y -= 1\n\t# 判断飞机位置\n\tif hero_rect.y <= 0:\n\t\thero_rect.y = 700\n\n\t# 
调用blit方法绘制图像\n\tscreen.blit(bg,(0,0))\n\tscreen.blit(hero,hero_rect)\n\n\tpygame.display.update()\n\n\npygame.quit()","sub_path":"hm/pygame-demo/事件监听.py","file_name":"事件监听.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"28667370","text":"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=not-callable\n# pylint: disable=redefined-builtin\n\"\"\"Layers that can merge several inputs into one.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.keras import backend as K\n\n\nclass _Merge(tf.keras.layers.Layer):\n \"\"\"Generic merge layer for elementwise merge functions.\n\n Used to implement `Sum`, `Average`, etc.\n\n Arguments:\n **kwargs: standard layer keyword arguments.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(_Merge, self).__init__(**kwargs)\n self.supports_masking = True\n self._supports_ragged_inputs = True\n\n def _merge_function(self, inputs):\n raise NotImplementedError\n\n def _compute_elemwise_op_output_shape(self, shape1, shape2):\n \"\"\"Computes the shape of the resultant of an elementwise operation.\n\n Arguments:\n shape1: tuple or None. Shape of the first tensor\n shape2: tuple or None. Shape of the second tensor\n\n Returns:\n expected output shape when an element-wise operation is\n carried out on 2 tensors with shapes shape1 and shape2.\n tuple or None.\n\n Raises:\n ValueError: if shape1 and shape2 are not compatible for\n element-wise operations.\n \"\"\"\n if None in [shape1, shape2]:\n return None\n elif len(shape1) < len(shape2):\n return self._compute_elemwise_op_output_shape(shape2, shape1)\n elif not shape2:\n return shape1\n output_shape = list(shape1[:-len(shape2)])\n for i, j in zip(shape1[-len(shape2):], shape2):\n if i is None or j is None:\n output_shape.append(None)\n elif i == 1:\n output_shape.append(j)\n elif j == 1:\n output_shape.append(i)\n else:\n if i != j:\n raise ValueError(\n 'Operands could not be broadcast '\n 'together with shapes ' + str(shape1) + ' ' + str(shape2))\n output_shape.append(i)\n return tuple(output_shape)\n\n def build(self, input_shape):\n # Used purely for shape validation.\n if not isinstance(input_shape, list):\n raise ValueError('A merge layer should be called on a list of inputs.')\n if len(input_shape) < 2:\n raise ValueError('A merge layer should be called '\n 'on a list of at least 2 inputs. '\n 'Got ' + str(len(input_shape)) + ' inputs.')\n batch_sizes = {s[0] for s in input_shape if s is not None} - {None}\n if len(batch_sizes) > 1:\n raise ValueError(\n 'Can not merge tensors with different '\n 'batch sizes. 
Got tensors with shapes : ' + str(input_shape))\n if input_shape[0] is None:\n output_shape = None\n else:\n output_shape = input_shape[0][1:]\n for i in range(1, len(input_shape)):\n if input_shape[i] is None:\n shape = None\n else:\n shape = input_shape[i][1:]\n output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)\n # If the inputs have different ranks, we have to reshape them\n # to make them broadcastable.\n if None not in input_shape and len(set(map(len, input_shape))) == 1:\n self._reshape_required = False\n else:\n self._reshape_required = True\n\n def call(self, inputs):\n if not isinstance(inputs, list):\n raise ValueError('A merge layer should be called on a list of inputs.')\n if self._reshape_required:\n reshaped_inputs = []\n input_ndims = list(map(K.ndim, inputs))\n if None not in input_ndims:\n # If ranks of all inputs are available,\n # we simply expand each of them at axis=1\n # until all of them have the same rank.\n max_ndim = max(input_ndims)\n for x in inputs:\n x_ndim = K.ndim(x)\n for _ in range(max_ndim - x_ndim):\n x = tf.expand_dims(x, axis=1)\n reshaped_inputs.append(x)\n return self._merge_function(reshaped_inputs)\n else:\n # Transpose all inputs so that batch size is the last dimension.\n # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)\n transposed = False\n for x in inputs:\n x_ndim = K.ndim(x)\n if x_ndim is None:\n x_shape = tf.shape(x)\n batch_size = x_shape[0]\n new_shape = K.concatenate(\n [x_shape[1:],\n tf.expand_dims(batch_size, axis=-1)])\n x_transposed = tf.reshape(\n x,\n tf.stack(\n [batch_size, tf.math.reduce_prod(x_shape[1:])], axis=0))\n x_transposed = tf.transpose(x_transposed, perm=(1, 0))\n x_transposed = tf.reshape(x_transposed, new_shape)\n reshaped_inputs.append(x_transposed)\n transposed = True\n elif x_ndim > 1:\n dims = list(range(1, x_ndim)) + [0]\n reshaped_inputs.append(tf.transpose(x, perm=dims))\n transposed = True\n else:\n # We don't transpose inputs if they are 1D vectors or scalars.\n reshaped_inputs.append(x)\n y = self._merge_function(reshaped_inputs)\n y_ndim = K.ndim(y)\n if transposed:\n # If inputs have been transposed, we have to transpose the output too.\n if y_ndim is None:\n y_shape = tf.shape(y)\n y_ndim = tf.shape(y_shape)[0]\n batch_size = y_shape[y_ndim - 1]\n new_shape = K.concatenate([\n tf.expand_dims(batch_size, axis=-1), y_shape[:y_ndim - 1]\n ])\n y = tf.reshape(y, (-1, batch_size))\n y = tf.transpose(y, perm=(1, 0))\n y = tf.reshape(y, new_shape)\n elif y_ndim > 1:\n dims = [y_ndim - 1] + list(range(y_ndim - 1))\n y = tf.transpose(y, perm=dims)\n return y\n else:\n return self._merge_function(inputs)\n\n def compute_output_shape(self, input_shape):\n if input_shape[0] is None:\n output_shape = None\n else:\n output_shape = input_shape[0][1:]\n for i in range(1, len(input_shape)):\n if input_shape[i] is None:\n shape = None\n else:\n shape = input_shape[i][1:]\n output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)\n batch_sizes = {s[0] for s in input_shape if s is not None} - {None}\n if len(batch_sizes) == 1:\n output_shape = (list(batch_sizes)[0],) + output_shape\n else:\n output_shape = (None,) + output_shape\n return output_shape\n\n def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, list):\n raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, list):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and 
`mask` '\n 'should have the same length.')\n if all(m is None for m in mask):\n return None\n masks = [tf.expand_dims(m, axis=0) for m in mask if m is not None]\n return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)\n\n\nclass Add(_Merge):\n \"\"\"Layer that adds a list of inputs.\n\n It takes as input a list of tensors,\n all of the same shape, and returns\n a single tensor (also of the same shape).\n\n Examples:\n\n ```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n # equivalent to `added = keras.layers.add([x1, x2])`\n added = keras.layers.Add()([x1, x2])\n out = keras.layers.Dense(4)(added)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n \"\"\"\n\n def _merge_function(self, inputs):\n output = inputs[0]\n for i in range(1, len(inputs)):\n output += inputs[i]\n return output\n","sub_path":"mobilenet/layers/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":9712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"359015954","text":"################################################################################\n# Copyright (c) 2021 ContinualAI. #\n# Copyrights licensed under the MIT License. #\n# See the accompanying LICENSE file for terms. #\n# #\n# Date: 15-09-2022 #\n# Author(s): Lorenzo Pellegrini #\n# E-mail: contact@continualai.org #\n# Website: avalanche.continualai.org #\n################################################################################\n\"\"\"\nExample on how to use the checkpoint plugin.\n\nThis is basically a vanilla Avalanche main script, but with the replay\nfunctionality enabled. 
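Returning to the Keras _Merge layer a couple of records up: its broadcasting rule is easiest to see on plain tuples. A standalone re-implementation of the same branch logic (hypothetical helper; None stands for an unknown dimension):

    def elemwise_shape(s1, s2):
        if len(s1) < len(s2):
            return elemwise_shape(s2, s1)
        out = list(s1[:len(s1) - len(s2)])
        for i, j in zip(s1[len(s1) - len(s2):], s2):
            if i is None or j is None:
                out.append(None)            # unknown stays unknown
            elif i == 1:
                out.append(j)               # size-1 axes broadcast
            elif j == 1:
                out.append(i)
            elif i == j:
                out.append(i)
            else:
                raise ValueError('not broadcastable: %r %r' % (s1, s2))
        return tuple(out)

    assert elemwise_shape((5, 3), (3,)) == (5, 3)
    assert elemwise_shape((5, 1), (5, 4)) == (5, 4)
    assert elemwise_shape((5, None), (5, 4)) == (5, None)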
Proper comments are provided to point out the changes\nrequired to use the checkpoint plugin.\n\"\"\"\n\nimport argparse\nimport os\nfrom typing import Sequence\n\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\n\nfrom avalanche.benchmarks import CLExperience, SplitMNIST\nfrom avalanche.evaluation.metrics import (\n accuracy_metrics,\n loss_metrics,\n class_accuracy_metrics,\n)\nfrom avalanche.logging import (\n InteractiveLogger,\n TensorboardLogger,\n WandBLogger,\n TextLogger,\n)\nfrom avalanche.models import SimpleMLP, as_multitask\nfrom avalanche.training.determinism.rng_manager import RNGManager\nfrom avalanche.training.plugins import EvaluationPlugin, ReplayPlugin\nfrom avalanche.training.plugins.checkpoint import (\n CheckpointPlugin,\n FileSystemCheckpointStorage,\n)\nfrom avalanche.training.supervised import Naive\n\n\ndef main(args):\n # FIRST CHANGE: SET THE RANDOM SEEDS\n # In fact, you should to this no matter the checkpointing functionality.\n # Remember to load checkpoints by setting the same random seed used when\n # creating them...\n RNGManager.set_random_seeds(1234)\n\n # Nothing new here...\n device = torch.device(\n f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n )\n print(\"Using device\", device)\n\n # CL Benchmark Creation\n n_experiences = 5\n scenario = SplitMNIST(n_experiences=n_experiences, return_task_id=True)\n input_size = 28 * 28 * 1\n\n train_stream: Sequence[CLExperience] = scenario.train_stream\n test_stream: Sequence[CLExperience] = scenario.test_stream\n\n # Define the model (and load initial weights if necessary)\n # Again, not checkpoint-related\n model = SimpleMLP(\n input_size=input_size, num_classes=scenario.n_classes // n_experiences\n )\n model = as_multitask(model, \"classifier\")\n\n # Prepare for training & testing: not checkpoint-related\n optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)\n criterion = CrossEntropyLoss()\n\n # SECOND CHANGE: INSTANTIATE THE CHECKPOINT PLUGIN\n # FileSystemCheckpointStorage is a good default choice.\n # The provided directory should point to the SPECIFIC experiment: do not\n # re-use the same folder for different experiments/runs.\n # Obvious noob advice: do not use a runtime-computed timestamp for the\n # directory name, or you will end up by NOT loading the previous\n # checkpoint ;)\n # Please notice the `map_location`: you should set the current device there.\n # That will take care of loading the checkpoint on the correct device, even\n # if it was previously produced on a cuda device with a different id. 
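# A standalone illustration of the same relocation with the plain
# torch.load API (the file name here is hypothetical; the directory is
# the one configured below):
#
#   ckpt = torch.load('checkpoints/task_incremental/demo.ckpt',
#                     map_location=torch.device('cpu'))
#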
It\n # can also be used to resume a cuda checkpoint from cuda to CPU.\n # However, it will not work when loading a CPU checkpoint to cuda...\n # In brief: CUDA -> CPU (OK), CUDA:0 -> CUDA:1 (OK), CPU -> CUDA (NO!)\n checkpoint_plugin = CheckpointPlugin(\n FileSystemCheckpointStorage(\n directory=\"./checkpoints/task_incremental\",\n ),\n map_location=device,\n )\n\n # THIRD CHANGE: LOAD THE CHECKPOINT IF IT EXISTS\n # IF THE CHECKPOINT EXISTS, SKIP THE CREATION OF THE STRATEGY!\n # OTHERWISE, CREATE THE STRATEGY AS USUAL.\n # NOTE: add the checkpoint plugin to the list of strategy plugins!\n\n # Load checkpoint (if exists)\n strategy, initial_exp = checkpoint_plugin.load_checkpoint_if_exists()\n\n # Create the CL strategy (if not already loaded from checkpoint)\n if strategy is None:\n # Add the checkpoint plugin to the list of plugins!\n plugins = [\n checkpoint_plugin,\n ReplayPlugin(mem_size=500),\n # ...\n ]\n\n # Create loggers (as usual)\n os.makedirs(f\"./logs/checkpointing_{args.checkpoint_at}\", exist_ok=True)\n loggers = [\n TextLogger(\n open(f\"./logs/checkpointing_\" f\"{args.checkpoint_at}/log.txt\", \"w\")\n ),\n InteractiveLogger(),\n TensorboardLogger(f\"./logs/checkpointing_{args.checkpoint_at}\"),\n ]\n\n if args.wandb:\n loggers.append(\n WandBLogger(\n project_name=\"AvalancheCheckpointing\",\n run_name=f\"checkpointing_{args.checkpoint_at}\",\n )\n )\n\n # Create the evaluation plugin (as usual)\n evaluation_plugin = EvaluationPlugin(\n accuracy_metrics(minibatch=False, epoch=True, experience=True, stream=True),\n loss_metrics(minibatch=False, epoch=True, experience=True, stream=True),\n class_accuracy_metrics(stream=True),\n loggers=loggers,\n )\n\n # Create the strategy (as usual)\n strategy = Naive(\n model=model,\n optimizer=optimizer,\n criterion=criterion,\n train_mb_size=128,\n train_epochs=2,\n eval_mb_size=128,\n device=device,\n plugins=plugins,\n evaluator=evaluation_plugin,\n )\n\n # Train and test loop, as usual.\n # Notice the \"if\" checking \"checkpoint_at\", which here is only used to\n # demonstrate the checkpoint functionality. In your code, you may want\n # to add a similar check based on received early termination signals.\n # These signals may include keyboard interrupts, SLURM interrupts, etc.\n # Just keep in mind that the checkpoint is saved AFTER each eval phase.\n # If you terminate the process before the end of the eval phase,\n # all the work done between the previous checkpoint and the current moment\n # is lost.\n for train_task in train_stream[initial_exp:]:\n strategy.train(train_task, num_workers=10, persistent_workers=True)\n strategy.eval(test_stream, num_workers=10)\n\n if train_task.current_experience == args.checkpoint_at:\n print(\"Exiting early\")\n break\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--cuda\",\n type=int,\n default=0,\n help=\"Select zero-indexed cuda device. 
-1 to use CPU.\",\n )\n parser.add_argument(\"--checkpoint_at\", type=int, default=-1)\n parser.add_argument(\"--wandb\", action=\"store_true\")\n main(parser.parse_args())\n","sub_path":"examples/task_incremental_with_checkpointing.py","file_name":"task_incremental_with_checkpointing.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"356069203","text":"from django.conf import settings\nfrom django_mako_plus import view_function, jscontext\nfrom datetime import datetime, timezone\nfrom account import models as amod\n\n@view_function\ndef process_request(request, user:amod.User = None):\n utc_time = datetime.utcnow()\n\n username = str(request.user.username)\n pageMessage = ''\n showMessage = False\n userPermissions = []\n\n if(request.user is not None):\n userPermissions = list(request.user.get_all_permissions())\n\n try:\n pageMessage = request.session['pageMessage']\n except:\n pageMessage = ''\n\n try:\n showMessage = request.session['showMessage']\n except:\n showMessage = False\n\n request.session['showMessage'] = False\n request.session['pageMessage'] = ''\n\n context = {\n # sent to index.html:\n 'utc_time': utc_time,\n 'user': username,\n # sent to index.html and index.js:\n jscontext('data'): {\n 'showMessage': showMessage,\n 'pageMessage': pageMessage,\n 'user': username,\n 'permissions': userPermissions\n }\n }\n return request.dmp.render('index.html', context)\n\n\n ","sub_path":"homepage/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"202490433","text":"import numpy as np\nimport pandas as pd\nimport random\nimport sys\nimport os\nimport math\nimport copy\nimport pyclustering\nfrom pyclustering.cluster import xmeans\n\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\nplt.switch_backend('agg')\nsns.set(style='whitegrid')\ncurrent_palette = sns.color_palette(\"colorblind\", 5)\nif True:\n current_palette[0] = (0 / 255, 114 / 255, 178 / 255)\n current_palette[1] = (240 / 255, 228 / 255, 66 / 255)\n current_palette[2] = (0 / 255, 158 / 255, 115 / 255)\n current_palette[3] = (213 / 255, 94 / 255, 0 / 255)\n current_palette[4] = (204 / 255, 121 / 255, 167 / 255)\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nif int(sys.argv[1])==0:\n if not os.path.exists(\"./res\"):\n os.mkdir(\"./res\")\n os.mkdir(\"./res2\")\n os.mkdir(\"./figs\")\n\n\ndef cluster(data):\n structure = 0\n descent = 0\n data = np.array(data)\n clans = []\n clusters_clan = []\n try:\n init_center = xmeans.kmeans_plusplus_initializer(data, 2).initialize()\n xm = xmeans.xmeans(data, init_center, ccore=False)\n xm.process()\n sizes = [len(cluster) for cluster in xm.get_clusters()]\n centers=xm.get_centers()\n clusters = xm.get_clusters()\n for i in range(len(sizes)):\n if sizes[i] > 5:\n clans.append(centers[i])\n clusters_clan.append(clusters[i])\n except:\n pass\n if len(clans)>0:\n while len(clans) > 0:\n num_clans=len(clans)\n clan_ls=[]\n for i in range(num_clans):\n mate = i\n child = i\n cur_mate = (clans[i][2]-clans[i][0])**2 + (clans[i][3]-clans[i][1])**2 - 1\n cur_child= (clans[i][0]-clans[i][0])**2 + (clans[i][3]-clans[i][1])**2 - 1\n for j in range(num_clans):\n mate_cur=(clans[i][2]-clans[j][0])**2 + (clans[i][3]-clans[j][1])**2\n child_cur=(clans[i][0]-clans[j][0])**2 + (clans[i][3]-clans[j][1])**2\n if mate_cur0:\n marriage_path=[]\n cur = 
candidate[-1]\n man_path = [cur]\n kinship_clans = []\n population = 0\n while True:\n next = clan_ls[cur][2]\n if next in man_path:\n man_path = man_path[man_path.index(next):]\n break\n else:\n man_path.append(next)\n cur=next\n kinship_clans.extend(man_path)\n descent_cycle = len(man_path)\n cur_woman_cycle_cur=0\n for clan in man_path:\n # if clan not in marriage_path:\n cur_path = [clan]\n cur = clan\n while True:\n next = clan_ls[cur][1]\n if next in cur_path:\n cur_path = cur_path[cur_path.index(next):]\n kinship_clans.extend(cur_path)\n if len(cur_path) > cur_woman_cycle_cur:\n cur_woman_cycle_cur = len(cur_path)\n break\n else:\n cur_path.append(next)\n cur = next\n marriage_cycle = cur_woman_cycle_cur\n candidate.pop()\n for man in man_path:\n if man in candidate:\n candidate.remove(man)\n kinship_clans = list(set(kinship_clans))\n # for i in kinship_clans:\n # population += sizes_clan[i]\n\n if descent_cycle >= cur_descent_cycle and marriage_cycle >= cur_marriage_cycle and len(kinship_clans) >= len(cur_clans):\n # if population >= cur_population:\n cur_descent_cycle = descent_cycle\n cur_marriage_cycle = marriage_cycle\n cur_clans = kinship_clans[:]\n\n cur_paternal_cycle = 0\n # clans_ori = np.array(clans)[cur_clans][:]\n clans_ori = clans[:]\n clans = []\n # clans_ori\n\n for clan in clans_ori:\n clans.append([clan[0], clan[2]])\n\n if len(clans)>0:\n num_clans=len(clans)\n clan_ls=[]\n for i in range(num_clans):\n mate = i\n cur_mate = (clans[i][1]-clans[i][0])**2 - 1\n for j in range(num_clans):\n mate_cur = (clans[i][1]-clans[j][0])**2\n if mate_cur0:\n cur = candidate[-1]\n marriage_path = [cur]\n kinship_clans = []\n population = 0\n while True:\n next = clan_ls[cur][1]\n if next in marriage_path:\n marriage_path = marriage_path[marriage_path.index(next):]\n break\n else:\n marriage_path.append(next)\n cur=next\n\n candidate.pop()\n for clan in marriage_path:\n if clan in candidate:\n candidate.remove(clan)\n\n if len(marriage_path) >= cur_paternal_cycle:\n # if population >= cur_population:\n cur_paternal_cycle = len(marriage_path)\n\n # data1 = data[:,[1, 3]]\n\n clans = []\n cur_maternal_cycle = 0\n for clan in clans_ori:\n clans.append([clan[1], clan[3]])\n\n if len(clans)>0:\n num_clans=len(clans)\n clan_ls=[]\n for i in range(num_clans):\n mate = i\n cur_mate = (clans[i][1]-clans[i][0])**2 - 1\n for j in range(num_clans):\n mate_cur=(clans[i][1]-clans[j][0])**2\n if mate_cur0:\n cur = candidate[-1]\n marriage_path = [cur]\n kinship_clans = []\n population = 0\n while True:\n next = clan_ls[cur][1]\n if next in marriage_path:\n marriage_path = marriage_path[marriage_path.index(next):]\n break\n else:\n marriage_path.append(next)\n cur=next\n\n candidate.pop()\n for clan in marriage_path:\n if clan in candidate:\n candidate.remove(clan)\n\n if len(marriage_path) >= cur_maternal_cycle:\n # if population >= cur_population:\n cur_maternal_cycle = len(marriage_path)\n\n\n if cur_marriage_cycle * cur_descent_cycle == 0:\n structure = 0\n elif cur_marriage_cycle == 1 and cur_descent_cycle == 1:\n structure = 1\n elif cur_marriage_cycle == 2 and cur_descent_cycle == 1:\n structure = 2\n elif cur_marriage_cycle == cur_descent_cycle == len(cur_clans) == 2:\n structure = 2\n elif cur_marriage_cycle > 2 and cur_descent_cycle == 1:\n structure = 3\n elif cur_marriage_cycle == cur_descent_cycle == len(cur_clans):\n structure = 3\n elif cur_marriage_cycle > 1 and cur_descent_cycle > 1 and len(cur_clans) > 3:\n structure = 4\n else:\n structure = 5\n\n descent = 2 * 
(cur_paternal_cycle > 1) + 1 * (cur_maternal_cycle > 1)\n\n # structures=[\"dead\",\"incest\", \"dual\", \"generalized\", \"restricted\", \"others\"]\n\n return [structure, descent, clusters_clan]\n\n\nclass Society:\n def __init__(self):\n self.families = []\n self.df=pd.DataFrame()\n\nclass Family:\n def __init__(self,trait,preference, counter):\n self.trait = trait\n self.preference = preference\n self.man = 0\n self.woman = 0\n self.counter = counter\n\n\ndef my_distance(x, y):\n return np.sum((x - y)**2,axis=1)\n\n\ndef generation(families):\n traits = np.array([family.trait for family in families])\n preferences = np.array([family.preference for family in families])\n for family in families:\n if family.counter == chance:\n distance = np.array([my_distance(traits, family.trait),my_distance(traits, family.preference), my_distance(preferences, family.trait)]).min(axis=0)\n friend = np.sum(np.exp(- distance)) / len(families)\n rival = np.sum(np.exp(- my_distance(preferences, family.preference))) / len(families)\n rate = math.exp(- d_c * (1 - friend) - d_m * rival)\n couple = birth * rate\n family.man = np.random.poisson(lam = couple)\n family.woman = np.random.poisson(lam = couple)\n family.counter -= 1\n\n families = [family for family in families if family.man * family.woman > 0]\n\n return families\n\n\ndef mating(families):\n next_generation = []\n random.shuffle(families)\n mates = np.array([mate.preference for mate in families])\n for family in families:\n if family.man > 0:\n dist = np.exp(- my_distance(mates, family.trait))\n dist = dist / np.sum(dist)\n mate = np.random.choice(families, p = dist)\n\n if mate.woman > 0:\n couple = min(family.man, mate.woman)\n family.man -= couple\n mate.woman -= couple\n for i in range(couple):\n new_trait = np.array([family.trait[0], mate.trait[1]]) + np.random.normal(0, mutation, 2)\n new_preference = np.array([family.preference[0], mate.preference[1]]) + np.random.normal(0, mutation, 2)\n next_generation.append(Family(new_trait, new_preference, chance))\n\n for family in families:\n if family.counter * family.man * family.woman > 0:\n next_generation.append(family)\n\n return next_generation\n\ndef main(l):\n num = 0\n societies = []\n structures = []\n descents = np.array([[0] * 4] * 4)\n for i in range(num_society):\n societies.append(Society())\n for j in range(num_family):\n societies[i].families.append(Family(np.random.normal(0, 1, 2), np.random.normal(0, 1, 2), chance))\n\n\n while num < iter:\n remove_ls = []\n duplicate_ls = []\n for society in societies:\n society.families = generation(society.families)\n society.df[num] = [[family.trait[0] for family in society.families], [family.trait[1] for family in society.families], [family.preference[0] for family in society.families], [family.preference[1] for family in society.families]]\n society.families = mating(society.families)\n population = len(society.families)\n if population < num_family / 10:\n remove_ls.append(society)\n elif population > num_family * 2:\n duplicate_ls.append(society)\n for society in remove_ls:\n societies.remove(society)\n for society in duplicate_ls:\n population = len(society.families)\n random.shuffle(society.families)\n n = math.floor(math.log2(population / num_family))\n k = round(len(society.families) / 2**n)\n for i in [0] * (2**n - 1):\n families = society.families[:k]\n society.families = society.families[k:]\n societies.append(Society())\n societies[-1].families = copy.deepcopy(families)\n societies[-1].df = society.df.copy()\n if len(societies) > 
num_society:\n random.shuffle(societies)\n societies = societies[:num_society]\n emigrants = []\n for society in societies:\n families = society.families\n random.shuffle(families)\n emigrants.extend(families[round((1 - migration) * len(families)):])\n society.families = families[:round((1 - migration) * len(families))]\n random.shuffle(emigrants)\n for society in societies:\n imigrants = round(len(society.families) / 9)\n society.families.extend(emigrants[:imigrants])\n emigrants = emigrants[imigrants:]\n if len(societies) == 0:\n break\n\n if num % 10 == 0:\n cur_structures = [0] * 7\n for society in societies:\n data = [[family.trait[0], family.trait[1], family.preference[0], family.preference[1]] for family in society.families]\n [structure, descent] = cluster(data)[:2]\n cur_structures[structure] += 1\n if structure in [1, 2, 3, 4] and num > 0.6 * iter:\n descents[structure - 1, descent] += 1\n cur_structures = cur_structures[:-1]\n structures.append(cur_structures.index(max(cur_structures)))\n num += 1\n if len(societies) == 0:\n structures = []\n\n\n if num == iter:\n k = 0\n for society in societies:\n flag = 0\n data = [[family.trait[0],family.trait[1],family.preference[0],family.preference[1]] for family in society.families]\n res = cluster2(data)\n flag = 1\n # if l == 0 and res[0] in [1, 2, 3] and res[1] == 3:\n # flag = 1\n # if l == 0 and res[0] in [1, 2, 3, 4] and res[0] == len(res[2]):\n # flag = 1\n # elif res[1] == 1 and res[0] in [1, 2, 3, 4] and res[0] == len(res[2]):\n # flag = 1\n\n if flag == 1 and k < 10:\n data = np.array(data)\n clusters = res[2]\n\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8,4))\n\n my_ls=[]\n for i in range(iter):\n my_ls.extend([[i,society.df.iat[0,i][j],society.df.iat[2,i][j]] for j in range(len(society.df.iat[0,i]))])\n df_res=pd.DataFrame(my_ls,columns=[\"time\",\"t\",\"p\"])\n ax1.scatter(df_res[\"time\"],df_res[\"t\"], s=0.2, color= \"blue\")\n ax1.scatter(df_res[\"time\"],df_res[\"p\"], s=0.2, color= \"red\")\n ax1.set_xlabel(\"generation\",fontsize=24)\n ax1.set_ylabel(r\"$t_1, p_1$\",fontsize=24)\n ax1.tick_params(labelsize = 16)\n\n my_ls=[]\n for i in range(iter):\n my_ls.extend([[i,society.df.iat[1,i][j],society.df.iat[3,i][j]] for j in range(len(society.df.iat[0,i]))])\n df_res=pd.DataFrame(my_ls,columns=[\"time\",\"t\",\"p\"])\n ax2.scatter(df_res[\"time\"],df_res[\"t\"], s=0.2, color= \"blue\")\n ax2.scatter(df_res[\"time\"],df_res[\"p\"], s=0.2, color= \"red\")\n ax2.set_xlabel(\"generation\",fontsize=24)\n ax2.set_ylabel(r\"$t_2, p_2$\",fontsize=24)\n ax2.tick_params(labelsize = 16)\n\n fig.tight_layout()\n fig.savefig(f\"figs/timeseries_{res[0]}_{num_society}societies_{num_family}families_dc{round(d_c * 100)}_dm{round(d_m * 100)}_mutation{round(mutation * 1000)}pm_ birth{birth}_{l}_{k}.pdf\", bbox_inches='tight')\n plt.close('all')\n\n fig, (axL, axC, axR) = plt.subplots(ncols=3, figsize=(12,4))\n\n for i in range(len(clusters)):\n try:\n axL.scatter(data[:, 0][clusters[i]], data[:, 2][clusters[i]], s=100-20*i, c=current_palette[i + 1])\n except:\n pass\n # axL.scatter(data[:,0], data[:,2], s=80)\n axL.set_xlabel(r\"$t_1$\",fontsize=24)\n axL.set_ylabel(r\"$p_1$\",fontsize=24)\n axL.tick_params(labelsize = 16)\n axL.set_aspect('equal', 'datalim')\n\n for i in range(len(clusters)):\n try:\n axC.scatter(data[:, 1][clusters[i]], data[:, 3][clusters[i]], s=100-20*i, c=current_palette[i + 1])\n except:\n pass\n # axC.scatter(data[:,1], data[:,3], s=80)\n axC.set_xlabel(r\"$t_2$\",fontsize=24)\n 
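Stepping back to the clan analysis earlier in this file: the descent- and marriage-cycle searches all repeat the same walk, following a single-successor map until a node repeats and then slicing out the cycle. A generic form (hypothetical helper; it assumes every node has exactly one successor, which guarantees termination):

    def find_cycle(start, succ):
        path = [start]
        cur = start
        while True:
            nxt = succ[cur]
            if nxt in path:
                return path[path.index(nxt):]   # keep only the cycle itself
            path.append(nxt)
            cur = nxt

And a worked instance of the fitness used in generation(), with the parameter values from the settings block at the bottom of the file (the friend/rival densities are illustrative):

    import math

    birth, d_c, d_m = 5, 0.05, 3.0
    friend, rival = 0.8, 0.1                            # hypothetical densities
    rate = math.exp(-d_c * (1 - friend) - d_m * rival)  # exp(-0.31) ~ 0.733
    couple = birth * rate                               # Poisson mean ~ 3.67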
axC.set_ylabel(r\"$p_2$\",fontsize=24)\n axC.tick_params(labelsize = 16)\n axC.set_aspect('equal', 'datalim')\n\n for i in range(len(clusters)):\n try:\n axR.scatter(data[:, 0][clusters[i]], data[:, 1][clusters[i]], s=100-20*i, c = current_palette[i + 1])\n except:\n pass\n # axR.scatter(data[:,0], data[:,1], s=80)\n axR.set_xlabel(r\"$t_1$\",fontsize=24)\n axR.set_ylabel(r\"$t_2$\",fontsize=24)\n axR.tick_params(labelsize = 16)\n axR.set_aspect('equal', 'datalim')\n\n fig.tight_layout()\n fig.savefig(f\"figs/structure_{res[0]}_{num_society}societies_{num_family}families_dc{round(d_c * 100)}_dm{round(d_m * 100)}_mutation{round(mutation * 1000)}pm_ birth{birth}_{l}_{k}.pdf\", bbox_inches='tight')\n plt.close('all')\n k += 1\n\n return [structures, descents]\n\ndef run():\n df = pd.DataFrame(index=list(range(iter // 10)))\n df_descent = pd.DataFrame(index=list(range(16)))\n k = 0\n for l in range(10):\n try:\n res = main(l)\n if len(res[0]) == 0:\n continue\n else:\n df[k] = np.array(res[0]).T.tolist()\n df_descent[k] = np.ravel(res[1])\n k += 1\n except:\n pass\n df.to_csv(f\"res/{num_society}societies_{num_family}families_dc{round(d_c * 100)}_dm{round(d_m * 100)}_mutation{round(mutation * 1000)}pm_birth{birth}_sigma{sigma}_migration{round(migration * 100)}pc.csv\")\n df_descent.to_csv(f\"res2/{num_society}societies_{num_family}families_dc{round(d_c * 100)}_dm{round(d_m * 100)}_mutation{round(mutation * 1000)}pm_birth{birth}_sigma{sigma}_migration{round(migration * 100)}pc.csv\")\n\n\n#settings\nnum_family = 50\nnum_society = 50\nmutation = 0.1\nd_c = 0.05\nd_m = 3.0\nbirth = 5\nsigma = 1\niter = 1000\nchance = 2\nmigration = 0\nl = 0\n\nfor d_c in [0.01, 0.03, 0.05, 0.1][2 * (int(sys.argv[1])//9):2 * (int(sys.argv[1])//9+1)]:\n for d_m in [[0.1, 0.2, 0.3, 0.5, 1, 2, 3, 5, 10][int(sys.argv[1])%9]]:\n run()\n","sub_path":"revised/structure_migration.py","file_name":"structure_migration.py","file_ext":"py","file_size_in_byte":19215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"423179560","text":"import os\n\nos.environ[\"LOG_LEVEL\"] = \"INFO\"\n\nfrom enigma.design import ENTRY\nfrom enigma.enigma import Enigma\nfrom enigma.bombe import Bombe\nfrom enigma.menu import MenuMaker\nfrom tests.menu_test_data import BASIC_3CH as B3, BOMBE_TEST2 as B\nfrom pprint import pformat, pprint\n\n\n\ndef main():\n\n crib = B.crib\n ring = 'AAA'\n # ring settings should always be 'AAA' for a bombe, as we ignore them\n enig = Enigma('I', 'II', 'III', 'B', ring_settings_3=ring, current_window_3='AAA')\n # bombe = Bombe(menu=B3)\n # mm = MenuMaker('ABCD')\n\n i = 0\n possible_settings = {}\n while i < 17577:\n enig.step_enigma()\n pre_cypher_position = enig.window_letters\n\n cypher = enig.cypher(crib)\n\n if i % 1000 == 0:\n print(f\"{i}, pos={pre_cypher_position}, res={cypher}\")\n\n mm = MenuMaker(crib, cypher)\n mm.run()\n if len(mm.found_loops.keys()) >= 2:\n check = set()\n all_keys = set(mm.found_loops.keys())\n for loop in mm.found_loops.keys():\n others = all_keys - {loop}\n for oth_loop in others:\n check.add(len(oth_loop.intersection(loop)))\n if all((c <= 1 for c in check)):\n print(f\"found loops with {pre_cypher_position}! 
= \\n\", pformat(mm.found_loops.values()))\n mm.network_graph(label=f\"_{pre_cypher_position}_r{ring}\")\n possible_settings[pre_cypher_position] = mm.found_loops\n\n enig.set_window_letters(pre_cypher_position)\n i += 1\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scratch/find_menu.py","file_name":"find_menu.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"635139682","text":"from rest_framework import serializers\nfrom .models import Block, Transaction\n\nfrom .chain import Chain, Mine\n\n\nclass TransactionSerializer(Chain, serializers.ModelSerializer):\n class Meta:\n model = Transaction\n fields = ('id', 'sender', 'recipient', 'amount', 'date_created')\n\n def validate(self, data):\n \"\"\" validate through the blockchain network\n if the transaction is valid.\n \"\"\"\n sender = data.get('sender')\n amount = data.get('amount')\n\n if not self.sender_balance_valid(sender, amount):\n raise serializers.ValidationError(\"Sender doesn't have enough coins\")\n\n return data\n\n\nclass SmashSomeRocks(Mine, Chain, serializers.ModelSerializer):\n\n timestamp = serializers.DateTimeField(read_only=True)\n previous_hash = serializers.CharField(read_only=True)\n secret = serializers.CharField(read_only=True)\n proof = serializers.IntegerField(read_only=True)\n recipient = serializers.CharField(write_only=True)\n\n transactions = TransactionSerializer(read_only=True, many=True)\n\n class Meta:\n model = Block\n fields = ('timestamp', 'previous_hash', 'secret', 'transactions', 'proof', 'recipient')\n\n def create(self, data):\n \"\"\" create a new block\n \"\"\"\n recipient = data.get('recipient')\n proof = self.generate_pow(data)\n\n if not self.proof_is_found(proof):\n raise serializers.ValidationError(\"Ooops something's fishy is going on.\")\n\n # create a new transaction\n Transaction.objects.create(sender=\"Swiftkind Network\",\n recipient=recipient, amount=1)\n\n # create a new block\n block = self.new_block(recipient, proof)\n\n return block","sub_path":"blockchain/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"69578819","text":"\nimport itchat\nitchat.login()\nfriends=itchat.get_friends(update=True)[0:]\n\n#朋友圈性别比例\nmale=female=other=0\nfor i in friends[1:]:\n sex=i[\"Sex\"]\n if sex == 1:\n male += 1\n elif sex == 2:\n female += 1\n else:\n other += 1\n\ntotal = len(friends[1:])\nprint(\"男性好友:%2f%%\"%(float(male)/total*100)+\"\\n\" +\n \"女性好友:%2f%%\"%(float(female)/total*100) + \"\\n\" +\n \"不明性别好友:%2f%%\"%(float(other)/total*100))\n\n#plot code\n\n#朋友圈数据分析\ndef get_var(var):\n variable = []\n for i in friends:\n value = i[var]\n variable.append(value)\n return variable\n\nNickName = get_var(\"NickName\")\nSex = get_var(\"Sex\")\nProvince = get_var(\"Province\")\nCity = get_var(\"City\")\nSignature = get_var(\"Signature\")\n\nfrom pandas import DataFrame\ndata = {'NickName': NickName, 'Sex': Sex, 'Province': Province, 'City': City, 'Signature': Signature}\nframe = DataFrame(data)\nframe.to_csv('data.csv',index=True)\n\n#plot code\n\n#朋友圈个性签名词云图\nimport re\nsiglist =[]\nfor i in friends:\n signature = i[\"Signature\"].strip().replace(\"span\",\"\").replace(\"class\",\"\").replace(\"emoji\",\"\")\n rep = re.compile(\"1f\\d+\\w*|[<>/=]\")\n signature = rep.sub(\"\",signature)\n siglist.append(signature)\ntext = 
\"\".join(siglist)\n\n#第一次运行时将朋友圈内容存入文件\n\n#fout = open('friends.txt','wb')\n#pickle.dump(text,fout)\n#fout.close()\n\nimport pickle\n#fr = open('friends.txt','rb')\n#text = pickle.load(fr)\n#fr.close()\n\nimport jieba\nwlist = jieba.cut(text, cut_all = True)\ncounts = {}\nfor word in wlist:\n if len(word ) == 1:\n continue\n else:\n counts[word] = counts.get(word,0) + 1\n\n\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, ImageColorGenerator\nimport numpy as np\nimport PIL.Image as Image\ncoloring = np.array(Image.open(\"f:/22.jpg\"))\nwc = WordCloud(background_color=\"white\",\n max_words=2000,\n mask=coloring,\n max_font_size=60,\n random_state=42,\n scale=2,\n font_path=\"c:/Windows/Fonts/SimHei.ttf\")\nwc.generate_from_frequencies(counts)\nimage_colors = ImageColorGenerator(coloring)\n\nplt.imshow(wc)\nplt.axis(\"off\")\nplt.figure()\nplt.show()\n\n\n","sub_path":"SingleProgram/ChatInfo.py","file_name":"ChatInfo.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"123655515","text":"import math\n\n\ndef great_circle(coordinate_system, radius1, radius2, polar1, polar2, azimuth1, azimuth2, axial1, axial2, elevation1,\n elevation2):\n if coordinate_system == 0:\n radius = 6378137.0\n distance = radius * math.acos(\n math.cos(polar1) * math.cos(polar2) + math.sin(polar1) * math.sin(polar2) * math.cos(azimuth2 - azimuth1))\n\n if coordinate_system == 1:\n print(coordinate_system)\n radius = (radius1 + radius2) / 2\n print(radius)\n distance = radius * math.acos(\n math.cos(polar1) * math.cos(polar2) + math.sin(polar1) * math.sin(polar2) * math.cos(azimuth2 - azimuth1))\n print(distance)\n\n if coordinate_system == 2:\n print(coordinate_system)\n radius = math.sqrt(((axial1 + axial2) / 2) ** 2 + ((elevation1 + elevation2) / 2) ** 2)\n print(radius)\n distance = radius * math.acos(\n math.cos(math.atan2(axial1, elevation1)) * math.cos(math.atan2(axial2, elevation2)) + math.sin(\n math.atan2(axial1, elevation1)) * math.sin(math.atan2(axial2, elevation2)) * math.cos(\n azimuth2 - azimuth1))\n print(distance)\n\n return distance\n","sub_path":"build/lib/great_circle.py","file_name":"great_circle.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"592537845","text":"\nimport random\n\n# 09. 
Typoglycemia\n# For a sequence of words separated by spaces, create a program that keeps the first and last character of each word and randomly shuffles the order of the remaining characters. However, words of length 4 or less are not shuffled. Give it a suitable English sentence (e.g. \"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\") and check the result.\n\nsentense = \"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\"\n\ndef typoglycemia(sentense):\n result = []\n for str in sentense.split(' '):\n if len(str) <= 4:\n result.append(str)\n else:\n chr_list = list(str[1:-1])\n random.shuffle(chr_list)\n result.append(str[0] + ''.join(chr_list) + str[-1])\n return ' '.join(result)\n\ndef typoglycemia2(target):\n def words():\n for word in target.split(' '):\n if len(word) <= 4:\n yield word\n else:\n middle = list(word[1:-1])\n random.shuffle(middle)\n yield word[0] + ''.join(middle) + word[-1]\n return ' '.join(words())\n\nprint(typoglycemia(sentense))\nprint(typoglycemia2(sentense))\n","sub_path":"chapter1_warmingUp/question_No009.py","file_name":"question_No009.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"69374976","text":"class ConversionTables:\n def __init__(self, table_filename = 'tables/tables'):\n self.tables = {}\n f = open(table_filename, 'r')\n lines = f.readlines()\n f.close()\n for l in lines:\n table_name, key, value = l.replace(\"\\n\",'').split('|')\n if len(table_name) > 0:\n if not table_name in self.tables.keys():\n self.tables[table_name] = {}\n if len(key)>0:\n self.tables[table_name][key] = value\n \n def return_month_number(self, textual_month):\n r = '00'\n months = self.tables['month'].keys()\n if textual_month in months:\n r = self.tables['month'][textual_month]\n else:\n for m in months:\n if m in textual_month:\n r = self.tables['month'][m]\n return r\n\n def normalize(self, table_name, key):\n v = key \n if table_name in self.tables.keys():\n t = self.tables[table_name]\n if key in t.keys():\n v = t[key]\n return v\n\n def remove_formatting(self, text):\n new_value = text\n if '>' in text and '<' in text: \n text = text.replace('>', '>-BREAK-')\n text = text.replace('<', '-BREAK-<')\n parts = text.split('-BREAK-')\n new_value = ''\n for part in parts:\n if '<' in part and '>' in part:\n pass\n else:\n new_value += part \n return new_value\n\n def table(self, table_name):\n return self.tables.get(table_name, {}) \n","sub_path":"src/xml_converter/src/reuse/tables/table_conversion.py","file_name":"table_conversion.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"129765107","text":"#Basics - import OS and CSV\r\nimport os\r\nimport csv\r\n\r\n#create path to the file we will be using and open the file\r\nbudget_data = os.path.join('Resources/budget_data.csv')\r\n\r\n#Print the header information\r\nprint(\"Financial Analysis\")\r\nprint(\"------------------------------\")\r\n\r\n#open the file as a CSV so we can read it\r\nwith open(budget_data) as csvfile:\r\n pybank_csv = csv.reader(csvfile, delimiter = ',')\r\n\r\n\r\n#The total number of months included in the dataset - count how many rows, without the header\r\n#Also calculate the net total and report out\r\n#Also keep track of the average change \r\n#Also keep track of greatest increase day and greatest decrease day \r\n\r\n #define header and variables to be used \r\n header = next(pybank_csv)\r\n total_months = 0 \r\n net_total = 0\r\n 
greatest_increase = 0\r\n greatest_decrease = 0 \r\n change_total = 0 \r\n first_row = 1 \r\n running_total = 0\r\n\r\n #for each row, count the row and add the total\r\n if header is not None:\r\n for row in pybank_csv:\r\n profitloss = int(row[1])\r\n total_months = total_months + 1\r\n net_total = net_total + profitloss\r\n\r\n #keep track of the greatest increase, store date and value to print later\r\n if int(row[1]) > int(greatest_increase):\r\n inc_date = row[0]\r\n greatest_increase = row[1]\r\n\r\n #keep track of the greatest decrease, store date and value to print later\r\n if int(row[1]) < int(greatest_decrease):\r\n dec_date = row[0]\r\n greatest_decrease = row[1]\r\n\r\n #Keep track of the increase / decrease change - take the current line and subtract the one before it\r\n if first_row != 1:\r\n running_total = running_total + (int(row[1]) - first_value)\r\n first_value = int(row[1])\r\n\r\n if first_row == 1:\r\n first_value = int(row[1])\r\n first_row = 2\r\n \r\n average_change = \"{:.2f}\".format(running_total / (total_months-1))\r\n\r\n #print data out with requested format\r\n print(f\"Total Months: {total_months}\")\r\n print(f\"Total: ${net_total}\")\r\n print(f'Average Change: {average_change}')\r\n print(f\"Greatest Increase in Profits: {inc_date} (${greatest_increase})\")\r\n print(f\"Greatest Decrease in Profits: {dec_date} (${greatest_decrease})\")\r\n\r\n#Write output to a txt file in the same folder\r\noutput_file = os.path.join(\"pybank_output.txt\")\r\n\r\ntext_file = open(output_file, 'w')\r\ntext_file.write(\"Financial Analysis \\n\") \r\ntext_file.write(\"------------------------------ \\n\") \r\ntext_file.write(f\"Total Months: {total_months} \\n\")\r\ntext_file.write(f\"Total: ${net_total} \\n\")\r\ntext_file.write(f'Average Change: {average_change} \\n')\r\ntext_file.write(f\"Greatest Increase in Profits: {inc_date} (${greatest_increase}) \\n\")\r\ntext_file.write(f\"Greatest Decrease in Profits: {dec_date} (${greatest_decrease})\")\r\n\r\ntext_file.close()\r\n\r\n","sub_path":"pybank.py","file_name":"pybank.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"232172334","text":"__FILENAME__ = conf\r\n# -*- coding: utf-8 -*-\r\n#\r\n# django-logicaldelete documentation build configuration file, created by\r\n# sphinx-quickstart on Fri Mar 12 00:48:02 2010.\r\n#\r\n# This file is execfile()d with the current directory set to its containing dir.\r\n#\r\n# Note that not all possible configuration values are present in this\r\n# autogenerated file.\r\n#\r\n# All configuration values have a default; values that are commented out\r\n# serve to show the default.\r\n\r\nimport sys, os\r\n\r\n# If extensions (or modules to document with autodoc) are in another directory,\r\n# add these directories to sys.path here. If the directory is relative to the\r\n# documentation root, use os.path.abspath to make it absolute, like shown here.\r\n#sys.path.append(os.path.abspath('.'))\r\n\r\n# -- General configuration -----------------------------------------------------\r\n\r\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\r\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\r\nextensions = ['sphinx.ext.todo', 'sphinx.ext.coverage']\r\n\r\n# Add any paths that contain templates here, relative to this directory.\r\ntemplates_path = ['_templates']\r\n\r\n# The suffix of source filenames.\r\nsource_suffix = '.rst'\r\n\r\n# The encoding of source files.\r\n#source_encoding = 'utf-8'\r\n\r\n# The master toctree document.\r\nmaster_doc = 'index'\r\n\r\n# General information about the project.\r\nproject = u'django-logicaldelete'\r\ncopyright = u'2010-2011, Patrick Altman'\r\n\r\n# The version info for the project you're documenting, acts as replacement for\r\n# |version| and |release|, also used in various other places throughout the\r\n# built documents.\r\n#\r\n# The short X.Y version.\r\nversion = '1.1'\r\n# The full version, including alpha/beta/rc tags.\r\nrelease = '1.1'\r\n\r\n# The language for content autogenerated by Sphinx. Refer to documentation\r\n# for a list of supported languages.\r\n#language = None\r\n\r\n# There are two options for replacing |today|: either, you set today to some\r\n# non-false value, then it is used:\r\n#today = ''\r\n# Else, today_fmt is used as the format for a strftime call.\r\n#today_fmt = '%B %d, %Y'\r\n\r\n# List of documents that shouldn't be included in the build.\r\n#unused_docs = []\r\n\r\n# List of directories, relative to source directory, that shouldn't be searched\r\n# for source files.\r\nexclude_trees = []\r\n\r\n# The reST default role (used for this markup: `text`) to use for all documents.\r\n#default_role = None\r\n\r\n# If true, '()' will be appended to :func: etc. cross-reference text.\r\n#add_function_parentheses = True\r\n\r\n# If true, the current module name will be prepended to all description\r\n# unit titles (such as .. function::).\r\n#add_module_names = True\r\n\r\n# If true, sectionauthor and moduleauthor directives will be shown in the\r\n# output. They are ignored by default.\r\n#show_authors = False\r\n\r\n# The name of the Pygments (syntax highlighting) style to use.\r\npygments_style = 'sphinx'\r\n\r\n# A list of ignored prefixes for module index sorting.\r\n#modindex_common_prefix = []\r\n\r\n\r\n# -- Options for HTML output ---------------------------------------------------\r\n\r\n# The theme to use for HTML and HTML Help pages. Major themes that come with\r\n# Sphinx are currently 'default' and 'sphinxdoc'.\r\nhtml_theme = 'default'\r\n\r\n# Theme options are theme-specific and customize the look and feel of a theme\r\n# further. For a list of options available for each theme, see the\r\n# documentation.\r\n#html_theme_options = {}\r\n\r\n# Add any paths that contain custom themes here, relative to this directory.\r\n#html_theme_path = []\r\n\r\n# The name for this set of Sphinx documents. If None, it defaults to\r\n# \" v documentation\".\r\n#html_title = None\r\n\r\n# A shorter title for the navigation bar. Default is the same as html_title.\r\n#html_short_title = None\r\n\r\n# The name of an image file (relative to this directory) to place at the top\r\n# of the sidebar.\r\n#html_logo = None\r\n\r\n# The name of an image file (within the static path) to use as favicon of the\r\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\r\n# pixels large.\r\n#html_favicon = None\r\n\r\n# Add any paths that contain custom static files (such as style sheets) here,\r\n# relative to this directory. 
They are copied after the builtin static files,\r\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\r\nhtml_static_path = ['_static']\r\n\r\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\r\n# using the given strftime format.\r\n#html_last_updated_fmt = '%b %d, %Y'\r\n\r\n# If true, SmartyPants will be used to convert quotes and dashes to\r\n# typographically correct entities.\r\n#html_use_smartypants = True\r\n\r\n# Custom sidebar templates, maps document names to template names.\r\n#html_sidebars = {}\r\n\r\n# Additional templates that should be rendered to pages, maps page names to\r\n# template names.\r\n#html_additional_pages = {}\r\n\r\n# If false, no module index is generated.\r\n#html_use_modindex = True\r\n\r\n# If false, no index is generated.\r\n#html_use_index = True\r\n\r\n# If true, the index is split into individual pages for each letter.\r\n#html_split_index = False\r\n\r\n# If true, links to the reST sources are added to the pages.\r\n#html_show_sourcelink = True\r\n\r\n# If true, an OpenSearch description file will be output, and all pages will\r\n# contain a tag referring to it. The value of this option must be the\r\n# base URL from which the finished HTML is served.\r\n#html_use_opensearch = ''\r\n\r\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\r\n#html_file_suffix = ''\r\n\r\n# Output file base name for HTML help builder.\r\nhtmlhelp_basename = 'django-logicaldeletedoc'\r\n\r\n\r\n# -- Options for LaTeX output --------------------------------------------------\r\n\r\n# The paper size ('letter' or 'a4').\r\n#latex_paper_size = 'letter'\r\n\r\n# The font size ('10pt', '11pt' or '12pt').\r\n#latex_font_size = '10pt'\r\n\r\n# Grouping the document tree into LaTeX files. List of tuples\r\n# (source start file, target name, title, author, documentclass [howto/manual]).\r\nlatex_documents = [\r\n ('index', 'django-logicaldelete.tex', u'django-logicaldelete Documentation',\r\n u'Patrick Altman', 'manual'),\r\n]\r\n\r\n# The name of an image file (relative to this directory) to place at the top of\r\n# the title page.\r\n#latex_logo = None\r\n\r\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\r\n# not chapters.\r\n#latex_use_parts = False\r\n\r\n# Additional stuff for the LaTeX preamble.\r\n#latex_preamble = ''\r\n\r\n# Documents to append as an appendix to all manuals.\r\n#latex_appendices = []\r\n\r\n# If false, no module index is generated.\r\n#latex_use_modindex = True\r\n\r\n########NEW FILE########\r\n__FILENAME__ = admin\r\nfrom django.contrib import admin\r\n\r\n\r\nclass ModelAdmin(admin.ModelAdmin):\r\n \"\"\"\r\n A base model admin to use in providing access to to logically deleted\r\n objects.\r\n \"\"\"\r\n \r\n list_display = (\"id\", \"__unicode__\", \"active\")\r\n list_display_filter = (\"active\",)\r\n \r\n def queryset(self, request):\r\n qs = self.model._default_manager.all_with_deleted()\r\n ordering = self.ordering or ()\r\n if ordering:\r\n qs = qs.order_by(*ordering)\r\n return qs\r\n\r\n########NEW FILE########\r\n__FILENAME__ = managers\r\nfrom django.db import models\r\n\r\nfrom logicaldelete.query import LogicalDeleteQuerySet\r\n\r\n\r\nclass LogicalDeletedManager(models.Manager):\r\n \"\"\"\r\n A manager that serves as the default manager for `logicaldelete.models.Model`\r\n providing the filtering out of logically deleted objects. 
In addition, it\r\n provides named querysets for getting the deleted objects.\r\n \"\"\"\r\n \r\n def get_query_set(self):\r\n if self.model:\r\n return LogicalDeleteQuerySet(self.model, using=self._db).filter(\r\n date_removed__isnull=True\r\n )\r\n \r\n def all_with_deleted(self):\r\n if self.model:\r\n return super(LogicalDeletedManager, self).get_query_set()\r\n \r\n def only_deleted(self):\r\n if self.model:\r\n return super(LogicalDeletedManager, self).get_query_set().filter(\r\n date_removed__isnull=False\r\n )\r\n \r\n def get(self, *args, **kwargs):\r\n return self.all_with_deleted().get(*args, **kwargs)\r\n \r\n def filter(self, *args, **kwargs):\r\n if \"pk\" in kwargs:\r\n return self.all_with_deleted().filter(*args, **kwargs)\r\n return self.get_query_set().filter(*args, **kwargs)\r\n\r\n########NEW FILE########\r\n__FILENAME__ = models\r\nimport datetime\r\n\r\nfrom django.db import models\r\n\r\nfrom logicaldelete import managers\r\n\r\n\r\nclass Model(models.Model):\r\n \"\"\"\r\n This base model provides date fields and functionality to enable logical\r\n delete functionality in derived models.\r\n \"\"\"\r\n \r\n date_created = models.DateTimeField(default=datetime.datetime.now)\r\n date_modified = models.DateTimeField(default=datetime.datetime.now)\r\n date_removed = models.DateTimeField(null=True, blank=True)\r\n \r\n objects = managers.LogicalDeletedManager()\r\n \r\n def active(self):\r\n return self.date_removed == None\r\n active.boolean = True\r\n \r\n def delete(self):\r\n self.date_removed = datetime.datetime.now()\r\n self.save()\r\n \r\n class Meta:\r\n abstract = True\r\n\r\n########NEW FILE########\r\n__FILENAME__ = query\r\nfrom django.db.models.query import QuerySet\r\n\r\n\r\nclass LogicalDeleteQuerySet(QuerySet):\r\n \r\n def delete(self):\r\n assert self.query.can_filter(), \\\r\n \"Cannot use 'limit' or 'offset' with delete.\"\r\n for obj in self.all():\r\n obj.delete()\r\n self._result_cache = None\r\n delete.alters_data = True\r\n\r\n########NEW FILE########\r\n","sub_path":"repoData/paltman-django-logicaldelete/allPythonContent.py","file_name":"allPythonContent.py","file_ext":"py","file_size_in_byte":9689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"294867060","text":"# (C) Copyright 2014 Hewlett Packard Enterprise Development Company LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for monascastatsd.py.\"\"\"\n\nimport collections\nimport socket\nimport time\nimport unittest\n\nimport monascastatsd as mstatsd\n\n\nclass FakeSocket(object):\n\n \"\"\"A fake socket for testing.\"\"\"\n\n def __init__(self):\n self.payloads = collections.deque()\n\n def send(self, payload):\n self.payloads.append(payload)\n\n def recv(self):\n try:\n return self.payloads.popleft()\n except IndexError:\n return None\n\n def __repr__(self):\n return str(self.payloads)\n\n\nclass BrokenSocket(FakeSocket):\n\n def send(self, payload):\n raise socket.error(\"Socket 
error\")\n\n\nclass TestMonascaStatsd(unittest.TestCase):\n\n def setUp(self):\n conn = mstatsd.Connection()\n conn.socket = FakeSocket()\n self.client = mstatsd.Client(connection=conn, dimensions={'env': 'test'})\n\n def recv(self, metric_obj):\n return metric_obj._connection.socket.recv()\n\n def test_counter(self):\n counter = self.client.get_counter(name='page.views')\n\n counter.increment()\n self.assertEqual(\"page.views:1|c|#{'env': 'test'}\",\n self.recv(counter))\n\n counter += 1\n self.assertEqual(\"page.views:1|c|#{'env': 'test'}\",\n self.recv(counter))\n\n counter.increment(11)\n self.assertEqual(\"page.views:11|c|#{'env': 'test'}\",\n self.recv(counter))\n\n counter += 11\n self.assertEqual(\"page.views:11|c|#{'env': 'test'}\",\n self.recv(counter))\n\n counter.decrement()\n self.assertEqual(\"page.views:-1|c|#{'env': 'test'}\",\n self.recv(counter))\n\n counter -= 1\n self.assertEqual(\"page.views:-1|c|#{'env': 'test'}\",\n self.recv(counter))\n\n counter.decrement(12)\n self.assertEqual(\"page.views:-12|c|#{'env': 'test'}\",\n self.recv(counter))\n\n counter -= 12\n self.assertEqual(\"page.views:-12|c|#{'env': 'test'}\",\n self.recv(counter))\n\n def test_counter_with_dimensions(self):\n counter = self.client.get_counter('counter_with_dims',\n dimensions={'date': '10/24', 'time': '23:00'})\n\n counter.increment(dimensions={'country': 'canada', 'color': 'red'})\n self.assertEqual(\"counter_with_dims:1|c|#{'date': '10/24', 'color': 'red', \" +\n \"'country': 'canada', 'env': 'test', 'time': '23:00'}\",\n self.recv(counter))\n\n counter += 1\n self.assertEqual(\"counter_with_dims:1|c|#{'date': '10/24', 'env': 'test', 'time': '23:00'}\",\n self.recv(counter))\n\n def test_set(self):\n set = self.client.get_set('set')\n set.send('metric', 123)\n assert self.recv(set) == \"set.metric:123|s|#{'env': 'test'}\"\n\n def test_gauge(self):\n gauge = self.client.get_gauge('gauge')\n gauge.send('metric', 123.4)\n assert self.recv(gauge) == \"gauge.metric:123.4|g|#{'env': 'test'}\"\n\n def test_histogram(self):\n histogram = self.client.get_histogram('histogram')\n\n histogram.send('metric', 123.4)\n self.assertEqual(\"histogram.metric:123.4|h|#{'env': 'test'}\", self.recv(histogram))\n\n def test_gauge_with_dimensions(self):\n gauge = self.client.get_gauge('gauge')\n gauge.send('gt', 123.4,\n dimensions={'country': 'china',\n 'age': 45,\n 'color': 'blue'})\n self.assertEqual(\"gauge.gt:123.4|g|#{\" +\n \"'color': 'blue', \" +\n \"'country': 'china', \" +\n \"'age': 45, \" +\n \"'env': 'test'}\",\n self.recv(gauge))\n\n def test_histogram_with_dimensions(self):\n histogram = self.client.get_histogram('my_hist')\n histogram.send('h', 1, dimensions={'color': 'red'})\n self.assertEqual(\"my_hist.h:1|h|#{'color': 'red', 'env': 'test'}\", self.recv(histogram))\n\n def test_sample_rate(self):\n counter = self.client.get_counter('sampled_counter')\n counter.increment(sample_rate=0)\n assert not self.recv(counter)\n for _ in range(10000):\n counter.increment(sample_rate=0.3)\n self.assert_almost_equal(3000,\n len(self.client.connection.socket.payloads),\n 150)\n self.assertEqual(\"sampled_counter:1|c|@0.3|#{'env': 'test'}\", self.recv(counter))\n\n def test_samples_with_dimensions(self):\n gauge = self.client.get_gauge()\n for _ in range(100):\n gauge.send('gst',\n 23,\n dimensions={'status': 'sampled'},\n sample_rate=0.9)\n\n def test_timing(self):\n timer = self.client.get_timer()\n timer.timing('t', 123)\n self.assertEqual(\"t:123|ms|#{'env': 'test'}\", self.recv(timer))\n\n def 
test_time(self):\n timer = self.client.get_timer()\n with timer.time('t'):\n time.sleep(2)\n packet = self.recv(timer)\n name_value, type_, dimensions = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('t', name)\n self.assert_almost_equal(2.0, float(value), 0.1)\n self.assertEqual(\"{'env': 'test'}\", dimensions.lstrip('#'))\n\n def test_timed(self):\n timer = self.client.get_timer()\n\n @timer.timed('timed.test')\n def func(a, b, c=1, d=1):\n \"\"\"docstring.\"\"\"\n time.sleep(0.5)\n return (a, b, c, d)\n\n self.assertEqual('func', func.__name__)\n self.assertEqual('docstring.', func.__doc__)\n\n result = func(1, 2, d=3)\n # Assert it handles args and kwargs correctly.\n self.assertEqual(result, (1, 2, 1, 3))\n\n packet = self.recv(timer)\n name_value, type_, dimensions = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed.test', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n self.assertEqual(\"{'env': 'test'}\", dimensions.lstrip('#'))\n\n def test_socket_error(self):\n self.client.connection.socket = BrokenSocket()\n self.client.get_gauge().send('no error', 1)\n assert True, 'success'\n self.client.connection.socket = FakeSocket()\n\n def test_batched(self):\n self.client.connection.open_buffer()\n gauge = self.client.get_gauge('site')\n gauge.send('views', 123)\n timer = self.client.get_timer('site')\n timer.timing('timer', 123)\n self.client.connection.close_buffer()\n\n self.assertEqual(\"site.views:123|g|#{'env': 'test'}\\nsite.timer:123|ms|#{'env': 'test'}\",\n self.recv(gauge))\n\n def test_context_manager(self):\n fake_socket = FakeSocket()\n with mstatsd.Connection() as conn:\n conn.socket = fake_socket\n client = mstatsd.Client(name='ContextTester', connection=conn)\n client.get_gauge('page').send('views', 123)\n client.get_timer('page').timing('timer', 12)\n\n self.assertEqual('ContextTester.page.views:123|g\\nContextTester.page.timer:12|ms',\n fake_socket.recv())\n\n def test_batched_buffer_autoflush(self):\n fake_socket = FakeSocket()\n with mstatsd.Connection() as conn:\n conn.socket = fake_socket\n client = mstatsd.Client(name='BufferedTester', connection=conn)\n counter = client.get_counter('mycounter')\n for _ in range(51):\n counter.increment()\n self.assertEqual('\\n'.join(['BufferedTester.mycounter:1|c' for _ in range(50)]),\n fake_socket.recv())\n\n self.assertEqual('BufferedTester.mycounter:1|c', fake_socket.recv())\n\n @staticmethod\n def assert_almost_equal(a, b, delta):\n assert 0 <= abs(a - b) <= delta, \"%s - %s not within %s\" % (a,\n b,\n delta)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_monascastatsd.py","file_name":"test_monascastatsd.py","file_ext":"py","file_size_in_byte":8985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"623716565","text":"\"\"\"Make pretty tables.\"\"\"\n\n\ndef tabulator(rows, spacer=' '):\n if len(rows) == 0:\n return (_ for _ in ())\n lengths = []\n row_len = len(rows[0])\n for i in range(row_len):\n lengths.append(max((len(str(row[i])) for row in rows), default=0))\n fmt = ''\n for i in range(row_len):\n if i != 0:\n fmt = fmt + spacer\n fmt = fmt + '{:' + str(lengths[i] or 1) + '}'\n return [fmt.format(*row) for row in 
rows]\n","sub_path":"tiny/tabulator.py","file_name":"tabulator.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"335082962","text":"from rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError as RestValidationError\nfrom django.core.exceptions import ValidationError as ModelsValidationError\n\nfrom room_service.models import Room, Reservation\nfrom room_service.validators import ReservationDateValidator\n\n__all__ = ['ReservationSerializer', 'RoomSerializer']\n\n\nclass ReservationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Reservation\n fields = ('id', 'theme', 'room', 'date', 'start_time', 'end_time')\n\n def create(self, validated_data):\n try:\n return super(ReservationSerializer, self).create(validated_data)\n except ModelsValidationError as error: # That is not good! Only for test\n raise RestValidationError(error.message)\n\n def validate(self, attrs):\n date = attrs.get('date')\n start = attrs.get('start_time')\n end = attrs.get('end_time')\n date_validator = ReservationDateValidator(date=date,\n start=start,\n end=end,\n exc_class=RestValidationError)\n date_validator.validate()\n return super(ReservationSerializer, self).validate(attrs)\n\n\nclass RoomSerializer(serializers.ModelSerializer):\n reservations = ReservationSerializer(read_only=True, many=True)\n\n class Meta:\n model = Room\n fields = ('id', 'title', 'created_at', 'reservations',)\n","sub_path":"room_service/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"516315241","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport os\nfrom astroquery.irsa import Irsa # had to install astroquery w/ pip\nimport astropy.units as u\n\n#matplotlib.rcParams.update({'font.size':18})\n#matplotlib.rcParams.update({'font.family':'serif'})\n\nIrsa.ROW_LIMIT = 0\nIrsa.TIMEOUT = 1000\n\n\ndef WISE_LC(obj, alldata=False, interac=False, clobber=False, title=''): # moreplots=False,\n # return 1 if successfully finished\n # return 0 if doesnt download\n retval = 0\n\n # create directories that we'll need\n if not os.path.exists('data'):\n os.makedirs('data')\n\n if not os.path.exists('img'):\n os.makedirs('img')\n\n # the WISE tables to search\n cats = ['neowiser_p1bs_psd', 'allsky_4band_p1bs_psd', 'allsky_3band_p1bs_psd', 'allsky_2band_p1bs_psd']\n\n # if clobber=False (i.e. don't overwrite),\n # double-check the data/ dir to see if this star has already been run!\n doobj = True\n if clobber is False:\n #booleans of if each output file prev. exists.\n t1 = os.path.isfile('data/' + obj + cats[0] + '.csv')\n t2 = os.path.isfile('data/' + obj + cats[1] + '.csv')\n t3 = os.path.isfile('data/' + obj + cats[2] + '.csv')\n t4 = os.path.isfile('data/' + obj + cats[3] + '.csv')\n\n # if ANY of these files exists, then DONT do object again\n doobj = not(t1 | t2 | t3 | t4)\n if doobj==False:\n print(\"\\x1b[31mWISE_LC: Data already pulled\\x1b[0m\")\n\n # should we actually Do this Object? 
False if prev found & clobber=False\n if doobj:\n colors = ['#1f77b4', '#ff7f0e', '#c5b0d5', '#d62728']\n\n plt.figure(figsize=(13,8))\n\n totvisits = 0\n for k in range(len(cats)):\n try:\n table1 = Irsa.query_region(obj, catalog=cats[k], spatial='Cone', radius=3 * u.arcsec)\n # table2 = Irsa.query_region(obj, catalog=cats[1], spatial='Cone', radius=3 * u.arcsec)\n # table3 = Irsa.query_region(obj, catalog=cats[2], spatial='Cone', radius=3 * u.arcsec)\n # table4 = Irsa.query_region(obj, catalog=cats[3], spatial='Cone', radius=3 * u.arcsec)\n\n table1.sort('mjd')\n # table2.sort('mjd')\n # table3.sort('mjd')\n # table4.sort('mjd')\n\n df1 = table1.to_pandas()\n # df2 = table2.to_pandas()\n # df3 = table3.to_pandas()\n # df4 = table4.to_pandas()\n\n totvisits = totvisits + len(df1) #+ len(df2) + len(df3) + len(df4)\n\n # manually fix the Python3 str=>bytestr problem... boo\n df1['ph_qual'] = df1['ph_qual'].str.decode('ascii')\n # df2['ph_qual'] = df2['ph_qual'].str.decode('ascii')\n # df3['ph_qual'] = df3['ph_qual'].str.decode('ascii')\n # df4['ph_qual'] = df4['ph_qual'].str.decode('ascii')\n\n df1['cc_flags'] = df1['cc_flags'].str.decode('ascii')\n # df2['cc_flags'] = df2['cc_flags'].str.decode('ascii')\n # df3['cc_flags'] = df3['cc_flags'].str.decode('ascii')\n # df4['cc_flags'] = df4['cc_flags'].str.decode('ascii')\n\n\n df1.to_csv('data/' + obj + cats[k] + '.csv')\n # df2.to_csv('data/' + obj + cats[1] + '.csv')\n # df3.to_csv('data/' + obj + cats[2] + '.csv')\n # df4.to_csv('data/' + obj + cats[3] + '.csv')\n\n\n #### QUALITY CUTS\n # can't add this to the latter 3 surveys... (df1['qual_frame'] > 8)\n if k==0:\n ok1 = (df1['ph_qual'].str[0] == 'A') & (df1['nb'] == 1) & (df1['cc_flags'].str[0:2] == '00') & (df1['w1rchi2'] < 5) & (df1['qual_frame'] > 8)\n else:\n ok1 = (df1['ph_qual'].str[0] == 'A') & (df1['nb'] == 1) & (df1['cc_flags'].str[0:2] == '00') & (df1['w1rchi2'] < 5)\n # ok3 = (df3['ph_qual'].str[0] == 'A') & (df3['nb'] == 1) & (df3['cc_flags'].str[0:2] == '00') & (df3['w1rchi2'] < 5)\n # ok4 = (df4['ph_qual'].str[0] == 'A') & (df4['nb'] == 1) & (df4['cc_flags'].str[0:2] == '00') & (df4['w1rchi2'] < 5)\n\n if alldata:\n plt.scatter(df1['mjd'], df1['w1mpro'], c='k', s=8, alpha=0.25)\n # plt.scatter(df2['mjd'], df2['w1mpro'], c='k', s=8, alpha=0.25)\n # plt.scatter(df3['mjd'], df3['w1mpro'], c='k', s=8, alpha=0.25)\n # plt.scatter(df4['mjd'], df4['w1mpro'], c='k', s=8, alpha=0.25)\n\n plt.errorbar(df1['mjd'][ok1], df1['w1mpro'][ok1], yerr=df1['w1sigmpro'][ok1],\n marker='o', linestyle='none', alpha=0.25, color=colors[k])\n # plt.errorbar(df2['mjd'][ok2], df2['w1mpro'][ok2], yerr=df2['w1sigmpro'][ok2],\n # marker='o', linestyle='none', alpha=0.25, color=colors[1])\n # plt.errorbar(df3['mjd'][ok3], df3['w1mpro'][ok3], yerr=df3['w1sigmpro'][ok3],\n # marker='o', linestyle='none', alpha=0.25, color=colors[2])\n # plt.errorbar(df4['mjd'][ok4], df4['w1mpro'][ok4], yerr=df4['w1sigmpro'][ok4],\n # marker='o', linestyle='none', alpha=0.25, color=colors[3])\n except Exception as eek:\n print(\"\\x1b[31mWISE_LC: ' + str(eek) + ' encountered. 
Huh.\\x1b[0m\")\n\n\n plt.ylabel('W1 (mag)')\n plt.xlabel('MJD (days)')\n plt.gca().invert_yaxis()\n if len(title) > 0:\n title_txt = title + ', N=' + str(totvisits)\n else:\n title_txt = obj + ', N=' + str(totvisits)\n plt.title(title_txt)\n plt.savefig('img/'+obj + '_W1.png', dpi=150, bbox_inches='tight', pad_inches=0.25)\n if interac:\n plt.show()\n else:\n plt.close()\n\n retval = 1 # a value to return, assuming the code makes it this far!\n\n # # 2) W1-W2 color light curve\n # if moreplots:\n # plt.figure(figsize=(13,8))\n # if alldata:\n # plt.scatter(df1['mjd'], df1['w1mpro']-df1['w2mpro'], c='k', s=8, alpha=0.25)\n # plt.scatter(df2['mjd'], df2['w1mpro']-df2['w2mpro'], c='k', s=8, alpha=0.25)\n # plt.scatter(df3['mjd'], df3['w1mpro']-df3['w2mpro'], c='k', s=8, alpha=0.25)\n # plt.scatter(df4['mjd'], df4['w1mpro']-df4['w2mpro'], c='k', s=8, alpha=0.25)\n #\n # plt.errorbar(df1['mjd'][ok1], df1['w1mpro'][ok1] - df1['w2mpro'][ok1],\n # yerr=np.sqrt(df1['w1sigmpro'][ok1]**2 + df1['w2sigmpro'][ok1]**2),\n # marker='o', linestyle='none', alpha=0.25, color=colors[0])\n # plt.errorbar(df2['mjd'][ok2], df2['w1mpro'][ok2] - df2['w2mpro'][ok2],\n # yerr=np.sqrt(df2['w1sigmpro'][ok2]**2 + df2['w2sigmpro'][ok2]**2),\n # marker='o', linestyle='none', alpha=0.25, color=colors[1])\n # plt.errorbar(df3['mjd'][ok3], df3['w1mpro'][ok3] - df3['w2mpro'][ok3],\n # yerr=np.sqrt(df3['w1sigmpro'][ok3]**2 + df3['w2sigmpro'][ok3]**2),\n # marker='o', linestyle='none', alpha=0.25, color=colors[2])\n # plt.errorbar(df4['mjd'][ok4], df4['w1mpro'][ok4] - df4['w2mpro'][ok4],\n # yerr=np.sqrt(df4['w1sigmpro'][ok4]**2 + df4['w2sigmpro'][ok4]**2),\n # marker='o', linestyle='none', alpha=0.25, color=colors[3])\n # plt.xlabel('MJD (days)')\n # plt.ylabel('W1-W2 (mag)')\n # plt.title(obj + ', N=' + str(totvisits))\n # plt.savefig('img/'+obj + '_W1W2.png', dpi=150, bbox_inches='tight', pad_inches=0.25)\n # # plt.show()\n # plt.close()\n\n\n # 3) CMD\n # plt.figure(figsize=(8,8))\n # plt.errorbar(df1['w1mpro'] - df1['w2mpro'], df1['w1mpro'],\n # xerr=np.sqrt(df1['w1sigmpro']**2 + df1['w2sigmpro']**2), yerr=df1['w1sigmpro'],\n # marker='o', linestyle='none', alpha=0.25, color='#1f77b4')\n # plt.errorbar(df2['w1mpro_ep'] - df2['w2mpro_ep'], df2['w1mpro_ep'],\n # xerr=np.sqrt(df2['w1sigmpro_ep']**2 + df2['w2sigmpro_ep']**2 ), yerr=df2['w1sigmpro_ep'],\n # marker='o', linestyle='none', alpha=0.25, color='#ff7f0e')\n #\n # plt.ylabel('W1 (mag)')\n # plt.xlabel('W1-W2 (mag)')\n # plt.gca().invert_yaxis()\n # plt.savefig('img/'+obj + '_cmd.png', dpi=150, bbox_inches='tight', pad_inches=0.25)\n # plt.close()\n\n\n\n # bonus: RA,Dec to make sure not a blend, etc\n # plt.figure(figsize=(8, 8))\n # plt.scatter(df1['ra'], df1['dec'],\n # marker='o', alpha=0.25, color='#1f77b4')\n # plt.scatter(df2['ra'], df2['dec'],\n # marker='o', alpha=0.25, color='#ff7f0e')\n # plt.xlabel('RA (deg)')\n # plt.ylabel('Dec (deg)')\n # plt.savefig('img/' + obj + '_radec.png', dpi=150, bbox_inches='tight', pad_inches=0.25)\n # plt.close()\n\n return retval\n\n\nif __name__ == \"__main__\":\n import sys\n # WISE_LC(str(sys.argv[1:]), interac=True, alldata=True, clobber=True)\n print('Running: '+ ' '.join(sys.argv[1:]))\n WISE_LC(' '.join(sys.argv[1:]), interac=True, alldata=True, clobber=True)\n","sub_path":"WISE/data/WISE_lcs/GetData.py","file_name":"GetData.py","file_ext":"py","file_size_in_byte":9456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"167902073","text":"\"\"\"\nUVA 11396 - Claw 
Decomposition\nSearching strategy: BFS\nUnsolved\n\"\"\"\n\nfrom queue import Queue\n\ntrue, false = True, False\nblack, white = 1, 0\n\n\n# region Method Descriptions\n\ndef init_array(n):\n return [[] for i in range(n)]\n\n\ndef scan(t=int):\n scanned = input().split()\n len_scan = len(scanned)\n if len_scan is 1:\n return t(scanned[0])\n\n for i in range(len_scan):\n scanned[i] = t(scanned[i])\n\n return scanned\n\n\n# endregion\n\nwhile true:\n V = scan()\n if V is 0:\n break\n\n graph = init_array(V + 1)\n while true:\n u, v = scan()\n if u is v and v is 0:\n break\n graph[u].append(v)\n graph[v].append(u)\n\n q = Queue()\n q.put(1)\n colors = [-1] * (V + 1)\n colors[1] = 1\n\n yes = true\n while not q.empty() and yes:\n u = q.get()\n for v in graph[u]:\n if colors[v] is -1:\n colors[v] = 1 - colors[u]\n q.put(v)\n elif colors[v] is colors[u]:\n yes = false\n break\n\n print(\"YES\" if yes else \"NO\")\n","sub_path":"assignments/assignment1/11396 - Claw Decomposition.py","file_name":"11396 - Claw Decomposition.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"199806191","text":"import sys; sys.path.append(\"/usr/local/lib/python2.7/site-packages\")\nimport os\nimport cv2\n\ndebug = True\n\nvideopath = os.path.join('./boxing.avi')\ncap = cv2.VideoCapture(videopath)\n\nprevgray = None\n\ncount = 0\nwhile cap.isOpened():\n count += 1\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n ret, frame = cap.read()\n if not ret:\n break\n\n cv2.imshow('frame', frame)\n\n # generate optical flow\n if prevgray is None:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n prevgray = gray\n continue\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n prevgray = gray\n horz = cv2.normalize(flow[..., 0], None, 0, 255, cv2.NORM_MINMAX)\n vert = cv2.normalize(flow[..., 1], None, 0, 255, cv2.NORM_MINMAX)\n horz = horz.astype('uint8')\n vert = vert.astype('uint8')\n\n\ncap.release()\n\n","sub_path":"src/temporal/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"99120829","text":"\"\"\"\nClass that represents where the robot can move and where the\ndifferent obstacles and objects laying on the table are.\n\"\"\"\nimport math\nimport uuid\n\nfrom scripts.src.pathfinding.node import Node\nfrom scripts.src.pathfinding.tile_role import TileRole\nfrom scripts.src.pathfinding.direction import Direction\nfrom scripts.src.pathfinding.obstacle_representation import ObstacleRepresentation\n\n\nclass Map:\n \"\"\"\n Class that represents where the robot can move and where the\n different obstacles and objects laying on the table are.\n \"\"\"\n def __init__(self, image_width, image_height, node_size=25,\n safety_cushion=0, robot_width=100, obstacle_width=40, puck_width=25,\n obstacle_representation=ObstacleRepresentation.SQUARE):\n self.node_size = node_size\n self.safety_cushion = safety_cushion\n self.robot_width = robot_width\n self.obstacle_width = obstacle_width\n self.puck_width = puck_width\n self.obstacle_cushion_width = self.safety_cushion + self.robot_width + self.obstacle_width\n self.obstacle_puck_width = self.safety_cushion + self.robot_width + self.puck_width\n self.obstacle_representation = obstacle_representation\n\n self.width, self.height = image_width, image_height\n\n self.obstacles = []\n 
self.pucks = []\n\n self.node_matrix = []\n\n self.table_walls_start_y = 40\n self.table_walls_end_y = 810\n self.table_walls_start_x = 0\n self.table_walls_end_x = 1600\n\n def render_map(self):\n \"\"\"Creates the nodes and generates the obstacles, pucks, start and end node.\"\"\"\n self.create_nodes()\n self.connect_nodes()\n self.add_table_walls()\n\n def add_top_wall(self, width):\n top_wall_uuid = uuid.uuid4()\n start_wall_top = max(0, self.table_walls_start_y // self.node_size)\n for row in range(\n start_wall_top,\n ((self.table_walls_start_y + width) // self.node_size) + 1):\n for node in self.node_matrix[row]:\n node.role = TileRole.OBSTACLE\n node.uuid = top_wall_uuid\n node.held_by.add(top_wall_uuid)\n\n def add_bottom_wall(self, width):\n bot_wall_uuid = uuid.uuid4()\n end_wall_bot = min(len(self.node_matrix), (self.table_walls_end_y // self.node_size)+1)\n for row in range(\n (self.table_walls_end_y - width) // self.node_size,\n end_wall_bot):\n for node in self.node_matrix[row]:\n node.role = TileRole.OBSTACLE\n node.uuid = bot_wall_uuid\n node.held_by.add(bot_wall_uuid)\n\n def add_right_wall(self, width):\n right_wall_uuid = uuid.uuid4()\n end_wall_right = min(len(self.node_matrix[0]), (self.table_walls_end_x//self.node_size)+1)\n for column in range(\n (self.table_walls_end_x - width) // self.node_size,\n end_wall_right):\n for row in range(len(self.node_matrix)):\n node = self.get_node_from_matrix_coordinates((column, row))\n node.role = TileRole.OBSTACLE\n node.uuid = right_wall_uuid\n node.held_by.add(right_wall_uuid)\n\n def add_left_wall(self, width):\n left_wall_uuid = uuid.uuid4()\n start_wall_left = max(0, (self.table_walls_start_x // self.node_size))\n for column in range(\n start_wall_left,\n ((self.table_walls_start_x + width)//self.node_size)+1):\n for row in range(len(self.node_matrix)):\n node = self.get_node_from_matrix_coordinates((column, row))\n node.role = TileRole.OBSTACLE\n node.uuid = left_wall_uuid\n node.held_by.add(left_wall_uuid)\n\n def add_table_walls(self):\n width = self.robot_width + self.safety_cushion\n self.add_top_wall(width)\n self.add_bottom_wall(width)\n self.add_left_wall(width)\n self.add_right_wall(width)\n\n def create_nodes(self):\n \"\"\"Creates the matrix containing the nodes.\"\"\"\n node_matrix = [\n [] for _ in range((self.height // self.node_size) + 1)\n ]\n\n for column in range((self.height // self.node_size)+1):\n for row in range((self.width // self.node_size)+1):\n y_position = column * self.node_size + self.node_size / 2\n x_position = row * self.node_size + self.node_size / 2\n node_matrix[column].append(\n Node((column, row), (x_position, y_position), self.node_size, self.node_size))\n\n self.node_matrix = node_matrix\n\n def connect_nodes(self):\n \"\"\"Connect each node to its neighbors. 
This method basically\n defines what movements are allowed by the robot.\"\"\"\n for line in self.node_matrix:\n for node in line:\n possible_neighbors = [\n (y_position, x_position, direction)\n\n for (y_position, x_position, direction) in\n [\n (\n node.matrix_center[0] - 1,\n node.matrix_center[1],\n Direction.UP\n ),\n (\n node.matrix_center[0] + 1,\n node.matrix_center[1],\n Direction.DOWN\n ),\n (\n node.matrix_center[0],\n node.matrix_center[1] - 1,\n Direction.LEFT\n ),\n (\n node.matrix_center[0],\n node.matrix_center[1] + 1,\n Direction.RIGHT\n ),\n\n (\n node.matrix_center[0] - 1,\n node.matrix_center[1] - 1,\n Direction.TOP_LEFT\n ),\n (\n node.matrix_center[0] - 1,\n node.matrix_center[1] + 1,\n Direction.TOP_RIGHT\n ),\n (\n node.matrix_center[0] + 1,\n node.matrix_center[1] - 1,\n Direction.DOWN_LEFT),\n (\n node.matrix_center[0] + 1,\n node.matrix_center[1] + 1,\n Direction.DOWN_RIGHT\n ),\n ]\n\n if ((0 <= x_position < len(self.node_matrix[0])\n and 0 <= y_position < len(self.node_matrix))\n and (x_position, y_position) != node.matrix_center)\n ]\n\n for (y_position, x_position, direction) in possible_neighbors:\n node.neighbors.append((self.node_matrix[y_position][x_position], direction))\n\n def delete_object(self, object_position: (int, int)):\n #TODO: could search in a square around the puck's location\n # instead of the whole matrix\n puck = self.get_node_from_pixel(object_position)\n _uuid = puck.uuid\n\n if _uuid is not None:\n for row in self.node_matrix:\n for node in row:\n if _uuid in node.held_by:\n node.held_by.remove(_uuid)\n if len(node.held_by) == 0:\n node.uuid = None\n node.role = TileRole.EMPTY\n else:\n node.uuid = list(node.held_by)[0]\n elif _uuid is None:\n raise Exception(\"trying to delete an empty cell\")\n\n def create_round_obstacle(self, obstacle: (int, int), radius, role, obstacle_uuid):\n width, height = obstacle\n lower_range_column = int(max(0, ((height - radius) // self.node_size)))\n lower_range_row = int(max(0, ((width - radius) // self.node_size)))\n higher_range_column = int(min(len(self.node_matrix),\n ((height + radius) // self.node_size) + 1))\n higher_range_row = int(min(len(self.node_matrix[0]),\n ((width + radius) // self.node_size) + 1))\n\n for column in range(lower_range_column, higher_range_column):\n for row in range(lower_range_row, higher_range_row):\n node = self.get_node_from_matrix_coordinates((row, column))\n\n distance = get_distance(obstacle, node.pixel_coordinates_center)\n\n if distance < radius:\n node.role = TileRole.CUSHION\n node.uuid = obstacle_uuid\n node.held_by.add(obstacle_uuid)\n\n obstacle_node = self.get_node_from_pixel(obstacle)\n obstacle_node.role = role\n obstacle_node.uuid = obstacle_uuid\n obstacle_node.held_by.add(obstacle_uuid)\n\n def create_square_obstacle(self, obstacle: (int, int), length, role, obstacle_uuid):\n width, height = obstacle\n lower_range_column = int(max(0, ((height - length) // self.node_size)))\n lower_range_row = int(max(0, ((width - length) // self.node_size)))\n higher_range_column = int(min(len(self.node_matrix), ((height + length) // self.node_size)))\n higher_range_row = int(min(len(self.node_matrix[0]), ((width + length) // self.node_size)))\n\n\n for column in range(lower_range_column, higher_range_column):\n for row in range(lower_range_row, higher_range_row):\n node = self.get_node_from_matrix_coordinates((row, column))\n node.role = TileRole.CUSHION\n node.uuid = obstacle_uuid\n node.held_by.add(obstacle_uuid)\n\n obstacle_node = 
self.get_node_from_pixel(obstacle)\n obstacle_node.role = role\n obstacle_node.uuid = obstacle_uuid\n obstacle_node.held_by.add(obstacle_uuid)\n\n def set_obstacle(self, obstacle):\n obstacle_uuid = uuid.uuid4()\n if self.obstacle_representation is ObstacleRepresentation.RADIUS:\n self.create_round_obstacle(obstacle, self.obstacle_cushion_width, TileRole.OBSTACLE, obstacle_uuid)\n elif self.obstacle_representation is ObstacleRepresentation.SQUARE:\n self.create_square_obstacle(obstacle, self.obstacle_cushion_width, TileRole.OBSTACLE, obstacle_uuid)\n else:\n self.create_round_obstacle(obstacle, self.obstacle_cushion_width, TileRole.OBSTACLE, obstacle_uuid)\n\n def set_puck(self, puck):\n obstacle_uuid = uuid.uuid4()\n if self.obstacle_representation is ObstacleRepresentation.RADIUS:\n self.create_round_obstacle(puck, self.obstacle_puck_width, TileRole.PUCK, obstacle_uuid)\n elif self.obstacle_representation is ObstacleRepresentation.SQUARE:\n self.create_square_obstacle(puck, self.obstacle_puck_width, TileRole.PUCK, obstacle_uuid)\n else:\n self.create_round_obstacle(puck, self.obstacle_puck_width, TileRole.PUCK, obstacle_uuid)\n\n def get_node_matrix(self):\n \"\"\"Gets the node matrix\"\"\"\n return self.node_matrix\n\n def get_node_from_pixel(self, pixel):\n \"\"\"Gets a node using any of the pixels the node should cover\"\"\"\n x_position = pixel[0] // self.node_size\n y_position = pixel[1] // self.node_size\n return self.node_matrix[y_position][x_position]\n\n def get_node_from_matrix_coordinates(self, coordinates):\n \"\"\"Gets a node using its position in the node matrix\"\"\"\n x_position, y_position = coordinates\n return self.node_matrix[y_position][x_position]\n\n\ndef get_distance(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n return math.sqrt(pow(x2-x1, 2) + pow(y2-y1, 2))\n","sub_path":"scripts/src/pathfinding/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":12013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"579386755","text":"\"\"\"raven URL Configuration\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n\n# Django\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\n# Local Django\nfrom raven.views import (\n DocumentationView, LandingView, LogoutView,\n RegisterView, IndexView, RegistrationRequestView\n)\n\n\nurlpatterns = [\n # Admin\n url(r'^admin/', admin.site.urls),\n\n # Documentation\n url(r'^docs/$', DocumentationView.as_view(), name='docs'),\n url(r'^docs/(?P.*)$', DocumentationView.as_view(), name='docs'),\n\n # Pages\n url(r'^$', LandingView.as_view(), name='landing'),\n url(r'^logout/$', LogoutView.as_view(), name='logout'),\n url(r'^register/$', RegisterView.as_view(), name='register'),\n url(r'^index/$', IndexView.as_view(), name='index'),\n url(\n r'^registration/requests/$',\n RegistrationRequestView.as_view(), name='registration-request'\n )\n]\n","sub_path":"raven/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"582787564","text":"\nimport heapq\n\ndef power(index):\n pq = []\n hash = set()\n maxNum = 2 ** 31 - 1\n k = 0\n for i in range(2, index + 3):\n for j in range(2, index + 3):\n k += 1\n num = i ** j\n if num > maxNum:\n break\n if num in hash:\n continue\n hash.add(num)\n heapq.heappush(pq, num)\n\n # pop out the numbers in the range of [0, index]\n res = None\n for i in range(index + 1):\n res = heapq.heappop(pq)\n return res\n\n\nres = power(0)\nprint(res) # 4\n\nres = power(3)\nprint(res) # 16\n\n\nres = power(8)\nprint(res) # 49\n\nres = power(22)\nprint(res) # 289\n\nres = power(100)\nprint(res) # 6724\n\n\n\n\n","sub_path":"wepay/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"276102380","text":"import numpy as np\nimport itertools as it\n\nfrom helpers import *\n\nfrom mobject import Mobject, Group\nfrom mobject.vectorized_mobject import VMobject\nfrom mobject.tex_mobject import TextMobject\nfrom animation import Animation\nfrom animation import sync_animation_run_times_and_rate_funcs\nfrom transform import Transform\n\nclass Rotating(Animation):\n CONFIG = {\n \"axis\" : OUT,\n \"radians\" : 2*np.pi,\n \"run_time\" : 5,\n \"rate_func\" : None,\n \"in_place\" : True,\n \"about_point\" : None,\n \"about_edge\" : None,\n }\n def update_submobject(self, submobject, starting_submobject, alpha):\n submobject.points = np.array(starting_submobject.points)\n\n def update_mobject(self, alpha):\n Animation.update_mobject(self, alpha)\n about_point = None\n if self.about_point is not None:\n about_point = self.about_point\n elif self.in_place: #This is superseeded\n self.about_point = self.mobject.get_center()\n self.mobject.rotate(\n alpha*self.radians, \n axis = self.axis,\n about_point = self.about_point,\n about_edge = self.about_edge,\n )\n\nclass ShowPartial(Animation):\n def update_submobject(self, submobject, starting_submobject, alpha):\n submobject.pointwise_become_partial(\n starting_submobject, *self.get_bounds(alpha)\n )\n\n def get_bounds(self, alpha):\n raise Exception(\"Not Implemented\")\n\nclass ShowCreation(ShowPartial):\n CONFIG = {\n \"submobject_mode\" : \"one_at_a_time\",\n }\n def get_bounds(self, alpha):\n return (0, alpha)\n\nclass Uncreate(ShowCreation):\n CONFIG = {\n \"rate_func\" : lambda t : smooth(1-t),\n \"remover\" : True\n }\n\nclass Write(ShowCreation):\n CONFIG = {\n \"rate_func\" : 
None,\n \"submobject_mode\" : \"lagged_start\",\n }\n def __init__(self, mob_or_text, **kwargs):\n digest_config(self, kwargs) \n if isinstance(mob_or_text, str):\n mobject = TextMobject(mob_or_text)\n else:\n mobject = mob_or_text\n if \"run_time\" not in kwargs:\n self.establish_run_time(mobject)\n if \"lag_factor\" not in kwargs:\n if len(mobject.family_members_with_points()) < 4:\n min_lag_factor = 1\n else:\n min_lag_factor = 2\n self.lag_factor = max(self.run_time - 1, min_lag_factor)\n ShowCreation.__init__(self, mobject, **kwargs)\n\n def establish_run_time(self, mobject):\n num_subs = len(mobject.family_members_with_points())\n if num_subs < 15:\n self.run_time = 1\n else:\n self.run_time = 2\n\nclass DrawBorderThenFill(Animation):\n CONFIG = {\n \"run_time\" : 2,\n \"stroke_width\" : 2,\n \"stroke_color\" : None,\n \"rate_func\" : double_smooth,\n }\n def __init__(self, vmobject, **kwargs):\n if not isinstance(vmobject, VMobject):\n raise Exception(\"DrawBorderThenFill only works for VMobjects\")\n self.reached_halfway_point_before = False\n Animation.__init__(self, vmobject, **kwargs)\n\n def update_submobject(self, submobject, starting_submobject, alpha):\n submobject.pointwise_become_partial(\n starting_submobject, 0, min(2*alpha, 1)\n )\n if alpha < 0.5:\n if self.stroke_color:\n color = self.stroke_color \n elif starting_submobject.stroke_width > 0:\n color = starting_submobject.get_stroke_color()\n else:\n color = starting_submobject.get_color()\n submobject.set_stroke(color, width = self.stroke_width)\n submobject.set_fill(opacity = 0)\n else:\n if not self.reached_halfway_point_before:\n self.reached_halfway_point_before = True\n submobject.points = np.array(starting_submobject.points)\n width, opacity = [\n interpolate(start, end, 2*alpha - 1)\n for start, end in [\n (self.stroke_width, starting_submobject.get_stroke_width()),\n (0, starting_submobject.get_fill_opacity())\n ]\n ]\n submobject.set_stroke(width = width)\n submobject.set_fill(opacity = opacity)\n\nclass ShowPassingFlash(ShowPartial):\n CONFIG = {\n \"time_width\" : 0.1,\n \"remover\" : True,\n }\n def get_bounds(self, alpha):\n alpha *= (1+self.time_width)\n alpha -= self.time_width/2.0\n lower = max(0, alpha - self.time_width/2.0)\n upper = min(1, alpha + self.time_width/2.0)\n return (lower, upper)\n\n def clean_up(self, *args, **kwargs):\n ShowPartial.clean_up(self, *args, **kwargs)\n for submob, start_submob in self.get_all_families_zipped():\n submob.pointwise_become_partial(start_submob, 0, 1)\n\nclass ShowCreationThenDestruction(ShowPassingFlash):\n CONFIG = {\n \"time_width\" : 2.0,\n \"run_time\" : 1,\n }\n\nclass Homotopy(Animation):\n CONFIG = {\n \"run_time\" : 3,\n \"apply_function_kwargs\" : {},\n }\n def __init__(self, homotopy, mobject, **kwargs):\n \"\"\"\n Homotopy a function from (x, y, z, t) to (x', y', z')\n \"\"\"\n def function_at_time_t(t):\n return lambda p : homotopy(p[0], p[1], p[2], t)\n self.function_at_time_t = function_at_time_t\n digest_config(self, kwargs)\n Animation.__init__(self, mobject, **kwargs)\n\n def update_submobject(self, submob, start, alpha):\n submob.points = start.points\n submob.apply_function(\n self.function_at_time_t(alpha),\n **self.apply_function_kwargs\n )\n\nclass SmoothedVectorizedHomotopy(Homotopy):\n def update_submobject(self, submob, start, alpha):\n Homotopy.update_submobject(self, submob, start, alpha)\n submob.make_smooth()\n\nclass ApplyWave(Homotopy):\n CONFIG = {\n \"direction\" : DOWN,\n \"amplitude\" : 0.2,\n \"run_time\" : 1,\n 
\"apply_function_kwargs\" : {\n \"maintain_smoothness\" : False,\n },\n }\n def __init__(self, mobject, **kwargs):\n digest_config(self, kwargs, locals())\n left_x = mobject.get_left()[0]\n right_x = mobject.get_right()[0]\n vect = self.amplitude*self.direction\n def homotopy(x, y, z, t):\n start_point = np.array([x, y, z])\n alpha = (x-left_x)/(right_x-left_x)\n power = np.exp(2*(alpha-0.5))\n nudge = there_and_back(t**power)\n return np.array([x, y, z]) + nudge*vect\n Homotopy.__init__(self, homotopy, mobject, **kwargs)\n\nclass PhaseFlow(Animation):\n CONFIG = {\n \"virtual_time\" : 1,\n \"rate_func\" : None,\n }\n def __init__(self, function, mobject, **kwargs):\n digest_config(self, kwargs, locals()) \n Animation.__init__(self, mobject, **kwargs)\n\n def update_mobject(self, alpha):\n if hasattr(self, \"last_alpha\"):\n dt = self.virtual_time*(alpha-self.last_alpha)\n self.mobject.apply_function(\n lambda p : p + dt*self.function(p)\n )\n self.last_alpha = alpha\n\nclass MoveAlongPath(Animation):\n def __init__(self, mobject, path, **kwargs):\n digest_config(self, kwargs, locals())\n Animation.__init__(self, mobject, **kwargs)\n\n def update_mobject(self, alpha):\n point = self.path.point_from_proportion(alpha)\n self.mobject.move_to(point)\n\nclass UpdateFromFunc(Animation):\n \"\"\"\n update_function of the form func(mobject), presumably\n to be used when the state of one mobject is dependent\n on another simultaneously animated mobject\n \"\"\"\n def __init__(self, mobject, update_function, **kwargs):\n digest_config(self, kwargs, locals())\n Animation.__init__(self, mobject, **kwargs)\n\n def update_mobject(self, alpha):\n self.update_function(self.mobject)\n\nclass UpdateFromAlphaFunc(UpdateFromFunc):\n def update_mobject(self, alpha):\n self.update_function(self.mobject, alpha)\n\nclass MaintainPositionRelativeTo(Animation):\n CONFIG = {\n \"tracked_critical_point\" : ORIGIN\n }\n def __init__(self, mobject, tracked_mobject, **kwargs):\n digest_config(self, kwargs, locals())\n tcp = self.tracked_critical_point\n self.diff = mobject.get_critical_point(tcp) - \\\n tracked_mobject.get_critical_point(tcp)\n Animation.__init__(self, mobject, **kwargs)\n\n def update_mobject(self, alpha):\n self.mobject.shift(\n self.tracked_mobject.get_critical_point(self.tracked_critical_point) - \\\n self.mobject.get_critical_point(self.tracked_critical_point) + \\\n self.diff\n )\n\nclass WiggleOutThenIn(Animation):\n CONFIG = {\n \"scale_value\" : 1.1,\n \"rotation_angle\" : 0.01*TAU,\n \"n_wiggles\" : 6,\n \"run_time\" : 2,\n \"scale_about_point\" : None,\n \"rotate_about_point\" : None,\n }\n def __init__(self, mobject, **kwargs):\n digest_config(self, kwargs)\n if self.scale_about_point is None:\n self.scale_about_point = mobject.get_center()\n if self.rotate_about_point is None:\n self.rotate_about_point = mobject.get_center()\n Animation.__init__(self, mobject, **kwargs)\n\n def update_submobject(self, submobject, starting_sumobject, alpha):\n submobject.points[:,:] = starting_sumobject.points\n submobject.scale(\n interpolate(1, self.scale_value, there_and_back(alpha)),\n about_point = self.scale_about_point\n )\n submobject.rotate(\n wiggle(alpha, self.n_wiggles)*self.rotation_angle,\n about_point = self.rotate_about_point\n )\n\n### Animation modifiers ###\n\nclass ApplyToCenters(Animation):\n def __init__(self, AnimationClass, mobjects, **kwargs):\n full_kwargs = AnimationClass.CONFIG\n full_kwargs.update(kwargs)\n full_kwargs[\"mobject\"] = Mobject(*[\n mob.get_point_mobject()\n for mob 
in mobjects\n ])\n self.centers_container = AnimationClass(**full_kwargs)\n full_kwargs.pop(\"mobject\")\n Animation.__init__(self, Mobject(*mobjects), **full_kwargs)\n self.name = str(self) + AnimationClass.__name__\n\n def update_mobject(self, alpha):\n self.centers_container.update_mobject(alpha)\n center_mobs = self.centers_container.mobject.split()\n mobjects = self.mobject.split() \n for center_mob, mobject in zip(center_mobs, mobjects):\n mobject.shift(\n center_mob.get_center()-mobject.get_center()\n )\n\nclass LaggedStart(Animation):\n CONFIG = {\n \"run_time\" : 2,\n \"lag_ratio\" : 0.5,\n }\n def __init__(self, AnimationClass, mobject, arg_creator = None, **kwargs):\n digest_config(self, kwargs)\n for key in \"rate_func\", \"run_time\", \"lag_ratio\":\n if key in kwargs:\n kwargs.pop(key)\n if arg_creator is None:\n arg_creator = lambda mobject : (mobject,)\n self.subanimations = [\n AnimationClass(\n *arg_creator(submob),\n run_time = self.run_time,\n rate_func = squish_rate_func(\n self.rate_func, beta, beta + self.lag_ratio\n ),\n **kwargs\n )\n for submob, beta in zip(\n mobject, \n np.linspace(0, 1-self.lag_ratio, len(mobject))\n )\n ]\n Animation.__init__(self, mobject, **kwargs)\n\n def update(self, alpha):\n for anim in self.subanimations:\n anim.update(alpha)\n return self\n\n def clean_up(self, *args, **kwargs):\n for anim in self.subanimations:\n anim.clean_up(*args, **kwargs)\n\nclass Succession(Animation):\n CONFIG = {\n \"rate_func\" : None,\n }\n def __init__(self, *args, **kwargs):\n \"\"\"\n Each arg will either be an animation, or an animation class \n followed by its arguments (and potentially a dict for \n configuration).\n\n For example, \n Succession(\n ShowCreation(circle),\n Transform, circle, square,\n Transform, circle, triangle,\n ApplyMethod, circle.shift, 2*UP, {\"run_time\" : 2},\n )\n \"\"\"\n animations = []\n state = {\n \"animations\" : animations,\n \"curr_class\" : None,\n \"curr_class_args\" : [],\n \"curr_class_config\" : {},\n }\n def invoke_curr_class(state):\n if state[\"curr_class\"] is None:\n return\n anim = state[\"curr_class\"](\n *state[\"curr_class_args\"], \n **state[\"curr_class_config\"]\n )\n state[\"animations\"].append(anim)\n anim.update(1)\n state[\"curr_class\"] = None\n state[\"curr_class_args\"] = []\n state[\"curr_class_config\"] = {}\n\n for arg in args:\n if isinstance(arg, Animation):\n animations.append(arg)\n arg.update(1)\n invoke_curr_class(state)\n elif isinstance(arg, type) and issubclass(arg, Animation):\n invoke_curr_class(state)\n state[\"curr_class\"] = arg\n elif isinstance(arg, dict):\n state[\"curr_class_config\"] = arg\n else:\n state[\"curr_class_args\"].append(arg)\n invoke_curr_class(state)\n for anim in animations:\n anim.update(0)\n\n animations = filter (lambda x : not(x.empty), animations)\n\n self.run_times = [anim.run_time for anim in animations]\n if \"run_time\" in kwargs:\n run_time = kwargs.pop(\"run_time\")\n else:\n run_time = sum(self.run_times)\n self.num_anims = len(animations)\n if self.num_anims == 0:\n self.empty = True\n self.animations = animations\n #Have to keep track of this run_time, because Scene.play\n #might very well mess with it.\n self.original_run_time = run_time\n\n # critical_alphas[i] is the start alpha of self.animations[i]\n # critical_alphas[i + 1] is the end alpha of self.animations[i]\n critical_times = np.concatenate(([0], np.cumsum(self.run_times)))\n self.critical_alphas = map (lambda x : np.true_divide(x, run_time), critical_times) if self.num_anims > 0 else 
[0.0]\n\n # self.scene_mobjects_at_time[i] is the scene's mobjects at start of self.animations[i]\n # self.scene_mobjects_at_time[i + 1] is the scene mobjects at end of self.animations[i]\n self.scene_mobjects_at_time = [None for i in range(self.num_anims + 1)]\n self.scene_mobjects_at_time[0] = Group()\n for i in range(self.num_anims):\n self.scene_mobjects_at_time[i + 1] = self.scene_mobjects_at_time[i].copy()\n self.animations[i].clean_up(self.scene_mobjects_at_time[i + 1])\n\n self.current_alpha = 0\n self.current_anim_index = 0 # If self.num_anims == 0, this is an invalid index, but so it goes\n if self.num_anims > 0:\n self.mobject = self.scene_mobjects_at_time[0]\n self.mobject.add(self.animations[0].mobject)\n else:\n self.mobject = Group()\n\n Animation.__init__(self, self.mobject, run_time = run_time, **kwargs)\n\n # Beware: This does NOT take care of calling update(0) on the subanimation.\n # This was important to avoid a pernicious possibility in which subanimations were called\n # with update(0) twice, which could in turn call a sub-Succession with update(0) four times,\n # continuing exponentially.\n def jump_to_start_of_anim(self, index):\n if index != self.current_anim_index:\n self.mobject.remove(*self.mobject.submobjects) # Should probably have a cleaner \"remove_all\" method...\n for m in self.scene_mobjects_at_time[index].submobjects:\n self.mobject.add(m)\n self.mobject.add(self.animations[index].mobject)\n\n self.current_anim_index = index\n self.current_alpha = self.critical_alphas[index]\n\n def update_mobject(self, alpha):\n if self.num_anims == 0:\n return\n\n i = 0\n while self.critical_alphas[i + 1] < alpha:\n i = i + 1\n # TODO: Special handling if alpha < 0 or alpha > 1, to use\n # first or last sub-animation\n\n # At this point, we should have self.critical_alphas[i] <= alpha <= self.critical_alphas[i +1]\n\n self.jump_to_start_of_anim(i)\n sub_alpha = inverse_interpolate(\n self.critical_alphas[i], \n self.critical_alphas[i + 1], \n alpha\n )\n self.animations[i].update(sub_alpha)\n\n def clean_up(self, *args, **kwargs):\n # We clean up as though we've played ALL animations, even if\n # clean_up is called in middle of things\n for anim in self.animations:\n anim.clean_up(*args, **kwargs)\n\nclass AnimationGroup(Animation):\n CONFIG = {\n \"rate_func\" : None\n }\n def __init__(self, *sub_anims, **kwargs):\n digest_config(self, kwargs, locals())\n sub_anims = filter (lambda x : not(x.empty), sub_anims)\n if len(sub_anims) == 0:\n self.empty = True\n self.run_time = 0\n else:\n # Should really make copies of animations, instead of messing with originals...\n sync_animation_run_times_and_rate_funcs(*sub_anims, **kwargs)\n self.run_time = max([a.run_time for a in sub_anims])\n everything = Mobject(*[a.mobject for a in sub_anims])\n Animation.__init__(self, everything, **kwargs)\n\n def update_mobject(self, alpha):\n for anim in self.sub_anims:\n anim.update(alpha)\n\n def clean_up(self, *args, **kwargs):\n for anim in self.sub_anims:\n anim.clean_up(*args, **kwargs)\n\nclass EmptyAnimation(Animation):\n CONFIG = {\n \"run_time\" : 0,\n \"empty\" : True\n }\n\n def __init__(self, *args, **kwargs):\n return Animation.__init__(self, Group(), *args, **kwargs)\n","sub_path":"animation/simple_animations.py","file_name":"simple_animations.py","file_ext":"py","file_size_in_byte":18294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"234603007","text":"import numpy as np\ndef len_path(put):\n lenght = 0\n (x, y) = 
put[0]\n put = put[1:] # keep the final point so the last segment is counted (the old slice dropped it)\n for (m, k) in put:\n if m != x and k != y:\n lenght += np.sqrt(2)\n print('+sq2')\n else:\n lenght += 1\n print('+1')\n (x, y) = (m, k)\n return lenght\n\npath = [(7, 11), (7, 12), (7, 13), (7, 14), (7, 15), (6, 16), (6, 17), (6, 18), (7, 19)]\n\nlen_path(path)","sub_path":"ALEX@JENA/lenp.py","file_name":"lenp.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"289301931","text":"# Create a matplotlib chart with title 'My Chart'.\n#\n# .. pyvista-plot::\n#\nimport pyvista\nimport matplotlib.pyplot as plt\nf, ax = plt.subplots()\n_ = ax.plot([0, 1, 2], [2, 1, 3])\nchart = pyvista.ChartMPL(f)\nchart.title = 'My Chart'\nchart.show()\n","sub_path":"version/0.37/api/plotting/charts/_autosummary/pyvista-ChartMPL-15.py","file_name":"pyvista-ChartMPL-15.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"439436189","text":"# l = [0,1,2]\n# print(len(l))\n\n# 3. Use map on the list of strings to turn every name in the list into a _sb name, e.g. alex_sb\n#\nname=['alex','wupeiqi','yuanhao','nezha']\n# def func(item):\n# return item+'_sb'\n# ret = map(func,name) #ret is an iterator\n# for i in ret:\n# print(i)\n# print(list(ret))\n\nret = map(lambda x:x+'_sb',name)\nprint(list(ret))\n\n\n# 4. Use the filter function on the list of numbers to pick out all the even ones\nnum = [1,3,5,6,7,8]\nos = filter(lambda x:x%2==0,num)\nprint(list(os))\n\n# num = [1,3,5,6,7,8]\n# def func(x):\n# if x%2 == 0:\n# return True\n# ret = filter(func,num) #ret is an iterator\n# print(list(ret))\n#\n# ret = filter(lambda x:x%2 == 0,num)\n# ret = filter(lambda x:True if x%2 == 0 else False,num)\n# print(list(ret))\n\n# 6. As below: in each small dict, name is the stock name, shares the number of shares, price the share price\nportfolio = [\n {'name': 'IBM', 'shares': 100, 'price': 91.1},\n {'name': 'AAPL', 'shares': 50, 'price': 543.22},\n {'name': 'FB', 'shares': 200, 'price': 21.09},\n {'name': 'HPQ', 'shares': 35, 'price': 31.75},\n {'name': 'YHOO', 'shares': 45, 'price': 16.35},\n {'name': 'ACME', 'shares': 75, 'price': 115.65}\n]\n\n# 6.1. Compute the total cost of buying each stock\n# sum = map(lambda dic:dic{dic{'name'}:dic{'shares'}*dic{'price'}},portfolio)\n# print(list(sum))\n\nret = map(lambda dic : {dic['name']:round(dic['shares']*dic['price'],2)},portfolio)\nprint(list(ret))\n\n\n\n\n","sub_path":"venv/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"536834180","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\n\nimport six\nimport requests\n\nfrom . import sig\nfrom . 
import util\n\n__version__ = '0.3'\n\n\nclass Open189Error(RuntimeError):\n def __init__(self, status_code, res_code, message):\n msg = '[HTTP {} res_code {}] {}'.format(status_code, res_code, message)\n super(Open189Error, self).__init__(msg)\n self.status_code = status_code\n self.res_code = res_code\n\n\ndef _process_response(res):\n '''Processes the API response, raising exception if that's the case.'''\n\n result = res.json()\n res_code = int(result.get('res_code', -1)) # fxxk this can be string\n if res.status_code != 200 or res_code != 0:\n raise Open189Error(\n res.status_code,\n res_code,\n result.get('res_message', None),\n )\n\n return result\n\n\nclass Open189App(object):\n '''SDK client for the open.189.cn platform.'''\n\n def __init__(self, app_id, secret, access_token=None):\n self.app_id = util.force_binary(app_id)\n self.secret = util.force_binary(secret)\n if access_token is not None:\n self.access_token = util.force_binary(access_token)\n else:\n self.access_token = None\n\n def _prepare_request_params(self, params):\n '''Prepares request parameters for sending (not needed for OAuth\n requests).\n\n Note that the method is destructive; the parameters dict is modified\n along the way. Since the params dict is never meant to be re-used, this\n is considered somewhat acceptable.\n\n '''\n\n params['app_id'] = self.app_id\n # NOTE: requests wouldn't add the field if value is None, so we can\n # blindly pass the value.\n params['access_token'] = self.access_token\n params['timestamp'] = util.get_timestamp()\n\n # sign the request\n sign = sig.sign(params, self.secret)\n params['sign'] = sign\n\n return params\n\n def _perform_get_sync(self, url, params, prepare=True):\n '''Performs a blocking API GET request to the specified endpoint with\n the specified parameters, raising exceptions appropriately.'''\n\n data = self._prepare_request_params(params) if prepare else params\n return _process_response(requests.get(url, params=data))\n\n def _perform_post_sync(self, url, params, prepare=True):\n '''Performs a blocking API POST request to the specified endpoint with\n the specified parameters, raising exceptions appropriately.'''\n\n data = self._prepare_request_params(params) if prepare else params\n return _process_response(requests.post(url, data=data))\n\n def _perform_access_token_req(self, **kwargs):\n kwargs['app_id'] = self.app_id\n kwargs['app_secret'] = self.secret\n kwargs['state'] = util.get_random_state_str()\n\n return self._perform_post_sync(\n 'https://oauth.api.189.cn/emp/oauth2/v3/access_token',\n kwargs,\n False,\n )\n\n def get_access_token_ac(self, code, redirect_uri):\n '''Gets an access token with the Authorization Code flow.\n\n Access token parameter is ignored.\n\n '''\n\n return self._perform_access_token_req(\n grant_type='authorization_code',\n code=code,\n redirect_uri=redirect_uri,\n )\n\n def get_access_token_cc(self):\n '''Gets a user-independent access token with the Client Credentials\n flow.\n\n Access token parameter is ignored.\n\n '''\n\n return self._perform_access_token_req(\n grant_type='client_credentials',\n )\n\n def refresh_access_token(self, refresh_token):\n '''Refresh access token using a previously returned refresh token.\n\n Access token parameter is ignored.\n\n '''\n\n return self._perform_access_token_req(\n grant_type='refresh_token',\n refresh_token=refresh_token,\n )\n\n def sms_get_token(self):\n '''Gets a token for sending verification SMS.\n\n Access token is required.\n\n '''\n\n return self._perform_get_sync(\n 
'http://api.189.cn/v2/dm/randcode/token',\n {},\n )\n\n def sms_send_verification_sms(\n self,\n token,\n phone,\n code=None,\n callback_url=None,\n exp_time_min=None,\n ):\n '''Sends a verification SMS to the specified phone.\n\n Needs a token obtained with :meth:`sms_get_token`. Expiry time is in\n minutes; defaults to 5 minutes if not specified.\n\n The platform supports both platform-generated and custom verification\n codes. For the platform to generate the code for you, set the code\n parameter to ``None`` and provide a callback URL for receiving the\n generated code. Otherwise a string comprised of 6 digits must be\n provided, and the callback URL is ignored.\n\n Access token is required.\n\n '''\n\n params = {\n 'token': token,\n 'phone': phone,\n }\n if exp_time_min is not None:\n params['exp_time'] = str(exp_time_min)\n\n if code is None:\n if callback_url is None:\n raise ValueError(\n 'callback URL is required for platform-generated '\n 'verification code'\n )\n\n endpoint = 'http://api.189.cn/v2/dm/randcode/send'\n params['url'] = callback_url\n else:\n if len(code) != 6 or not code.isdigit():\n raise ValueError('only 6-digit string code is supported')\n\n endpoint = 'http://api.189.cn/v2/dm/randcode/sendSms'\n params['randcode'] = code\n\n return self._perform_post_sync(\n endpoint,\n params,\n )\n\n def sms_send_template(self, phone, template_id, template_params):\n '''Sends a template SMS to the specified phone.\n\n Access token is required.\n\n '''\n\n template_params_json = util.json_dumps_compact(template_params)\n params = {\n 'acceptor_tel': phone,\n 'template_id': template_id,\n 'template_param': template_params_json,\n }\n\n return self._perform_post_sync(\n 'http://api.189.cn/v2/emp/templateSms/sendSms',\n params,\n )\n","sub_path":"pyopen189/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"475036475","text":"#! 
/usr/bin/python\n# -*-coding:utf-8 -*-\nfrom appium import webdriver\nfrom time import sleep\nimport unittest\n#@classmethod\n\n\n##################### Object-oriented #####################\nclass DS(unittest.TestCase):\n ## Parameters for connecting the test script to the Appium server\n d = {\n \"device\": \"android\",\n \"platformName\": \"Android\",\n \"platformVersion\": \"8.1.0\",\n \"deviceName\": \"3634c4cc\",\n \"appPackage\": \"com.qk.butterfly\",\n \"appActivity\": \".main.LauncherActivity\",\n \"noReset\": \"True\"\n }\n def setUp(self):\n self.dr = webdriver.Remote('http://127.0.0.1:4723/wd/hub',desired_capabilities=self.d)\n\n sleep(5.0)\n ## Function/method that checks those four characters\n def test_1(self):\n text = self.dr.find_element_by_id('com.qk.butterfly:id/v_login_wx').find_element_by_class_name('android.widget.TextView').text\n print(text)\n # assertion\n self.assertEqual(text,'微信')\n #\n # def tearDown(self):\n # self.dr.quit()\n def tearDown(self):\n self.dr.quit()\nif __name__ == '__main__':\n unittest.main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"untitled/__KKK__/kkk__kk.py","file_name":"kkk__kk.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"400940106","text":"from ...datasources import revision\nfrom ...errors import RevisionDocumentNotFound\nfrom ...features import Feature\nfrom ..meta.infonoise import Infonoise\nfrom ..meta.regex_extractors import TextRegexExtractor\nfrom ..meta.token_filter import TokenFilter\nfrom .util import token_is_word\n\n\ndef raise_rnf():\n raise RevisionDocumentNotFound()\n\nclass Revision:\n \"\"\"\n Implements a set of features based on the revision of interest.\n \"\"\"\n\n DATASOURCE_MODULE = revision\n MODULE_NAME = \"revision\"\n\n def __init__(self, language, error_if_missing=False):\n self.language = language\n self.prefix = language.__name__ + \".\" + self.MODULE_NAME + \".\"\n\n self.words_list = TokenFilter(\n self.prefix + \"words\",\n self.DATASOURCE_MODULE.tokens,\n token_is_word,\n if_none=raise_rnf if error_if_missing else []\n )\n \"\"\"\n Returns a list of word tokens.\n \"\"\"\n\n self.words = Feature(\n self.prefix + \"words\", len,\n returns=int,\n depends_on=[self.words_list]\n )\n \"\"\"\n A count of the number of words in the revision.\n \"\"\"\n\n self.content_words_list = TokenFilter(\n self.prefix + \"content_words\",\n self.DATASOURCE_MODULE.content_tokens,\n token_is_word,\n if_none=raise_rnf if error_if_missing else []\n )\n \"\"\"\n Returns a list of words that appear in the (non-markup) content of the\n revision.\n \"\"\"\n\n self.content_words = Feature(\n self.prefix + \"content_words\", len,\n returns=int,\n depends_on=[self.content_words_list]\n )\n \"\"\"\n A count of the number of words in the (non-markup) content of the\n revision.\n \"\"\"\n\n if language.resources.stopwords is not None and \\\n language.resources.stemmer is not None:\n self.infonoise = Infonoise(\n self.prefix + \"infonoise\",\n language.resources.stopwords,\n language.resources.stemmer.stem,\n self.content_words_list\n )\n \"\"\"\n Returns a score measuring the proportion of text remaining after\n filtering markup and stopwords and stemming the remaining words.\n This feature is commonly used in quality prediction.\n \"\"\"\n\n if language.resources.badwords is not None:\n self.badwords_list = TextRegexExtractor(\n 
self.prefix + \"badwords\",\n self.DATASOURCE_MODULE.text,\n language.resources.badwords,\n if_none=raise_rnf if error_if_missing else None\n )\n \"\"\"\n Returns a list of the badwords that appear in the text.\n \"\"\"\n\n self.badwords = Feature(\n self.prefix + \"badwords\", len,\n returns=int,\n depends_on=[self.badwords_list]\n )\n \"\"\"\n Returns a count of the badwords that appear in the text.\n \"\"\"\n\n if language.resources.informals is not None:\n self.informals_list = TextRegexExtractor(\n self.prefix + \"informals\",\n self.DATASOURCE_MODULE.text,\n language.resources.informals,\n if_none=raise_rnf if error_if_missing else None\n )\n \"\"\"\n Returns a list of the informal words that appear in the text\n \"\"\"\n\n self.informals = Feature(\n self.prefix + \"informals\", len,\n returns=int,\n depends_on=[self.informals_list]\n )\n \"\"\"\n Returns a count of the informal words that appear in the text\n \"\"\"\n\n if language.resources.dictionary is not None:\n self.misspellings_list = TokenFilter(\n self.prefix + \"misspellings\",\n self.words_list,\n self.not_in_dictionary\n )\n \"\"\"\n Returns a list of the misspellings that appear in the text\n \"\"\"\n\n self.misspellings = Feature(\n self.prefix + \"misspellings\", len,\n returns=int,\n depends_on=[self.misspellings_list]\n )\n \"\"\"\n Returns a count of the misspellings that appear in the text\n \"\"\"\n\n def in_dictionary(self, word):\n return self.language.resources.dictionary.check(str(word))\n\n def not_in_dictionary(self, word):\n return not self.language.resources.dictionary.check(str(word))\n","sub_path":"revscoring/languages/space_delimited/revision.py","file_name":"revision.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"213140449","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom V19_4D_Parameters import TransferMatrixX, TransferMatrixY\r\n\r\n\r\n\r\ndef PhaseSpaceSimple4D(params):\r\n \r\n betaX = params['betaX']\r\n alphaX = params['alphaX']\r\n emitX = params['emitX']\r\n \r\n betaY = params['betaY']\r\n alphaY = params['alphaY']\r\n emitY = params['emitY']\r\n \r\n emitScale = np.diag([emitX, emitX, emitY, emitY])\r\n \r\n psCoordsNorm = np.random.normal(size = (4, params['Nparticles']))\r\n \r\n phaseSpaceCoordsNorm = np.matmul(np.sqrt(emitScale), psCoordsNorm)\r\n \r\n normInverse = np.array([ [np.sqrt(betaX), 0.0, 0.0, 0.0],\r\n [(-alphaX/np.sqrt(betaX)), (1/np.sqrt(betaX)), 0.0, 0.0], \r\n [0.0, 0.0, np.sqrt(betaY), 0.0],\r\n [0.0, 0.0, (-alphaY/np.sqrt(betaY)), (1/np.sqrt(betaY))]])\r\n #print(normInverse)\r\n \r\n \r\n phaseSpaceCoords = np.matmul(normInverse, phaseSpaceCoordsNorm)\r\n \r\n centroid = np.diag([params['X0'], params['PX0'], params['Y0'], params['PY0']])\r\n \r\n centroid = np.matmul(centroid, np.ones((4, params['Nparticles'])))\r\n \r\n phaseSpaceCoords = phaseSpaceCoords + centroid\r\n \r\n return phaseSpaceCoords\r\n \r\n \r\n\r\ndef Sinogram4D(params, phaseSpaceCoords):\r\n \r\n coordsX = phaseSpaceCoords[0:2]\r\n \r\n coordsY = phaseSpaceCoords[2:4]\r\n \r\n scanSteps = int( TransferMatrixX.shape[0]/2 )\r\n \r\n sinogramX = np.zeros( (scanSteps, params['PhaseSpaceResolution']) )\r\n \r\n sinogramY = sinogramX\r\n \r\n xbins = np.linspace(-1, 1, (params['PhaseSpaceResolution']+1))*params['PhaseSpaceRangeObs']*params['sigmaXObsMax']\r\n\r\n ybins = np.linspace(-1, 1, (params['PhaseSpaceResolution']+1))*params['PhaseSpaceRangeObs']*params['sigmaYObsMax']\r\n 
\r\n for i in range (scanSteps):\r\n \r\n j = 2*i\r\n k = j+2\r\n \r\n tmX = TransferMatrixX[j:k, :]\r\n\r\n tmY = TransferMatrixY[j:k, :]\r\n \r\n transformedCoordsX = np.matmul(tmX, coordsX)\r\n \r\n transformedCoordsY = np.matmul(tmY, coordsY)\r\n \r\n transPhaseSpaceDensityX, edges = np.histogram(transformedCoordsX[0,:], bins = xbins)\r\n \r\n sinogramX[i] = transPhaseSpaceDensityX\r\n\r\n transPhaseSpaceDensityY, edges = np.histogram(transformedCoordsY[0,:], bins = ybins)\r\n \r\n sinogramY[i] = transPhaseSpaceDensityY\r\n \r\n return sinogramX, sinogramY\r\n\r\n\r\n\r\ndef MakeSinograms4D(params):\r\n \r\n if params['ComplexPhaseSpace']:\r\n \r\n Xdata, Xdata2, Ydata, Ydata2 = MakeSinogramsComplexPS4D(params)\r\n \r\n else:\r\n \r\n Xdata, Xdata2, Ydata, Ydata2 = MakeSinogramsSimplePS4D(params)\r\n \r\n return Xdata, Xdata2, Ydata, Ydata2\r\n\r\n\r\n\r\ndef MakeSinogramsSimplePS4D(params):\r\n \r\n samples = params['Samples']\r\n \r\n scanSteps = params['ScanSteps']\r\n \r\n Xdata = np.zeros((scanSteps*samples, params['PhaseSpaceResolution']))\r\n\r\n Xdata2 = np.array([params['emitX'], params['betaX'], params['alphaX']])\r\n \r\n Xdata2 = np.diag(Xdata2)\r\n\r\n Ydata = np.zeros((scanSteps*samples, params['PhaseSpaceResolution']))\r\n\r\n Ydata2 = np.array([params['emitY'], params['betaY'], params['alphaY']])\r\n \r\n Ydata2 = np.diag(Ydata2)\r\n \r\n var = 0.4 + 1.2*(np.random.rand(samples, 3))\r\n \r\n Xdata2 = np.matmul(var, Xdata2)\r\n \r\n Ydata2 = np.matmul(var, Ydata2)\r\n \r\n params1 = params.copy()\r\n \r\n for i in range (samples):\r\n \r\n print('', end=f'\\rSample number: {i+1}/{samples}')\r\n \r\n params1['emitX'] = Xdata2[i,0]\r\n params1['betaX'] = Xdata2[i,1]\r\n params1['alphaX'] = Xdata2[i,2]\r\n\r\n params1['emitY'] = Ydata2[i,0]\r\n params1['betaY'] = Ydata2[i,1]\r\n params1['alphaY'] = Ydata2[i,2]\r\n\r\n phaseSpaceCoords = PhaseSpaceSimple4D(params1)\r\n \r\n Xdata[i*scanSteps:(i+1)*scanSteps, :], Ydata[i*scanSteps:(i+1)*scanSteps, :] = Sinogram4D(params, phaseSpaceCoords)\r\n \r\n print(' Done!')\r\n \r\n Xdata = Xdata*1000\r\n\r\n Ydata = Ydata*1000\r\n \r\n Xdata = Xdata.round(decimals=0)\r\n\r\n Ydata = Ydata.round(decimals=0)\r\n \r\n tag = params['Tag']\r\n\r\n np.savetxt('SinogramsX' + tag + '.txt', Xdata, delimiter=',')\r\n\r\n np.savetxt('OpticsParametersX' + tag + '.txt', Xdata2, delimiter=',')\r\n\r\n np.savetxt('SinogramsY' + tag + '.txt', Ydata, delimiter=',')\r\n\r\n np.savetxt('OpticsParametersY' + tag + '.txt', Ydata2, delimiter=',')\r\n\r\n return Xdata, Xdata2, Ydata, Ydata2\r\n\r\n\r\n\r\ndef MakeSinogramsComplexPS4D(params):\r\n \r\n samples = params['Samples']\r\n \r\n scanSteps = params['ScanSteps']\r\n \r\n psresolution = params['PhaseSpaceResolution']\r\n \r\n Xdata = np.zeros((scanSteps*samples, psresolution))\r\n\r\n Ydata = np.zeros((scanSteps*samples, psresolution))\r\n \r\n sigmax = np.sqrt(params['emitX']*params['betaX'])\r\n #print(sigmax)\r\n \r\n sigmay = np.sqrt(params['emitY']*params['betaY'])\r\n #print(sigmay)\r\n \r\n sigmapx = np.sqrt(params['emitX']*(1 + np.square(params['alphaX']))/params['betaX'])\r\n #print(sigmapx)\r\n\r\n sigmapy = np.sqrt(params['emitY']*(1 + np.square(params['alphaY']))/params['betaY'])\r\n #print(sigmapy)\r\n \r\n xbins = np.linspace(-1, 1, (psresolution+1))*params['PhaseSpaceRange']*sigmax\r\n\r\n pxbins = np.linspace(-1, 1, (psresolution+1))*params['PhaseSpaceRange']*sigmapx\r\n\r\n ybins = np.linspace(-1, 1, (psresolution+1))*params['PhaseSpaceRange']*sigmay\r\n\r\n pybins = np.linspace(-1, 1, 
(psresolution+1))*params['PhaseSpaceRange']*sigmapy\r\n \r\n Xdata2 = np.zeros( (psresolution*samples, psresolution) )\r\n\r\n Ydata2 = np.zeros( (psresolution*samples, psresolution) )\r\n\r\n npart = params['Nparticles']\r\n \r\n params1 = params.copy()\r\n \r\n for i in range (samples):\r\n \r\n print('', end=f'\\rSample number: {i+1}/{samples}')\r\n \r\n npart1 = int(npart*(0.3 + 0.2*(np.random.rand(1)[0]) ))\r\n npart2 = npart - npart1\r\n \r\n params1['X0'] = sigmax*(np.random.rand(1)[0]-0.5)*3\r\n params1['PX0'] = sigmapx*(np.random.rand(1)[0]-0.5)*3\r\n params1['Y0'] = sigmay*(np.random.rand(1)[0]-0.5)*3\r\n params1['PY0'] = sigmapy*(np.random.rand(1)[0]-0.5)*3\r\n \r\n params1['emitX'] = params['emitX']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['betaX'] = params['betaX']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['alphaX'] = params['alphaX']*(-1 + 3.0*(np.random.rand(1)[0]) )\r\n\r\n params1['emitY'] = params['emitY']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['betaY'] = params['betaY']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['alphaY'] = params['alphaY']*(-1 + 3.0*(np.random.rand(1)[0]) )\r\n\r\n params1['Nparticles'] = npart1\r\n \r\n psCoords1 = PhaseSpaceSimple4D(params1) \r\n\r\n params1['X0'] = sigmax*(np.random.rand(1)[0]-0.5)*3\r\n params1['PX0'] = sigmapx*(np.random.rand(1)[0]-0.5)*3\r\n params1['Y0'] = sigmay*(np.random.rand(1)[0]-0.5)*3\r\n params1['PY0'] = sigmapy*(np.random.rand(1)[0]-0.5)*3\r\n \r\n params1['emitX'] = params['emitX']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['betaX'] = params['betaX']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['alphaX'] = params['alphaX']*(-1 + 3.0*(np.random.rand(1)[0]) )\r\n \r\n params1['emitY'] = params['emitY']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['betaY'] = params['betaY']*(0.2 + 1.6*(np.random.rand(1)[0]) )\r\n params1['alphaY'] = params['alphaY']*(-1 + 3.0*(np.random.rand(1)[0]) )\r\n\r\n params1['Nparticles'] = npart2\r\n \r\n psCoords2 = PhaseSpaceSimple4D(params1)\r\n\r\n phaseSpaceCoords = np.concatenate((psCoords1,psCoords2),axis=1)\r\n\r\n Xdata[i*scanSteps:(i+1)*scanSteps, :], Ydata[i*scanSteps:(i+1)*scanSteps, :] = Sinogram4D(params, phaseSpaceCoords)\r\n \r\n PhaseSpaceDensityX, xedges1, xedges2 = np.histogram2d(phaseSpaceCoords[0,:], phaseSpaceCoords[1,:], bins = [xbins,pxbins])\r\n\r\n PhaseSpaceDensityY, yedges1, yedges2 = np.histogram2d(phaseSpaceCoords[2,:], phaseSpaceCoords[3,:], bins = [ybins,pybins])\r\n\r\n Xdata2[i*psresolution:(i+1)*psresolution, :] = PhaseSpaceDensityX\r\n\r\n Ydata2[i*psresolution:(i+1)*psresolution, :] = PhaseSpaceDensityY\r\n \r\n print(' Done!')\r\n \r\n Xdata = Xdata*1000\r\n \r\n Ydata = Ydata*1000\r\n \r\n Xdata = Xdata.round(decimals=0)\r\n\r\n Ydata = Ydata.round(decimals=0)\r\n\r\n tag = params['Tag']\r\n\r\n np.savetxt('SinogramsComplexPSX' + tag + '.txt', Xdata, delimiter=',')\r\n\r\n np.savetxt('PhaseSpaceDensityX' + tag + '.txt', Xdata2, delimiter=',')\r\n\r\n np.savetxt('SinogramsComplexPSY' + tag + '.txt', Ydata, delimiter=',')\r\n\r\n np.savetxt('PhaseSpaceDensityY' + tag + '.txt', Ydata2, delimiter=',')\r\n\r\n return Xdata, Xdata2, Ydata, Ydata2\r\n\r\n\r\n\r\ndef ArrayHeatMapPlot(inputData, fname):\r\n \r\n plt.clf()\r\n plt.imshow(inputData, cmap='hot', interpolation='nearest')\r\n plt.savefig(fname)\r\n plt.show()\r\n\r\n\r\n\r\ndef MultipleHeatMapPlot(image, figuresize, fname):\r\n \r\n plt.clf()\r\n fig = plt.figure(figsize=figuresize)\r\n \r\n for i in range(32):\r\n sub = fig.add_subplot(4, 8, i + 1)\r\n 
sub.imshow(image[i], interpolation='nearest', aspect='auto')\r\n plt.xticks([])\r\n plt.yticks([])\r\n \r\n plt.savefig(fname)\r\n plt.show()\r\n ","sub_path":"V19_4D_MakeSinograms.py","file_name":"V19_4D_MakeSinograms.py","file_ext":"py","file_size_in_byte":9503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"291189932","text":"import os\nimport sys\n\nimport tornado.escape\nimport yaml\nimport json\nfrom bs4 import BeautifulSoup\n\npwd = os.getcwd()\n(qian, hou) = os.path.split(pwd)\nsys.path.append(qian)\n\nfrom torcms.model.infor_model import MInfor\n\nmequ = MInfor()\n\nrecs = mequ.get_all()\nfor rec in recs:\n if rec.extinfo['def_cat_uid'].startswith('05'):\n # for key in rec.extinfo:\n # print(key)\n # if 'ext_yaml' in rec.extinfo:\n # print('i' * 20)\n # pass\n # elif 'extra_yaml' in rec.extinfo:\n # print('t' * 20)\n # pass\n outstr= ''\n if 'def_version' in rec.extinfo:\n print(rec.uid)\n if rec.uid == '6847':\n continue\n print('m' * 20)\n data_json = rec.extinfo['def_json']\n # uu = json.dumps(data_json, indent= 1, sort_keys=True)\n vv = yaml.load(data_json)\n # print(dir(vv))\n for key, value in vv.items():\n # print(key, value)\n outstr = outstr + key + '\\n'\n value2 = sorted(value.items(), key = lambda asd:asd[0])\n for skey, svalue in value2:\n # print(skey, svalue)\n outstr = outstr + ' {0}: {1}\\n'.format(skey, svalue)\n print(outstr)\n # print(vv)\n # print(type(vv))\n # print('i' * 20)\n newdic = { 'ext_yaml': outstr.strip() }\n mequ.update_jsonb(rec.uid, newdic)\n pass\n\n\n","sub_path":"script/script_update_yaml.py","file_name":"script_update_yaml.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"356564845","text":"import sys\nimport codecs\nimport glob\nimport re\n\ntens_path = \"/Volumes/tensusers/timzee/cgn/n_tests/\" if sys.platform == \"darwin\" else \"/vol/tensusers/timzee/cgn/n_tests/\"\nali_path = \"/Volumes/tensusers/timzee/KALDI_FA_out/n_tests_a/\" if sys.platform == \"darwin\" else \"/vol/tensusers/timzee/KALDI_FA_out/n_tests_a/\"\ntz_path = \"/Volumes/timzee/Docs/\" if sys.platform == \"darwin\" else \"/home/timzee/Docs/\"\n\nphon_dict = {}\nwith open(tz_path + \"KALDI-CGN_phones3.txt\", \"r\") as f:\n for line in f:\n phon_dict[line.split(\",\")[0]] = line[:-1].split(\",\")[1]\n\nwith codecs.open(tens_path + \"validation_data_cgn-kaldi-kaldi-n_a_pos.csv\", \"w\", encoding=\"utf-8\") as h:\n with codecs.open(tens_path + \"validation_data_cgn-kaldi-kaldi-n_a.csv\", \"r\", encoding=\"utf-8\") as f:\n prev_sentence = \"\"\n phon_num = 0\n for l_num, line in enumerate(f, 1):\n print(l_num, line)\n if l_num == 1:\n h.write(line[:-1] + \",kaldi_pos,kaldi_dur\\n\")\n else:\n l_list = line[:-1].split(\",\")\n sentence = re.sub(r'[/-]', '_', l_list[0][:-1])\n kaldi_lab = l_list[2]\n if sentence != prev_sentence:\n ali_lines = []\n phon_num = 0\n glob_list = glob.glob(ali_path + \"*\" + sentence + \"*\")\n print(sentence, glob_list)\n if len(glob_list) > 0:\n with codecs.open(glob_list[0], \"r\", encoding=\"utf-8\") as g:\n ali_lines = g.readlines()\n if len(ali_lines) > 1:\n if kaldi_lab != \"NA\":\n phon_num += 1\n if phon_num > len(ali_lines) - 1:\n kaldi_pos = \"NA\"\n else:\n phon_dur = ali_lines[phon_num].split(\"\\t\")[5]\n kaldi_pos_l = ali_lines[phon_num].split(\"\\t\")[6].split(\"_\")\n kaldi_pos = kaldi_pos_l[1] if len(kaldi_pos_l) > 1 else kaldi_pos_l[0]\n if phon_num == 1:\n if 
phon_dict[kaldi_pos_l[0]] != kaldi_lab:\n ali_lines = []\n print(kaldi_lab, phon_num, kaldi_pos_l)\n else:\n kaldi_pos = \"NA\"\n phon_dur = \"NA\"\n h.write(re.sub(r'_', '', line[:-1]) + \",\" + kaldi_pos + \",\" + phon_dur + \"\\n\")\n prev_sentence = sentence[:]\n","sub_path":"add_kaldi_pos_n.py","file_name":"add_kaldi_pos_n.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"158907886","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom task.views import *\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'DjLive.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', index),\n url(r'^task/([0-9]+)?/$', task),\n url(r'^tasks/$', tasks),\n url(r'^profile/([0-9]+)?/$', profile),\n url(r'^people/$', people),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^create_task/$', create_task),\n\n #develop\n url('^media/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': '/Users/strelka/PycharmProjects/DjLive/media/'}),\n url('^uploads/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': '/Users/strelka/PycharmProjects/DjLive/uploads/'}),\n\n\n)\n\n","sub_path":"DjLive/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"507026382","text":"#quotes\nfrom bs4 import BeautifulSoup\nimport requests\n\nurl = 'http://quotes.toscrape.com'\nresponse = requests.get(url).content\nsopa = BeautifulSoup(response)\nnueva_sopa = sopa.findAll('span', {'class':'text'})\n\n# list of quotes; could be cleaned up\ntext = [i.get_text() for i in nueva_sopa]\nprint(text)\n","sub_path":"quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"489100142","text":"import itertools\nimport pickle\nfrom datetime import datetime\nfrom pickle import PickleBuffer\nfrom typing import MutableSequence\nfrom unittest.mock import Mock\n\nimport pytest\n\nfrom snuba.consumer import (\n JSONRowInsertBatch,\n StreamingConsumerStrategyFactory,\n)\nfrom snuba.processor import InsertBatch, ReplacementBatch\nfrom snuba.utils.streams import Message, Partition, Topic\nfrom snuba.utils.streams.backends.kafka import KafkaPayload\nfrom tests.assertions import assert_changes\nfrom tests.backends.confluent_kafka import FakeConfluentKafkaProducer\nfrom tests.backends.metrics import TestingMetricsBackend, Timing\n\n\ndef test_streaming_consumer_strategy() -> None:\n messages = (\n Message(\n Partition(Topic(\"events\"), 0),\n i,\n KafkaPayload(None, b\"{}\", None),\n datetime.now(),\n )\n for i in itertools.count()\n )\n\n replacements_producer = FakeConfluentKafkaProducer()\n\n processor = Mock()\n processor.process_message.side_effect = [\n None,\n InsertBatch([{}]),\n ReplacementBatch(\"key\", [{}]),\n ]\n\n writer = Mock()\n\n metrics = TestingMetricsBackend()\n\n factory = StreamingConsumerStrategyFactory(\n None,\n processor,\n writer,\n metrics,\n max_batch_size=10,\n max_batch_time=60,\n processes=None,\n input_block_size=None,\n output_block_size=None,\n replacements_producer=replacements_producer,\n replacements_topic=Topic(\"replacements\"),\n )\n\n commit_function = Mock()\n strategy = factory.create(commit_function)\n\n for i in range(3):\n strategy.poll()\n strategy.submit(next(messages))\n\n assert 
metrics.calls == []\n\n processor.process_message.side_effect = [{}]\n\n with pytest.raises(TypeError):\n strategy.poll()\n strategy.submit(next(messages))\n\n def get_number_of_insertion_metrics() -> int:\n count = 0\n for call in metrics.calls:\n if isinstance(call, Timing) and call.name == \"insertions.latency_ms\":\n count += 1\n return count\n\n expected_write_count = 1\n\n with assert_changes(\n get_number_of_insertion_metrics, 0, expected_write_count\n ), assert_changes(\n lambda: writer.write.call_count, 0, expected_write_count\n ), assert_changes(\n lambda: len(replacements_producer.messages), 0, 1\n ):\n strategy.close()\n strategy.join()\n\n\ndef test_json_row_batch_pickle_simple() -> None:\n batch = JSONRowInsertBatch([b\"foo\", b\"bar\", b\"baz\"])\n assert pickle.loads(pickle.dumps(batch)) == batch\n\n\ndef test_json_row_batch_pickle_out_of_band() -> None:\n batch = JSONRowInsertBatch([b\"foo\", b\"bar\", b\"baz\"])\n\n buffers: MutableSequence[PickleBuffer] = []\n data = pickle.dumps(batch, protocol=5, buffer_callback=buffers.append)\n assert pickle.loads(data, buffers=[b.raw() for b in buffers]) == batch\n","sub_path":"tests/test_consumer.py","file_name":"test_consumer.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"47236445","text":"#\n#% $Id$ \n#\n#\n# Copyright (C) 2002-2007\n# The MeqTree Foundation & \n# ASTRON (Netherlands Foundation for Research in Astronomy)\n# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>,\n# or write to the Free Software Foundation, Inc., \n# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\nfrom Timba.TDL import *\nfrom Timba.Meq import meq\n\ndef _define_forest (ns, **kwargs):\n ## This defines a tree for\n ## f = a*sin(t*cos(2*f))\n \n ns.x << Meq.Time;\n ns.y << Meq.Freq;\n \n a = ns.alpha << 297.61903062068177;\n # note that here we've declared a Constant node named \"alpha\", and that\n # the Python variable 'a' now refers _TO THAT NODE_...\n\n # ...so the variable can subsequently be used elsewhere:\n ns.f << a*Meq.Sin(ns.x*Meq.Cos(2*ns.y));\n\n # and for comparison, a tree without Time/Freq:\n # note how node definitions are nested\n ns.f1 << a*Meq.Sin((ns.x1<<1)*Meq.Cos(2*(ns.y1<<1)))\n \n\n\n \n\ndef _test_forest (mqs, parent):\n domain = meq.domain(10,100,0,10) \n cells = meq.cells(domain,num_freq=200, num_time=100)\n request = meq.request(cells, rqtype='ev')\n result = mqs.execute('f',request);\n result = mqs.execute('f1',request);\n\n\n\n\nSettings.forest_state.bookmarks = [\n record(name=\"result of 'f'\",viewer='Result Plotter',udi='/node/f'),\n record(name=\"result of 'f1'\",viewer='Result Plotter',udi='/node/f1') \n];\nSettings.forest_state.cache_policy = 100;\n","sub_path":"Courses/Workshop2006/Day1/demo2-improved-tree.py","file_name":"demo2-improved-tree.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"651098336","text":"# imports\nfrom tkinter import *\n\n\nclass Saha(object):\n def __init__(self, oyun, ana_pencere):\n # Initial declarations\n self.oyun = oyun\n # width and height of the field the snake will move around in...\n self.sahaBoyutlari = [30, 30] # first is width (x), second is height (y)\n self.sahaRengi = \"#FF0000\" # color of the field\n self.saha_göster(ana_pencere)\n \n def saha_göster(self, ana_pencere):\n # create the frame the field will be placed in\n çerçeve = Frame(ana_pencere, width=self.sahaBoyutlari[0] * 10,\n height=self.sahaBoyutlari[1] * 10)\n çerçeve.place(x=10, y=10)\n\n # create the array that will hold the Canvases and assign the Canvases into it\n canvas_dizisi_saha = []\n for i in range(self.sahaBoyutlari[1]):\n canvas_dizisi_saha.append([])\n for i in range(self.sahaBoyutlari[1]):\n for j in range(self.sahaBoyutlari[0]):\n canvas_dizisi_saha[i].append(Canvas(çerçeve, height=10, width=10,\n bg=self.sahaRengi, highlightthickness=0))\n \n # place the Canvases into the frame\n x = 0\n y = 0\n for dizi in canvas_dizisi_saha:\n for canvas in dizi:\n canvas.place(x=x, y=y)\n x += 10\n x = 0\n y += 10\n","sub_path":"Saha2.py","file_name":"Saha2.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"652146904","text":"# modules\nimport numpy as np\n\n\nclass g_wavepattern():\n '''\n Generator: wavepattern\n '''\n\n def __init__(self):\n self.freq = 0.5\n self.speed = 1.0\n\n def control(self, freq, speed, blub1):\n self.freq = int(freq*10)\n self.speed = speed*0.5\n\n def label(self):\n return ['Frequency', round(self.freq, 2),\n 'Speed', self.speed,\n 'empty', 'empty']\n\n def generate(self, step, dumpworld):\n # create world\n world = np.zeros([3, 10, 10, 10])\n\n for x in range(10):\n for y in range(10):\n for z in range(10):\n world[:, x, y, z] = 
np.sin(step*self.speed)*(\n np.sin(np.pi*self.freq*x/10)**6+\\\n np.sin(np.pi*self.freq*y/10)**6+\\\n np.sin(np.pi*self.freq*z/10)**6)\n\n return np.clip(world, 0, 1)\n","sub_path":"generators/g_wavepattern.py","file_name":"g_wavepattern.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"307606351","text":"def gcdIter(a, b):\n '''\n a, b: positive integers\n \n returns: a positive integer, the greatest common divisor of a & b.\n '''\n # Your code here\n result = min(a,b)\n while max(a,b)%result != 0 or min(a,b)%result !=0:\n result = result - 1\n return result\n","sub_path":"L05P04-GCD-Iter.py","file_name":"L05P04-GCD-Iter.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"272105689","text":"#!/usr/bin/env python3\n#\n# runTests.py\n#\n\n\nimport sys\nimport os\nfrom os.path import dirname\nfrom os.path import join\nfrom statistics import median\nimport subprocess\n\n\nrootDir = dirname(os.path.realpath(__file__))\n\n\n#By default, run all tests. Individual versions can be specified\ntargetsToRun = [\"brent_test\", \"openmp_test\"]\nif len(sys.argv) > 1:\n #Allow some shorthands, or the full names as above\n target = {\n \"cuda\": \"brent_test\",\n \"brent\": \"brent_test\",\n \"omp\": \"openmp_test\",\n \"openmp\": \"openmp_test\"\n }.get(sys.argv[1], sys.argv[1])\n if target not in targetsToRun:\n print(\"Invalid target: \" + target)\n exit()\n targetsToRun = [target]\n\n\n# Execute from the rootDir, piping output\ndef execute(*a):\n if len(a) == 1 and isinstance(a[0], str):\n #Single string argument, execute as shell=True\n args_ = a[0]\n else:\n #List of arguments, execute as shell=False\n args_ = []\n for i in a:\n if isinstance(i, str): args_.append(i)\n elif isinstance(i, list): args_ += i\n else: print(\"invalid argument: \", file=sys.stderr)\n\n return subprocess.run(args=args_,\n cwd=rootDir,\n stdout=subprocess.PIPE,\n shell=isinstance(args_, str))\n\n\n\ndef run():\n numCorrect = 0\n numFailures = 0\n totalTests = 0\n\n print(\"Running \" + str(targetsToRun))\n\n for target in targetsToRun:\n executable = {\n \"brent_test\": \"./brent-kung\",\n \"openmp_test\": \"./openmp_inclusiveScan\"\n }.get(target)\n\n \n sectionSizes = {\n \"brent_test\": [1024,2048],\n \"openmp_test\": [2,4,8,16]\n }.get(target)\n\n\n for arraySize in list(map(lambda x: 2**x, range(8,29))): #from 2^8 to 2^28, by 2's\n for sectionSize in sectionSizes:\n totalTests += 1\n\n makeTarget = \"make \" + target + \" ARRAY_SIZE=\"+str(arraySize) +\" SECTION_SIZE=\"+str(sectionSize) + \" NUM_THREADS=\"+str(sectionSize)\n print(\"Running \\\"\" + makeTarget + \"\\\": \", end=\"\", flush=True)\n execute(makeTarget)\n\n proc = execute(executable)\n for l in proc.stdout.decode().split(\"\\n\"):\n if \"ALL CORRECT!\" in l:\n numCorrect += 1\n print(\"OK!\")\n elif \"FAIL!\" in l:\n numFailures += 1\n print(\"FAIL!\")\n \n ###end for sectionSize\n ###end for arraySize\n ###end for target\n\n if numCorrect > 0:\n print(\"Passed \" + str(numCorrect) + \"/\" + str(totalTests))\n if numFailures > 0:\n print(\"Failed \" + str(numFailures) + \"/\" + str(totalTests))\n \n numUnknown = totalTests - (numFailures + numCorrect)\n if numUnknown > 0:\n print(\"ERROR \" + str(numUnknown) + \"/\" + str(totalTests))\n\n###end 
run()\n\n\nrun()\n\n","sub_path":"runTests.py","file_name":"runTests.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"367882078","text":"import tornado.ioloop\nimport tornado.web\nimport json\nimport os\nimport random\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"DeliciouSeret\")\n\n\nclass Collector(tornado.web.RequestHandler):\n def get(self):\n self.render(\"collector.html\")\n\n\nclass getInfo(tornado.web.RequestHandler):\n def get(self, *args, **kwargs):\n json_dict = {\"movieId\": random.randrange(5), \"movieName\": random.randrange(5), \"movieImage\": \"movie_image\", \"recipeId\": random.randrange(5), \"recipeName\": random.randrange(5), \"recipeImage\": \"movie_image\"}\n self.finish(json.dump(json_dict, self))\n\n\nclass postRate(tornado.web.RequestHandler):\n def post(self, *args, **kwargs):\n print('POST')\n print('FOODID POST EREZ')\n print(self.get_argument('foodId'))\n print('MOVIEID')\n print(self.get_argument('movieId'))\n print('RATE')\n print(self.get_argument('rate'))\n\n\nsettings = dict(\n static_path = os.path.join(os.path.dirname(__file__), \"static\")\n )\n\n\ndef make_app():\n return tornado.web.Application([\n (r\"/\", Collector),\n (r\"/getInfo\", getInfo),\n (r\"/postRate\", postRate),\n ], **settings)\n\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(80)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"deliciouserver.py","file_name":"deliciouserver.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"450668383","text":"\"\"\"\nRecorder agents (base class + period-specific classes)\n\"\"\"\n\n__all__ = [\"HistoryRecorder\", \"Min1Recorder\", \"DailyRecorder\", \"WeeklyRecorder\",\n \"MonthlyRecorder\", \"BimonthlyRecorder\", \"QuarterlyRecorder\",\n \"SemesterlyRecorder\", \"YearlyRecorder\"]\n\nimport datetime\nfrom alavan.mainsys import *\nfrom alavan.consts import *\nfrom alavan import dt2ts, dt2str, ThreadAgent, Instrumenter\nfrom alavan import Instrument\nimport time\n#from alavan.datatypes.instrument import *\nfrom PyQt4.QtCore import pyqtSignal, Qt\n\n_AS_CHECK_DATA = 901 # Verifies what's recorded and what's available;\n # Draws a plan of recordings\n_AS_RECORD = 902 # Performs some recording;\n_AS_WAIT = 903 # Market is closed\n_AS_END = 121212\n\nclass HistoryRecorder(ThreadAgent):\n \"\"\"\n Thread that reads quote data from streamer and records into the DTDCache\n\n Tasks are added through the AddTack() method.\n\n It has a play/pause feature.\n \"\"\"\n # Note: works with instrument names instead of Intrument nstances because the latter cannot be made persistent.\n\n # TaskFinished(task_id, period, instrument) is emitted whenever it finishes\n # recording an instrument for a given period\n TaskFinished = pyqtSignal(int, int, Instrument)\n\n def __init__(self, *args, **kwargs):\n ThreadAgent.__init__(self, *args, **kwargs)\n\n ### Setup variables\n ## What to do when finishes?\n self.flagRestartWhenFinished = False # If True, will restart\n self.flagPauseWhenFinished = True # Only has effect if flagRestartWhenFinished is True.\n # If True, pauses after restarting.\n ## Miscellanea\n # If set, will unpause itself automatically after 60 seconds\n self.flagAutoUnpause = False\n # Number of seconds to remain paused until auto-unpause\n self.unpauseTimeout = 60\n # How many seconds to wait until 
restart\n self.sleepTime = .1\n\n self._flagPaused = False\n self._whenPaused = None # When was I paused? To wake up automatically if flagAutoUnpause is True\n\n # market.Streamer instance, automatically retrieved from mainSys() upon initialization\n self._streamer = None\n\n\n def AddTask(self, id, period, instrumentName):\n self.PutMessage(\"AddTask\", (id, period, instrumentName))\n\n def RemoveTask(self, id_):\n self.PutMessage(\"RemoveTask\", id_)\n\n def _pp_Restart(self):\n self._InitData()\n\n def _pp_TreatQueueItem(self, token, data):\n \"\"\"Treats Pause, Play.\n\n Tokens treated:\n AddTask -- appends element to self.data[\"tasks\"] and saves state.\n Pause -- sets _flagPause to True\n Play -- sets _flagPause to False\n \"\"\"\n o = self.data\n if token == \"AddTask\":\n # expects data to be (id_, period, instrumentName) (tuple)\n o[\"tasks\"].append(data)\n self._SaveData()\n elif token == \"RemoveTask\":\n # expects data to be id_ (integer)\n idx, tt = -1, o[\"tasks\"]\n for i, t in enumerate(tt):\n if t[0] == data:\n idx = i\n break\n if idx == -1:\n # Not such a big deal to make a fuss (i.e. raise exception)\n self.logger.warning(\"Task id %d not found\" % data)\n else:\n if idx == o[\"iTask\"]:\n # Task is current task, needs extra care\n o[\"iInterval\"] = -1\n elif idx < o[\"iTask\"]:\n o[\"iTask\"] -= 1\n del o[\"tasks\"][idx]\n self._SaveData()\n elif token == \"Pause\":\n self._pp_Pause()\n elif token == \"Play\":\n self._pp_Play()\n else:\n super(HistoryRecorder, self)._pp_TreatQueueItem(token, data)\n\n def run_(self):\n \"\"\"Inherited to implement booting sequence.\"\"\"\n if self._streamer is None:\n self._streamer = mainSys().GetStreamer()\n ThreadAgent.run_(self)\n\n def _Cycle(self):\n \"\"\"\n Records one interval per cycle.\n \"\"\"\n # self.logger.debug(\"ooooO .... 
Ooooo\")\n if self._flagPaused:\n if self.flagAutoUnpause and self._pp_WantsToUnpause():\n self._pp_Play()\n else:\n self.Sleep(1)\n else:\n #\n # if self.state == -1:\n # self._InitData()\n # self.state = _AS_RECORD\n # elif self.state == _AS_RECORD:\n # self._Record()\n self._Record()\n\n def _InitData(self):\n \"\"\"Initializes data dictionary.\n\n Dictionary keys:\n \"tasks\" -- [(instrumentName0, period0), ...]\n \"iTask\" -- index of current task\n \"iInterval\" -- index of current interval, or -1 if the task hasn't been\n started yet\n \"flagAnyQuotes\" -- initialized to False and after set to True when got\n any quotes from streamer\n \"\"\"\n o = self.data\n o[\"tasks\"] = []\n o[\"iTask\"] = 0\n o[\"iInterval\"] = -1 # -1 means that last date recorded in database needs to be found\n o[\"flagAnyQuotes\"] = False # Set to True when any recording is done\n\n\n def _Record(self):\n \"\"\"Records one interval and increments pointers.\"\"\"\n flagFinished = False\n o = self.data\n while True: # This loop is just to find next instrument and interval\n if o[\"iTask\"] >= len(o[\"tasks\"]):\n flagFinished = True\n break\n\n if o[\"iInterval\"] == -1: # i.e., makes the intervals for task iTask\n id_, period, instrumentName = o[\"tasks\"][o[\"iTask\"]]\n temp = mainSys().DTDCache.quote_GetMaxDatetime(period, instrumentName, True)\n dt1 = temp if temp is not None else QP.data[period][\"start\"]\n dt2 = datetime.datetime.today()\n o[\"intervals\"] = _SplitInterval(dt1, dt2, period)\n o[\"iInterval\"] = 0\n\n num_intervals = len(o[\"intervals\"])\n if o[\"iInterval\"] >= num_intervals:\n if num_intervals > 0:\n # Sends message that task was finished\n #self._pp_SendMessage(\"TaskFinished\", o[\"tasks\"][o[\"iTask\"]])\n\n data = o[\"tasks\"][o[\"iTask\"]]\n self.TaskFinished.emit(data[0], data[1], GetInstrument(data[2]))\n o[\"iTask\"] += 1\n o[\"iInterval\"] = -1\n else:\n break\n\n if flagFinished:\n if self.flagRestartWhenFinished:\n self.state = -1 # Restart everything\n if self.flagPauseWhenFinished:\n self.logger.debug(\"Finished and paused\")\n self._pp_Pause()\n else:\n self.Sleep(self.sleepTime)\n else:\n self._flagExit = True\n\n self.logger.debug(\"Finished\")\n\n return\n\n # Going to get history and record interval\n interval = o[\"intervals\"][o[\"iInterval\"]]\n _, period, instrumentName = o[\"tasks\"][o[\"iTask\"]]\n self.logger.info(\"Instrument %s, interval %s - %s\" %\n (instrumentName,\n dt2str(interval[0]), dt2str(interval[1])))\n try:\n instrument = mainSys().GetInstrument(instrumentName)\n quotes = self._streamer.GetHistory(instrument, dt2ts(interval[0]), dt2ts(interval[1]), period)\n if len(quotes) > 0:\n mainSys().DTDCache.quote_Record(period, instrument, quotes)\n o[\"flagAnyQuotes\"] = True\n self.logger.debug(\"...recorded %d quotes!\" % len(quotes))\n else:\n self.logger.debug(\"...got empty quotes data (why?)\")\n o[\"iInterval\"] += 1\n except:\n self.logger.exception(\"(SUPPRESSED) Error getting history, BUT WILL RETRY!!!\")\n self.Sleep(3)\n\n def _pp_Pause(self):\n self._flagPaused = True\n self._whenPaused = time.time()\n\n def _pp_Play(self):\n self._flagPaused = False\n\n def _pp_WantsToUnpause(self):\n \"\"\"Returns True/False: whether it is already time to unpause.\"\"\"\n if time.time()-self._whenPaused >= self.unpauseTimeout:\n return True\n return False\n\n\n################################################################################\nclass PeriodRecorder(HistoryRecorder, Instrumenter):\n \"\"\"\n Recorder that adds tasks automatically when 
started. Task period is a class\n variable and the instruments are determined in the Instrumenter fashion.\n \"\"\"\n\n ## This is configured by descendants\n # Recording period given as an QP\n period = None\n\n def __init__(self, *args, **kwargs):\n HistoryRecorder.__init__(self, *args, **kwargs)\n Instrumenter.__init__(self)\n\n def _InitData(self):\n HistoryRecorder._InitData(self)\n self._UpdateAllNames()\n # id is an arbitrary number here, won't really be used for anything\n self.data[\"tasks\"] = [(id_, self.period, x) for id_, x in enumerate(self._allNames)]\n\n\n################################################################################\nclass Min1Recorder(PeriodRecorder):\n period = QP.min1\n\n\nclass DailyRecorder(PeriodRecorder):\n period = QP.daily\n # Number of days to read at once\n intervalSize = 500\n\n\nclass WeeklyRecorder(PeriodRecorder):\n period = QP.weekly\n intervalSize = 500*7\n\n\nclass MonthlyRecorder(PeriodRecorder):\n period = QP.monthly\n intervalSize = 500*30\n\n\nclass BimonthlyRecorder(PeriodRecorder):\n period = QP.bimonthly\n intervalSize = 500*60\n\n\nclass QuarterlyRecorder(PeriodRecorder):\n period = QP.quarterly\n intervalSize = 500*90\n\n\nclass SemesterlyRecorder(PeriodRecorder):\n period = QP.semesterly\n intervalSize = 500*180\n\n\nclass YearlyRecorder(PeriodRecorder):\n period = QP.yearly\n intervalSize = 500*360\n\n\n\ndef _SplitInterval(dt1, dt2, period):\n \"\"\"Splits interval into smaller ones of maximum intervalSize days.\"\"\"\n intervalSize = QP.data[period][\"intervalSize\"]\n ret = []\n d1 = dt1\n flagBreak = False\n flagFirst = True\n while not flagBreak:\n d2 = d1+datetime.timedelta(days=intervalSize-1)\n if d2 > dt2:\n d2 = dt2\n flagBreak = True\n\n # In min1, second period on will read from 0:00\n if not flagFirst or period >= QP.daily:\n d1 = d1.replace(hour=0, minute=0)\n d1 = d1.replace(second=0, microsecond=0)\n d2 = d2.replace(hour=23, minute=59, second=0, microsecond=0)\n ret.append((d1, d2))\n if not flagBreak:\n d1 += datetime.timedelta(days=intervalSize)\n if d1 > dt2:\n break\n flagFirst = False\n return ret\n","sub_path":"lib/alavan/threads/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":9793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"34778780","text":"#calc_functions.py\r\n#Introduction to Programming\r\n#Author: Kaitlyn Stauder\r\n#Date: February 23, 2018\r\n\r\n# JA: Your parenthesis don't seem to be working properly\r\n\r\nfrom graphics import *\r\nimport math\r\n\r\nwin = GraphWin('Calc', 400, 580)\r\ndisplayTextElement = Text(Point(0, 50), \"green\")\r\ncalcGrid = [[7, 8, 9, '+', 'MC'],\r\n [4, 5, 6, '-', 'M+'],\r\n [1, 2, 3, '*', 'M-'],\r\n ['', 0, '(', ')', 'MR'],\r\n ['%', '^2', '/', '+/-', 'MS'],\r\n ['Mode', '.', 'sqrt', 'Del', '=']]\r\n\r\nsciGrid = [['x^y', 'sin', 'cos', 'tan', '10^x', 'ln', 'MS'],\r\n ['log', 'sin^-1', 'cos^-1', 'tan^-1', '(', ')', 'MR'],\r\n [7, 8, 9, '', '+/-', '+', 'M+'],\r\n [4, 5, 6, '', '^2', '-', 'M-'],\r\n [1, 2, 3, '', 'sqrt', '*', 'MC'],\r\n ['Mode', 0, '.', '%', '/', 'Del', '=']]\r\n\r\nstandardButtons = [['', '', '', '', ''],\r\n ['', '', '', '', ''],\r\n ['', '', '', '', ''],\r\n ['', '', '', '', ''],\r\n ['', '', '', '', ''],\r\n ['', '', '', '', '']]\r\n\r\nsciButtons = [['', '', '', '', '', '', ''],\r\n ['', '', '', '', '', '', ''],\r\n ['', '', '', '', '', '', ''],\r\n ['', '', '', '', '', '', ''],\r\n ['', '', '', '', '', '', ''],\r\n ['', '', '', '', '', '', '']]\r\n\r\n\r\ndef 
calcButton(x, y, value):\r\n button = Rectangle(Point(x, y), Point(x + 80, y + 80)) \r\n button.setFill('blue')\r\n button.setOutline('lightblue')\r\n button.draw(win)\r\n text = Text(Point(x + 40, y + 40), value) \r\n text.setTextColor('white')\r\n text.setSize(30)\r\n text.draw(win)\r\n return button\r\n\r\ndef inside(clicked, button): \r\n if clicked.getX() > button.p1.getX() and clicked.getX() < button.p2.getX():\r\n if clicked.getY() > button.p1.getY() and clicked.getY() < button.p2.getY():\r\n return True\r\n return False\r\n\r\ndef clickedButton(clicked, buttons): \r\n for i in range(len(buttons)):\r\n for j in range(len(buttons[0])):\r\n if clicked.getX() > buttons[i][j].p1.getX() and clicked.getX() < buttons[i][j].p2.getX():\r\n if clicked.getY() > buttons[i][j].p1.getY() and clicked.getY() < buttons[i][j].p2.getY():\r\n return i, j\r\n return -1, -1\r\n\r\ndef createStandardCalculatorButtons(buttons):\r\n for i in range (6):\r\n for j in range(5):\r\n buttons[i][j] = calcButton(j * 80, i * 80 + 100, calcGrid[i][j])\r\n\r\ndef createSciCalculatorButtons(buttons):\r\n for i in range(6):\r\n for j in range(7):\r\n buttons[i][j] = calcButton(j * 80, i * 80 + 100, sciGrid[i][j])\r\n\r\ndef get_number(op):\r\n if type(op) == str:\r\n if op =='':\r\n return op\r\n if '.' in op:\r\n return float(op)\r\n else:\r\n return int(op)\r\n return op\r\n \r\ndef add(op1, op2):\r\n return op1 + op2\r\n\r\ndef subtract(op1, op2):\r\n return op1 - op2\r\n\r\ndef multiply(op1, op2):\r\n return op1 * op2\r\n\r\ndef divide(op1, op2):\r\n return op1 / op2\r\n\r\ndef changeSign(op):\r\n return -1 * op\r\n\r\ndef square(op):\r\n return op ** 2\r\n\r\ndef sqrt(op):\r\n return math.sqrt(op)\r\n\r\ndef percent(op):\r\n return op / 100\r\n\r\ndef xRaisedy(op1, op2):\r\n return op1 ** op2\r\n\r\ndef sine(op):\r\n return math.sin(op)\r\n\r\ndef cosine(op):\r\n return math.cos(op)\r\n\r\ndef tangent(op):\r\n return math.tan(op)\r\n\r\ndef tenRaisedx(op):\r\n return 10 ** op\r\n\r\ndef log(op):\r\n return math.log10(op)\r\n\r\ndef inversesine(op):\r\n return math.asin(op)\r\n \r\ndef inversecosine(op):\r\n return math.acos(op)\r\n\r\ndef inversetangent(op):\r\n return math.atan(op)\r\n\r\ndef naturallog(op):\r\n return math.log(op)\r\n\r\ndef evaluateInputString(inputString):\r\n while '(' in inputString:\r\n for i in range(len(inputString) - 1, -1, -1):\r\n if inputString[i] == '(':\r\n break\r\n for j in range(i + 1, len(inputString)):\r\n if inputString[j] == ')':\r\n break\r\n expression = inputString[i + 1:j]\r\n if j < len(inputString):\r\n inputString = inputString[:i] + evaluateExpression(expression) + inputString[j + 1:]\r\n else:\r\n inputString = inputString[:i] + evaluateExpression(expression)\r\n if ',' in inputString:\r\n return evaluateExpression(inputString)\r\n return inputString\r\n\r\ndef evaluateExpression(expression):\r\n for i in range(len(expression)):\r\n if expression[i] == ',':\r\n break\r\n for j in range(i + 1, len(expression)):\r\n if expression[j] == ',':\r\n break\r\n expression = expression.replace(',', '')\r\n if j != len(expression) - 1:\r\n j = j - 2\r\n if i == 0:\r\n operator = expression[:j + 1]\r\n num1 = get_number(expression[j + 1:])\r\n return str(evaluateSingleOpExpression(num1, operator))\r\n else:\r\n num1 = get_number(expression[:i])\r\n if j < len(expression) - 1:\r\n operator = expression[i:j + 1]\r\n num2 = get_number(expression[j + 1:])\r\n return str(evaluateMultiOpExpression(num1, num2, operator))\r\n else:\r\n operator = expression[i:]\r\n return 
str(evaluateSingleOpExpression(num1, operator))\r\n\r\ndef evaluateSingleOpExpression(number, operator):\r\n if operator == '+/-':\r\n return changeSign(number)\r\n elif operator == '^2':\r\n return square(number)\r\n elif operator == 'sqrt':\r\n return sqrt(number)\r\n elif operator == '%':\r\n return percent(number)\r\n elif operator == 'sin':\r\n return sine(number)\r\n elif operator == 'cos':\r\n return cosine(number)\r\n elif operator == 'tan':\r\n return tangent(number)\r\n elif operator == '10^x':\r\n return tenRaisedx(number)\r\n elif operator == 'log':\r\n return log(number)\r\n elif operator == 'sin^-1':\r\n return inversesine(number)\r\n elif operator == 'cos^-1':\r\n return inversecosine(number)\r\n elif operator == 'tan^-1':\r\n return inversetangent(number)\r\n elif operator == 'ln':\r\n return naturallog(number)\r\n\r\ndef evaluateMultiOpExpression(number1, number2, operator):\r\n if operator == '+':\r\n return add(number1, number2)\r\n elif operator == '-':\r\n return subtract(number1, number2)\r\n elif operator == '*':\r\n return multiply(number1, number2)\r\n elif operator == '/':\r\n return divide(number1, number2)\r\n elif operator == 'x^y':\r\n return xRaisedy(number1, number2)\r\n\r\n \r\ndef main():\r\n global win, calcGrid, sciGrid, standardButtons, sciButtons\r\n grid = calcGrid\r\n buttons = standardButtons\r\n mode = 'standard'\r\n createStandardCalculatorButtons(buttons)\r\n inputString = ''\r\n displayString = ''\r\n displayTextElement = Text(Point(0, 50), '')\r\n displayTextElement2 = Text(Point(20, 70), '')\r\n displayTextElement.draw(win)\r\n operators = ['+', '-', '*', '/', '+/-', '^2', 'sqrt', '%', 'x^y', 'sin', 'cos', 'tan', '10^x', 'log', 'sin^-1', 'cos^-1', 'tan^-1', 'ln']\r\n memoryButtons = ['MC', 'M+', 'M-', 'MR', 'MS']\r\n memory = 0\r\n previousAnswer = ''\r\n clearWindow = False\r\n clearWindow2 = False\r\n while True:\r\n clicked = win.getMouse()\r\n row, col = clickedButton(clicked, buttons)\r\n if clearWindow:\r\n inputString = ''\r\n displayString = ''\r\n displayTextElement.undraw()\r\n displayTextElement = Text(Point(0, 50), displayString)\r\n displayTextElement.draw(win)\r\n clearWindow = False\r\n if clearWindow2: \r\n inputString = ''\r\n displayString = ''\r\n displayTextElement2.undraw()\r\n displayTextElement2 = Text(Point(0, 70), displayString)\r\n displayTextElement2.draw(win)\r\n clearWindow2 = False\r\n if row >= 0:\r\n userInput = grid[row][col]\r\n if userInput not in ['Del', '=', 'Mode'] and userInput not in memoryButtons:\r\n if str(userInput) in operators:\r\n inputString = inputString + ',' + str(userInput) + ','\r\n else:\r\n inputString = inputString + str(userInput)\r\n displayString = (displayString + str(userInput)).rjust(150)\r\n displayString2 = str(userInput).rjust(150)\r\n displayTextElement.undraw()\r\n displayTextElement2.undraw()\r\n displayTextElement = Text(Point(0, 50), displayString)\r\n displayTextElement2 = Text(Point(0, 70), displayString2)\r\n displayTextElement.draw(win)\r\n displayTextElement2.draw(win)\r\n else:\r\n if userInput == 'Del':\r\n inputString = ''\r\n displayString = ''\r\n previousAnswer = ''\r\n displayTextElement.undraw()\r\n displayTextElement2.undraw()\r\n displayTextElement = Text(Point(0, 50), displayString)\r\n displayTextElement.draw(win)\r\n \r\n elif userInput == '=':\r\n previousAnswer = evaluateInputString(inputString)\r\n displayString = previousAnswer.rjust(150)\r\n displayTextElement.undraw()\r\n displayTextElement2.undraw()\r\n displayTextElement = Text(Point(0, 70), 
displayString)\r\n displayTextElement.draw(win)\r\n clearWindow = True\r\n elif userInput in memoryButtons:\r\n if userInput == 'MC':\r\n inputString = ''\r\n displayString = ''\r\n displayTextElement.undraw()\r\n displayTextElement2.undraw()\r\n displayTextElement = Text(Point(0, 50), displayString)\r\n displayTextElement.draw(win)\r\n memory = 0\r\n elif userInput == 'MR':\r\n displayTextElement.undraw()\r\n displayString = str(memory).rjust(150)\r\n displayTextElement = Text(Point(0, 70), displayString) #\r\n displayTextElement.draw(win)\r\n inputString = str(memory)\r\n else:\r\n if inputString == '':\r\n if previousAnswer != '':\r\n if userInput == 'M+':\r\n memory = memory + get_number(previousAnswer)\r\n elif userInput == 'M-':\r\n memory = memory - get_number(previousAnswer)\r\n elif userInput == 'MS':\r\n memory = get_number(previousAnswer)\r\n else:\r\n if userInput == 'M+':\r\n memory = memory + get_number(evaluateInputString(inputString))\r\n elif userInput == 'M-':\r\n memory = memory - get_number(evaluateInputString(inputString))\r\n elif userInput == 'MS':\r\n memory = get_number(evaluateInputString(inputString))\r\n displayString = str(memory).rjust(150)\r\n displayTextElement.undraw()\r\n displayTextElement2.undraw()\r\n displayTextElement = Text(Point(0, 70), displayString)\r\n displayTextElement.draw(win)\r\n inputString = str(memory)\r\n elif userInput == 'Mode':\r\n clearWindow = True\r\n if mode == 'standard':\r\n grid = sciGrid\r\n buttons = sciButtons\r\n win.close()\r\n win = GraphWin('Calc', 560, 580)\r\n createSciCalculatorButtons(buttons)\r\n mode = 'scientific'\r\n elif mode == 'scientific':\r\n grid = calcGrid\r\n buttons = standardButtons\r\n win.close()\r\n win = GraphWin('Calc', 400, 580)\r\n createStandardCalculatorButtons(buttons)\r\n mode = 'standard' \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"calculator.pyw/calc_functions.py","file_name":"calc_functions.py","file_ext":"py","file_size_in_byte":12516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"228077300","text":"import sys\nfrom collections import deque\n# Build a grid whose cells default to 0; as each input rectangle is read, mark the cells it covers with 1.\n# Then loop over the grid, handing every remaining 0 cell to a search that flips its region to 1 while counting its\n# cells to get an area, which is appended to the answer list. The length of that list is the number of regions, and each element is one region's area.
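# (The bfs() below is a flood fill: each still-unvisited 0 cell seeds a breadth-first search that marks its whole region and records its area.)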
\n\n\nm, n, k = map(int, sys.stdin.readline().split())\nl = [[0] * n for i in range(m)] # grid holding the cells of every rectangle\n\ndy = [-1, 0, 1, 0]\ndx = [0 , 1, 0, -1]\n\nresult = []\n\n\nfor i in range(k):\n x1, y1, x2, y2 = map(int, sys.stdin.readline().split())\n for j in range(y1, y2):\n for k in range(x1, x2):\n l[j][k] = 1 # mark cells covered by a rectangle as 1\n\n\ndef bfs(s1, s2):\n q = deque()\n q.append([s1, s2])\n cnt = 1\n l[s1][s2] = 1\n while q:\n y, x = q.popleft()\n for i in range(4):\n ny = y + dy[i]\n nx = x + dx[i]\n if 0 <= ny < m and 0 <= nx < n:\n if l[ny][nx] == 0:\n l[ny][nx] = 1\n cnt +=1 \n q.append([ny, nx])\n \n result.append(cnt)\n \n\nfor i in range(m):\n for j in range(n):\n if l[i][j] == 0:\n bfs(i, j)\n\n \nprint(len(result))\nresult.sort()\nfor i in result:\n print(i, end=' ')\n","sub_path":"벡준2538.py","file_name":"벡준2538.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"365748186","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 28 21:18:42 2014\n\n@author: Richard\n\"\"\"\n\n\nfrom curve_class import Curve\n\n\ndef wa_flat_weight_weights(strategy_container):\n ''' Take a dictionary of signals and return a dictionary of weights according to flat weighting ''' \n total_assets = Curve()\n for child in strategy_container.children:\n total_assets = total_assets.add(Curve.from_dates(child.tprof.dates, 1))\n flat_weight = 1.0 / total_assets\n weights = {}\n for child in strategy_container.children:\n weights[child.id] = flat_weight.fill_nan(0)\n return weights\n\ndef wa_flat_weight(strategy_container):\n ''' Work out the weights and return the weighted signals '''\n weights = wa_flat_weight_weights(strategy_container)\n strategy_container.weights = weights","sub_path":"wa_flat_weight.py","file_name":"wa_flat_weight.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"225548641","text":"import fav_movies\r\nimport media\r\n\r\n# storing movie details: movie title, storyline, poster URL and movie\r\n# trailer URL\r\navatar = media.Movie(\"Avatar\", \"A marine on an alien planet\",\r\n \"http://upload.wikimedia.org/wikipedia/id/b/b0/Avatar-Teaser-Poster.jpg\", # NOQA\r\n \"http://www.youtube.com/watch?v=-9ceBgWV8io\")\r\n\r\nbahubali = media.Movie(\"Bahubali\", \"Story of a kingdom and a young legend\",\r\n \"https://s-media-cache-ak0.pinimg.com/originals/67/fd/63/67fd631e9d9dd46eca00db23012c5908.jpg\", # NOQA\r\n \"https://www.youtube.com/watch?v=sOEg_YZQsTI\")\r\n\r\nsing = media.Movie(\"Sing\",\r\n \"The journey of a Koala Buster Moon to produce the world's\\\r\n greatest singing competition.\",\r\n \"http://t2.gstatic.com/images?q=tbn:ANd9GcQeTMzh3aGw46IVUdS6N2tToanuLOc9dO7f6CgWVQlq1laJjuXa\", # NOQA\r\n \"https://www.youtube.com/watch?v=YDQizlFzVdA\")\r\n\r\nidiots = media.Movie(\"3 idiots\", \"Two friends looking for a lost buddy deal with a forgotten bet\\\r\n a wedding they are forced to crash and an out of control funeral.\",\r\n \"http://sites.psu.edu/pragnyaprabakaran/wp-content/uploads/sites/38771/2016/04/3i-poster-3.jpg\", # NOQA\r\n \"https://www.youtube.com/watch?v=xvszmNXdM4w\")\r\n\r\nnotebook = media.Movie(\"Notebook\",\r\n \"In 1940s South Carolina, mill worker Noah Calhoun and\\\r\n rich girl Allie are desperately in love.\",\r\n 
\"http://cdn3.movieroomreviews.com/sites/movieroomreviews.com/files/imagecache/full_size_image/photos/the-notebook-movie-picture-59.jpg\", # NOQA\r\n \"https://www.youtube.com/watch?v=FC6biTjEyZw\")\r\n\r\ndespicable = media.Movie(\"Despicable me\", \"A man who delights in all things wicked,\"\r\n \"supervillain Gru (Steve Carell) hatches a plan to steal the moon.\",\r\n \"https://images-na.ssl-images-amazon.com/images/I/51p%2BZw474tL.jpg\", # NOQA\r\n \"https://www.youtube.com/watch?v=nVwae09eSpo\")\r\n\r\n# grouping all instances together in a list\r\nmovies = [avatar, bahubali, sing, idiots, notebook, despicable]\r\n\r\n# calling function open_movies_page to open in a web browser\r\nfav_movies.open_movies_page(movies)\r\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"247736568","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import JointState\nfrom markers import *\nfrom proyectfunctions import *\nfrom roslib import packages\n\nimport rbdl\n\nrospy.init_node(\"control_pdg\")\npub = rospy.Publisher('joint_states', JointState, queue_size=1000)\nbmarker_actual = BallMarker(color['RED'])\nbmarker_deseado = BallMarker(color['GREEN'])\n\n# files to store the logged data\nfqact = open(\"/tmp/qactual_pdg.txt\", \"w\")\nfqdes = open(\"/tmp/qdeseado_pdg.txt\", \"w\")\nfxact = open(\"/tmp/xactual_pdg.txt\", \"w\")\nfxdes = open(\"/tmp/xdeseado_pdg.txt\", \"w\")\n\n# Joint names\njnames = ['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6', 'joint_7']\n\n# JointState message object\njstate = JointState()\n# Message values\njstate.header.stamp = rospy.Time.now()\njstate.name = jnames\n\n# =============================================================\n# Initial joint configuration (in radians)\nq = np.array([0., 0., 0.2, np.pi/2, 0., 0., 0.])\n# Initial velocity\ndq = np.array([0., 0., 0., 0., 0., 0., 0.])\n# Desired joint configuration\nqdes = np.array([0.2, 0.2, 0.3, 1.2, 0.25, 0.2, 0.2])\n# =============================================================\n\n# Position resulting from the desired joint configuration\nxdes = fkine(qdes)[0:3,3]\n# Copy the joint configuration into the message to be published\njstate.position = q\npub.publish(jstate)\n\n# RBDL model\nmodelo = rbdl.loadModel('../urdf/robot_siad7n.urdf')\nndof = modelo.q_size # Degrees of freedom\ng = np.zeros(ndof) # Storage for the gravity vector\n\n# Publishing frequency (in Hz)\nfreq = 20\ndt = 1.0/freq\nrate = rospy.Rate(freq)\n\n# Dynamic simulator of the robot\nrobot = Robot(q, dq, ndof, dt)\n# Define the controller gains\nvalores = 1*np.array([0.1, 1.0, 10, 1.0, 0.01, 0.01, 1e-10])\nKp = np.diag(valores)\nKd = 2*np.sqrt(Kp)\n\nu = np.zeros(ndof) # Storage for the control law\nb = np.zeros(ndof) # For the nonlinear effects\nM = np.zeros([ndof, ndof]) # For the inertia matrix\n\n# Continuous execution loop\nt = 0.0\n\nwhile not rospy.is_shutdown():\n\n # Read values from the simulator\n q = robot.read_joint_positions()\n dq = robot.read_joint_velocities()\n # Current end-effector position\n x = fkine(q)[0:3,3]\n # Current time (needed as a ROS timestamp)\n jstate.header.stamp = rospy.Time.now()\n\n fxact.write(str(t)+' '+str(x[0])+' '+str(x[1])+' '+str(x[2])+'\\n')\n fxdes.write(str(t)+' '+str(xdes[0])+' '+str(xdes[1])+' '+str(xdes[2])+'\\n')\n fqact.write(str(t)+' 
'+str(q[0])+' '+str(q[1])+' '+ str(q[2])+' '+ str(q[3])+' '+str(q[4])+' '+str(q[5])+' '+str(q[6])+'\\n ')\n fqdes.write(str(t)+' '+str(qdes[0])+' '+str(qdes[1])+' '+ str(qdes[2])+' '+ str(qdes[3])+' '+str(qdes[4])+' '+str(qdes[5])+' '+str(qdes[6])+'\\n ')\n\n # ----------------------------\n # Dynamic control\n # ----------------------------\n\n if np.linalg.norm(q- qdes) < 0.05:\n break\n\n rbdl.InverseDynamics(modelo, q, np.zeros(ndof), np.zeros(ndof), g) \n rbdl.CompositeRigidBodyAlgorithm(modelo,q,M)\n rbdl.NonlinearEffects(modelo,q,dq,b)\n\n u = M.dot(-Kd.dot(dq) + Kp.dot(qdes - q)) + b\n\n print('error: ')\n print(qdes - q)\n \n # Robot simulation\n robot.send_command(u)\n\n # Publish the message\n jstate.position = q\n pub.publish(jstate)\n bmarker_deseado.xyz(xdes)\n bmarker_actual.xyz(x)\n t = t+dt\n # Wait until the next iteration\n rate.sleep()\n","sub_path":"src/reporte/src/control_pdg.py","file_name":"control_pdg.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"297312488","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nUsage:\n leaf_spine_with_hosts_10000 [options]\n\nOptions:\n -h, --help Show this page\n --list\n --host=host\n\"\"\"\nimport sys\nimport json\n\n\ndef natural_numbers():\n i = 0\n while True:\n yield i\n i += 1\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n if '-h' in args:\n print (__doc__)\n return 1\n\n if '--host' in args:\n print (\"--host not supported\")\n return 1\n\n data = {'_meta': {'hostvars': {}}}\n leaves = ['Leaf{0}'.format(x) for x in range(48)]\n spines = ['Spine{0}'.format(x) for x in range(4)]\n hosts = ['Host{0}-{1}'.format(x, y) for x in range(24) for y in range(440)]\n hosts_per_leaf = {'Leaf{0}'.format(x): ['Host{0}-{1}'.format(x // 2, y) for y in range(440)] for x in range(48)}\n\n switches = []\n switches.extend(leaves)\n switches.extend(spines)\n\n devices = []\n devices.extend(leaves)\n devices.extend(spines)\n devices.extend(hosts)\n\n device_interface_seqs = {name: natural_numbers() for name in devices}\n\n for name in spines:\n links = []\n host_data = {'ansible_topology': {'type': \"switch\", 'links': links}}\n data['_meta']['hostvars'][name] = host_data\n\n for name in leaves:\n links = []\n for remote_device in spines:\n links.append({'name': 'eth{0}'.format(next(device_interface_seqs[name])),\n 'remote_device_name': remote_device,\n 'remote_interface_name': 'eth{0}'.format(next(device_interface_seqs[remote_device]))\n })\n for remote_device in hosts_per_leaf[name]:\n links.append({'name': 'eth{0}'.format(next(device_interface_seqs[name])),\n 'remote_device_name': remote_device,\n 'remote_interface_name': 'eth{0}'.format(next(device_interface_seqs[remote_device]))\n })\n host_data = {'ansible_topology': {'type': \"switch\", 'links': links}}\n data['_meta']['hostvars'][name] = host_data\n\n for name in hosts:\n links = []\n host_data = {'ansible_topology': {'type': \"host\", 'links': links}}\n data['_meta']['hostvars'][name] = host_data\n\n data['switches'] = switches\n data['hosts'] = hosts\n\n print (json.dumps(data, sort_keys=True, indent=4))\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n\n\n","sub_path":"leaf_spine_with_hosts_10000.py","file_name":"leaf_spine_with_hosts_10000.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"87005195","text":"#coding:utf-8\nfrom web import app\nimport utils\n\napp.secret_key = 'adlj;ja;dlk'\n#app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\n\n\nconfig = utils.get_config('web')\napp.config.update(config)\n\nif __name__ == '__main__':\n app.run(host=config.get('bind','0.0.0.0'),port=int(config.get('port')),debug=True)\n","sub_path":"runweb.py","file_name":"runweb.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"599560841","text":"import views.helper.helper_text as helperText\nimport views.helper.helper_views as helper\nimport views.helper.helper_poison as poison\n\n\ndef enter():\n helperText.inform_enter_text()\n input(\"\")\n helper.clear()\n\n\ndef input_frame_size(development):\n helperText.inform_frame_size()\n if development:\n input_aux = 6\n else:\n input_aux = int(input())\n helper.clear()\n return input_aux\n\n\ndef input_channel_speed(development):\n helperText.inform_channel_speed()\n if development:\n input_aux = 2\n else:\n input_aux = int(input())\n helper.clear()\n return input_aux\n\n\ndef input_probability_resend(development):\n helperText.inform_probability_resend()\n if development:\n input_aux = 40\n else:\n input_aux = int(input())\n helper.clear()\n return input_aux\n\n\ndef input_time_program(development):\n helperText.inform_time_program()\n if development:\n input_aux = 1\n else:\n input_aux = int(input())\n helper.clear()\n return helper.convert_seconds_mileseconds(input_aux)\n\n\ndef input_stations(development):\n stations = []\n helperText.inform_stations()\n if development:\n qtStations = 2\n else:\n qtStations = int(input())\n for i in range(qtStations):\n station = dict()\n helperText.inform_avarage_transmission(i + 1)\n if development:\n station['avarage_transmission'] = 500000\n else:\n station['avarage_transmission'] = int(input())\n station['frames'] = []\n station['frames_transmitted'] = []\n station['poison'] = -1\n stations.append(station)\n helper.clear()\n return stations","sub_path":"views/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"587434610","text":"# -*- coding: utf-8 -*-\nimport constants\nimport db\nimport json\nimport telegram\nimport answers\nimport functions\nfrom khayyam import JalaliDate\nfrom telegram import ReplyKeyboardMarkup\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup, KeyboardButton\nfrom telegram.ext import CallbackQueryHandler, ConversationHandler\nfrom random import randint\n\ndef show_question(q_id, chat_id, bot, withans = False, callback = False, msg_id = 0):\n q = db.get_question(q_id)\n if (q == None):\n return -1\n question = q[\"question\"]\n q_link = '/q'+str(q['msg_id'])\n asker_id = q[\"user_id\"]\n date = JalaliDate(q['date'])\n datestr = functions.enToPersianNumb(date.strftime('%Y/%m/%d'))\n asker = db.get_user(asker_id)\n like = len(q['followers'])\n if chat_id in q['followers']:\n text_like = 'شما و '+functions.enToPersianNumb(like-1)+' نفر ♥️'\n else:\n text_like =functions.enToPersianNumb(like) + ' نفر♥️ '\n if (asker['username'] == ''):\n asker = '/u'+str(asker_id)\n else:\n asker = '/u'+asker['username']\n # asker = '/u'+str(asker_id)\n if db.user_have_answered(q_id, chat_id):\n answer_text = 'ویرایش جواب'\n answer_callback_data = 'edit_'+str(q_id)+\"_\"+str(chat_id)\n else:\n answer_text ='📝جواب میدم'\n 
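# The callback data packs the action name, question id and chat id into one underscore-separated string for the button handler to parse.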
answer_callback_data = 'answer_'+str(q_id)+'_'+str(chat_id)\n if db.user_is_admin(chat_id):\n delete_text = 'حذف'\n else:\n delete_text = ''\n buttons = [[\n InlineKeyboardButton(text=answer_text,\\\n callback_data=answer_callback_data),\n InlineKeyboardButton(text=text_like,\n callback_data='likequestion_'+str(q_id))],\n [InlineKeyboardButton(text=delete_text,\n callback_data='deleteQuestion_'+ str(q_id))\n ]]\n keyboard = InlineKeyboardMarkup(buttons)\n text_message = constants.TEXT_QUESTION+'\\n'+'🤔 سوال\\n '+question+'؟\\n' + '\\n لینک سوال: '+ q_link + '\\n\\nAsked by '+asker+'\\n'+datestr\n if withans:\n bot.sendMessage(chat_id, text = text_message)\n else:\n if (callback == False):\n msg = bot.sendMessage(chat_id, text = text_message,\n reply_markup = keyboard)\n db.add_msgid_and_user_to_recent_messages_question(chat_id, q_id, msg['message_id'])\n if db.have_answer(q_id):\n answers.show_answers(bot, chat_id, q_id,show=True)\n else:\n bot.editMessageReplyMarkup(chat_id = chat_id,\n message_id = msg_id ,\n reply_markup = keyboard)\n\n # ForceReply\n\ndef insert_question(bot, update):\n msg = update.message\n if msg.text == 'سوالای اخیر' or msg.text == '🤔 از چجو بپرس' or msg.text == '⚙ تنظیمات' or msg.text == '👤 پروفایل ' or msg.text == 'لیست کاربران':\n return constants.STATE_ASK\n question_id = str(msg.message_id)+'-'+str(msg.chat_id)\n db.insert_question_to_temp(msg.message_id, msg.text, msg.chat_id, msg.date)\n # db.insert_new_question(msg.message_id, msg.text, msg.chat_id, msg.date)\n bot.sendMessage(update.message.chat_id,\n text = \"لطفا موضوع سوال خود را از موارد زیر انتخاب کنید یا اگر منصرف شده اید /skip را بزنید 😎\",\n reply_markup = constants.KEYBOARD_TOPIC)\n # bot.sendMessage(update.message.chat_id,\n # text = \" سوالت با موفقیت ثبت شد\",\n # reply_markup = constants.KEYBOARD_MAIN)\n return constants.STATE_TOPIC\n\ndef finish_question(bot, update):\n u_id = update.message.chat_id\n q_id = db.push_question_from_temp_to_questions(u_id)\n # db.follow_or_unfollow_question(q_id, u_id)\n bot.sendMessage(chat_id = u_id, text='سوالت با موفقیت ثبت شد 🤓', reply_markup= constants.KEYBOARD_MAIN)\n db.activate(update.message.chat_id)\n show_question_to_all_topic_followers(q_id, bot)\n return constants.STATE_MAIN\n\ndef skip_question(bot, update):\n bot.sendMessage(update.message.chat_id,\\\n text =\"سوالی ثبت نشد ☹️\", reply_markup= constants.KEYBOARD_MAIN)\n db.empty_temp(update.message.chat_id)\n db.activate(update.message.chat_id)\n return constants.STATE_MAIN\n\ndef show_random_question(bot, user_id):\n ques = db.get_last_10_questions()\n i = randint(0, len(ques)-1)\n show_question(ques[i]['id'], user_id, bot)\n\ndef show_question_to_followers(qid, anid, bot, user_id):\n followers = db.get_followers_question(qid)\n # user_followers = db.get_followers_user(user_id)\n topic = db.get_topic_of_question(qid)\n followers = db.get_users_followed_topic(topic)\n for user in followers:\n user = user['id']\n if db.user_is_active(user):\n try:\n # bot.sendMessage(user, text='سوالی که قبلا لایک کرده بودید جواب داده شد 😍')\n bot.sendMessage(user, text='به سوال زیر که در موضوع {} پرسیده شده بود جوابی جدید داده شد 😍'.format(topic))\n answers.show_answer(bot, user, qid, anid, True)\n except:\n print('exeption')\n db.unactivate(user)\n\n # for user in user_followers :\n # if db.user_is_active(user):\n # try:\n # bot.sendMessage(user, text='سوالی که قبلا لایک کرده بودید جواب داده شد 😍')\n # # show_question(qid, user, bot, withans = True)\n # answers.show_answer(bot, user, qid, anid, True)\n # 
except:\n # print('exeption')\n # db.unactivate(user)\n\ndef show_question_to_all_topic_followers(qid, bot):\n topic = db.get_topic_of_question(qid)\n all = db.get_users_followed_topic(topic)\n for user in all:\n try:\n bot.sendMessage(user['id'], text='سوال جدید زیر در موضوع {} پرسیده شده 🤓'.format(topic))\n print('yes')\n show_question(qid, user['id'], bot, False)\n except:\n print('except')\n db.unactivate(user['id'])\n\ndef show_last_questions(bot, chat_id, i=0 , number=5, callback = False, m_id = 0, topic = 'همه'):\n skip = number * i\n questions = db.get_last_questions(number, skip, topic)\n if (len(questions) < number ):\n next_text ='صفحه بعد'\n next_call ='notavailable0'\n else:\n next_text ='صفحه بعد'\n next_call ='nextpage'+'_'+str(i+1)+'_'+topic\n if (i == 0):\n before_text = 'صفحه قبل'\n before_call ='notavailable1'\n else:\n before_text = 'صفحه قبل'\n before_call = 'beforpage'+'_'+str(i-1)+'_'+topic\n\n last_questions_text = functions.enToPersianNumb(skip+1)+' تا '+functions.enToPersianNumb(skip+number)+' سوال اخیر در موضوع {}:\\n'.format(topic)\n buttons = [[\n InlineKeyboardButton(text=next_text,\n callback_data=next_call),\n InlineKeyboardButton(text=before_text,\n callback_data= before_call)\n ]]\n keyboard = InlineKeyboardMarkup(buttons)\n\n for q in questions:\n if q['answers'] == None:\n q_number = 0\n else:\n q_number = len(q['answers'])\n q_index = (i*number)+1+questions.index(q)\n text = '\\n'+functions.enToPersianNumb(q_index)+' 🤔سوال: '+ q['question']+'؟\\nلینک: /q'+str(q['msg_id'])+'\\n♥️تعداد دنبال کنندگان : '+ functions.enToPersianNumb(len(q['followers'])) +'\\n📝تعداد جواب ها: '+ functions.enToPersianNumb(q_number)+'\\n.'\n last_questions_text += text\n\n if callback:\n bot.editMessageText(chat_id = chat_id, message_id = m_id , text = last_questions_text, reply_markup = keyboard)\n else:\n # bot.sendMessage(chat_id, text = last_questions_text)\n bot.sendMessage(chat_id, text = last_questions_text, reply_markup = keyboard)\n\ndef show(bot, update):\n message = update.message.text\n if (message == 'همه'):\n bot.sendMessage(update.message.chat_id, text='سوال های اخیر در همه موضوعها:\\n.', reply_markup = constants.KEYBOARD_MAIN)\n bot.sendChatAction(update.message.chat_id, action = 'typing')\n show_last_questions(bot,update.message.chat_id)\n db.activate(update.message.chat_id)\n return constants.STATE_MAIN\n\n elif (message == 'پلتفرم' or message == 'استارتاپ' or message == 'متفرقه' or message == 'چجو'):\n bot.sendMessage(update.message.chat_id, text='سوال هایی که در موضوع {} مطرح شده:'.format(message), reply_markup = constants.KEYBOARD_MAIN)\n bot.sendChatAction(update.message.chat_id, action = 'typing')\n show_last_questions(bot,update.message.chat_id, topic = message)\n db.activate(update.message.chat_id)\n return constants.STATE_MAIN\n\n elif (message == '⬅️'):\n bot.sendMessage(update.message.chat_id, text='برگشتی به منوی اصلی 😃', reply_markup = constants.KEYBOARD_MAIN)\n db.activate(update.message.chat_id)\n return constants.STATE_MAIN\n else:\n bot.sendMessage(update.message.chat_id, text='لطفا از منوی زیر انتخاب کنید 😆', reply_markup = constants.KEYBOARD_READ)\n\ndef show_questions_asked_by_user(bot, chat_id, u_id, i=0, limit=5, callback = False, m_id = 0):\n skip = i * limit\n questions = db.get_questions_of_user(u_id, skip, limit)\n if (len(questions) < limit ):\n next_text ='صفحه بعد'\n next_call ='notavailable0'\n else:\n next_text ='صفحه بعد'\n next_call ='nextpageuserquestions'+'_'+str(i+1)+'_'+str(u_id)\n if (i == 0):\n before_text = 'صفحه قبل'\n 
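# Pagination callbacks encode the target page index and user id (e.g. 'beforepageuserquestions_0_<user_id>') for the callback handler to decode.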
before_call ='notavailable1'\n else:\n before_text = 'صفحه قبل'\n before_call = 'beforepageuserquestions'+'_'+str(i-1)+'_'+str(u_id)\n user_name = db.get_user(u_id)['first_name']\n last_questions_text = functions.enToPersianNumb(skip+1)+' تا '+functions.enToPersianNumb(skip+limit)+' سوال اخیر {}:\\n'.format(user_name)\n buttons = [[\n InlineKeyboardButton(text=next_text,\n callback_data=next_call),\n InlineKeyboardButton(text=before_text,\n callback_data= before_call)\n ]]\n keyboard = InlineKeyboardMarkup(buttons)\n for q in questions:\n if q['answers'] == None:\n q_number = 0\n else:\n q_number = len(q['answers'])\n number = (i*limit)+1+questions.index(q)\n text = '\\n'+ functions.enToPersianNumb(number)+' 🤔سوال: '+ q['question']+'؟\\nلینک: /q'+str(q['msg_id'])+'\\n♥️تعداد دنبال کنندگان : '+ functions.enToPersianNumb(len(q['followers'])) +'\\n📝تعداد جواب ها: '+ functions.enToPersianNumb(q_number)+'\\n.'\n last_questions_text += text\n\n if callback:\n bot.editMessageText(chat_id = chat_id, message_id = m_id , text = last_questions_text, reply_markup = keyboard)\n else:\n # bot.sendMessage(chat_id, text = last_questions_text)\n bot.sendMessage(chat_id, text = last_questions_text, reply_markup = keyboard)\n\ndef show_comming_sessions(bot, chat_id, i=0, edit = False, msg_id = 0):\n session , count = db.get_comming_sessions(i)\n if session == False:\n bot.sendMessage(chat_id= chat_id, text = 'در حال حاضر جلسه ای وجود ندارد')\n return constants.STATE_MAIN\n user = db.get_user(session['u_id'])\n user_name = user['first_name'] +' '+user['last_name']\n if i+1 == count:\n next_text ='بعدی'\n next_call ='notavailable0'\n else:\n next_text ='بعدی'\n next_call ='nextsession_'+str(i+1)+'_'+str(session['u_id'])\n if (i == 0):\n before_text = 'قبلی'\n before_call ='notavailable1'\n else:\n before_text = 'قبلی'\n before_call = 'nextsession_'+str(i-1)+'_'+str(session['u_id'])\n session_text = 'جلسه با '+ user_name\n buttons = [[\n InlineKeyboardButton(text=next_text,\n callback_data=next_call),\n InlineKeyboardButton(text=before_text,\n callback_data= before_call)\n ]]\n keyboard = InlineKeyboardMarkup(buttons)\n if edit == False:\n bot.sendMessage(chat_id= chat_id, text = session_text, reply_markup = keyboard)\n else:\n bot.editMessageText(chat_id= chat_id,message_id = msg_id, text = session_text, reply_markup = keyboard)\n","sub_path":"sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":12884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"345855953","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Read CSV files for unigrams and bigrams\ndef readCSV(doc):\n df = pd.read_csv(\"../data/output/analysed/\" + doc + '.csv', names=['Document', 'Citations', 'Link', 'Score', 'Subject Area', 'Count'], index_col=0)\n # Fill the \"Count\" column in with \"1\" to get a total count in each subject area\n df['Count'].fillna(1, inplace=True)\n # Basic statistics about the dataframe\n print(str(df.head(5)) + \"\\n\")\n print(str(df.info()) + \"\\n\")\n print(str(df.describe()) + \"\\n\")\n return df\n\n# Load the unigrams and bigrams CSV files\nunigrams = readCSV(\"unigrams\")\nbigrams = readCSV(\"bigrams\")\n\n# Plot a graph of \"Score\" vs \"Citations\" for unigrams and bigrams\nunigrams.plot(kind='scatter', x='Citations', y='Score', title='Citation Count vs Abstract Score (Unigrams)')\nbigrams.plot(kind='scatter', x='Citations', y='Score', title='Citation Count vs Abstract Score 
(Bigrams)')\n\n# Statistical metrics for bigrams/unigrams\nprint(\"[Unigrams] S/Ci Correlation: \" + str(unigrams['Score'].corr(unigrams['Citations'])))\nprint(\"[Bigrams] S/Ci Correlation: \" + str(bigrams['Score'].corr(bigrams['Citations'])))\nprint(\"[Unigrams] S/Ci Covariance: \" + str(unigrams['Score'].cov(unigrams['Citations'])))\nprint(\"[Bigrams] S/Ci Covariance: \" + str(bigrams['Score'].cov(bigrams['Citations'])))\nprint(\"[Unigrams] S/Co Correlation: \" + str(unigrams['Score'].corr(unigrams['Count'])))\nprint(\"[Bigrams] S/Co Correlation: \" + str(bigrams['Score'].corr(bigrams['Count'])))\nprint(\"[Unigrams] S/Co Covariance: \" + str(unigrams['Score'].cov(unigrams['Count'])))\nprint(\"[Bigrams] S/Co Covariance: \" + str(bigrams['Score'].cov(bigrams['Count'])))\n\n# Take the top 100 papers and analyse their subject area\nunigrams = unigrams.head(100)\nbigrams = bigrams.head(100)\n\n# Group by subject area and get the total number of papers in each subject area\nunigram_subjects_sum = unigrams.groupby(['Subject Area']).sum()\nunigram_subjects_sum = unigram_subjects_sum.drop(['Citations', 'Score'], axis=1)\nbigram_subjects_sum = bigrams.groupby(['Subject Area']).sum()\nbigram_subjects_sum = bigram_subjects_sum.drop(['Citations', 'Score'], axis=1)\n\n# Plot count results in a bar graph\nunigram_subjects_sum.plot(kind='bar', title='A comparison of the sum of papers per subject area (Unigrams)')\nbigram_subjects_sum.plot(kind='bar', title='A comparison of the sum of papers per subject area (Bigrams)')\n\n# Group by subject area and get the mean score of each subject area\nunigram_subjects_mean = unigrams.groupby(['Subject Area']).mean()\nunigram_subjects_mean = unigram_subjects_mean.drop(['Citations', 'Count'], axis=1)\nbigram_subjects_mean = bigrams.groupby(['Subject Area']).mean()\nbigram_subjects_mean = bigram_subjects_mean.drop(['Citations', 'Count'], axis=1)\n\n# Plot count results in a bar graph\nunigram_subjects_mean.plot(kind='bar', title='A comparison of the mean score of papers per subject area (Unigrams)')\nbigram_subjects_mean.plot(kind='bar', title='A comparison of the mean score of papers per subject area (Bigrams)')\n\n# Show all plots\nplt.show()","sub_path":"Big Data Analytics/Hadoop Batch Analytics/processing/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"288409190","text":"## This is old, a lot of things have been changed\n# JSON_Structure, read and write sequence\n\n##################################################################################################\n# About: Server TCP code, get json object from the simulator and send another json object and\n# process the data using stable baseline\n# Notes:\n#TODO (DONE): Adapt this on python3 to solve the issues in json parser in python3\n#there is difference in json.loads as it only accept string not bytes and in 3 TCP read return bytes\n#and str not converting from bytes to str in py3 but .decode('UTF-8') does\n#and the same for sendall function of TCP it only takes bytes so we need to encode the string first to bytes like object\n#and solving some errors like https://bugs.python.org/issue24283\n\n#Reference: https://pymotw.com/2/socket/tcp.html\n\n#Coding Style: camelCase\n# Run it with . 
~/virtualenvs/baselines_env/bin/activate\n\n##################################################################################################\n#import the libraries\nimport socket\nimport sys\nimport signal\nimport json\nfrom time import *\nimport os\nimport random\nimport numpy as np\nfrom transforms3d.euler import euler2mat\n\n# import stable_baselines\n\n\nprint(\"Finish importing the libraries\")\n\n#import openai\n#import tensorflow as tf\n#import numpy as np\n#from baselines import ...\n\n#--------------------------------------------Vars--------------------------------------------\n\n#Settings for the TCP communication\npacketSize = 500\nportNum = 10008\nhostName = 'localhost'\n# connection = None\n# clientAddress = None\n\nglobalFlag = 0 #this is used to reset the NTRT environment and TCP connection with it\n\n# JSON object structure\njsonObj = {\n # 'Controllers_num': 9,\n # 'Controllers_index': [2, 4, 5, 6, 7, 11, 13, 17, 19],\n # 'Controllers_val': [18,-1,-1,-1,-1,-1,-1,-1,-1],\n # 'Controllers_val': [-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1, -1,-1,-1,-1,-1,-1,-1,-1,-1,-1],\n 'Controllers_val': [0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0],\n 'Reset': 0\n}\n#--------------------------------------------------------------------------------------------\n\n\n#--------------------------------------------Functions--------------------------------------------\n# Ctrl+C Handle to close safely the TCP connection\ndef signalHandler(signal, frame):\n # print('You pressed Ctrl+C!')\n tmp = str(input(\"You want reset or close: r/c: \\n\"))\n print(tmp)\n if(tmp == 'r'):\n reset()\n elif(tmp == 'c'):\n print(\"----------------------------------Exit-----------------------------------\")\n global globalFlag\n globalFlag = 2\n else:\n # print(\"Please Ctrl+C and write 'r' or 'c' \")\n sleep(5)\n# function for writing data into TCP connection\ndef write(connection, data):\n # print('sending data to the client:\"{}\"'.format(data))\n try:\n connection.sendall(data.encode())\n except Exception as e:\n print(\"$$$$$$$$$$$$ ERROR in Writing $$$$$$$$$$$$\")\n print(\"Error: \" + str(e))\n\n# function for reading data from TCP connection\ndef read(connection):\n try:\n data = []\n counter = 1\n # Receive the data in small chunks and retransmit it\n while True:\n data.append(connection.recv(packetSize).decode(\"utf-8\")) #reading part\n # print('{} received \"{}\"'.format(counter,data[-1]))\n # print(data[-1][-14:-1], ('ZFinished' in str(data[-1][-14:-1])))\n if 'ZFinished' in str(data[-1][-14:-1]):\n # print(\"FINISHED*************\")\n # sleep(5)\n break\n counter += 1\n return \"\".join(data)\n except ValueError:\n print(ValueError)\n print(\"$$$$$$$$$$$$ ERROR in Reading $$$$$$$$$$$$\")\n # sleep(2)\n return None\ndef reset():\n global globalFlag\n globalFlag = 1\n\ndef main():\n start_time = time()\n while True:\n #Note: TODO: Make in the simulator wait a second then send a message\n os.system('/home/hany/repos/Work/IU/Tensegrity/Tensegrity-Robotics/src/dev/legz/python_communication_test/helper.sh')\n\n print('#########\\nwaiting for a connection\\n#########')\n connection, clientAddress = sock.accept() #wait until it get a client\n print('connection from', clientAddress)\n global globalFlag\n globalFlag = 0\n target = 24\n sign = -0.1\n\n while True:\n r = read(connection)\n # print(r)\n if(r != None):\n 
jsonObjTmp = json.loads(r) # Parse the data from string to json\n print(\"s1##{:} $${:}\".format(jsonObj[\"Controllers_val\"][2],jsonObjTmp[\"Controllers\"][2]))\n\n # TODO: Use the incoming data after being converted to json\n\n # TODO:\n # Take the data from the simulator module\n # Formulate the data as observation\n # Generate Reward\n # Feed the RL Algorithm with Reward and observartion\n # Generate Action\n # Decide either end of episode (Reset the simulator) or specific Action\n # Modify the action in json\n \n\n\n # if(jsonObjTmp[\"Controllers\"][2] >= 23.5 and sign == 1):\n # print(\"FLIP\")\n # target = jsonObjTmp[\"Controllers\"][2]\n # sign = -6\n # if(jsonObjTmp[\"Controllers\"][2] <= 22.5 and sign == -6):\n # print(\"FLIP\")\n # # target = 24\n # sign = 1\n # target = jsonObjTmp[\"Controllers\"][2] + sign*0.5\n # # print(target)\n # print(sign)\n # # jsonObj[\"Controllers_val\"][2] = target\n # if(jsonObjTmp[\"Flags\"][0] == 1):\n # print(\"FLAG\")\n # # jsonObj[\"Controllers_val\"][2] = target\n # jsonObj[\"Controllers_val\"][2] = jsonObjTmp[\"Controllers\"][2]\n\n # print(\"s2##{:} $${:}\".format(jsonObj[\"Controllers_val\"][2],jsonObjTmp[\"Controllers\"][2]))\n # input()\n # # jsonObj[\"Controllers_val\"][2] = jsonObjTmp[\"Controllers\"][2]\n # if((time() - start_time)% 5 and jsonObjTmp[\"Flags\"][0] == 1):\n # print(jsonObjTmp[\"Center_of_Mass\"][4], jsonObjTmp[\"Orientation\"][4])\n # CMS = np.array(jsonObjTmp[\"Center_of_Mass\"][4])\n # half_length = 15\n # orientation_vector = np.array(jsonObjTmp[\"Orientation\"][4][:3])\n # end_point_local1 = np.array([0, half_length,0])\n # end_point_local2 = np.array([0,-half_length,0])\n\n # yaw,pitch,roll = orientation_vector\n # rot_mat = np.matrix(euler2mat(yaw, pitch, roll, 'syxz'))\n # print(rot_mat)\n\n # # print(\"end_point1 in local coordinate system\", end_point_local1)\n # # print(\"end_point2 in local coordinate system\", end_point_local2)\n\n\n # end_point_world1 = CMS+rot_mat.transpose().dot(end_point_local1)\n # end_point_world2 = CMS+rot_mat.transpose().dot(end_point_local2)\n\n\n # print(\"#2 end_point1 in world coordinate system\", end_point_world1)\n # print(\"#2 end_point2 in world coordinate system\", end_point_world2)\n\n\n if(jsonObjTmp[\"Flags\"][0] == 1):\n sign = -1*sign\n print(\"FLIP\")\n # input()\n\n jsonObj[\"Controllers_val\"][2] = sign\n jsonObj[\"Controllers_val\"][5] = sign\n\n print(\"state##{:} $${:}\".format(jsonObj[\"Controllers_val\"][2],jsonObjTmp[\"Controllers\"][2]))\n \n \n write(connection,json.dumps(jsonObj)) # Write to the simulator module the json object with the required info\n if(globalFlag > 0):\n print(\"GLOBAL FLAG Exit\")\n break\n connection.close()\n if(globalFlag == 2):\n sys.exit(0)\n#-------------------------------------------------------------------------------------------------\n\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a TCP/IP socket\nserverAddress = (hostName, portNum) # Bind the socket to the port\n\n\nprint('#########\\nstarting up on {} port {}\\n#########'.format(serverAddress, portNum))\nsock.bind(serverAddress)\nsock.listen(1) # Listen for incoming connections\n\n\n\nsignal.signal(signal.SIGINT, signalHandler) # Activate the listen to the Ctrl+C\n\n\n# This is top open the simulator\nprint(\"Opening the NTRT 
simulator\")\n\nmain()","sub_path":"src/dev/legz/python_communication_test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"318807080","text":"from diffusers import StableDiffusionPipeline\nimport torch\n\nmodel_id = \"model4/\"\npipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(\"cuda\")\n\nprompt = \"A \"\n\nnum_images = 5\n\nfor i in range(num_images):\n image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]\n image.save(f\"rcc{i}.png\")\n","sub_path":"v4 textual inversion/v4.py","file_name":"v4.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"434511259","text":"import pygame\nfrom math import *\n\nfrom constantes import *\nfrom player import *\nfrom arc import *\n\n\nclass Circle(pygame.sprite.Sprite):\n\n def __init__(self, height, width, rayon, sens_rotation, pos_x, vitesse_rotation, synchronisation):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([rayon * 2, rayon * 2]).convert_alpha()\n self.rect = self.image.get_rect()\n self.height = height\n self.width = width\n self.rayon = rayon\n self.pos_x = pos_x\n self.sens_rotation = sens_rotation\n self.v_r = vitesse_rotation\n self.synchro = synchronisation\n\n self.i = 0 # current rotation angle\n self.scroll = 0 # enables scrolling\n\n self.all_arcs = pygame.sprite.Group()\n if self.synchro == 0:\n self.arc_1 = Arc(\n PURPLE, self.rect, 0 + self.i, pi / 2 + self.i, self.width, self.rayon)\n self.arc_2 = Arc(\n YELLOW, self.rect, pi / 2 + self.i, pi + self.i, self.width, self.rayon)\n self.arc_3 = Arc(\n BLUE, self.rect, pi + self.i, 3 * pi / 2 + self.i, self.width, self.rayon)\n self.arc_4 = Arc(\n ROSE, self.rect, 3 * pi / 2 + self.i, 2 * pi + self.i, self.width, self.rayon)\n elif self.synchro == 1:\n self.arc_1 = Arc(\n YELLOW, self.rect, 0 + self.i, pi / 2 + self.i, self.width, self.rayon)\n self.arc_2 = Arc(\n PURPLE, self.rect, pi / 2 + self.i, pi + self.i, self.width, self.rayon)\n self.arc_3 = Arc(\n ROSE, self.rect, pi + self.i, 3 * pi / 2 + self.i, self.width, self.rayon)\n self.arc_4 = Arc(\n BLUE, self.rect, 3 * pi / 2 + self.i, 2 * pi + self.i, self.width, self.rayon)\n if self.synchro == 2:\n self.arc_1 = Arc(\n ROSE, self.rect, 0 + self.i, pi / 2 + self.i, self.width, self.rayon)\n self.arc_2 = Arc(\n BLUE, self.rect, pi / 2 + self.i, pi + self.i, self.width, self.rayon)\n self.arc_3 = Arc(\n YELLOW, self.rect, pi + self.i, 3 * pi / 2 + self.i, self.width, self.rayon)\n self.arc_4 = Arc(\n PURPLE, self.rect, 3 * pi / 2 + self.i, 2 * pi + self.i, self.width, self.rayon)\n\n self.all_arcs.add(self.arc_1)\n self.all_arcs.add(self.arc_2)\n self.all_arcs.add(self.arc_3)\n self.all_arcs.add(self.arc_4)\n\n self.rect.center = (self.pos_x, self.height)\n\n self.image.fill((0, 0, 0, 0))\n\n def update(self):\n self.image.fill((0, 0, 0, 0))\n\n if self.sens_rotation == True:\n self.i += self.v_r # rotation speed\n else:\n self.i -= self.v_r\n\n self.arc_1.update(0 + self.i, pi / 2 + self.i, self.width)\n self.arc_2.update(pi / 2 + self.i, pi + self.i, self.width)\n self.arc_3.update(pi + self.i, 3 * pi / 2 + self.i, self.width)\n self.arc_4.update(3 * pi / 2 + self.i, 2 * pi + self.i, self.width)\n\n self.all_arcs.draw(self.image)\n # anti-aliasing\n\n pygame.gfxdraw.aacircle(\n 
self.image, self.rayon, self.rayon, self.rayon + 2, GREY)\n pygame.gfxdraw.aacircle(\n self.image, self.rayon, self.rayon, self.rayon + 1, GREY)\n pygame.gfxdraw.aacircle(\n self.image, self.rayon, self.rayon, self.rayon - self.width, GREY)\n pygame.gfxdraw.aacircle(\n self.image, self.rayon, self.rayon, self.rayon - self.width - 1, GREY)\n\n self.rect.center = (self.pos_x, self.height + self.scroll)\n\n def collide(self, player):\n color = player.color\n if pygame.sprite.collide_mask(player, self.arc_1) and color != self.arc_1.color:\n print(\"Collision couleur PURPLE\")\n return True\n elif pygame.sprite.collide_mask(player, self.arc_2) and color != self.arc_2.color:\n print(\"Collision couleur YELLOW\")\n return True\n elif pygame.sprite.collide_mask(player, self.arc_3) and color != self.arc_3.color:\n print(\"Collision couleur BLUE\")\n return True\n elif pygame.sprite.collide_mask(player, self.arc_4) and color != self.arc_4.color:\n print(\"Collision couleur ROSE\")\n return True\n else:\n pass\n","sub_path":"Modele/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"390111550","text":"#!/usr/bin/env python3\nfrom ev3dev.ev3 import *\nfrom time import sleep\n\nsleep(5)\n\n# Attach large motors to ports B and C\nmB = LargeMotor('outB')\nmC = LargeMotor('outC')\n\n# Make the robot advance such that the wheels rotate 720 deg\n# (50% speed, apply brake when movement terminated). \n# Assuming speed_sp=900 gives full speed then\n# speed_sp=450 gives 50% speed\nmB.run_to_rel_pos(position_sp=720, speed_sp=450, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=720, speed_sp=450, stop_action=\"brake\")\n\n# wait for both motors to complete their movements\nmB.wait_while('running')\nmC.wait_while('running')\n \nsleep(1) # Wait one second\n\n# Make the robot move BACKWARDS such that the wheels rotate 720 deg\n# (50% speed, apply brake when movement terminated)\nmB.run_to_rel_pos(position_sp=-720, speed_sp=450)\nmC.run_to_rel_pos(position_sp=-720, speed_sp=450)\n# There was no need to include stop_action=\"brake\" because\n# that had already been set earlier\n\n# wait for both motors to complete their movements\nmB.wait_while('running')\nmC.wait_while('running')\n \nsleep(1) # Wait one second\n\n# Make the robot advance for 1000 milliseconds\n# (50% speed, apply brake when movement terminated)\nmB.run_timed(time_sp=1000, speed_sp=450)\nmC.run_timed(time_sp=1000, speed_sp=450)\n\n# wait for both motors to complete their movements\nmB.wait_while('running')\nmC.wait_while('running')","sub_path":"move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"626211196","text":"\"\"\"renaming list to pryce_list\n\nRevision ID: c827ee4a1745\nRevises: 41277ca11f79\nCreate Date: 2020-02-14 11:27:19.808120\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.exc import ProgrammingError\nfrom sqlalchemy.schema import FetchedValue\n\n# revision identifiers, used by Alembic.\nrevision = 'c827ee4a1745'\ndown_revision = '41277ca11f79'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
    op.create_table('pryce_list',\n    sa.Column('pryce_list_id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(), nullable=False),\n    sa.Column('owner', sa.Integer(), nullable=True),\n    sa.Column('access_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['access_id'], ['access.access_id'], onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.ForeignKeyConstraint(['owner'], ['appuser.appuser_id'], onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.PrimaryKeyConstraint('pryce_list_id')\n    )\n    op.create_table('pryce_list_item',\n    sa.Column('item_id', sa.Integer(), nullable=False),\n    sa.Column('pryce_list_id', sa.Integer(), nullable=False),\n    sa.Column('quantity', sa.Integer(), server_default=FetchedValue(), nullable=True),\n    sa.ForeignKeyConstraint(['item_id'], ['item.item_id'], onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.ForeignKeyConstraint(['pryce_list_id'], ['pryce_list.pryce_list_id'], onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.PrimaryKeyConstraint('item_id', 'pryce_list_id')\n    )\n    op.drop_constraint('list_item_item_id_fkey', 'list_item', type_='foreignkey')\n    op.drop_constraint('list_item_list_id_fkey', 'list_item', type_='foreignkey')\n    op.drop_table('list_item')\n    op.drop_constraint('list_access_id_fkey', 'list', type_='foreignkey')\n    op.drop_constraint('list_owner_fkey', 'list', type_='foreignkey')\n    op.drop_table('list')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('list',\n    sa.Column('list_id', sa.INTEGER(), autoincrement=True, nullable=False),\n    sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=False),\n    sa.Column('owner', sa.INTEGER(), autoincrement=False, nullable=True),\n    sa.Column('access_id', sa.INTEGER(), autoincrement=False, nullable=True),\n    sa.ForeignKeyConstraint(['access_id'], ['access.access_id'], name='list_access_id_fkey',\n                            onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.ForeignKeyConstraint(['owner'], ['appuser.appuser_id'], name='list_owner_fkey',\n                            onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.PrimaryKeyConstraint('list_id', name='list_pkey')\n    )\n    op.create_table('list_item',\n    sa.Column('item_id', sa.INTEGER(), autoincrement=False, nullable=False),\n    sa.Column('list_id', sa.INTEGER(), autoincrement=False, nullable=False),\n    sa.Column('quantity', sa.INTEGER(), autoincrement=False, nullable=True),\n    sa.ForeignKeyConstraint(['item_id'], ['item.item_id'], name='list_item_item_id_fkey', onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.ForeignKeyConstraint(['list_id'], ['list.list_id'], name='list_item_list_id_fkey', onupdate='CASCADE', ondelete='RESTRICT'),\n    sa.PrimaryKeyConstraint('item_id', 'list_id', name='list_item_pkey')\n    )\n\n    op.drop_table('pryce_list_item')\n    op.drop_table('pryce_list')\n    # ### end Alembic commands ###\n","sub_path":"pryce/database/migrations/versions/c827ee4a1745_renaming_list_to_pryce_list.py","file_name":"c827ee4a1745_renaming_list_to_pryce_list.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"341797368","text":"import gdal\nimport numpy as np\n\n\nclass RasterFile(object):\n    DEFAULT_BAND_NUMBER = 1\n\n    def __init__(self, filename):\n        self.file = filename\n        self._extent = None\n        self._elevation = None\n        self._hillshade = None\n        self._slope = None\n        self._aspect = None\n\n    @property\n    def file(self):\n        return self._file\n\n    
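# the setter opens the path with gdal.Open and caches the dataset handle on self._file\n    @file.setter\n    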
def file(self, filename):\n        self._file = gdal.Open(filename)\n\n    @property\n    def extent(self):\n        if self._extent is None:\n            gt = self.geo_transform()\n            x_min = gt[0]\n            x_max = gt[0] + self.file.RasterXSize * gt[1]\n            y_min = gt[3] + self.file.RasterYSize * gt[5]\n            y_max = gt[3]\n\n            self._extent = x_min, x_max, y_min, y_max\n        return self._extent\n\n    def values_for_band(self, band_number=1, **kwargs):\n        raster = kwargs.get('raster', self.file)\n\n        band = raster.GetRasterBand(band_number)\n        values = np.ma.masked_values(\n            band.ReadAsArray(), band.GetNoDataValue() or 0., copy=False\n        )\n        del band\n        return values\n\n    def get_raster_attribute(self, attribute):\n        raster = gdal.DEMProcessing('', self.file, attribute, format='MEM')\n        raster_values = self.values_for_band(raster=raster)\n        del raster\n        return raster_values\n\n    @property\n    def hill_shade(self):\n        if self._hillshade is None:\n            self._hillshade = self.get_raster_attribute('hillshade')\n        return self._hillshade\n\n    @property\n    def slope(self):\n        if self._slope is None:\n            self._slope = self.get_raster_attribute('slope')\n        return self._slope\n\n    @property\n    def aspect(self):\n        if self._aspect is None:\n            self._aspect = self.get_raster_attribute('aspect')\n        return self._aspect\n\n    @property\n    def elevation(self):\n        if self._elevation is None:\n            self._elevation = self.values_for_band()\n        return self._elevation\n\n    def geo_transform(self):\n        return self.file.GetGeoTransform()\n\n    def join_masks(self, attribute, other):\n        \"\"\"\n        Extend the numpy mask for given attribute with mask from given other\n        masked numpy array.\n\n        Note: This will *permanently* change the mask.\n\n        :param attribute: name of property to change the mask\n        :param other: Masked numpy array to extend the mask with\n        \"\"\"\n        attr = getattr(self, attribute)\n        attr.mask = np.ma.mask_or(attr.mask, other.mask)","sub_path":"raster_compare/base/raster_file.py","file_name":"raster_file.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"642276385","text":"import UTILS.PROMPI_data as prd\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport yt\n\ndataloc = 'D:\\simonX\\ob-mres-newbindata-5Apr19\\ob-mres-newbindata-5Apr19'\nfilename_blck = dataloc+'ob3d.45.hrez.00617.bindata'\n\ndat = ['density','temp']\n\nblock = prd.PROMPI_bindata(filename_blck,dat)\n\n\n#plt.plot(ob_blck.test())\n\n#plt.plot(ob_blck.test()[:,10,10])\n#plt.plot(ob_blck.test()[:,200,200])\n#plt.plot(ob_blck.test()[:,150,200])\n\n#ds = yt.load_uniform_grid({}, [128, 128, 128],\n#            bbox=np.array([[0.0, 1.0], [0.0, np.pi], [0.0, 2*np.pi]]),\n#            geometry=\"spherical\")\n\n#s = ds.slice(2, np.pi/2)\n#p = s.to_pw(\"funfield\", origin=\"native\")\n#p.set_zlim(\"all\", 0.0, 4.0)\n#p.show()\t\t\t\t\t\t \n\ngrid = block.grid()\n\nnx = grid['nx']\nny = grid['ny']\nnz = grid['nz']\n\nprint(nx,ny,nz)\n\nrho = block.datadict['density']\n\ndata = dict(density = (rho, \"g/cm**3\"))\n\nds = yt.load_uniform_grid(data, rho.shape,\n             bbox=np.array([[3.e8, 1.e9], [0.0, np.pi], [0.0, 2*np.pi]]),\n             geometry=\"spherical\")\n\n\n\n#arr = np.random.random(size=(64,64,64))\n\n#data = dict(density = (arr, \"g/cm**3\"))\n#bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n#ds = yt.load_uniform_grid(data, arr.shape, length_unit=\"Mpc\", bbox=bbox, nprocs=1)\n\ns = ds.slice(2, np.pi/2)\n#p = s.to_pw(\"funfield\", origin=\"native\")\n#s.set_zlim(\"all\", 0.0, 4.0)\n
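# note: ds.slice returns a data container, not a plot; rendering normally goes through\n# to_pw()/SlicePlot as in the commented lines above, so the save/show calls below are exploratory\n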
s.save('test')\ns.show()\n\n\n\n#plt.show(block=False)\n","sub_path":"show_slice.py","file_name":"show_slice.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"360521206","text":"import logging\nimport requests\n\nlogger = logging.getLogger()\n\n\ndef run(config, event_json, good_indicators):\n    logger.debug('Running the Loki Bot detection module.')\n\n    tags = []\n    detections = []\n    extra = []\n\n    # Loop over each sandboxed sample in the event.\n    for sample in event_json['sandbox']:\n\n        # Loop over each HTTP request in the sample.\n        for request in sample['http_requests']:\n\n            # Make sure the request was to a known Loki Bot URI path.\n            if request['uri'].endswith('/fre.php'):\n\n                # Continue if it was a POST request.\n                if request['method'] == 'POST':\n\n                    # Detected Loki Bot if the user-agent matches.\n                    if request['user_agent'] == 'Mozilla/4.08 (Charon; Inferno)':\n                        detections.append('Detected Loki Bot by HTTP POST to URL \"{}\" with user-agent \"{}\"'.format(request['url'], request['user_agent']))\n                        tags.append('lokibot')\n                    else:\n                        detections.append('ERROR: Looks like we detected Loki Bot, but a change in the user-agent: {}'.format(request['user_agent']))\n\n                    # Get the HTTP status code from the request's URL. We are expecting a 404 response.\n                    status_code = requests.head(request['url']).status_code\n                    if status_code == 404:\n\n                        # Get the text from the URL. We are expecting \"File not found.\"\n                        text = requests.get(request['url']).content.decode('utf-8').strip()\n                        if text == 'File not found.':\n                            detections.append('Detected Loki Bot by HTTP 404 response code and text at URL: {}'.format(request['url']))\n                            tags.append('lokibot')\n                        else:\n                            detections.append('ERROR: Looks like we detected Loki Bot, but a change in the text at URL \"{}\" is now: {}'.format(request['url'], text))\n                    else:\n                        detections.append('ERROR: Looks like we detected Loki Bot, but a change in the HTTP status code at URL \"{}\" is now: {}'.format(request['url'], status_code))\n\n    return tags, detections, extra\n","sub_path":"lib/modules/detections/lokibot.py","file_name":"lokibot.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"626982949","text":"import os\nimport etcd\n\n\nclass DictEtcd(dict):\n    def __init__(self, prefix='/', etcd_client=None):\n        # avoid creating a network client at import time via a mutable default argument\n        self.etcd_client = etcd_client if etcd_client is not None else etcd.Client(port=2379)\n        self.prefix = prefix\n\n    def __getitem__(self, key):\n        path = os.path.join(self.prefix, str(key).replace('/', r'%252F'))\n        try:\n            result = self.etcd_client.get(path)\n            if result.dir:\n                return self.__class__(path, self.etcd_client)\n\n            return result.value\n\n        except etcd.EtcdKeyNotFound:\n            raise KeyError\n\n    def __setitem__(self, key, value):\n        path = os.path.join(self.prefix, str(key).replace('/', r'%252F'))\n        if isinstance(value, dict):\n            try:\n                result = self.etcd_client.get(path)\n                if not result.dir:\n                    del self[key]\n                    raise etcd.EtcdKeyNotFound\n            except etcd.EtcdKeyNotFound:\n                self.etcd_client.write(path, None, None, True)\n\n            for k, v in value.iteritems():\n                tmp = self.__class__(path, self.etcd_client)\n                tmp[k] = v\n\n            return None\n\n        if isinstance(value, list):\n            try:\n                result = self.etcd_client.get(path)\n                if not result.dir:\n                    del self[key]\n                    raise etcd.EtcdKeyNotFound\n            except etcd.EtcdKeyNotFound:\n                self.etcd_client.write(path, None, None, True)\n\n            for k in range(0, len(value)):\n                tmp = self.__class__(path, self.etcd_client)\n                
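# each list element is stored as a child node keyed by its stringified index\n                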
tmp[str(k)] = value[k]\n\n return None\n\n try:\n result = self.etcd_client.get(path)\n if result.dir:\n raise etcd.EtcdNotFile\n except etcd.EtcdNotFile:\n del self[key]\n except etcd.EtcdKeyNotFound:\n pass\n\n if isinstance(value, tuple):\n self.etcd_client.set(path, value[0], value[1])\n else:\n self.etcd_client.set(path, value)\n\n return None\n\n def __delitem__(self, key):\n path = os.path.join(self.prefix, str(key).replace('/', r'%252F'))\n self.etcd_client.delete(path, True)\n\n def __iter__(self):\n tmp = self.etcd_client.get(self.prefix)\n for item in tmp._children:\n yield os.path.basename(item['key']).replace('%2F', r'/')\n\n def __str__(self):\n return \"%s\" % self.copy()\n\n def copy(self):\n tmp = dict()\n\n for key in self.iterkeys():\n if not isinstance(self[key], dict) and not isinstance(self[key], self.__class__):\n tmp[key] = self[key]\n continue\n\n path = os.path.join(self.prefix, str(key).replace('/', r'%252F'))\n tmp[key] = self.__class__(path, self.etcd_client).copy()\n\n return tmp\n\n def iterkeys(self):\n return self.__iter__()\n\n def iteritems(self):\n for key in self.__iter__():\n yield (key, self[key])\n\n def keys(self):\n tmp = list()\n for item in self.__iter__():\n tmp.append(item)\n\n return tmp\n\n def items(self):\n tmp = list()\n for item in self.iteritems():\n tmp.append(item)\n\n return tmp\n\n def itervalues(self):\n for key in self.__iter__():\n yield self[key]\n\n def values(self):\n tmp = list()\n for value in self.itervalues():\n tmp.append(value)\n\n return tmp\n","sub_path":"dictetcd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"544823447","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.isotonic import IsotonicRegression \nfrom multiprocessing import Pool\nimport pickle\n\ndef reliability_binning(obs, verification_data,\n prob_bins=np.arange(0, 1.06, 0.05)): \n \n obs = np.array(obs).ravel()\n verification_data = np.array(verification_data).ravel()\n \n plot_freqs = np.zeros(len(prob_bins))\n for bin_index, bins in enumerate(prob_bins):\n if bins >= 1:\n plot_freqs[bin_index] = np.nan \n continue\n bin_array = np.count_nonzero(\n (verification_data < prob_bins[bin_index+1])&\n (verification_data >= prob_bins[bin_index]))\n obs_yes = np.count_nonzero(\n (verification_data < prob_bins[bin_index+1])&\n (verification_data >= prob_bins[bin_index])&\n (obs > 0.0))\n if bin_array > 0:\n try:\n obs_freq = (obs_yes / bin_array) \n if obs_freq < 0:\n obs_freq = np.nan\n plot_freqs[bin_index] = obs_freq\n except:\n plot_freqs[bin_index] = np.nan \n else: \n plot_freqs[bin_index] = np.nan\n return plot_freqs\t\n\ndef sharpness_frequencies(verification_data, prob_bins=np.arange(0, 1.06, 0.05)):\n frequency_values = np.zeros( (prob_bins.shape[0]) )\n verification_data = np.array(verification_data).ravel()\n for f,f_thresh in enumerate(prob_bins):\n if f_thresh > 1:\n frequency_values[f] = np.nan\n continue\n total_freq = np.count_nonzero((verification_data >= prob_bins[f]) &\n (verification_data < prob_bins[f+1]))\n if total_freq > 1000:\n frequency_values[f] = total_freq\n else:\n frequency_values[f] = np.nan\n return frequency_values\n\ndef brier_skill_score(obs,verification_data): \n obs = np.array(obs).ravel()\n verification_data = np.array(verification_data).ravel()\n bs_obs = np.nanmean((obs - np.nanmean(obs)) ** 2.0)\n bs = np.nanmean((verification_data - obs) ** 2.0)\n bss = 1.0-(bs/bs_obs)\n 
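# BSS > 0 means the forecast beats the climatological reference; 1 is a perfect score\n    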
return bss\n\ndef calibration(obs,verification_train,prediction_test):\n obs = np.array(obs).ravel()\n verification_train = np.array(verification_train).ravel()\n data_shape = np.shape(prediction_test)\n ir = IsotonicRegression(out_of_bounds='clip')\n ir.fit(verification_train, obs) \n calibrated_data = ir.transform(np.array(prediction_test).ravel())\n return calibrated_data.reshape(data_shape)\n\n\ndef contingency_metrics(obs,v_data,metric_name,\n prob_bins=np.arange(0.05, 1.06, 0.05),obs_thresh=1):\n\n obs = np.array(obs).ravel()\n v_data = np.array(v_data).ravel()\n\n metrics = np.zeros((prob_bins.shape))\n \n for t, threshold in enumerate(prob_bins):\n if threshold > 1:\n continue\n \n obs_threshold = np.where(obs >= threshold, 1, 0)\n hit = np.count_nonzero((v_data >= threshold) & (obs_threshold >= obs_thresh))\n false_alarm = np.count_nonzero((v_data >= threshold) & (obs_threshold < obs_thresh))\n miss = np.count_nonzero((v_data < threshold) & (obs_threshold >= obs_thresh))\n \n try: \n if metric_name == 'pod':\n metrics[t] = hit/(hit+miss)\n elif metric_name == 'sr':\n metrics[t] = hit/(hit+false_alarm)\n elif metric_name == 'ets':\n hits_ran = ((hit+miss)*(hit+false_alarm))/(v_data.shape[0])\n metrics[t] = (hit-hits_ran)/(hit+miss+false_alarm-hits_ran)\n elif metric_name == 'bias':\n metrics[t] = (hit+false_alarm)/(hit+miss) \n elif metric_name == 'csi':\n metrics[t] = (hit)/(hit+miss+false_alarm)\n except ZeroDivisionError:\n metrics[t] = np.nan\n \n return metrics\n\ndef read_data_and_output_files(obs,verification,out_file,metric,\n bootstrap,num_proc,metric_name=None,cl=95.):\n if bootstrap:\n all_indices = np.arange(0, len(obs))\n subset = int(len(all_indices)*0.050)\n subset_list = [np.random.choice(all_indices,subset,replace=True) for n in range(bootstrap)]\n pool = Pool(processes=num_proc)\n if metric_name:\n result = [pool.apply_async(metric, args=(obs[s],verification[s],metric_name)) for s in subset_list]\n else:\n result = [pool.apply_async(metric, args=(obs[s],verification[s])) for s in subset_list]\n bootstrap_score = [p.get() for p in result]\n lower_p = (100. 
- cl)/2.0\n        upper_p = 100.-lower_p\n        data = {}\n        data['mean'] = np.nanmean(bootstrap_score,axis=0)\n        data['lower_p'] = np.nanpercentile(bootstrap_score,lower_p,axis=0)\n        data['upper_p'] = np.nanpercentile(bootstrap_score,upper_p,axis=0)\n    else:\n        if metric_name:\n            data = metric(obs,verification,metric_name)\n        else:\n            data = metric(obs,verification)\n    \n    with open(out_file, 'wb') as f:\n        print('Saving',out_file)\n        pickle.dump(data, f)\n    return data\n\ndef plotting_storm_reports(date,hail_threshold):\n    storm_reports = pd.read_csv('https://www.spc.noaa.gov/climo/reports/{0}_rpts_hail.csv'.format(date))\n    inches_threshold = round((hail_threshold)*0.03937)*100\n    hail_size = storm_reports.loc[:,'Size'].values\n    storm_lat = storm_reports.loc[:,'Lat'].values\n    storm_lon = storm_reports.loc[:,'Lon'].values\n    data_points = np.where(hail_size >= inches_threshold)[0]\n    \n    return data_points,storm_lat,storm_lon \n\n","sub_path":"Calibration/verification_calculations.py","file_name":"verification_calculations.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"37426147","text":"#!/usr/bin/env python\n\nimport rospy\n\nfrom std_msgs.msg import String\n\nrospy.init_node('topictest_pub')\n# node name.\n\npub = rospy.Publisher('test_hello', String, queue_size=10)\n# topic name; queue_size bounds the outgoing message buffer.\n\nrate = rospy.Rate(2) #2 times per second\n\nwhile not rospy.is_shutdown():\n    pub.publish('Hello ROS')\n    rate.sleep()\n\n","sub_path":"test_pub_topic.py","file_name":"test_pub_topic.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"644274393","text":"'''\nThe Kin App Server API is defined here.\n'''\n\nfrom flask import request, jsonify, abort\nimport redis_lock\nimport requests\nfrom uuid import UUID\nimport json\n\nfrom kinappserver import app, config\nfrom kinappserver.stellar import create_account, send_kin\nfrom kinappserver.utils import InvalidUsage, InternalError, send_gcm\nfrom kinappserver.model import create_user, update_user_token, update_user_app_version, store_task_results, add_task, get_task_ids_for_user, get_task_by_id, is_onboarded, set_onboarded, reward_address_for_task, send_push_tx_completed, create_tx\n\n\ndef limit_to_local_host():\n    '''aborts non-local requests for sensitive APIs (nginx specific). 
allow on DEBUG'''\n if config.DEBUG or request.headers.get('X-Forwarded-For', None) is None:\n pass\n else:\n abort(403) # Forbidden\n\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n # converts exceptions to responses\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n@app.errorhandler(InternalError)\ndef handle_internal_error(error):\n # converts exceptions to responses\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\ndef extract_header(request):\n try:\n return request.headers.get('X-USERID')\n except Exception as e:\n print('cant extract user_id from header')\n raise InvalidUsage('bad header')\n\n@app.route('/health', methods=['GET'])\ndef get_health():\n return ''\n\n\n@app.route('/send-kin', methods=['POST'])\ndef send_kin_to_user():\n '''temp endpoint for testing sending kin TODO remove'''\n payload = request.get_json(silent=True)\n try:\n public_address = payload.get('public_address', None)\n amount = payload.get('amount', None)\n if None in (public_address, amount):\n raise InvalidUsage('bad-request') \n except Exception as e:\n print('exception: %s' % e)\n raise InvalidUsage('bad-request')\n\n #establish trust\n from stellar_base.asset import Asset\n my_asset = Asset('KIN', 'GCKG5WGBIJP74UDNRIRDFGENNIH5Y3KBI5IHREFAJKV4MQXLELT7EX6V')\n tx_hash = app.kin_sdk.trust_asset(my_asset, limit=2)\n print('trust tx hash: %s' % tx_hash)\n\n tx_hash = send_kin(public_address, amount, 'test')\n print('transfer tx hash: %s' % tx_hash)\n return jsonify(status='ok')\n\n@app.route('/send-gcm', methods=['POST'])\ndef send_gcm_push():\n '''temp endpoint for testing gcm TODO remove'''\n payload = request.get_json(silent=True)\n try:\n push_payload = payload.get('push_payload', None)\n push_token = payload.get('push_token', None)\n if None in (push_token, push_payload):\n raise InvalidUsage('bad-request') \n except Exception as e:\n print('exception: %s' % e)\n raise InvalidUsage('bad-request') \n send_gcm(push_token, push_payload)\n return jsonify(status='ok')\n\n@app.route('/send-tx-completed', methods=['POST'])\ndef send_gcm_push_tx_completed():\n #TODO remove this function\n '''temp endpoint for testing the tx-completed push'''\n payload = request.get_json(silent=True)\n try:\n user_id = extract_header(request)\n except Exception as e:\n print('exception in send_gcm_push_tx_completed: %s' % e)\n raise InvalidUsage('bad-request') \n send_push_tx_completed(user_id, 'tx_hash', 2, 'task_id')\n return jsonify(status='ok')\n\n\n@app.route('/user/app-launch', methods=['POST'])\ndef app_launch():\n \n payload = request.get_json(silent=True)\n try:\n user_id = extract_header(request)\n app_ver = payload.get('app_ver', None)\n except Exception as e:\n raise InvalidUsage('bad-request') \n update_user_app_version(user_id, app_ver)\n return jsonify(status='ok')\n\n@app.route('/user/update-token', methods=['POST'])\ndef update_token():\n ''' update a user's token in the database '''\n payload = request.get_json(silent=True)\n try:\n user_id = extract_header(request)\n token = payload.get('token', None)\n if None in (user_id, token):\n raise InvalidUsage('bad-request')\n except Exception as e:\n raise InvalidUsage('bad-request')\n print('updating token for user %s' % user_id)\n update_user_token(user_id, token)\n return jsonify(status='ok')\n\n@app.route('/user/task/results',methods=['POST'])\ndef quest_answers():\n payload = request.get_json(silent=True)\n try:\n user_id = 
extract_header(request)\n task_id = payload.get('id', None)\n address = payload.get('address', None)\n results = payload.get('results', None)\n if None in (user_id, task_id, address, results):\n raise InvalidUsage('bad-request')\n #TODO more input checks here\n except Exception as e:\n raise InvalidUsage('bad-request')\n # store the results and pay the user\n store_task_results(user_id, task_id, results)\n try:\n tx_hash, amount = reward_address_for_task(address, task_id) # blocks until payment is complete\n except Exception as e:\n print('exception: %s' % e)\n print('failed to reward task %s at address %s' % (task_id, address))\n else:\n send_push_tx_completed(user_id, tx_hash, amount, task_id)\n create_tx(tx_hash, user_id, amount, 'task_id: %s' % task_id)\n return jsonify(status='ok')\n\n@app.route('/task/add',methods=['POST'])\ndef add_task_api():\n #limit_to_local_host()\n payload = request.get_json(silent=True)\n try:\n task = payload.get('task', None)\n except Exception as e:\n print('exception: %s' % e)\n raise InvalidUsage('bad-request')\n if add_task(task):\n return jsonify(status='ok')\n else:\n raise InvalidUsage('failed to add task')\n\n@app.route('/user/tasks',methods=['GET'])\ndef get_next_task():\n '''return the current task for the user with the given id'''\n user_id = request.args.get('user-id', None)\n tasks = []\n for tid in get_task_ids_for_user(user_id):\n tasks.append(get_task_by_id(tid))\n print(tasks)\n return jsonify(tasks=tasks)\n\n@app.route('/user/onboard', methods=['POST'])\ndef onboard_user():\n # input sanity\n try:\n user_id = extract_header(request)\n public_address = request.get_json(silent=True).get('public_address', None)\n if None in (public_address, user_id):\n raise InvalidUsage('bad-request')\n except Exception as e:\n raise InvalidUsage('bad-request')\n\n # ensure the user exists but does not have an account:\n onboarded = is_onboarded(user_id)\n if onboarded == True:\n raise InvalidUsage('user already has an account')\n elif onboarded is None:\n raise InvalidUsage('no such user exists')\n else:\n # create an account, provided none is already being created\n lock = redis_lock.Lock(app.redis, 'address:' + public_address)\n if lock.acquire(blocking=False):\n try:\n print('creating account with address %s and amount %s' % (public_address, config.STELLAR_INITIAL_ACCOUNT_BALANCE))\n tx_id = create_account(public_address, config.STELLAR_INITIAL_ACCOUNT_BALANCE)\n set_onboarded(user_id, True)\n except Exception as e:\n print('exception trying to create account:%s' % e)\n raise InternalError('unable to create account')\n else:\n print('created account %s with txid %s' % (public_address, tx_id))\n finally:\n lock.release()\n else:\n raise InvalidUsage('already creating account for user_id: %s and address: %s' % (user_id, public_address))\n\n return jsonify(status='ok')\n\n\n@app.route('/user/register', methods=['POST'])\ndef register():\n ''' register a user to the system. 
\n    called once by every client until a 200 OK is received from the server.\n    the payload may contain an optional push token.\n    '''\n    payload = request.get_json(silent=True)\n    try:\n        user_id = payload.get('user_id', None)\n        os = payload.get('os', None)\n        device_model = payload.get('device_model', None)\n        token = payload.get('token', None)\n        time_zone = payload.get('time_zone', None)\n        device_id = payload.get('device_id', None)\n        app_ver = payload.get('app_ver', None)\n        #TODO more input check on the values\n        if None in (user_id, os, device_model, time_zone, app_ver): # token is optional, device-id is required but may be None\n            raise InvalidUsage('bad-request')\n        if os not in ('iOS', 'android'):\n            raise InvalidUsage('bad-request')\n        user_id = UUID(user_id) # throws exception on invalid uuid\n    except Exception as e:\n        raise InvalidUsage('bad-request')\n    else:\n        try:\n            create_user(user_id, os, device_model, token, time_zone, device_id, app_ver)\n        except InvalidUsage as e:\n            raise InvalidUsage('duplicate-userid')\n        else:\n            print('created user with user_id %s' % user_id)\n            return jsonify(status='ok')\n","sub_path":"kinappserver/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"348352061","text":"from Directed_Graph import Directed_Graph\r\nimport random\r\n\r\n\r\nclass ExitError(Exception):\r\n    pass\r\n\r\n\r\nclass UI:\r\n    def __init__(self, graph):\r\n        self.graph = graph\r\n\r\n    def print_menu(self):\r\n        print()\r\n        print(\"0. Exit\")\r\n        print(\"1. Number of vertices\")\r\n        print(\"2. Parse vertices\")\r\n        print(\"3. Edge between two vertices\")\r\n        print(\"4. In and out degree of a given vertex\")\r\n        print(\"5. Parse the set of outbound edges of a specified vertex\")\r\n        print(\"6. Parse the set of inbound edges of a specified vertex\")\r\n        print(\"7. Retrieve the information (the integer) attached to a specified edge.\")\r\n        print(\"8. Modify the information (the integer) attached to a specified edge.\")\r\n        print(\"9. Save graph to file.\")\r\n        print(\"10. Read graph from file.\")\r\n        print(\"11. Add vertex.\")\r\n        print(\"12. Remove vertex.\")\r\n        print(\"13. Add edge.\")\r\n        print(\"14. 
Remove edge.\")\r\n\r\n def num_vertices(self):\r\n print(self.graph.Number_of_vertices)\r\n\r\n def parse_vertices(self):\r\n print(list(self.graph.Vertices))\r\n\r\n def exit(self):\r\n raise ExitError()\r\n\r\n def edge_between(self):\r\n print(\"Edge between\")\r\n x = int(input(\"x > \"))\r\n y = int(input(\"y > \"))\r\n # for i in self.graph.dcosts.keys():\r\n # print(i, end=\" \")\r\n print(self.graph.edge_between(x, y))\r\n\r\n def in_out_degree(self):\r\n vertex = int(input(\"Vertex > \"))\r\n print(\r\n \"Out degree of vertex\", vertex, \"is :\", len(self.graph.out_degree(vertex))\r\n )\r\n print(\"In degree of vertex\", vertex, \"is :\", len(self.graph.in_degree(vertex)))\r\n\r\n def parse_in_vertices(self):\r\n vertex = int(input(\"Vertex > \"))\r\n print(self.graph.in_vertices(vertex))\r\n\r\n def parse_out_vertices(self):\r\n vertex = int(input(\"Vertex > \"))\r\n print(self.graph.out_vertices(vertex))\r\n\r\n def get_cost(self):\r\n print(\"Edge between\")\r\n x = int(input(\"x > \"))\r\n y = int(input(\"y > \"))\r\n # for i in self.graph.dcosts.keys():\r\n # print(i, end=\" \")\r\n print(self.graph.get_cost(x, y))\r\n\r\n def set_cost(self):\r\n print(\"Edge between\")\r\n x = int(input(\"x > \"))\r\n y = int(input(\"y > \"))\r\n value = int(input(\"New value > \"))\r\n self.graph.set_cost(x, y, value)\r\n\r\n def save_to_file(self):\r\n file_name = input(\"File name > \")\r\n self.graph.save_to_file(file_name)\r\n\r\n def read_new_graph(self):\r\n file_name = input(\"Graph file name > \")\r\n self.graph = Directed_Graph(read_input(file_name))\r\n\r\n def add_vertex(self):\r\n vertex = int(input(\"New vertex > \"))\r\n self.graph.add_vertex(vertex)\r\n\r\n def remove_vertex(self):\r\n vertex = int(input(\"Delete vertex > \"))\r\n self.graph.remove_vertex(vertex)\r\n\r\n def add_edge(self):\r\n print(\"New edge between\")\r\n vertex1 = int(input(\"x > \"))\r\n vertex2 = int(input(\"y > \"))\r\n cost = int(input(\"cost > \"))\r\n self.graph.add_edge(vertex1, vertex2, cost)\r\n\r\n def remove_edge(self):\r\n print(\"Remove edge between\")\r\n vertex1 = int(input(\"x > \"))\r\n vertex2 = int(input(\"y > \"))\r\n self.graph.remove_edge(vertex1, vertex2)\r\n\r\n def start(self):\r\n commands = {\r\n \"0\": self.exit,\r\n \"1\": self.num_vertices,\r\n \"2\": self.parse_vertices,\r\n \"3\": self.edge_between,\r\n \"4\": self.in_out_degree,\r\n \"5\": self.parse_out_vertices,\r\n \"6\": self.parse_in_vertices,\r\n \"7\": self.get_cost,\r\n \"8\": self.set_cost,\r\n \"9\": self.save_to_file,\r\n \"10\": self.read_new_graph,\r\n \"11\": self.add_vertex,\r\n \"12\": self.remove_vertex,\r\n \"13\": self.add_edge,\r\n \"14\": self.remove_edge,\r\n }\r\n while True:\r\n try:\r\n self.print_menu()\r\n cmd = input(\"Option > \")\r\n if cmd not in commands.keys():\r\n raise ValueError(\"No such option available\")\r\n commands[cmd]()\r\n except ValueError as va:\r\n print(va)\r\n except ExitError:\r\n break\r\n\r\n\r\ndef read_input(file_name):\r\n with open(file_name, \"r\") as f:\r\n text = f.read()\r\n text = text.split(\"\\n\")\r\n numbers = []\r\n for t in text:\r\n t = t.split(\" \")\r\n numbers.extend(t)\r\n for i in range(len(numbers)):\r\n numbers[i] = int(numbers[i])\r\n # print(numbers)\r\n return numbers\r\n\r\n\r\ndef generate_graph(num_vertices=None, num_edges=None):\r\n if num_edges == None or num_vertices == None:\r\n num_vertices = int(input(\"Number of vertices > \"))\r\n num_edges = int(input(\"Number of edges > \"))\r\n\r\n graph = [num_vertices, num_edges]\r\n edges = 
[]\r\n while len(edges) < num_edges:\r\n v1 = random.choice(range(num_vertices))\r\n v2 = random.choice(range(num_vertices))\r\n cost = random.choice(range(0, 100))\r\n edge = (v1, v2, cost)\r\n if edge not in (edges):\r\n print(len(edges), num_edges)\r\n edges.append(edge)\r\n\r\n for e in edges:\r\n graph.extend([e[0], e[1], e[2]])\r\n return graph\r\n\r\n\r\ndef generate_and_save():\r\n d = Directed_Graph(generate_graph(10000, 100000))\r\n d.save_to_file(\"seminar_graph10k.txt\")\r\n d = Directed_Graph(generate_graph(100000, 1000000))\r\n d.save_to_file(\"seminar_graph100k.txt\")\r\n d = Directed_Graph(generate_graph(1000000, 10000000))\r\n d.save_to_file(\"seminar_graph1kk.txt\")\r\n\r\n\r\n# generate_and_save()\r\n\r\n\r\ndef generate_graph_v2(num_vertices=None, num_edges=None):\r\n\r\n while True:\r\n if num_edges == None or num_vertices == None:\r\n # print(\"Nu e bine\")\r\n # print(num_edges, num_vertices)\r\n num_vertices = int(input(\"Number of vertices > \"))\r\n num_edges = int(input(\"Number of edges > \"))\r\n\r\n if num_edges > (num_vertices * (num_vertices + 1)) / 2:\r\n print(\"Cannot generate such a graph.\")\r\n num_edges = None\r\n num_vertices = None\r\n else:\r\n break\r\n\r\n graph = [num_vertices, num_edges]\r\n edges = {}\r\n while len(edges) < num_edges:\r\n v1 = random.choice(range(num_vertices))\r\n v2 = random.choice(range(num_vertices))\r\n cost = random.choice(range(0, 100))\r\n # edge[(v1,v2)] = cost\r\n if (v1, v2) not in edges:\r\n edges[(v1, v2)] = cost\r\n graph.extend([v1, v2, cost])\r\n # if len(edges) % 100000 == 0:\r\n print(len(edges), num_edges)\r\n\r\n # for e in edges:\r\n # graph.extend([e[0], e[1], e[2]])\r\n return graph\r\n\r\n\r\ndef generate_graph_v2_test():\r\n print(\"Executing test generate_graph_v2_test\")\r\n # d = Directed_Graph(generate_graph_v2(100000, 1000000))\r\n # d.save_to_file(\"seminar_graph100k.txt\")\r\n d = Directed_Graph(generate_graph_v2(10000, 100000))\r\n d.save_to_file(\"seminar_graph10k.txt\")\r\n\r\n\r\n# generate_graph_v2_test()\r\n\r\n\r\ndef main_start():\r\n print(\"1. Generate random graph\")\r\n print(\"2. 
Read from file\")\r\n while True:\r\n option = input(\" > \")\r\n if option == \"1\":\r\n ui = UI(Directed_Graph(generate_graph_v2()))\r\n break\r\n elif option == \"2\":\r\n input_file = \"graph10k.txt\"\r\n # input_file = \"input1kvertices.txt\"\r\n # file = \"graph1m.txt\"\r\n ui = UI(Directed_Graph(read_input(input_file)))\r\n break\r\n else:\r\n print(\"No such option\")\r\n\r\n ui.start()\r\n\r\n\r\ndef random_example():\r\n ui = UI(Directed_Graph(generate_graph_v2(8, 17)))\r\n graph = ui.graph\r\n # print(graph.dout)\r\n # print(graph.din)\r\n # print(graph.dcosts)\r\n graph.dout = {\r\n 0: [1],\r\n 1: [7, 0, 4, 3],\r\n 2: [2],\r\n 3: [1],\r\n 4: [4],\r\n 5: [0, 6, 7, 2],\r\n 6: [0, 2, 6, 7],\r\n 7: [4],\r\n }\r\n graph.din = {\r\n 0: [5, 1, 6],\r\n 1: [0, 3],\r\n 2: [6, 5, 2],\r\n 3: [1],\r\n 4: [1, 4, 7],\r\n 5: [],\r\n 6: [6, 5],\r\n 7: [1, 5, 6],\r\n }\r\n graph.dcosts = {\r\n (0, 1): 0,\r\n (1, 7): 67,\r\n (5, 0): 12,\r\n (1, 0): 87,\r\n (3, 1): 21,\r\n (6, 0): 15,\r\n (1, 4): 64,\r\n (6, 2): 17,\r\n (6, 6): 47,\r\n (4, 4): 62,\r\n (5, 6): 4,\r\n (1, 3): 22,\r\n (5, 7): 38,\r\n (5, 2): 41,\r\n (2, 2): 65,\r\n (6, 7): 9,\r\n (7, 4): 48,\r\n }\r\n graph.save_to_file('repres_ex.txt')\r\n\r\n\r\nrandom_example()\r\n\r\n# main_start()\r\n","sub_path":"Lab1Final/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"128683827","text":"# -*- coding: utf-8 -*-\nimport os\nimport xml.etree.ElementTree as ET\nimport numpy as np\nfrom dataset.imdb import Imdb\nfrom evaluate.eval_voc import voc_eval\nimport cv2\n\nclass KITTICar(Imdb):\n \"\"\"\n Implement of Imdb for KITTI dataset in VOC format\n\n # Parameters\n\n image_set: str\n set to be used, can be train, val, test\n root_path: str\n root path of the dataset\n shuffle: boolean\n whether to initial shuffle the image list\n is_train: boolean\n if true, will load annotaions\n class_names: str\n class names seperated by comma\n names: str\n the file which stores the class_names, will take effect if class_names is None\n true_negative_images: bool\n whether to include true_negative_images\n \"\"\"\n def __init__(self, image_set, root_path, shuffle=True, is_train=True, class_names=None,\n names=\"kitti_voc.names\", true_negative_images=False):\n super(KITTICar, self).__init__(\"KITTI_Car_\" + image_set)\n self.image_set = image_set\n self.root_path = root_path\n self.data_path = os.path.join(root_path, \"training\", \"image_2\")\n self.extension = \".png\"\n self.is_train = is_train\n self.true_negative_images = true_negative_images\n\n if class_names is not None:\n self.classes = class_names.strip().split(',')\n else:\n self.classes = self._load_class_names(names,\n os.path.join(os.path.dirname(__file__), \"names\"))\n\n self.config = {\n \"use_difficult\": True,\n 'comp_id': 'comp4'\n }\n self.num_classes = len(self.classes)\n self.image_set_index = self._load_image_set_index(shuffle)\n self.num_images = len(self.image_set_index)\n if self.is_train:\n self.labels = self._load_image_labels()\n\n if not self.true_negative_images:\n self._filter_image_with_no_gt()\n\n @property\n def cache_path(self):\n \"\"\"\n make a directory to store all caches\n\n Returns:\n ---------\n cache path\n \"\"\"\n cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path\n\n def _filter_image_with_no_gt(self):\n \"\"\"\n filter images that have no 
ground-truth labels.\n        use case: when you wish to work only on a subset of pascal classes, you have 2 options:\n        1. use only the sub-dataset that contains the subset of classes\n        2. use all images, and images with no ground-truth will count as true-negative images\n        :return:\n        self object with filtered information\n        \"\"\"\n\n        # filter images that do not have any of the specified classes\n        self.labels = [f[np.logical_and(f[:, 0] >= 0, f[:, 0] <= self.num_classes-1), :] for f in self.labels]\n        # find indices of images with ground-truth labels\n        gt_indices = [idx for idx, f in enumerate(self.labels) if not f.size == 0]\n\n        self.labels = [self.labels[idx] for idx in gt_indices]\n        self.image_set_index = [self.image_set_index[idx] for idx in gt_indices]\n        old_num_images = self.num_images\n        self.num_images = len(self.labels)\n\n        print ('filtering images with no gt-labels. can abort filtering using *true_negative* flag')\n        print ('... remaining {0}/{1} images. '.format(self.num_images, old_num_images))\n\n    def _load_image_set_index(self, shuffle):\n        \"\"\"\n        find out which indexes correspond to given image set (train or val)\n\n        # Parameters\n\n        shuffle: bool\n            whether to shuffle the image list\n\n        # Returns\n\n        entire list of images specified in the setting\n        \"\"\"\n        image_set_index_file = os.path.join(self.root_path, \"training\",\n                                            \"image_sets\", \"Car\", self.image_set + \".txt\")\n        assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)\n        with open(image_set_index_file) as f:\n            image_set_index = [x.strip() for x in f.readlines()]\n        if shuffle:\n            np.random.shuffle(image_set_index)\n        return image_set_index\n\n    def _label_path_from_index(self, index):\n        \"\"\"\n        given image index, find out annotation path\n\n        Parameters:\n        ----------\n        index: int\n            index of a specific image\n\n        Returns:\n        ----------\n        full path of annotation file\n        \"\"\"\n        label_file = os.path.join(self.data_path, \"..\", \"label_2car\", \"xml\", index + \".xml\")\n        assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file)\n        return label_file\n\n    def _load_image_labels(self):\n        \"\"\"\n        preprocess all ground-truths\n\n        Returns:\n        ----------\n        labels packed in [num_images x max_num_objects x 5] tensor\n        \"\"\"\n        temp = []\n\n        for idx in self.image_set_index:\n            label_file = self._label_path_from_index(idx)\n            tree = ET.parse(label_file)\n            root = tree.getroot()\n            size = root.find(\"size\")\n            width = float(size.find(\"width\").text)\n            height = float(size.find(\"height\").text)\n            label = []\n\n            for obj in root.iter(\"object\"):\n                difficult = int(obj.find(\"difficult\").text if obj.find(\"difficult\") is not None else 0)\n                cls_name = obj.find(\"name\").text\n                if cls_name not in self.classes:\n                    cls_id = len(self.classes)\n                else:\n                    cls_id = self.classes.index(cls_name)\n                xml_box = obj.find('bndbox')\n                xmin = float(xml_box.find('xmin').text) / width\n                ymin = float(xml_box.find('ymin').text) / height\n                xmax = float(xml_box.find('xmax').text) / width\n                ymax = float(xml_box.find('ymax').text) / height\n                label.append([cls_id, xmin, ymin, xmax, ymax, difficult])\n            temp.append(np.array(label))\n        return temp\n\n    def label_from_index(self, index):\n        \"\"\"\n        given image index, return preprocessed ground-truth\n\n        Parameters:\n        ----------\n        index: int\n            index of a specific image\n        Returns:\n        ----------\n        ground-truths of this image\n        \"\"\"\n        assert self.labels is not None, \"Labels not processed\"\n        return self.labels[index]\n\n    def 
image_path_from_index(self, index):\n        \"\"\"\n        given image index, find out full path\n\n        # Parameters:\n        index: int\n            index of a specific image\n        # Returns:\n        full path of the image\n        \"\"\"\n        assert self.image_set_index is not None, \"Dataset not initialized\"\n        name = self.image_set_index[index]\n        image_file = os.path.join(self.data_path, name + self.extension)\n        assert os.path.exists(image_file), \"Path does not exist: {}\".format(image_file)\n        return image_file\n","sub_path":"dataset/kitti_voc_car.py","file_name":"kitti_voc_car.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"118325137","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport numpy as np\r\nfrom LSTM import predict_lstm, run_train_model_lstm\r\nfrom MathExpressions import get_math_expression_from_prediction_result\r\nfrom TestOneSimpleExpression import get_accuracy\r\nfrom TestRotatedImages import get_bounding_boxes_for_rotated_image\r\nfrom TrainRecognizeMathSymbols import prepare_data_for_prediction, run_train_model, predict, \\\r\n    prepare_data_for_prediction_lstm\r\nfrom fuzzywuzzy import fuzz\r\n\r\n\r\ndef display_image(image, color=False):\r\n    if color:\r\n        plt.imshow(image)\r\n    else:\r\n        plt.imshow(image, 'gray')\r\n    plt.show()\r\n\r\n\r\ndef read_label_from_txt_file(file_path):\r\n    image_path = []\r\n    labels_for_dictionary = []\r\n    expressions = []\r\n    with open(file_path) as fp:\r\n        line = fp.readline()\r\n        while line:\r\n            n = 2\r\n            lines = line.split(\"|\")\r\n            image_path.append(lines[0])\r\n            expressions_list = []\r\n            expressions_list.append(lines[2])\r\n\r\n            if len(lines) == 7:\r\n                expressions_list.append(lines[4])\r\n                expression = lines[6]\r\n                expressions_list.append(expression[0:-1])\r\n                n = 3\r\n            else:\r\n                expression = lines[4]\r\n                expressions_list.append(expression[0:-1])\r\n            expressions.append(expressions_list)\r\n            label_list = []\r\n            j = 1\r\n            for index in range(0, n):\r\n                labels = []\r\n                symbols = lines[j].split(';')\r\n                for symbol in symbols:\r\n                    if len(symbol) != 0:\r\n                        if symbol != \"\\n\":\r\n                            labels.append(symbol)\r\n                j += 2\r\n                label_list.append(labels)\r\n            labels_for_dictionary.append(label_list)\r\n            line = fp.readline()\r\n    dictionary = dict(zip(image_path, labels_for_dictionary))\r\n    expression_dictionary = dict(zip(image_path, expressions))\r\n\r\n    return dictionary, expression_dictionary\r\n\r\n\r\ndef extract_expressions_from_image(image__path):\r\n    img = cv2.imread(image__path)\r\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n    # display_image(img)\r\n    ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)\r\n    _, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n\r\n    im2 = img.copy()\r\n    for cnt in contours:\r\n        x, y, w, h = cv2.boundingRect(cnt)\r\n        cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n        if h < 35 and w < 70:\r\n            thresh1[y - 30:y + h + 30, x - 30:x + w + 30] = 255\r\n        elif w > 100 and h < 30:\r\n            thresh1[y - 25:y + h + 25, x - 5:x + w + 5] = 255\r\n        # else:\r\n        # thresh1[y - 5:y + h + 5, x - 10:x + w + 10] = 255\r\n\r\n    # display_image(thresh1)\r\n    rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 5))\r\n    dilation = cv2.dilate(thresh1, rect_kernel, iterations=13)\r\n    # display_image(dilation)\r\n    _, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n    sorted_contours = sorted(contours, key=lambda ctr: 
cv2.boundingRect(ctr)[1])\r\n\r\n    im2 = img.copy()\r\n    bounding_boxes = []\r\n    i = 0\r\n    while i < len(sorted_contours):\r\n        x, y, w, h = cv2.boundingRect(sorted_contours[i])\r\n        # print(\"Bounding boxes\")\r\n        # print(w)\r\n        # print(h)\r\n        # print(x)\r\n        # print(y)\r\n\r\n        if (i + 1) < len(sorted_contours) and w < 500 and h < 400:\r\n            bounding_boxes, im2, j = join_small_contours(sorted_contours, i, x, y, w, h, im2, bounding_boxes)\r\n            i = j\r\n            # display_image(im2)\r\n        else:\r\n            cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n            bounding_boxes.append({\r\n                'xmin': x,\r\n                'xmax': x + w,\r\n                'ymin': y,\r\n                'ymax': y + h\r\n            })\r\n            # display_image(im2)\r\n        i += 1\r\n    # for box in bounding_boxes:\r\n    #     print(\"****************\")\r\n    #     print(box['xmin'])\r\n    #     print(box['xmax'])\r\n    #     print(box['ymin'])\r\n    #     print(box['ymax'])\r\n    #     print(\"****************\")\r\n    # plt.subplot(1, 2, 1)\r\n    # plt.imshow(thresh1, cmap=\"gray\")\r\n    # plt.subplot(1, 2, 2)\r\n    # plt.imshow(im2, cmap=\"gray\")\r\n    # plt.show()\r\n    bounding_boxes = sorted(bounding_boxes, key=lambda k: (k['ymin']))\r\n    expressions = []\r\n    for box in bounding_boxes:\r\n        expression = img[box['ymin']:box['ymax'], box['xmin']:box['xmax']]\r\n        expressions.append(expression)\r\n    return expressions\r\n\r\n\r\ndef join_small_contours(sorted_contours, index, x, y, w, h, im2, bounding_boxes):\r\n    j = index + 1\r\n    min_x = x\r\n    min_y = y\r\n    max_x = x + w\r\n    max_y = y + h\r\n    need_to_join_contours = False\r\n    while j < len(sorted_contours):\r\n        x2, y2, w2, h2 = cv2.boundingRect(sorted_contours[j])\r\n\r\n        # print(\"Bounding x2\")\r\n        # print(w2)\r\n        # print(h2)\r\n        # print(x2)\r\n        # print(y2)\r\n        if (w < 300 and h < 300) and not (w2 < 300 and h2 < 300) and (j - 1) == index:\r\n            cv2.rectangle(im2, (min(x, x2), min(y, y2)), (max(x + w, x2 + w2), max(y + h, y2 + h2)), (0, 255, 0), 2)\r\n            bounding_boxes.append({\r\n                'xmin': min(x, x2),\r\n                'xmax': max(x + w, x2 + w2),\r\n                'ymin': min(y, y2),\r\n                'ymax': max(y + h, y2 + h2)\r\n            })\r\n            return bounding_boxes, im2, j + 1\r\n        if w2 < 300 and h2 < 400:\r\n            need_to_join_contours = True\r\n            min_x = min(min_x, x2)\r\n            min_y = min(min_y, y2)\r\n            max_x = max(max_x, x2 + w2)\r\n            max_y = max(max_y, y2 + h2)\r\n        else:\r\n            break\r\n\r\n        j += 1\r\n\r\n    if need_to_join_contours:\r\n        cv2.rectangle(im2, (min_x, min_y), (max_x, max_y), (0, 255, 0), 2)\r\n        bounding_boxes.append({\r\n            'xmin': min_x,\r\n            'xmax': max_x,\r\n            'ymin': min_y,\r\n            'ymax': max_y\r\n        })\r\n    else:\r\n        cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n        bounding_boxes.append({\r\n            'xmin': x,\r\n            'xmax': x + w,\r\n            'ymin': y,\r\n            'ymax': y + h\r\n        })\r\n    return bounding_boxes, im2, j\r\n\r\n\r\ndef get_prediction_results(img, trained_model, trained_model_lstm):\r\n    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\r\n    regions_and_box_array = get_bounding_boxes_for_rotated_image(img)\r\n\r\n    # CNN\r\n    regions_array_for_prediction = [region[0] for region in regions_and_box_array]\r\n    regions_array_for_prediction = prepare_data_for_prediction(regions_array_for_prediction)\r\n    bounding_boxes = [region[1] for region in regions_and_box_array]\r\n    results = predict(trained_model, regions_array_for_prediction)\r\n    expression = get_math_expression_from_prediction_result(results, bounding_boxes)\r\n\r\n    # LSTM\r\n    regions_array_for_prediction = [region[0] for region in regions_and_box_array]\r\n    regions_array_for_prediction_lstm = prepare_data_for_prediction_lstm(regions_array_for_prediction)\r\n    bounding_boxes = [region[1] for region 
in regions_and_box_array]\r\n    results_lstm = predict_lstm(trained_model_lstm, regions_array_for_prediction_lstm)\r\n    expression_lstm = get_math_expression_from_prediction_result(results_lstm, bounding_boxes)\r\n    return results, expression, results_lstm, expression_lstm\r\n\r\n\r\ndef load_rotated_image_and_return_normal_image(image):\r\n    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\r\n    gray = cv2.bitwise_not(gray)\r\n    img = gray.copy()\r\n\r\n    thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY)[1]\r\n    coords = np.column_stack(np.where(thresh > 0))\r\n\r\n    rotated_rect = cv2.minAreaRect(coords)\r\n    (x, y), (width, height), angle = rotated_rect\r\n    # print(\"IMG SHAPE\")\r\n    # print(image.shape[0])\r\n    # print(image.shape[1])\r\n    # print(\"MIN AREA RECT\")\r\n    # print(height)\r\n    # print(width)\r\n    # print(\"ANGLE\")\r\n    if 1 > (image.shape[0] - image.shape[1]) > -17 and (width - height) > 0:\r\n        angle = -angle\r\n    elif (image.shape[0] - image.shape[1]) > 0 and (width - height) > 0:\r\n        return image\r\n\r\n    if angle < -45:\r\n        angle = -(90 + angle)\r\n    else:\r\n        angle = -angle\r\n    if 0 < (image.shape[1] - image.shape[0]) < 100 and abs(width - height) < 450 and angle > 0:\r\n        angle = -angle\r\n    if (abs(image.shape[1] - image.shape[0]) < 150 and abs(width - height) < 60) or (\r\n            abs(image.shape[1] - image.shape[0]) < 30 and abs(width - height) < 150):\r\n        angle = -angle\r\n    (h, w) = image.shape[:2]\r\n    (cX, cY) = (w // 2, h // 2)\r\n    M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)\r\n    cos = np.abs(M[0, 0])\r\n    sin = np.abs(M[0, 1])\r\n    nW = int((h * sin) + (w * cos))\r\n    nH = int((h * cos) + (w * sin))\r\n    M[0, 2] += (nW / 2) - cX\r\n    M[1, 2] += (nH / 2) - cY\r\n\r\n    rotated = cv2.warpAffine(image, M, (nW, nH), borderValue=(255, 255, 255))\r\n    # plt.subplot(1, 2, 1)\r\n    # plt.imshow(image, cmap=\"gray\")\r\n    # plt.subplot(1, 2, 2)\r\n    # plt.imshow(rotated, cmap=\"gray\")\r\n    # plt.show()\r\n    # display_image(rotated)\r\n    return rotated\r\n\r\n\r\ndef test_several_rotated_expressions():\r\n    predicted_labels_for_expressions = []\r\n    truth_labels_for_expressions = []\r\n    truth_labels_for_symbols = []\r\n    predicted_labels_for_symbols = []\r\n    predicted_labels_for_expressions_lstm = []\r\n    predicted_labels_for_symbols_lstm = []\r\n\r\n    path = 'dataset/test/several_rotated_expressions/'\r\n    truth_symbols_dictionary, truth_expression_dictionary = read_label_from_txt_file(path + \"label.txt\")\r\n    trained_model = run_train_model()\r\n    trained_model_lstm = run_train_model_lstm()\r\n    ratio_cnn = 0\r\n    ratio_lstm = 0\r\n    for img_name in os.listdir(path):\r\n        if img_name.find(\"png\") != -1:\r\n            img_path = path + img_name\r\n            expressions = extract_expressions_from_image(img_path)\r\n\r\n            model_predicted_expressions = []\r\n            truth_expressions = truth_expression_dictionary[img_path]\r\n            truth_labels = truth_symbols_dictionary[img_path]\r\n            j = 0\r\n            model_predicted_labels = []\r\n            for expression_img in expressions:\r\n                # display_image(expression_img)\r\n                img = load_rotated_image_and_return_normal_image(expression_img)\r\n                results, expression, results_lstm, expression_lstm = get_prediction_results(img, trained_model,\r\n                                                                                            trained_model_lstm)\r\n                model_predicted_expressions.append(expression)\r\n                model_predicted_labels.append(results)\r\n\r\n                predicted_labels_for_expressions_lstm.append(expression_lstm)\r\n                predicted_labels_for_symbols_lstm.append(results_lstm)\r\n                ratio_cnn += fuzz.ratio(expression, truth_expressions[j])\r\n                ratio_lstm += 
fuzz.ratio(expression_lstm, truth_expressions[j])\r\n\r\n                print(\"TEST SEVERAL ROTATED EXPRESSIONS CNN\")\r\n                print(img_path)\r\n                print(\"Truth expression\")\r\n                print(truth_expressions[j])\r\n                print(\"Predicted expression\")\r\n                print(expression)\r\n                print(\"FuzzyWuzzy ratio\")\r\n                print(fuzz.ratio(expression, truth_expressions[j]))\r\n                # print(\"Predicted symbols: \")\r\n                # print(results)\r\n                print(\"*****************************\")\r\n\r\n                print(\"TEST SEVERAL ROTATED EXPRESSIONS LSTM \")\r\n                print(img_path)\r\n                print(\"Truth expression\")\r\n                print(truth_expressions[j])\r\n                print(\"Predicted expression\")\r\n                print(expression_lstm)\r\n                print(\"FuzzyWuzzy ratio\")\r\n                print(fuzz.ratio(expression_lstm, truth_expressions[j]))\r\n                # print(\"Truth symbols\")\r\n                # print(truth_labels[j])\r\n                # print(\"Predicted symbols: \")\r\n                # print(results_lstm)\r\n                print(\"*****************************\")\r\n\r\n                j += 1\r\n            if len(model_predicted_expressions) != len(truth_expressions):\r\n                model_predicted_expressions.append('')\r\n                new_empty_array = []\r\n                model_predicted_labels.append(new_empty_array)\r\n            for labels in truth_labels:\r\n                truth_labels_for_symbols.append(labels)\r\n\r\n            for expression in truth_expressions:\r\n                truth_labels_for_expressions.append(expression)\r\n\r\n            for predicted_labels in model_predicted_labels:\r\n                predicted_labels_for_symbols.append(predicted_labels)\r\n\r\n            for predicted_expression in model_predicted_expressions:\r\n                predicted_labels_for_expressions.append(predicted_expression)\r\n\r\n    print(\"TEST IMAGES WITH SEVERAL ROTATED EXPRESSIONS CNN\")\r\n    get_accuracy(truth_labels_for_symbols, predicted_labels_for_symbols, truth_labels_for_expressions,\r\n                 predicted_labels_for_expressions)\r\n    print(\"FuzzyWuzzy average ratio\")\r\n    print(round(ratio_cnn / len(truth_labels_for_expressions), 2))\r\n    print(\"********************************\")\r\n    print(\"TEST IMAGES WITH SEVERAL ROTATED EXPRESSIONS LSTM \")\r\n    get_accuracy(truth_labels_for_symbols, predicted_labels_for_symbols_lstm, truth_labels_for_expressions,\r\n                 predicted_labels_for_expressions_lstm)\r\n    print(\"FuzzyWuzzy average ratio\")\r\n    print(round(ratio_lstm / len(truth_labels_for_expressions), 2))\r\n    print(\"********************************\")\r\n\r\n\r\ntest_several_rotated_expressions()\r\n","sub_path":"TestSeveralRotatedExpressions.py","file_name":"TestSeveralRotatedExpressions.py","file_ext":"py","file_size_in_byte":13862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"327207281","text":"#\n# (c) 2018 Extreme Networks Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport json\n\nfrom mock import MagicMock, patch, call\n\nfrom ansible_collections.community.network.tests.unit.compat import unittest\nfrom ansible_collections.community.network.plugins.module_utils.network.slxos import slxos\n\n\nclass TestPluginCLIConfSLXOS(unittest.TestCase):\n    \"\"\" Test class for SLX-OS CLI Conf Methods\n    \"\"\"\n\n    def test_get_connection_established(self):\n        \"\"\" Test get_connection with established connection\n        \"\"\"\n        module = MagicMock()\n        connection = slxos.get_connection(module)\n        self.assertEqual(connection, module.slxos_connection)\n\n    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')\n    def test_get_connection_new(self, connection):\n        \"\"\" Test get_connection with new connection\n        \"\"\"\n        socket_path = \"little red riding hood\"\n        module = MagicMock(spec=[\n            'fail_json',\n        ])\n        module._socket_path = socket_path\n\n        connection().get_capabilities.return_value = '{\"network_api\": \"cliconf\"}'\n        returned_connection = slxos.get_connection(module)\n        connection.assert_called_with(socket_path)\n        self.assertEqual(returned_connection, module.slxos_connection)\n\n    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')\n    def test_get_connection_incorrect_network_api(self, connection):\n        \"\"\" Test get_connection with incorrect network_api response\n        \"\"\"\n        socket_path = \"little red riding hood\"\n        module = MagicMock(spec=[\n            'fail_json',\n        ])\n        module._socket_path = socket_path\n        module.fail_json.side_effect = TypeError\n\n        connection().get_capabilities.return_value = '{\"network_api\": \"nope\"}'\n\n        with self.assertRaises(TypeError):\n            slxos.get_connection(module)\n\n    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')\n    def test_get_capabilities(self, connection):\n        \"\"\" Test get_capabilities\n        \"\"\"\n        socket_path = \"little red riding hood\"\n        module = MagicMock(spec=[\n            'fail_json',\n        ])\n        module._socket_path = socket_path\n        module.fail_json.side_effect = TypeError\n\n        capabilities = {'network_api': 'cliconf'}\n\n        connection().get_capabilities.return_value = json.dumps(capabilities)\n\n        capabilities_returned = slxos.get_capabilities(module)\n\n        self.assertEqual(capabilities, capabilities_returned)\n\n    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')\n    def test_run_commands(self, connection):\n        \"\"\" Test run_commands\n        \"\"\"\n        module = MagicMock()\n\n        commands = [\n            'hello',\n            'dolly',\n            'well hello',\n            'dolly',\n            'its so nice to have you back',\n            'where you belong',\n        ]\n\n        responses = [\n            'Dolly, never go away again1',\n            'Dolly, never go away again2',\n            'Dolly, never go away again3',\n            'Dolly, never go away again4',\n            'Dolly, never go away again5',\n            'Dolly, never go away again6',\n        ]\n\n        module.slxos_connection.get.side_effect = responses\n\n        run_command_responses = slxos.run_commands(module, commands)\n\n        calls = []\n\n        for command in commands:\n            calls.append(call(\n                command,\n                None,\n                None\n            ))\n\n        module.slxos_connection.get.assert_has_calls(calls)\n\n        self.assertEqual(responses, run_command_responses)\n\n    @patch('ansible_collections.community.network.plugins.module_utils.network.slxos.slxos.Connection')\n    def test_load_config(self, connection):\n        \"\"\" Test load_config\n        \"\"\"\n        module = MagicMock()\n\n        commands = [\n            'what does it 
take',\n 'to be',\n 'number one?',\n 'two is not a winner',\n 'and three nobody remember',\n ]\n\n slxos.load_config(module, commands)\n\n module.slxos_connection.edit_config.assert_called_once_with(commands)\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/network/tests/unit/plugins/module_utils/network/slxos/test_slxos.py","file_name":"test_slxos.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"158361391","text":"import sys, subprocess, re\n\ndef bindingRegionsDisordered(fileName):\n anchor=\"/home/shihab/tools/ANCHOR/anchor\"\n\n command=anchor+\" \"+fileName+\" -d /home/shihab/tools/ANCHOR/\"\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n\n lines = process.stdout.readlines()\n predictions=[]\n for line in lines:\n if line[0]!=\"#\":\n predict=re.findall(\"\\d+\", line.strip())\n predictions.append(predict)\n if predictions==[]:\n anchor_res=\"0\"\n else:\n anchor_res=predictions[0]\n return anchor_res[0]\n\n","sub_path":"scripts/moreConservative/anchor.py","file_name":"anchor.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"298920052","text":"from collections import Counter\nlines = open('2022/day6/input.txt').read().splitlines()\n\nfor line in lines:\n print(line)\n l = list(line)\n for i in range(14, len(l)):\n cur = l[i-14:i]\n if len(Counter(cur)) == 14:\n print(line, cur, len(Counter(cur)), i)\n break\n","sub_path":"2022/day6/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"521455047","text":"'''\nCopyright (c) 2015 Mehdi Fatemi.\nPostdoctoral Researcher at Cognitive Systems Lab, McMaster University.\n \nJune 2015.\n'''\n\n__author__ = '@mefatemi'\n__version__ = '0.1'\n\n\nimport random\nfrom copy import deepcopy\n\nclass GridWorld(object):\n\n def __init__(self, **kwarg):\n\n if kwarg['make_board_method'] == 'random':\n try:\n self.board_w = kwarg['board_w'] # board width\n self.board_h = kwarg['board_h'] # board height\n self.board = [] # game board\n self.current_p = [] # current position of the agent\n self.available_actions = ['up', 'down', 'left', 'right']\n self.make_board_method = kwarg['make_board_method']\n self.make_board()\n\n except:\n print ('Improper inputs for a random game-board.')\n\n elif kwarg['make_board_method'] == 'user':\n try:\n self.board_w = kwarg['board_w'] # board width\n self.board_h = kwarg['board_h'] # board height\n self.board = kwarg['board'] # game board\n try:\n self.current_p = self.board.index('X')\n except:\n print ('Please enter the current location manually.')\n\n self.available_actions = ['up', 'down', 'left', 'right']\n self.make_board_method = kwarg['make_board_method']\n \n except:\n print ('Improper inputs for a user game-board.')\n else:\n raise NameError('Board method is not recognized. 
Must be \"random\" or \"user\".')\n\n\n def clone(self):\n '''\n Make a clone of the current game.\n '''\n return deepcopy(self)\n \n \n def set_start(self, row, col):\n '''\n Setting the current position\n '''\n self.current_p = (row - 1) * self.board_w + col - 1\n \n\n def make_board(self, p = 0.2):\n '''\n This method initializes the game board in accordance with\n the 'method' of random board-game.\n\n p : percentage of holes in the board\n\n '''\n\n if self.make_board_method == 'random':\n # Making holes and ordinary places in accordance with p:\n for i in range(self.board_h * self.board_w):\n if random.random() > p:\n self.board.append('O')\n else:\n self.board.append('H')\n\n # Selecting a random place as the Goal\n self.board[random.randint(0, (self.board_h * self.board_w) - 1)] = 'G'\n\n # Selecting a random initial position\n pos = [ind for ind, value in enumerate(self.board) if value == 'O']\n self.current_p = random.choice(pos)\n\n\n def display(self):\n '''\n This method displayes the board\n '''\n s = deepcopy(self.board)\n \n ## colouring the board:\n s[self.current_p] = '\\033[%dm%s' % (34, 'X') + '\\033[0m'\n\n if 'G' in s:\n s[s.index('G')] = '\\033[%dm%s' % (32, 'G') + '\\033[0m'\n for indx, item in enumerate(s):\n if item == 'H':\n s[indx] = '\\033[%dm%s' % (31, 'H') + '\\033[0m'\n \n ## printing:\n row = ' {} |'*(self.board_w - 1) + ' {}'\n h_line = '\\n' + '-'*(self.board_w*3 + (self.board_w - 1)) + '\\n'\n print('\\n'+((row + h_line) * (self.board_h - 1) + row).format(*s)+'\\n')\n \n \n def move(self, action):\n if action == 'up':\n reward = self.move_up_()\n state = deepcopy(self.board)\n state[self.current_p] = 'X'\n\n elif action == 'down':\n reward = self.move_down_()\n state = deepcopy(self.board)\n state[self.current_p] = 'X'\n\n elif action == 'left':\n reward = self.move_left_()\n state = deepcopy(self.board)\n state[self.current_p] = 'X'\n\n elif action == 'right':\n reward = self.move_right_()\n state = deepcopy(self.board)\n state[self.current_p] = 'X'\n\n else:\n raise ValueError('Action is not recognized.')\n\n return state, reward\n\n\n def move_up_(self):\n if self.current_p <= self.board_w - 1: # first row\n return -5.0\n\n elif self.board[self.current_p - self.board_w] == 'H': # falling into a hole\n return -5.0\n\n elif self.board[self.current_p - self.board_w] == 'G': # reaching the Goal!!!\n self.current_p -= self.board_w\n return 30.0\n \n else:\n self.current_p -= self.board_w # just an 'O'\n return -1.0\n\n\n def move_down_(self):\n if self.current_p >= (self.board_h - 1)*self.board_w: # last row\n return -5.0\n\n elif self.board[self.current_p + self.board_w] == 'H': # falling into a hole\n return -5.0\n\n elif self.board[self.current_p + self.board_w] == 'G': # reaching the Goal!!!\n self.current_p += self.board_w\n return 30.0\n \n else:\n self.current_p += self.board_w\n return -1.0\n\n\n def move_right_(self):\n if (self.current_p % self.board_w) == self.board_w - 1: # last col\n return -5.0\n\n elif self.board[self.current_p + 1] == 'H': # falling into a hole\n return -5.0\n\n elif self.board[self.current_p + 1] == 'G': # reaching the Goal!!!\n self.current_p += 1\n return 30.0\n \n else:\n self.current_p += 1\n return -1.0\n\n\n def move_left_(self):\n if (self.current_p % self.board_w) == 0: # first col\n return -5.0\n\n elif self.board[self.current_p - 1] == 'H': # falling into a hole\n return -5.0\n\n elif self.board[self.current_p - 1] == 'G': # reaching the Goal!!!\n self.current_p -= 1\n return 30.0\n \n else:\n self.current_p -= 
1\n return -1.0\n\n\n \n\n\n\n \n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"153538354","text":"import src.data_storage as data\nimport src.constants as const\n\ndef getHorizontalLine():\n iter_height = 0\n iter_width = 0\n horizontal_line = ''\n while iter_height < const.HEIGHT:\n while iter_width < const.WIDTH:\n horizontal_line += const.SEPARATOR_W\n iter_width += 1\n if iter_width == const.WIDTH:\n horizontal_line += const.SEPARATOR_W_END + '\\n'\n iter_height += 1\n return horizontal_line\n\ndef getVisualTable(table):\n iter_height = 0\n iter_width = 0\n table_view = ''\n while iter_height < const.HEIGHT:\n table_view += getHorizontalLine()\n while iter_width < const.WIDTH:\n table_view += const.SEPARATOR_H + table[iter_height][iter_width]\n if iter_width == const.WIDTH-1:\n table_view += const.SEPARATOR_H + '\\n'\n iter_width += 1\n if iter_height == const.HEIGHT-1:\n table_view += getHorizontalLine()\n iter_height += 1\n iter_width = 0\n \n return table_view\n\ndef getQuestions():\n return ('Выбери действие:\\n1) Сделать запись\\n2) Получить значение по координатам'\n +'\\n3) Показать все ячейки\\n4) Удалить значение по координатам\\n'+\n '0) Программа завершает работу.\\n')\n\ndef getAnswers(answer='none'): \n if answer == 'engaged':\n return 'Ячейка занята! Перезаписать?\\n'\n elif answer == 'done':\n return 'Запись сделана!\\n'\n elif answer == 'recording-format':\n return 'Введите x и y в формате x=2;y=2;value=\\'v\\'\\n'\n elif answer == 'input-format':\n return 'Введите x и y в формате x=2;y=2;\\n'\n elif answer == 'emptyCell':\n return 'пустая ячейка\\n'\n elif answer == 'deleted':\n return 'Запись удалена!\\n'\n elif answer == 'none':\n return 'Информация о вопросе не передана'\n else:\n return 'Значение в ячейке: {}'.format(answer)\n","sub_path":"04_Some_table/src/graphical_representation.py","file_name":"graphical_representation.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"191762125","text":"from django.conf.urls import url, include\r\nfrom django.views import generic\r\n\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n    url('^$', generic.RedirectView.as_view(\r\n        url='./userstories/'), name=\"index\"),\r\n    url('^anwendungen/', include(views.AnwendungenViewSet().urls)),\r\n    url('^userstories/', include(views.UserstoriesViewSet().urls)),\r\n    url('^rollen/', include(views.RollenViewSet().urls)),\r\n    url('^schlagworte/', include(views.SchlagworteViewSet().urls)),\r\n    url('^glossarentraege/', include(views.GlossareintraegeViewSet().urls)),\r\n    url('^notizen/', include(views.NotizenViewSet().urls)),\r\n]\r\n","sub_path":"userstories/webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"539542842","text":"from socket import error as CannotListenError\n\nfrom pycloudia.activities.facades.interfaces import IListener\nfrom pycloudia.activities.facades.exceptions import ListenFailedError\n\n\nclass HttpListener(IListener):\n    \"\"\"\n    :type logger: L{ILogger}\n    :type io_loop: L{tornado.ioloop.IOLoop}\n    :type protocol_factory: L{pycloudia.activities.facades.tornado_impl.protocol.ProtocolFactory}\n    \"\"\"\n    logger = None\n    io_loop = None\n    protocol_factory = None\n\n    def __init__(self, host, min_port=8081, max_port=8084):\n        self.host = host\n        self.port = None\n        self.min_port = min_port\n        self.max_port = max_port\n\n    def start(self, director):\n        protocol = self.protocol_factory(director)\n        for port in range(self.min_port, self.max_port + 1):\n            try:\n                protocol.listen(port, self.host, io_loop=self.io_loop)\n                self.logger.info('Listening started on %s:%s', self.host, port)\n            except CannotListenError as e:\n                self.logger.info('Listening failed on %s:%s -- %s', self.host, port, e)\n            else:\n                # Stop probing once a port binds successfully.\n                self.port = port\n                return\n        # No port in the configured range could be bound.\n        raise ListenFailedError(self.host, (self.min_port, self.max_port))\n","sub_path":"pycloudia/activities/facades/tornado_impl/listeners.py","file_name":"listeners.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"38663944","text":"# This is the first in a series of exercises focused on working with\n# «streams». A stream is like a sequence, but it is not held in\n# memory all at once: instead, pieces of the stream are extracted\n# from the input (e.g. a file), then processed and discarded, before\n# another piece is extracted from the input. Some of the concepts\n# that we will explore are available in the ‹asyncio› library which\n# we will look at next week. However, for now, we will do everything\n# by hand, to get a better understanding of the principles.\n\n# A «stream processor» will be a (semi)coroutine (i.e. a generator)\n# which takes another (semi)coroutine as an argument. It will\n# extract data from the ‘upstream’ (the coroutine that it got as an\n# argument) using ‹next› and it'll send it further ‘downstream’\n# using ‹yield›.\n\n# We will use the convention that an empty stream yields ‹None›\n# forever (i.e. we will not use ‹StopIteration›). A «source» is like\n# a stream processor, but does not take another stream processor as\n# an argument: instead, it creates a new stream. A «sink» is another\n# variation: it takes a stream, but does not yield anything. It\n# «consumes» the stream. Obviously, stream processors can be\n# chained: the chain starts with a source, followed by some\n# processors and ends with a sink. 
\n\n# Let us first define a simple source, which yields chunks of text.\n# To use it, do something like: ‹stream, cnt = make_test_source()›.\n# The ‹cnt› variable will keep track of how many chunks were pulled\n# out of the stream – this is useful for testing.\n\nclass Box:\n def __init__( self, v ):\n self.value = v\n\ndef make_test_source():\n counter = Box( 0 )\n def test_source():\n yield \"hello \"\n counter.value += 1\n yield \"world\\ni am\\n\"\n counter.value += 1\n yield \" a\"\n counter.value += 1\n yield \" strea\"\n counter.value += 1\n yield \"m\\nsour\"\n counter.value += 1\n yield \"ce\\n\"\n counter.value += 1\n while True:\n yield None\n return ( test_source(), counter )\n\n# What follows is a very simple sink, which prints the content of\n# the stream to ‹stdout›:\n\ndef dump_stream( stream ):\n while True:\n x = next( stream )\n if x is None: break\n print( end = x )\n\n# Your first goal is to define a simple stream processor, which\n# takes a stream of chunks (like the test source above) and produces\n# a stream of «lines». Each line ends with a newline character. To\n# keep in line with the stated goal of minimizing memory use, the\n# processor should only pull out as many chunks as it needs to, and\n# not more.\n\ndef stream_getline( stream ):\n pass\n\ndef test_main():\n stream, counter = make_test_source()\n assert counter.value == 0\n lines = stream_getline( stream )\n assert counter.value == 0\n assert next( lines ) == \"hello world\\n\"\n assert counter.value == 1\n assert next( lines ) == \"i am\\n\"\n assert counter.value == 1\n assert next( lines ) == \" a stream\\n\"\n assert counter.value == 4\n assert next( lines ) == \"source\\n\"\n assert counter.value == 5\n assert next( lines ) is None\n assert counter.value == 6\n\nif __name__ == '__main__':\n test_main()\n","sub_path":"06/stream-getline.py","file_name":"stream-getline.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"171012577","text":"import os, sys\r\nimport flask\r\nimport json, csv\r\n\r\nfrom flask import Flask, redirect, url_for, request\r\nfrom flask import render_template\r\n\r\n# from build_teams import build_teams\r\nfrom build_teams_with_gender import *\r\nfrom build_teams_with_random import *\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index(features = None, teams = None):\r\n # if we're running\r\n # if request.method == 'POST':\r\n # classname = str(request.form['classname'])\r\n # num_teams = int(request.form['numteams'])\r\n #\r\n # people = json.load(open('static/people.json'))\r\n # # features = json.load(open('static/traits.json'))\r\n #\r\n # input_feats = request.form.getlist('features')\r\n # for i in range(len(input_feats)):\r\n # input_feats[i] = str(input_feats[i])\r\n #\r\n # # # just testing\r\n # # teams = []\r\n # # teams.append(people[0:2])\r\n # # teams.append(people[2:4])\r\n # # teams.append(people[4:6])\r\n # teams = build_teams(num_teams, people, input_feats)\r\n # return render_template('index.html', teams = teams, features = input_feats)\r\n\r\n classname = \"\"\r\n num_teams = 1\r\n people = json.load(open('static/people.json'))\r\n input_feats = []\r\n algo_type = None\r\n\r\n if 'classname' in request.args:\r\n classname = str(request.args['classname'])\r\n if 'numteams' in request.args:\r\n num_teams = int(request.args['numteams'])\r\n if 'features' in request.args:\r\n input_feats = request.args.getlist('features')\r\n if 'run' in 
request.args:\r\n algo_type = str(request.args['run'])\r\n for i in range(len(input_feats)):\r\n input_feats[i] = str(input_feats[i])\r\n if input_feats and algo_type:\r\n if algo_type == 'Random':\r\n teams = build_random_teams(num_teams, people, input_feats)\r\n elif algo_type == 'Run':\r\n teams = build_teams(num_teams, people, input_feats)\r\n else:\r\n teams = []\r\n return render_template('index.html', teams = teams, features = input_feats)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, host='0.0.0.0')\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"2835007","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nGet twinpy strucuture\n\"\"\"\n\nimport argparse\nfrom twinpy.api_twinpy import Twinpy\n\n\n# argparse\ndef get_argparse():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-pk', '--shear_pk', type=int,\n help=\"AiidaShearWorkChain PK.\")\n args = parser.parse_args()\n\n return args\n\n\ndef main(shear_pk,\n ):\n twinpy = Twinpy.initialize_from_aiida_shear(shear_pk)\n\nif __name__ == '__main__':\n args = get_argparse()\n main(shear_pk=args.shear_pk,\n )\n","sub_path":"scripts/twinpy-shear.py","file_name":"twinpy-shear.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"20771429","text":"String = input()\ndef numberOfVowel(String):\n vowel =\"AEIOUaeiou\"\n counter = 0\n vowelPlace = list()\n\n for i in String:\n if i in vowel:\n counter+=1\n vowelPlace.append(i)\n\n if counter == 0:\n return \"No vowels in the name!\"\n else:\n return \"Vowels : \" + str(vowelPlace) + \". 
Total number of vowels: \" + str(counter)\n\nprint(numberOfVowel(String))","sub_path":"CSE111 Lab Assignment 2/Task6.py","file_name":"Task6.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"641552857","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.5-i386/egg/ore/xapian/operation.py\n# Compiled at: 2008-11-06 00:27:28\n\"\"\"\n$Id: $\n\"\"\"\nfrom zope import component, interface\nimport transaction, threading, xappy, interfaces, queue, logging\nlog = logging.getLogger('ore.xapian')\n\nclass IndexOperation(object):\n \"\"\"\n an async/queued index operation\n \"\"\"\n interface.implements(interfaces.IIndexOperation)\n __slots__ = ('oid', 'resolver_id', 'requeue')\n requeue = False\n\n def __init__(self, oid, resolver_id):\n self.oid = oid\n self.resolver_id = resolver_id\n\n def resolve(self):\n if self.resolver_id:\n resolver = component.getUtility(interfaces.IResolver, self.resolver_id)\n else:\n resolver = component.getUtility(interfaces.IResolver)\n instance = resolver.resolve(self.oid)\n if not instance:\n log.error('Idx Operation - Could Not Resolve %s' % self.oid)\n return\n return instance\n\n def process(self, connection):\n raise NotImplemented\n\n @property\n def document_id(self):\n return self.oid\n\n\nclass AddOperation(IndexOperation):\n interface.implements(interfaces.IAddOperation)\n\n def process(self, connection):\n if interfaces.DEBUG_LOG:\n log.info('Adding %r' % self.document_id)\n instance = self.resolve()\n doc = interfaces.IIndexer(instance).document(connection)\n doc.id = self.document_id\n doc.fields.append(xappy.Field('resolver', self.resolver_id or ''))\n connection.add(doc)\n\n\nclass ModifyOperation(IndexOperation):\n interface.implements(interfaces.IModifyOperation)\n\n def process(self, connection):\n if interfaces.DEBUG_LOG:\n log.info('Modifying %r' % self.document_id)\n instance = self.resolve()\n doc = interfaces.IIndexer(instance).document(connection)\n doc.id = self.document_id\n doc.fields.append(xappy.Field('resolver', self.resolver_id))\n connection.replace(doc)\n\n\nclass DeleteOperation(IndexOperation):\n interface.implements(interfaces.IDeleteOperation)\n\n def process(self, connection):\n if interfaces.DEBUG_LOG:\n log.info('Deleting %r' % self.document_id)\n connection.delete(self.document_id)\n\n\nclass OperationBufferManager(object):\n \"\"\"\n ideally we'd be doing this via the synchronizer api, but that has several\n issues, which i need to work on in the transaction package, for now the\n standard transaction manager api suffices.\n \"\"\"\n\n def __init__(self, buffer):\n self.buffer = buffer\n\n def tpc_finish(self, transaction):\n self.buffer.flush()\n\n def abort(self, transaction):\n self.buffer.clear()\n\n def sortKey(self):\n return str(id(self))\n\n def commit(self, transaction):\n pass\n\n tpc_abort = abort\n tpc_vote = tpc_begin = commit\n\n\nclass OperationBuffer(object):\n \"\"\"\n an operation buffer aggregates operations across a transaction\n \"\"\"\n\n def __init__(self):\n self.ops = {}\n self.registered = False\n\n def add(self, op):\n \"\"\"add an operation to the buffer, aggregating with existing operations\"\"\"\n previous = self.ops.get(op.document_id)\n if previous is not None:\n op = self._choose(previous, op)\n if op is not None:\n self.ops[op.document_id] = op\n if not 
self.registered:\n self._register()\n return\n\n def clear(self):\n self.ops = {}\n self.registered = False\n self.manager = None\n return\n\n def flush(self):\n for op in self.ops.values():\n queue.index_queue.put(op)\n\n self.ops = {}\n self.registered = False\n self.manager = None\n return\n\n def _register(self):\n self.manager = OperationBufferManager(self)\n self.registered = True\n transaction.get().join(self.manager)\n\n def _choose(self, previous, new):\n \"\"\"\n for a given content object, choose one operation to perform given\n two candidates. can also return no operations.\n \"\"\"\n p_kind = interfaces.IDeleteOperation.providedBy(previous) and 2 or interfaces.IAddOperation.providedBy(previous) and 1 or interfaces.IModifyOperation.providedBy(previous) and 0\n n_kind = interfaces.IDeleteOperation.providedBy(new) and 2 or interfaces.IAddOperation.providedBy(new) and 1 or interfaces.IModifyOperation.providedBy(new) and 0\n if p_kind == 1 and n_kind == 2:\n return\n if p_kind > n_kind:\n return previous\n return new\n\n\n_buffer = threading.local()\n\ndef get_buffer():\n op_buffer = getattr(_buffer, 'buffer', None)\n if op_buffer is not None:\n return op_buffer\n op_buffer = OperationBuffer()\n _buffer.buffer = op_buffer\n return op_buffer\n\n\nclass OperationFactory(object):\n interface.implements(interfaces.IOperationFactory)\n __slots__ = ('context', )\n resolver_id = ''\n\n def __init__(self, context):\n self.context = context\n\n def add(self):\n return self._store(AddOperation(*self._id()))\n\n def modify(self):\n return self._store(ModifyOperation(*self._id()))\n\n def remove(self):\n return self._store(DeleteOperation(*self._id()))\n\n def _store(self, op):\n if interfaces.DEBUG_SYNC and interfaces.DEBUG_SYNC_IDX:\n if interfaces.DEBUG_LOG:\n log.info('Processing %r %r' % (op.oid, op))\n op.process(interfaces.DEBUG_SYNC_IDX)\n interfaces.DEBUG_SYNC_IDX.flush()\n if interfaces.DEBUG_LOG:\n log.info('Flushed Index')\n else:\n get_buffer().add(op)\n\n def _id(self):\n if self.resolver_id:\n resolver = component.getUtility(interfaces.IResolver, self.resolver_id)\n else:\n resolver = component.getUtility(interfaces.IResolver)\n oid = resolver.id(self.context)\n if not oid:\n raise KeyError('Key Not Found %r' % self.context)\n return (\n oid, self.resolver_id)","sub_path":"pycfiles/ore.xapian-0.5.0-py2.5/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"458002447","text":"\"\"\"MODULE: Afficher et comparer le highscore de l'utilisateur connecté\"\"\"\r\n\r\nclass scoring():\r\n def __init__(self, name, note):\r\n self.name = name\r\n self.note = str(note)\r\n\r\n \"\"\"Fonction afficher\"\"\"\r\n def afficher(self):\r\n with open (\"annexes/highscore.txt\", 'r') as f:\r\n data = f.read()\r\n return data\r\n\r\n \"\"\"Fonction pour comparer la note et le dernier highscore\"\"\"\r\n def bestScore(self):\r\n best = self.afficher()\r\n if int(best) < int(self.note):\r\n with open (\"annexes/highscore.txt\", 'w') as f:\r\n f.write(self.note)\r\n else:\r\n print (\"Vous n'avez pas battu votre meilleur score\")\r\n print(\"Votre meilleur score est: %s\" %self.afficher())\r\n\r\n\"\"\"MAIN DE TEST\"\"\"\r\nif __name__ == '__main__':\r\n sc = scoring(\"rezr\", 3)\r\n sc.bestScore()\r\n sc2 = scoring(\"haha\", 30)\r\n print (sc2.afficher())\r\n sc2.bestScore()\r\n print 
(sc2.afficher())\r\n","sub_path":"scrumdibou/scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"122325763","text":"# Demonstration code developed bu Mayo Clinic Radiology Informatics Laboratory\n#\n#\n# Module 2 -SVM\n# Load the libraries\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import svm\nimport pandas as pd\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import make_moons, make_circles, make_classification\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\n# read the CSV file\n\nData=pd.read_csv ('DataExample.csv')\n\n\"\"\"\n\nif you need to print or have access to the data as numpy array\n\nprint (Data)\nprint(Data.as_matrix(columns=['NAWMpost']))\n\n\"\"\"\n\n\nClassNAWMpost=(Data['NAWMpost'].values)\nClassNAWMpost= (np.asarray(ClassNAWMpost))\nClassNAWMpost=ClassNAWMpost[~np.isnan(ClassNAWMpost)]\nClassNAWMpre=(Data[['NAWMpre']].values)\nClassNAWMpre= (np.asarray(ClassNAWMpre))\nClassNAWMpre=ClassNAWMpre[~np.isnan(ClassNAWMpre)]\nClassTUMORpost=(Data[['TUMORpost']].values)\nClassTUMORpost= (np.asarray(ClassTUMORpost))\nClassTUMORpost=ClassTUMORpost[~np.isnan(ClassTUMORpost)]\nClassTUMORpre=(Data[['TUMORpre']].values)\nClassTUMORpre= (np.asarray(ClassTUMORpre))\nClassTUMORpre=ClassTUMORpre[~np.isnan(ClassTUMORpre)]\nX_1 = np.stack((ClassNAWMpost,ClassNAWMpre)) # we only take the first two features.\nX_2 = np.stack((ClassTUMORpost,ClassTUMORpre))\nX=np.concatenate((X_1.transpose(), X_2.transpose()),axis=0)\ny =np.zeros((np.shape(X))[0])\ny[np.shape(X_1)[1]:]=1\n\nh = .02 # step size in the mesh\n\n# we create an instance of SVM and fit out data. We do not scale our\n# data since we want to plot the support vectors\nC = 1.0 # SVM regularization parameter\nsvc = svm.SVC(kernel='linear', C=C).fit(X, y)\nrbf_svc = svm.SVC(kernel='rbf', gamma=0.1, C=10).fit(X, y)\npoly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)\nlin_svc = svm.LinearSVC(C=C).fit(X, y)\n\n# create a mesh to plot in\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n# title for the plots\ntitles = ['SVC with linear kernel',\n 'LinearSVC (linear kernel)',\n 'SVC with RBF kernel',\n 'SVC with polynomial (degree 3) kernel']\n\n\nfor i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):\n # Plot the decision boundary. 
For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n plt.subplot(2, 2, i + 1)\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)\n\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)\n plt.xlabel('Intensity post contrast')\n plt.ylabel('Intensity pre contrast')\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.xticks(())\n plt.yticks(())\n plt.title(titles[i])\n\nplt.show()\n\n\n# understanding margins\n\nfor C in [1,2,3,10]:\n\tfig = plt.subplot()\n\tclf = svm.SVC(C,kernel='linear')\n\tclf.fit(X, y)\n\t# create a mesh to plot in\n\tx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n\ty_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n\txx = np.linspace(x_min,x_max)\n\t# print (xx)\n\txx=np.asarray(xx)\n\t# get the separating hyperplane\n\tw = clf.coef_[0]\n\t# print(w)\n\ta = -w[0] / w[1]\n\t# print (a)\n\tyy = a * xx - (clf.intercept_[0]) / w[1]\n\t# print(yy)\n\t# plot the parallels to the separating hyperplane that pass through the\n\t# support vectors\n\tb = clf.support_vectors_[0]\n\tyy_down = a * xx + (b[1] - a * b[0])\n\tb = clf.support_vectors_[-1]\n\tyy_up = a * xx + (b[1] - a * b[0])\n\n\t# plot the line, the points, and the nearest vectors to the plane\n\tplt.plot(xx, yy, 'k-')\n\tplt.plot(xx, yy_down, 'k--')\n\tplt.plot(xx, yy_up, 'k--')\n\n\tplt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],\n\t s=80, facecolors='none')\n\tplt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)\n\tplt.axis('tight')\n\tplt.show()\n\n\n\n# compare all classifiers\n\n\n\n\nh = .02 # step size in the mesh\nlinearly_separable = (X, y)\nnames = [\"Nearest Neighbors\", \"Linear SVM\", \"RBF SVM\", \"Decision Tree\",\n \"Random Forest\", \"AdaBoost\", \"Naive Bayes\", \"Linear Discriminant Analysis\",\n \"Quadratic Discriminant Analysis\"]\nclassifiers = [\n KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(gamma=2, C=1),\n DecisionTreeClassifier(max_depth=5),\n RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\n AdaBoostClassifier(),\n GaussianNB(),\n LinearDiscriminantAnalysis(),\n QuadraticDiscriminantAnalysis()]\n\n\ndatasets = [linearly_separable\n ]\n\nfigure = plt.figure(figsize=(27, 9))\ni = 1\n# iterate over datasets\nfor ds in datasets:\n # preprocess dataset, split into training and test part\n X, y = ds\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)\n\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n # Plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n i += 1\n\n # iterate over classifiers\n for name, clf in zip(names, classifiers):\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, 
y_test)\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,\n alpha=0.6)\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(name)\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\n size=15, horizontalalignment='right')\n i += 1\n\nfigure.subplots_adjust(left=.02, right=.98)\nplt.show()\n","sub_path":"Code/Module3.py","file_name":"Module3.py","file_ext":"py","file_size_in_byte":7467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"395994537","text":"import datetime\nimport json\nimport os\nimport re\nimport sqlalchemy\nfrom szurubooru import config, db, errors\nfrom szurubooru.func import util, tag_categories\n\nclass TagNotFoundError(errors.NotFoundError): pass\nclass TagAlreadyExistsError(errors.ValidationError): pass\nclass TagIsInUseError(errors.ValidationError): pass\nclass InvalidTagNameError(errors.ValidationError): pass\nclass InvalidTagCategoryError(errors.ValidationError): pass\nclass InvalidTagRelationError(errors.ValidationError): pass\n\ndef _verify_name_validity(name):\n name_regex = config.config['tag_name_regex']\n if not re.match(name_regex, name):\n raise InvalidTagNameError('Name must satisfy regex %r.' 
% name_regex)\n\ndef _get_plain_names(tag):\n return [tag_name.name for tag_name in tag.names]\n\ndef _lower_list(names):\n return [name.lower() for name in names]\n\ndef _check_name_intersection(names1, names2):\n return len(set(_lower_list(names1)).intersection(_lower_list(names2))) > 0\n\ndef export_to_json():\n output = {\n 'tags': [],\n 'categories': [],\n }\n all_tags = db.session \\\n .query(db.Tag) \\\n .options(\n sqlalchemy.orm.joinedload('suggestions'),\n sqlalchemy.orm.joinedload('implications')) \\\n .all()\n for tag in all_tags:\n item = {\n 'names': [tag_name.name for tag_name in tag.names],\n 'usages': tag.post_count,\n 'category': tag.category.name,\n }\n if len(tag.suggestions):\n item['suggestions'] = \\\n [rel.names[0].name for rel in tag.suggestions]\n if len(tag.implications):\n item['implications'] = \\\n [rel.names[0].name for rel in tag.implications]\n output['tags'].append(item)\n for category in tag_categories.get_all_categories():\n output['categories'].append({\n 'name': category.name,\n 'color': category.color,\n })\n export_path = os.path.join(config.config['data_dir'], 'tags.json')\n with open(export_path, 'w') as handle:\n handle.write(json.dumps(output, separators=(',', ':')))\n\ndef get_tag_by_name(name):\n return db.session \\\n .query(db.Tag) \\\n .join(db.TagName) \\\n .filter(db.TagName.name.ilike(name)) \\\n .first()\n\ndef get_tags_by_names(names):\n names = util.icase_unique(names)\n if len(names) == 0:\n return []\n expr = sqlalchemy.sql.false()\n for name in names:\n expr = expr | db.TagName.name.ilike(name)\n return db.session.query(db.Tag).join(db.TagName).filter(expr).all()\n\ndef get_or_create_tags_by_names(names):\n names = util.icase_unique(names)\n for name in names:\n _verify_name_validity(name)\n related_tags = get_tags_by_names(names)\n new_tags = []\n for name in names:\n found = False\n for related_tag in related_tags:\n if _check_name_intersection(_get_plain_names(related_tag), [name]):\n found = True\n break\n if not found:\n new_tag = create_tag(\n names=[name],\n category_name=tag_categories.get_default_category().name,\n suggestions=[],\n implications=[])\n db.session.add(new_tag)\n new_tags.append(new_tag)\n return related_tags, new_tags\n\ndef get_siblings(tag):\n tag_alias = sqlalchemy.orm.aliased(db.Tag)\n pt_alias1 = sqlalchemy.orm.aliased(db.PostTag)\n pt_alias2 = sqlalchemy.orm.aliased(db.PostTag)\n result = db.session \\\n .query(tag_alias, sqlalchemy.func.count(tag_alias.tag_id)) \\\n .join(pt_alias1, pt_alias1.tag_id == tag_alias.tag_id) \\\n .join(pt_alias2, pt_alias2.post_id == pt_alias1.post_id) \\\n .filter(pt_alias2.tag_id == tag.tag_id) \\\n .filter(pt_alias1.tag_id != tag.tag_id) \\\n .group_by(tag_alias.tag_id) \\\n .order_by(tag_alias.post_count.desc()) \\\n .limit(50)\n return result\n\ndef merge_tags(source_tag, target_tag):\n db.session.execute(\n sqlalchemy.sql.expression.update(db.PostTag) \\\n .where(db.PostTag.tag_id == source_tag.tag_id) \\\n .values(tag_id=target_tag.tag_id))\n db.session.delete(source_tag)\n\ndef create_tag(names, category_name, suggestions, implications):\n tag = db.Tag()\n tag.creation_time = datetime.datetime.now()\n update_names(tag, names)\n update_category_name(tag, category_name)\n update_suggestions(tag, suggestions)\n update_implications(tag, implications)\n return tag\n\ndef update_category_name(tag, category_name):\n category = db.session \\\n .query(db.TagCategory) \\\n .filter(db.TagCategory.name == category_name) \\\n .first()\n if not category:\n category_names = 
tag_categories.get_all_category_names()\n raise InvalidTagCategoryError(\n 'Category %r is invalid. Valid categories: %r.' % (\n category_name, category_names))\n tag.category = category\n\ndef update_names(tag, names):\n names = util.icase_unique([name for name in names if name])\n if not len(names):\n raise InvalidTagNameError('At least one name must be specified.')\n for name in names:\n _verify_name_validity(name)\n expr = sqlalchemy.sql.false()\n for name in names:\n if util.value_exceeds_column_size(name, db.TagName.name):\n raise InvalidTagNameError('Name is too long.')\n expr = expr | db.TagName.name.ilike(name)\n if tag.tag_id:\n expr = expr & (db.TagName.tag_id != tag.tag_id)\n existing_tags = db.session.query(db.TagName).filter(expr).all()\n if len(existing_tags):\n raise TagAlreadyExistsError(\n 'One of names is already used by another tag.')\n tag_names_to_remove = []\n for tag_name in tag.names:\n if not _check_name_intersection([tag_name.name], names):\n tag_names_to_remove.append(tag_name)\n for tag_name in tag_names_to_remove:\n tag.names.remove(tag_name)\n for name in names:\n if not _check_name_intersection(_get_plain_names(tag), [name]):\n tag.names.append(db.TagName(name))\n\ndef update_implications(tag, relations):\n if _check_name_intersection(_get_plain_names(tag), relations):\n raise InvalidTagRelationError('Tag cannot imply itself.')\n related_tags, new_tags = get_or_create_tags_by_names(relations)\n db.session.flush()\n tag.implications = related_tags + new_tags\n\ndef update_suggestions(tag, relations):\n if _check_name_intersection(_get_plain_names(tag), relations):\n raise InvalidTagRelationError('Tag cannot suggest itself.')\n related_tags, new_tags = get_or_create_tags_by_names(relations)\n db.session.flush()\n tag.suggestions = related_tags + new_tags\n","sub_path":"server/szurubooru/func/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":6726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"588219393","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\n\ndef escape_maze(r, c, N, queue, maze):\n maze[r][c] = 5 # 스타트 포인트를 5로 설정하고 이후 6, 7, 8, 9 이런 식으로 올라간다.\n while queue:\n t = queue.pop(0)\n tr, tc = t[0], t[1]\n if tr != 0 and maze[tr-1][tc] == 3:\n return maze[tr][tc]-5\n elif tc != N-1 and maze[tr][tc+1] == 3:\n return maze[tr][tc]-5\n elif tr != N-1 and maze[tr+1][tc] == 3:\n return maze[tr][tc]-5\n elif tc != 0 and maze[tr][tc-1] == 3:\n return maze[tr][tc]-5\n \n if tr != 0 and maze[tr-1][tc] == 0:\n queue.append([tr-1, tc])\n maze[tr-1][tc] = maze[tr][tc]+1\n if tc != N-1 and maze[tr][tc+1] == 0:\n queue.append([tr, tc+1])\n maze[tr][tc+1] = maze[tr][tc]+1\n if tr != N-1 and maze[tr+1][tc] == 0:\n queue.append([tr+1, tc])\n maze[tr+1][tc] = maze[tr][tc]+1\n if tc != 0 and maze[tr][tc-1] == 0:\n queue.append([tr, tc-1])\n maze[tr][tc-1] = maze[tr][tc]+1\n return 0 \n\ntestcase = int(input())\n\nfor tc in range(1, testcase+1):\n N = int(input())\n maze = [list(map(int, input())) for _ in range(N)]\n queue = []\n bre = 0\n for r in range(N):\n for c in range(N):\n if maze[r][c] == 2:\n queue.append([r, c])\n ans = escape_maze(r, c, N, queue, maze)\n bre = 1\n break\n if bre == 1:\n break\n print(\"#{} {}\".format(tc, ans))\n","sub_path":"SSAFY/algorithms/problems/swea-5105-미로의거리/SWEA5105_세환.py","file_name":"SWEA5105_세환.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"265808375","text":"#!/usr/bin/env python\n\nfrom subprocess import Popen, PIPE\nimport re\nimport time\nimport os\nimport sys\nimport simplejson as json\n\ndef download_cert(ip, starttls_method=None, port=None):\n if not re.match('^\\d+\\.\\d+\\.\\d+\\.\\d+$', ip):\n raise Exception(\"Invalid ip address passes\")\n\n if starttls_method is None:\n if port is None:\n port=443\n cmd = [\"./timeout3\", \"-t 5\",\n \"openssl\",\n \"s_client\",\n \"-connect\",\n \"%s:%s\" % (ip, port)]\n else:\n if port is None:\n raise Exception(\"port is mandatory for starttls\")\n cmd = [\"./timeout3\", \"-t 5\",\n \"openssl\",\n \"s_client\",\n \"-starttls\",\n starttls_method,\n \"-connect\",\n \"%s:%s\" % (ip, port)]\n\n p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False)\n p.stdin.close()\n\n cert = \"\"\n # Try to read the certificate\n for l in p.stdout.readlines():\n if cert:\n cert += l\n if l.startswith(\"-----END CERTIFICATE-----\"):\n break\n elif l.startswith(\"-----BEGIN CERTIFICATE\"):\n cert += l\n\n return cert\n\ndef save_cert(ip, cert, proto=\"https\"):\n if not re.match('^\\d+\\.\\d+\\.\\d+\\.\\d+$', ip):\n raise Exception(\"Invalid ip address passes\")\n\n try:\n os.mkdir(\"data/certs\")\n os.mkdir(\"data/certs/%s\" % proto)\n except:\n pass\n filename = \"data/certs/%s/%s\" % (proto, ip)\n fh = open(filename, \"w\")\n fh.write(str(cert))\n fh.close()\n\ndef download_cert_https(ip):\n return download_cert(ip, port=443)\n\ndef download_cert_smtp(ip):\n return download_cert(ip, port=25, starttls_method=\"smtp\")\n\ndef download_cert_ldapi(ip):\n return download_cert(ip, port=389, starttls_method=\"ldap\")\n\ndef download_cert_ldap(ip):\n return download_cert(ip, port=636)\n\ndef parse_heartbleed_json(json_file):\n fh = open(json_file)\n return json.load(fh)\n \nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: %s \" % sys.argv[0])\n sys.exit(1)\n\n heartbleed_servers = parse_heartbleed_json(sys.argv[1])\n\n count = 0\n for ip in heartbleed_servers.keys():\n if heartbleed_servers[ip]['status'] is not True:\n continue\n count += 1\n # Wait a bit every 20 hosts\n if (count % 20) == 0:\n time.sleep(10)\n # Fork into background each job\n if not os.fork():\n cert = download_cert_https(ip)\n save_cert(ip, cert, \"https\")\n sys.exit(0)\n\n# vim: sts=4 expandtab autoindent\n","sub_path":"certpull.py","file_name":"certpull.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"403910084","text":"#!/usr/bin/python\n\nimport csv\nimport cv2\nimport sklearn\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom random import shuffle\nfrom keras.models import Sequential, Model\nfrom keras.layers import Flatten, Dense, Lambda, Input, merge, ELU\nfrom keras.layers.convolutional import Convolution2D, Cropping2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.core import Dropout\nfrom keras.callbacks import *\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom sklearn.model_selection import train_test_split\n\nTRAINING_DATA_DIRS = ['training_data/basic_lap/', \\\n 'training_data/basic_lap_clockwise/',\n 'training_data/recovery_lap/',\n #'training_data/recovery_lap_clockwise/',\n 'training_data/smooth_curves/',\n 'training_data/smooth_curves_clockwise/',\n 'training_data/basic_lap_2/',\n 'training_data/basic_lap_2_clockwise/',\n 'training_data/recovery_lap_2/']\n 
#'training_data/recovery_lap_2_clockwise/']\n\ndef process_image(filename):\n    \"\"\"\n    Open and convert image to RGB\n    \"\"\"\n    image = cv2.imread(filename)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    return image\n\ndef generator(samples, batch_size=32):\n    \"\"\"\n    Generate next batch of training/validation data\n    \"\"\"\n    num_samples = len(samples)\n    while 1: # Loop forever so the generator never terminates\n        shuffle(samples)\n        for offset in range(0, num_samples, batch_size):\n            batch_samples = samples[offset:offset+batch_size]\n\n            images = []\n            angles = []\n            for batch_sample in batch_samples:\n                image = process_image(batch_sample.filename)\n                if batch_sample.flip:\n                    image = cv2.flip(image, 1)\n                images.append(image)\n                angles.append(batch_sample.steering)\n                #print(batch_sample.filename + \" angle: \" + str(batch_sample.steering))\n\n            # trim image to only see section with road\n            X_train = np.array(images)\n            y_train = np.array(angles)\n            yield sklearn.utils.shuffle(X_train, y_train)\n\nclass Sample(object):\n    \"\"\"Contains a sample and steering angle\"\"\"\n    def __init__(self, filename, steering, flip):\n        self.filename = filename\n        self.steering = steering\n        self.flip = flip\n\n# All samples for training/validation\nsamples = []\n\nfor TRAINING_DATA_DIR in TRAINING_DATA_DIRS:\n    with open(TRAINING_DATA_DIR + 'driving_log.csv') as csvfile:\n        reader = csv.reader(csvfile)\n        for line in reader:\n            current_path = TRAINING_DATA_DIR + 'IMG/' + line[0].split('/')[-1]\n            steering = float(line[3])\n            samples.append(Sample(current_path, steering, False))\n            # Optionally use the other two cameras\n            #current_path = TRAINING_DATA_DIR + 'IMG/' + line[1].split('/')[-1]\n            #samples.append(Sample(current_path, min(1.0, float(line[3]) + 0.2)))\n            #current_path = TRAINING_DATA_DIR + 'IMG/' + line[2].split('/')[-1]\n            #samples.append(Sample(current_path, max(-1.0, float(line[3]) - 0.2)))\n\n# Split samples into three buckets\nsamples_left = list(filter(lambda x : x.steering <= -0.4, samples))\nsamples_center = list(filter(lambda x : x.steering > -0.4 and x.steering < 0.4, samples))\nsamples_right = list(filter(lambda x : x.steering >= 0.4, samples))\n\nshuffle(samples_left)\nshuffle(samples_center)\nshuffle(samples_right)\n\n# Split training/validation data with similar distribution\ntrain_samples = np.concatenate((samples_left[:int(len(samples_left) * 0.8)], samples_center[:int(len(samples_center) * 0.8)], samples_right[:int(len(samples_right) * 0.8)]))\nvalidation_samples = np.concatenate((samples_left[int(len(samples_left) * 0.8):], samples_center[int(len(samples_center) * 0.8):], samples_right[int(len(samples_right) * 0.8):]))\n\n# Plot distribution histograms in training/validation\nfig, axes = plt.subplots(1, 2)\naxes[0].hist([x.steering for x in train_samples], 10)\naxes[0].set_title('Distribution in training')\naxes[1].hist([x.steering for x in validation_samples], 10)\naxes[1].set_title('Distribution in validation')\nplt.show()\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\ndef display_cropped(model, image):\n    \"\"\"\n    Output from cropping layer. 
Will be useful for writeup\n \"\"\"\n cropping_layer = model.get_layer('cropping2d_1')\n cropping_function = K.function([cropping_layer.input], [cropping_layer.output])\n cropped_image = cropping_function([image.reshape(1, 160, 320, 3)])[0]\n plt.title('Original')\n plt.imshow(image)\n plt.show()\n plt.title('Cropped')\n plt.imshow(np.uint8(cropped_image.reshape(70, 320, 3)))\n plt.show()\n\ndef traffic_net():\n \"\"\"\n Multi-Stage Traffic Net with modified hidden layers and filter size\n \"\"\"\n net_input = Input(shape=(160, 320, 3))\n pool1 = net_input\n pool1 = Lambda(lambda x: (x / 255.0) - 0.5)(pool1)\n pool1 = Cropping2D(cropping=((65,25), (0,0)), input_shape=(160, 320, 3))(pool1)\n pool1 = Convolution2D(nb_filter=12, nb_row=5, nb_col=5, subsample=(2, 2), border_mode='valid')(pool1)\n pool1 = ELU()(pool1)\n pool1 = Dropout(0.2)(pool1)\n pool1 = Convolution2D(nb_filter=24, nb_row=5, nb_col=5, subsample=(2, 2), border_mode='valid')(pool1)\n pool1 = ELU()(pool1)\n pool1 = Dropout(0.2)(pool1)\n\n pool2 = Convolution2D(nb_filter=36, nb_row=3, nb_col=3, border_mode='valid')(pool1)\n pool2 = ELU()(pool2)\n pool2 = Dropout(0.2)(pool2)\n pool2 = Convolution2D(nb_filter=48, nb_row=3, nb_col=3, border_mode='valid')(pool2)\n pool2 = ELU()(pool2)\n pool2 = Dropout(0.2)(pool2)\n\n # Connect both stage 1 convolutional layer and stage 2 convolution layer to hidden layers\n pool1 = Flatten()(pool1)\n pool2 = Flatten()(pool2)\n pools = merge([pool1, pool2], mode='concat', concat_axis=1)\n\n fc = Dense(100)(pools)\n fc = ELU()(fc)\n fc = Dropout(0.2)(fc)\n fc = Dense(50)(fc)\n fc = ELU()(fc)\n fc = Dense(10)(fc)\n fc = ELU()(fc)\n fc = Dense(1)(fc)\n\n model = Model(input=net_input, output=fc)\n return model\n\ndef nvidia_net():\n model = Sequential()\n model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320, 3), output_shape=(160,320, 3)))\n model.add(Cropping2D(cropping=((65,25), (0,0)), input_shape=(160,320, 3)))\n model.add(Convolution2D(nb_filter=24, nb_row=5, nb_col=5, subsample=(2, 2), border_mode='valid'))\n model.add(ELU())\n model.add(Dropout(0.2))\n model.add(Convolution2D(nb_filter=36, nb_row=5, nb_col=5, subsample=(2, 2), border_mode='valid'))\n model.add(ELU())\n model.add(Dropout(0.2))\n model.add(Convolution2D(nb_filter=48, nb_row=5, nb_col=5, subsample=(2, 2), border_mode='valid'))\n model.add(ELU())\n model.add(Dropout(0.2))\n model.add(Convolution2D(nb_filter=64, nb_row=3, nb_col=3, subsample=(1, 1), border_mode='valid'))\n model.add(ELU())\n model.add(Dropout(0.2))\n model.add(Convolution2D(nb_filter=64, nb_row=3, nb_col=3, subsample=(1, 1), border_mode='valid'))\n model.add(ELU())\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n\n model.add(Dense(100))\n model.add(ELU())\n model.add(Dropout(0.2))\n model.add(Dense(50))\n model.add(ELU())\n model.add(Dense(10))\n model.add(ELU())\n model.add(Dense(1))\n\n return model\n\nmodel = nvidia_net()\n#model = traffic_net()\nmodel.summary()\n# TODO Exponential decay learning rate\nmodel.compile(loss='mse', optimizer='adam')\nsave_checkpointer = ModelCheckpoint(filepath=\"model.h5\", monitor='val_loss', verbose=1, save_best_only=True)\nstop_checkpointer = EarlyStopping(monitor='val_loss', min_delta=0.0, patience=3, verbose=1, mode='auto')\nmodel.fit_generator(train_generator, samples_per_epoch=len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=10, callbacks=[save_checkpointer, stop_checkpointer])\n#display_cropped(model, 
X_train[0])\nexit()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"485530176","text":"#Imports and setup.\nimport tkinter as tk\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nimport numpy as np\nimport os\nfrom scipy.signal import find_peaks\nfrom scipy.integrate import simps\nfrom scipy.optimize import curve_fit\n#Serial connection class\n\n\ndef start_application():\n global read_real_time\n read_real_time = HL_FSCAV_REAL_TIME()\n read_real_time.master.mainloop()\n\n\nclass HL_FSCAV_REAL_TIME:\n def __init__(self):\n #Definition of app window.\n self.master = tk.Tk()\n self.master.title('FSCAV Real Time')\n self.master.geometry(\"1000x600\")\n self.master.configure(bg='gray')\n #Parameters\n self.path = ''\n self.list_of_files = []\n self.first_integration_point_array = []\n self.second_integration_point_array = []\n self.charge_array = []\n self.exponential_fit_charge_array = []\n self.exponential_fit_parameters = (0.05, 0.1, 0.0002)\n self.response_time = []\n self.rmse_fitting = None\n self.samples_array = []\n self.cvs_array = []\n self.time_array = []\n self.first_integration_point = 0\n self.second_integration_point = 0\n self.refresh_period = 1000\n self.frequency = 500000\n self.reading_bool = False\n self.auto_bool = False\n self.timer_process = None\n self.cv_graph_index = 0\n #Window components\n self.input_frame = tk.Frame(self.master, bg='gray')\n self.input_frame.grid(row=0, padx=10, pady=10)\n self.control_frame = tk.Frame(self.master, bg='gray')\n self.control_frame.grid(row=1, padx=10, pady=10)\n self.graph_frame = tk.Frame(self.master, bg='gray')\n self.graph_frame.grid(row=1, padx=10, pady=10)\n self.right_frame = tk.Frame(self.master, bg='gray')\n self.right_frame.grid(row=0, column=3, padx=10, pady=10)\n tk.Label(self.input_frame, text=\"Inputs\", font=(None, 15), anchor=\"e\", bg='gray').grid(row=0, columnspan=2)\n self.path_input = self.get_input_object(self.input_frame, 'Path', 'gray', [1,0,1,1,0,0], [1,1,1,1,0,0], '\\ ')\n self.first_integration_point_input = self.get_input_object(self.input_frame, 'Sample 1', 'gray', [2,0,1,1,0,0], [2,1,1,1,0,0], '60')\n self.second_integration_point_input = self.get_input_object(self.input_frame, 'Sample 2', 'gray', [3,0,1,1,0,0], [3,1,1,1,0,0], '350')\n self.frequency_input = self.get_input_object(self.input_frame, 'Freq. 
(Hz)', 'gray', [4,0,1,1,0,0], [4,1,1,1,0,0], '500000')\n self.checking_period_input = self.get_input_object(self.input_frame, 'Period (s)', 'gray', [5,0,1,1,0,0], [5,1,1,1,0,0], '10')\n\n tk.Label(self.control_frame, text=\"Control Panel\", font=(None, 15), anchor=\"e\", bg='gray').grid(row=0, column=0, columnspan=2)\n\n self.start_button = self.get_button_object(self.control_frame, self.start_reading_signals, 2, 10, 'Start', [5,0,1,2,0,0])\n self.stop_button = self.get_button_object(self.control_frame, self.stop_reading_signals, 2, 10, 'Stop', [5,2,1,2,0,0])\n self.save_charge_button = self.get_button_object(self.control_frame, self.save_charge, 2, 10, 'Save Charge', [6,0,1,2,0,0])\n self.reset_files_button = self.get_button_object(self.control_frame, self.reset_files, 2, 10, 'Reset Charge', [6,2,1,2,0,0])\n self.previous_button = self.get_button_object(self.control_frame, self.previous_button_pushed, 2, 5, '<', [7,0,1,1,10,10])\n self.file_label = tk.Label(self.control_frame, text=\" \", bg=\"gray\")\n self.file_label.grid(row=7,column=1)\n self.next_button = self.get_button_object(self.control_frame, self.next_button_pushed, 2, 5, '>', [7,2,1,1,10,10])\n self.auto_variable = tk.IntVar()\n self.auto_button = tk.Checkbutton(self.control_frame, text=\"Auto\", variable = self.auto_variable)\n self.auto_button.grid(row=7, column=3, rowspan=1, columnspan=2)\n\n self.response_time_button = self.get_button_object(self.right_frame, self.response_time_button_pushed, 2, 15, 'Calc. response time', [0,0,1,1,10,10])\n self.response_time_label_variable = tk.StringVar()\n self.response_time_label = tk.Label(self.right_frame, text=\" \", bg=\"gray\", textvariable = self.response_time_label_variable)\n self.response_time_label.grid(row=0, column=1)\n\n self.k_input = self.get_input_object(self.right_frame, 'k', 'gray', [2,0,1,1,0,0], [2,1,1,1,0,0], '0.05')\n self.c0_input = self.get_input_object(self.right_frame, 'c0', 'gray', [3,0,1,1,0,0], [3,1,1,1,0,0], '0.1')\n self.base_input = self.get_input_object(self.right_frame, 'b', 'gray', [4,0,1,1,0,0], [4,1,1,1,0,0], '0.0002')\n\n self.list_of_files_box = tk.Listbox(self.control_frame, bg=\"white\")\n self.list_of_files_box.grid(row=8, column=0, columnspan=5, rowspan=3)\n\n self.charge_figure = self.generate_figure(self.master, [4,2], 100, [0,1,1,1,10,10], self.charge_array, 'tab:blue', 'Charge (nA·s)', 'Samples', 10)\n self.cvs_figure = self.generate_figure(self.master, [4,2], 100, [1,1,1,1,10,10], self.charge_array, 'tab:blue', 'Current (nA)', 'Time (s)', 10)\n\n #Menu\n self.menubar = tk.Menu(self.master)\n\n self.filemenu = tk.Menu(self.menubar, tearoff=0)\n self.filemenu.add_command(label=\"Reset Application\", command=self.reset_application)\n self.filemenu.add_command(label=\"Exit\", command=self.master.destroy)\n\n self.edit = tk.Menu(self.menubar, tearoff = 0)\n\n self.menubar.add_cascade(label=\"File\", menu=self.filemenu)\n self.menubar.add_cascade(label = 'Edit', menu=self.edit)\n self.master.config(menu=self.menubar)\n\n def get_input_object(self, macro, label_name, color, label_position, input_position, default_value):\n tk.Label(macro, text=label_name, bg=color).grid(row=label_position[0], column=label_position[1],\n rowspan=label_position[2], columnspan=label_position[3], padx=label_position[4], pady=label_position[5])\n input = tk.Entry(macro)\n input.insert(0, default_value)\n input.grid(row=input_position[0], column=input_position[1], rowspan=input_position[2],\n columnspan=input_position[3], padx=input_position[4], 
pady=input_position[5])\n return input\n\n def get_button_object(self, macro, callback_fcn, height, width, text, position):\n button = tk.Button(master = macro, command = callback_fcn, height = height, width = width, text = text)\n button.grid(row=position[0], column=position[1], rowspan=position[2], columnspan=position[3], padx=position[4], pady=position[5])\n return button\n\n def generate_figure(self, macro, size, dpi, position, array, color, ylabel, xlabel, fontsize):\n figure = Figure(figsize=(size[0], size[1]), dpi=dpi)\n axes = figure.add_subplot(111)\n axes.set_ylabel(ylabel, fontsize=fontsize)\n axes.set_xlabel(xlabel, fontsize=fontsize)\n axes.tick_params(axis='both', labelsize=fontsize)\n line = axes.plot(array, marker='.', color=color)\n scatter = axes.plot([],[], marker='.', color='black')\n figure.tight_layout()\n canvas = FigureCanvasTkAgg(figure, master=macro)\n plot_widget = canvas.get_tk_widget()\n plot_widget.grid(row=position[0], column=position[1], rowspan=position[2], columnspan=position[3], padx=position[4], pady=position[5])\n return figure, axes, line, scatter, canvas, plot_widget\n\n\n\n def start_reading_signals(self):\n if(not self.reading_bool):\n self.reading_bool = True\n self.disable_inputs()\n self.read_signals()\n\n\n def read_signals(self):\n self.get_input_parameters()\n self.read_files()\n self.calculate_charge()\n self.update_charge_graph()\n self.write_file_list()\n if(self.reading_bool): self.timer_process = self.master.after(self.refresh_period, self.read_signals)\n\n\n def stop_reading_signals(self):\n if(self.reading_bool):\n self.reading_bool = False\n self.enable_inputs()\n self.master.after_cancel(self.timer_process)\n\n\n def save_charge(self):\n f = open('charge.txt', \"a\")\n [f.write(self.list_of_files[i]+'\\t'+str(self.charge_array[i])+'\\n') for i in self.samples_array]\n f.close()\n\n def update_charge_graph(self):\n self.charge_figure[2][0].set_data(self.samples_array, self.charge_array)\n self.charge_figure[1].relim()\n self.charge_figure[1].autoscale_view()\n self.charge_figure[4].draw()\n self.charge_figure[4].flush_events()\n\n\n def get_input_parameters(self):\n self.path = r\"\"+self.path_input.get()\n self.auto_bool = self.auto_variable.get()\n self.first_integration_point = int(self.first_integration_point_input.get())\n self.second_integration_point = int(self.second_integration_point_input.get())\n self.frequency = float(self.frequency_input.get())\n self.refresh_period = 1000*int(self.checking_period_input.get())\n\n\n def read_files(self):\n all_files = list(filter(lambda x: x[-4:] == '.txt', os.listdir(self.path)))\n diff_files = np.setdiff1d(all_files, self.list_of_files)\n for file in diff_files:\n matrix = open(self.path+\"/\"+file).read()\n matrix = np.array([item.split() for item in matrix.split('\\n')[:-1]])\n matrix = matrix.astype('float64')\n self.cvs_array.append([x[2] for x in matrix])\n self.list_of_files = np.append(self.list_of_files, diff_files)\n\n\n\n def calculate_charge(self):\n if(self.auto_bool): self.get_auto_intervals()\n else: self.get_manual_intervals()\n self.time_array = np.linspace(0, len(self.cvs_array[0])*(1/self.frequency), len(self.cvs_array[0]))\n self.charge_calculation()\n\n\n def get_manual_intervals(self):\n self.first_integration_point_array = [self.first_integration_point]*len(self.cvs_array)\n self.second_integration_point_array = [self.second_integration_point]*len(self.cvs_array)\n\n\n def get_auto_intervals(self):\n oxidation_length = int(len(self.cvs_array[0])/2)\n 
self.first_integration_point_array = []\n self.second_integration_point_array = []\n for x in self.cvs_array:\n tmp = find_peaks(np.negative(x[0:oxidation_length]))[0]\n try:\n if(tmp[0]<100): self.first_integration_point_array.append(tmp[0])\n else: self.first_integration_point_array.append(self.first_integration_point)\n except:\n self.first_integration_point_array.append(self.first_integration_point)\n try:\n self.second_integration_point_array.append(tmp[1])\n except:\n self.second_integration_point_array.append(self.second_integration_point)\n\n\n def charge_calculation(self):\n self.samples_array = range(0, len(self.first_integration_point_array))\n coeffs = [np.polyfit((self.time_array[self.first_integration_point_array[i]],self.time_array[self.second_integration_point_array[i]]),\n (self.cvs_array[i][self.first_integration_point_array[i]], self.cvs_array[i][self.second_integration_point_array[i]]), 1)\n for i in self.samples_array]\n lines = [(coeffs[i][0] * self.time_array) + coeffs[i][1] for i in self.samples_array]\n Q = [simps(self.cvs_array[i][self.first_integration_point_array[i]:self.second_integration_point_array[i]],\n self.time_array[self.first_integration_point_array[i]:self.second_integration_point_array[i]]) for i in self.samples_array]\n Qline = [simps(lines[i][self.first_integration_point_array[i]:self.second_integration_point_array[i]],\n self.time_array[self.first_integration_point_array[i]:self.second_integration_point_array[i]]) for i in self.samples_array]\n self.charge_array = np.subtract(Q, Qline)\n\n def reset_files(self):\n self.cvs_array = []\n self.time_array = []\n self.charge_array = []\n self.list_of_files = []\n self.samples_array = []\n self.first_integration_point_array = []\n self.second_integration_point_array = []\n self.read_signals()\n\n def disable_inputs(self):\n self.path_input.configure(state=\"disabled\")\n self.first_integration_point_input.configure(state=\"disabled\")\n self.second_integration_point_input.configure(state=\"disabled\")\n self.frequency_input .configure(state=\"disabled\")\n self.checking_period_input.configure(state=\"disabled\")\n self.start_button.configure(bg = \"#7bf76d\")\n self.stop_button.configure(bg = \"SystemButtonFace\")\n\n def enable_inputs(self):\n self.path_input.configure(state=\"normal\")\n self.first_integration_point_input.configure(state=\"normal\")\n self.second_integration_point_input.configure(state=\"normal\")\n self.frequency_input .configure(state=\"normal\")\n self.checking_period_input.configure(state=\"normal\")\n self.start_button.configure(bg = \"SystemButtonFace\")\n self.stop_button.configure(bg = \"#ff4a4a\")\n def write_file_list(self):\n self.list_of_files_box.delete(0, 'end')\n for entry in self.list_of_files:\n self.list_of_files_box.insert('end', entry)\n\n def previous_button_pushed(self):\n if (not self.samples_array or self.cv_graph_index == self.samples_array[0]): return\n elif(self.cv_graph_index>self.samples_array[-1]): self.cv_graph_index = self.samples_array[-1]\n else: self.cv_graph_index = self.cv_graph_index-1\n self.graph_cv()\n self.file_label.config(text=self.cv_graph_index)\n\n def next_button_pushed(self):\n if (not self.samples_array or self.cv_graph_index == self.samples_array[-1]): return\n elif(self.cv_graph_index>self.samples_array[-1]): self.cv_graph_index = self.samples_array[-1]\n else: self.cv_graph_index = self.cv_graph_index+1\n self.graph_cv()\n self.file_label.config(text=self.cv_graph_index)\n\n def graph_cv(self):\n 
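# Redraws the CV trace selected with the </> buttons, marks the two integration\n        # endpoints on it, then rescales the axes and flushes the canvas.\n        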
self.cvs_figure[2][0].set_data(self.time_array, self.cvs_array[self.cv_graph_index])\n        self.cvs_figure[3][0].set_data([self.time_array[self.first_integration_point_array[self.cv_graph_index]], self.time_array[self.second_integration_point_array[self.cv_graph_index]]],\n                                       [self.cvs_array[self.cv_graph_index][self.first_integration_point_array[self.cv_graph_index]], self.cvs_array[self.cv_graph_index][self.second_integration_point_array[self.cv_graph_index]]])\n        self.cvs_figure[1].relim()\n        self.cvs_figure[1].autoscale_view()\n        self.cvs_figure[4].draw()\n        self.cvs_figure[4].flush_events()\n\n    def response_time_button_pushed(self):\n        self.get_exponential_fitting()\n        self.update_response_time()\n\n\n    def get_exponential_fitting(self):\n        self.exponential_fit_parameters = (float(self.k_input.get()), float(self.c0_input.get()), float(self.base_input.get()))\n        try:\n            params, cv = curve_fit(self.mono_exp, np.array(self.samples_array), self.charge_array, self.exponential_fit_parameters)\n            self.exponential_fit_parameters = params\n            self.exponential_fit_charge_array = self.mono_exp(np.array(self.samples_array), params[0], params[1], params[2])\n            self.rmse_fitting = np.sqrt(np.mean((self.charge_array-self.exponential_fit_charge_array)**2))\n            self.response_time = 3/params[0]\n        except Exception:\n            # The fit failed; fall back to empty results instead of re-running the\n            # identical curve_fit call, which is what raised in the first place.\n            self.exponential_fit_parameters = (None, None, None)\n            self.rmse_fitting = None\n            self.response_time = None\n\n    def update_response_time(self):\n        if self.response_time is None:\n            self.response_time_label_variable.set('fit failed')\n            return\n        self.response_time_label_variable.set(\"{:.2f}\".format(self.response_time)+' min')\n\n    #Exponential model used for the fit.\n    def mono_exp(self, t, k, c0, base):\n        return c0 * np.exp(-t * k) + base\n\n\n    def reset_application(self):\n        self.master.destroy()\n        start_application()\n\n\nstart_application()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"589572128","text":"import pyb, json, os, micropython\n\nmicropython.alloc_emergency_exception_buf(100)\n\nm = \"bootstrap.py\"\nif \"main.py\" in os.listdir():\n    m = \"main.py\"\nelif \"apps\" in os.listdir():\n    apps = os.listdir(\"apps\")\n    if (\"home\" in apps) and (\"main.py\" in os.listdir(\"apps/home\")):\n        m = \"apps/home/main.py\"\n    elif (\"app_library\" in apps) and (\"main.py\" in os.listdir(\"apps/app_library\")):\n        m = \"apps/app_library/main.py\"\npyb.main(m)\n","sub_path":"stmhal/init-files/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"628758463","text":"def find_path(graph):\n\n\tsome_big_value = 999999\n\tshortest = [some_big_value]*len(graph)\n\tshortest[0] = 0\n\tpred = [-1]*len(graph)\n\n\t# find the index of the unvisited node with the smallest tentative distance\n\tdef comp(a):\n\t\treturn shortest[a]\n\tnodes = [x for x in range(len(graph))]\n\t\n\twhile nodes:\n\t\tnode = nodes.pop( nodes.index(min(nodes,key=comp)))\n\t\tfor neighbour in graph[node]:\n\t\t\trelax(node, neighbour[0], neighbour[1], shortest, pred)\n\n\treturn (shortest, pred)\n\n\ndef relax(u, v, weight, shortest, pred):\n\t#print(\"-->\",u,v,weight)\n\tif (shortest[u] + weight)< shortest[v]:\n\t\tshortest[v] = shortest[u] + weight\n\t\tpred[v] = u\n\n\ndef form_represent_result(pred, topoint):\n\n\tresult = []\n\tcurrent = topoint\n\t\n\twhile current != -1:\n\t\tresult+= [current]\n\t\tcurrent = pred[current]\n\n\treturn result[::-1]\n\ndef print_path(path, shortest):\n\tnodes = [ str(x)+\"[path:\"+str(shortest[x])+\"]\" for x in path]\n\treturn '->'.join(nodes)\n\ndef test_shortest_path():\n\t# Adjacency list: each entry is a (neighbour node, edge weight) tuple\n\tgr = [\n\t\t\t[(1,6),(2,4)],\n\t\t\t[(3,3),(2,2)],\n\t\t\t[(1,1),(3,9),(4,3)],\n\t\t\t[(4,4)],\n\t\t\t[(3,5),(0,7)]\n\t\t]\n\n\t# Run the search\n\t(shor, pr) = find_path(gr)\n\tprint(shor)\n\tprint(pr)\n\n\tres = form_represent_result(pr,3)\n\tprint(\"Path to node 3: \", print_path(res,shor))\n\ntest_shortest_path()","sub_path":"algorithms/Python/graph/dijekstra.py","file_name":"dijekstra.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"421543942","text":"# -*- coding: utf-8 -*-\n\nfrom openerp.osv import fields, osv\n\nclass ResCompany(osv.Model):\n    _inherit = \"res.company\"\n\n    def _get_cashenvoy_account(self, cr, uid, ids, name, arg, context=None):\n        Acquirer = self.pool['payment.acquirer']\n        company_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.id\n        cashenvoy_ids = Acquirer.search(cr, uid, [\n            ('website_published', '=', True),\n            ('name', 'ilike', 'cashenvoy'),\n            ('company_id', '=', company_id),\n        ], limit=1, context=context)\n        if cashenvoy_ids:\n            cashenvoy = Acquirer.browse(cr, uid, cashenvoy_ids[0], context=context)\n            return dict.fromkeys(ids, cashenvoy.cashenvoy_email_account)\n        return dict.fromkeys(ids, False)\n\n    def _set_cashenvoy_account(self, cr, uid, id, name, value, arg, context=None):\n        Acquirer = self.pool['payment.acquirer']\n        company_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.id\n        cashenvoy_account = self.browse(cr, uid, id, context=context).cashenvoy_account\n        cashenvoy_ids = Acquirer.search(cr, uid, [\n            ('website_published', '=', True),\n            ('cashenvoy_email_account', '=', cashenvoy_account),\n            ('company_id', '=', company_id),\n        ], context=context)\n        if cashenvoy_ids:\n            Acquirer.write(cr, uid, cashenvoy_ids, {'cashenvoy_email_account': value}, context=context)\n        return True\n\n    _columns = {\n        'cashenvoy_account': fields.function(\n            _get_cashenvoy_account,\n            fnct_inv=_set_cashenvoy_account,\n            nodrop=True,\n            type='char', string='Cashenvoy Account',\n            help=\"Cashenvoy username (usually email) for receiving online payments.\"\n        ),\n    }","sub_path":"payment_cashenvoy/models/res_company.py","file_name":"res_company.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"465086961","text":"import comicMaker,sys,json\n# from multiprocessing import Process\n\nsys.setrecursionlimit(25000)\n\ndef main():\n\ttry:\n\t\twith open('config.json', 'r', encoding=\"utf-8\") as f:\n\t\t\tbooks = json.load(f)\n\t\tmangaLikeLibrary=[*books['mangaLike']]\n\t\treadComicOnlineToLibrary=[*books['readComicOnlineTo']]\n\t\treadComicsOnlineRuLibrary=[*books['readComicsOnlineRu']]\n\t\tif not mangaLikeLibrary and not readComicOnlineToLibrary and not readComicsOnlineRuLibrary:\n\t\t\tprint(\"No books found!\")\n\t\t\treturn\n\t\tprint(\"List of books >\")\n\t\tif 
mangaLikeLibrary:\n\t\t\tfor i in mangaLikeLibrary:\n\t\t\t\tprint (\" > '\"+i+\"' download will start from Chapter-\"+books['mangaLike'][i])\n\t\tif readComicOnlineToLibrary:\n\t\t\tfor i in readComicOnlineToLibrary:\n\t\t\t\tprint (\" > '\"+i+\"' download will start from Chapter-\"+books['readComicOnlineTo'][i])\n\t\tif readComicsOnlineRuLibrary:\n\t\t\tfor i in readComicsOnlineRuLibrary:\n\t\t\t\tprint (\" > '\"+i+\"' download will start from Chapter-\"+books['readComicsOnlineRu'][i])\n\texcept:\n\t\t# raise\n\t\tprint(\"No 'config.json' file found!\")\n\t\treturn\n\t\n\t# if not comicMaker.confirm():\n\t\t# return\n\n\t# Process(target = comicMaker.mangaLike).start()\n\t# Process(target = comicMaker.readComicOnlineTo).start()\n\tcomicMaker.mangaLike()\n\tcomicMaker.readComicOnlineTo()\n\tcomicMaker.readComicsOnlineRu()\n\tprint(\" <<< All Downloads completed!\")\n\t# return\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"comicMaker.py","file_name":"comicMaker.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"581761185","text":"#RPS Component 7- Scoring System\n\n#Rounds won will be calculated (total - draw - lost)\n\nrounds_played = 0\nrounds_lost = 0\nrounds_drawn = 0\n\n#Results for testing purposes\ntest_results = [\"won\", \"won\", \"loss\", \"loss\",\"tie\"]\n\n#Play Game\nfor item in test_results:\n    rounds_played += 1\n\n    #Generate computer choice\n\n    result = item\n\n    if result == \"tie\":\n        result = \"It is a tie\"\n        rounds_drawn += 1\n\n    elif result == \"loss\":\n        rounds_lost += 1\n\n#Statistics\n#Rounds won will be calculated (total - draw - lost)\nrounds_won = rounds_played - rounds_lost - rounds_drawn\n\n#End of Game Summary\nprint()\nprint(\"*****End Game Summary*****\")\nprint(\"Rounds Won: {} \\t|\\t Rounds Lost: {} \\t|\\t Rounds Drawn: {}\".format(rounds_won, rounds_lost, rounds_drawn))\nprint()\nprint(\"Thank you for playing\")\n","sub_path":"07_RPS_Round_Score_v1.py","file_name":"07_RPS_Round_Score_v1.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"394915496","text":"# LB DEZ 20\nimport os\nimport pickle\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nimport numpy as np\n\nimport pandas as pd\nimport typing\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.utils import shuffle\n\nfrom idiva import log\nfrom idiva.io import ReadVCF\nfrom idiva.utils import seek_then_rewind\n\nfrom tqdm import tqdm\n\n\nclass FeatureExtractor:\n    \"\"\"\n    Given two vcf files, this object trains a Perceptron classifier\n    able to select the most important SNPs (a GWAS with a linear model)\n    \"\"\"\n\n    def __init__(self, *, case_vcf, ctrl_vcf):\n        \"\"\"\n        The arguments should be of type ReadVCF,\n        and name-specified. 
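Training is incremental: patients are read in column batches and fed to the\n        Perceptron via partial_fit, so neither cohort needs to fit in memory at once.\n        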
(RA, 2020-12-08)\n \"\"\"\n self.clf, self.id = self.feature_extraction_chunks(ctrl_vcf=ctrl_vcf, case_vcf=case_vcf)\n self.save_classifier()\n\n def save_classifier(self):\n cache = (Path(__file__).parent.parent.parent.parent / \"input/download_cache\").resolve()\n assert cache.is_dir()\n\n filename = str(cache) + \"/classifier.sav\"\n\n pickle.dump(self.clf, open(filename, 'wb'))\n\n def get_extracted_variants(self) -> pd.DataFrame:\n \"\"\"\n Returns the id's of the selected SNP's\n \"\"\"\n selector = SelectFromModel(self.clf, prefit=True)\n\n return self.id[selector.get_support()]\n\n def get_reduced_dataframe(self, *, case_vcf, ctrl_vcf) -> pd.DataFrame:\n \"\"\"\n Returns reduced dataframe\n \"\"\"\n cache = (Path(__file__).parent.parent.parent.parent / \"input/download_cache\").resolve()\n assert cache.is_dir()\n\n from idiva.fextr import align\n\n with ctrl_vcf.rewind_when_done:\n ctrl_reader = ctrl_vcf\n with case_vcf.rewind_when_done:\n case_reader = case_vcf\n dataframe = align(ctrl=ctrl_reader, case=case_reader)\n id = dataframe.index \n\n dataframe['ID'] = dataframe.ID_case.combine_first(dataframe.ID_ctrl)\n\n dataframe = dataframe[['CHROM', 'POS', 'ID', 'REF', 'ALT']]\n\n extracted = self.get_extracted_variants().values\n\n return dataframe.loc[extracted]\n\n @staticmethod\n def get_reduced_dataframe_from_saved_classifier( *, case_vcf, ctrl_vcf) -> pd.DataFrame:\n \"\"\"\n Returns reduced dataframe given that a classifier is stored as classifier.sav\n \"\"\"\n clf = FeatureExtractor.get_saved_classifier()\n\n selector = SelectFromModel(clf, prefit=True)\n\n cache = (Path(__file__).parent.parent.parent.parent / \"input/download_cache\").resolve()\n assert cache.is_dir()\n\n from idiva.fextr import align\n\n with ctrl_vcf.rewind_when_done:\n ctrl_reader = ctrl_vcf\n with case_vcf.rewind_when_done:\n case_reader = case_vcf\n dataframe = align(ctrl=ctrl_reader, case=case_reader)\n id = dataframe.index\n\n dataframe['ID'] = dataframe.ID_case.combine_first(dataframe.ID_ctrl)\n\n dataframe = dataframe[['CHROM', 'POS', 'ID', 'REF', 'ALT']]\n\n extracted = id[selector.get_support()].values\n\n return dataframe.loc[extracted]\n\n def feature_extraction_chunks(self, *, case_vcf, ctrl_vcf):\n \"\"\"\n Returns a fitted Perceptron classifier for the given vcf files\n The classifier is trained in chunks where the chunks consist of a range of patient\n Therefore the classifier iterates columnwise over the vcf files\n The files are divided into equally many chunks and therefore the individual chunksize can differ\n \"\"\"\n log.info(\"Fit linear classifier and reduce number of variants (~0.5h)\")\n\n clf = Perceptron()\n\n cache = (Path(__file__).parent.parent.parent.parent / \"input/download_cache\").resolve()\n assert cache.is_dir()\n\n # create unique index\n id = None\n\n with ctrl_vcf.rewind_when_done:\n ctrl_reader = ctrl_vcf\n with case_vcf.rewind_when_done:\n case_reader = case_vcf\n dataframe = align(ctrl=ctrl_reader, case=case_reader)\n id = dataframe.index\n\n with ctrl_vcf.rewind_when_done:\n ctrl_reader = ctrl_vcf\n with case_vcf.rewind_when_done:\n case_reader = case_vcf\n\n header_ctrl = ctrl_reader.header\n header_case = case_reader.header\n\n exclude = [2, 3, 5, 6, 7, 8]\n\n names_ctrl = [i for idx, i in enumerate(header_ctrl) if idx not in exclude]\n names_case = [i for idx, i in enumerate(header_case) if idx not in exclude]\n\n len_ctrl = len(header_ctrl) - 9\n len_case = len(header_case) - 9\n\n min_batch_size = min([len_ctrl, len_case, 50])\n\n number_of_batches = 
int(max([np.ceil(len_ctrl / min_batch_size), np.ceil(len_case / min_batch_size)]))\n\n batch_size_ctrl = int(np.ceil(len_ctrl / number_of_batches))\n batch_size_case = int(np.ceil(len_case / number_of_batches))\n\n batches_ctrl = [i * batch_size_ctrl for i in range(number_of_batches)]\n batches_case = [i * batch_size_case for i in range(number_of_batches)]\n\n batches_ctrl.append(len_ctrl)\n batches_case.append(len_case)\n\n for idx in tqdm(range(number_of_batches), total=number_of_batches, postfix='feature selection'):\n clf = self.feature_extraction_batch(ctrl_reader, case_reader, names_ctrl, names_case,\n batches_ctrl, batches_case, idx, clf, id)\n\n return clf, id\n\n def feature_extraction_batch(self, ctrl_reader: ReadVCF, case_reader: ReadVCF, names_ctrl: List[str],\n names_case: List[str], batches_ctrl: List[int], batches_case: List[int], idx: int,\n clf, id: List[int]):\n \"\"\"\n Returns a trained classifier on one batch\n loads from both files some patients (one batch) for training\n \"\"\"\n with seek_then_rewind(ctrl_reader.fd, seek=ctrl_reader.dataline_start_pos) as fd_ctrl:\n with seek_then_rewind(case_reader.fd, seek=case_reader.dataline_start_pos) as fd_case:\n\n batch_names_ctrl = names_ctrl[:3]\n batch_names_case = names_case[:3]\n\n batch_names_ctrl.extend(names_ctrl[batches_ctrl[idx] + 3:batches_ctrl[idx + 1] + 3])\n batch_names_case.extend(names_case[batches_case[idx] + 3:batches_case[idx + 1] + 3])\n\n batch_columns_ctrl = [0, 1, 4]\n batch_columns_case = [0, 1, 4]\n\n batch_columns_ctrl.extend(list(range(batches_ctrl[idx] + 9, batches_ctrl[idx + 1] + 9)))\n batch_columns_case.extend(list(range(batches_case[idx] + 9, batches_case[idx + 1] + 9)))\n\n converter_dict_ctrl = {}\n\n for column in batch_names_ctrl:\n if column not in ['CHROM', 'POS', 'ALT']:\n converter_dict_ctrl[column] = self.convert_strang\n\n converter_dict_case = {}\n\n for column in batch_names_case:\n if column not in ['CHROM', 'POS', 'ALT']:\n converter_dict_case[column] = self.convert_strang\n\n dataframe_ctrl = pd.read_csv(fd_ctrl, sep='\\t', header=None,\n usecols=batch_columns_ctrl,\n names=batch_names_ctrl, converters=converter_dict_ctrl)\n\n dataframe_ctrl = dataframe_ctrl.drop_duplicates(['CHROM', 'POS', 'ALT'], keep='first')\n\n dataframe_ctrl['ID'] = dataframe_ctrl[['CHROM', 'POS', 'ALT']].apply(index_map, axis=1)\n\n dataframe_ctrl = dataframe_ctrl.drop(['CHROM', 'POS', 'ALT'], axis=1)\n\n dataframe_ctrl = dataframe_ctrl.set_index('ID')\n\n dataframe_ctrl = dataframe_ctrl.transpose()\n\n dataframe_ctrl = dataframe_ctrl.reindex(columns=id, fill_value=4)\n\n dataframe_case = pd.read_csv(fd_case, sep='\\t', header=None,\n usecols=batch_columns_case,\n names=batch_names_case, converters=converter_dict_case)\n\n dataframe_case = dataframe_case.drop_duplicates(['CHROM', 'POS', 'ALT'], keep='first')\n\n dataframe_case['ID'] = dataframe_case[['CHROM', 'POS', 'ALT']].apply(index_map, axis=1)\n\n dataframe_case = dataframe_case.drop(['CHROM', 'POS', 'ALT'], axis=1)\n\n dataframe_case = dataframe_case.set_index('ID')\n\n dataframe_case = dataframe_case.transpose()\n\n dataframe_case = dataframe_case.reindex(columns=id, fill_value=4)\n\n labels = np.zeros(dataframe_ctrl.shape[0])\n labels = np.append(labels, np.ones(dataframe_case.shape[0]))\n\n dataframe = dataframe_ctrl.append(dataframe_case)\n\n dataframe, labels = shuffle(dataframe, labels, random_state=0)\n\n clf.partial_fit(dataframe, labels, classes=[0, 1])\n\n return clf\n\n def convert_strang(self, strang: str) -> int:\n \"\"\"\n SNP as 0, 1, 
2 for homozygous, heterozygous, and variant homozygous\n \"\"\"\n if strang == \"0|0\":\n return 0\n elif strang == \"0|1\":\n return 1\n elif strang == \"1|0\":\n return 1\n elif strang == \"1|1\":\n return 2\n\n return np.nan\n\n @staticmethod\n def get_saved_classifier():\n \"\"\"\n Returns the saved classifier if it exists\n otherwise a dummy classifier is returned\n \"\"\"\n cache = (Path(__file__).parent.parent.parent.parent / \"input/download_cache\").resolve()\n assert cache.is_dir()\n\n filename = str(cache) + \"/classifier.sav\"\n\n if os.path.exists(filename):\n loaded_model = pickle.load(open(filename, 'rb'))\n else:\n warnings.warn(\"no model saved\")\n loaded_model = DummyClassifier()\n return loaded_model\n\n\ndef align(case: ReadVCF, ctrl: ReadVCF):\n \"\"\"\n aligning case and control vcf file by joining on chrom, pos, ref and alt\n \"\"\"\n from idiva.utils import seek_then_rewind\n\n dfs = {}\n for (k, vcf) in zip(['case', 'ctrl'], [case, ctrl]):\n with seek_then_rewind(vcf.fd, seek=vcf.dataline_start_pos) as fd:\n dfs[k] = pd.read_csv(fd, sep='\\t', usecols=[0, 1, 2, 3, 4], header=None,\n names=[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\"])\n dfs[k].index = dfs[k].index.rename(name=\"rowid\")\n dfs[k] = dfs[k].reset_index().astype({'rowid': 'Int64'})\n\n dfs['case'] = dfs['case'].drop_duplicates(['CHROM', 'POS', 'REF', 'ALT'], keep='first')\n dfs['ctrl'] = dfs['ctrl'].drop_duplicates(['CHROM', 'POS', 'REF', 'ALT'], keep='first')\n\n df = join(case=dfs['case'], ctrl=dfs['ctrl'])\n\n df['CHROM'] = pd.to_numeric(df[['CHROM']].apply(translate_chrom, axis=1))\n\n df['CPA_ID'] = df[['CHROM', 'POS', 'ALT']].apply(index_map, axis=1)\n\n df = df.set_index('CPA_ID')\n\n # remove indels\n df = df[df['REF'].apply(lambda x: str(x) in ['A', 'C', 'G', 'T'])]\n df = df[df['ALT'].apply(lambda x: str(x) in ['A', 'C', 'G', 'T'])]\n\n return df\n\n\ndef join(case: pd.DataFrame, ctrl: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Outer-join two dataframes on the columns CHROM, POS, ALT.\n Use the suffixes _case and _ctrl for the other ambiguous columns.\n\n RA, 2020-11-14\n LB, 2020-12-04 adapted\n \"\"\"\n\n df = pd.merge_ordered(\n left=case, right=ctrl,\n suffixes=['_case', '_ctrl'],\n on=['CHROM', 'POS', 'REF', 'ALT'],\n how=\"outer\",\n )\n\n return df\n\n\ndef index_map(chromposalt) -> int:\n \"\"\"\n Returns unique identifier by mapping chrom, pos & alt\n \"\"\"\n chrom = chromposalt[0]\n pos = chromposalt[1]\n alt = chromposalt[2]\n\n if alt == \"A\":\n alt = 0\n elif alt == \"C\":\n alt = 1\n elif alt == \"G\":\n alt = 2\n elif alt == \"T\":\n alt = 3\n\n return chrom * 10000000000 + pos * 10 + alt\n\n\ndef translate_chrom(chrom: typing.Union[str, int]) -> int:\n \"\"\"\n translate non integer chromosomes (X,Y & MT) to integers (23, 24 & 25)\n \"\"\"\n\n if type(chrom) == pd.core.series.Series:\n chrom = chrom[0]\n\n if chrom == 'X':\n return 23\n elif chrom == 'Y':\n return 24\n elif chrom == 'MT':\n return 25\n else:\n return int(chrom)\n","sub_path":"project2/solution/idiva/fextr/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":12696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"399459316","text":"import pytest\nfrom utils import matchingStrings\n\n@pytest.mark.set1\ndef test_matchingStrings():\n\texpected_result = {\"ab\": 2, \"abc\": 1, \"bc\": 0}\n\tinput_sentence = \"ab,ab,abc\"\n\tassert matchingStrings(input_sentence,\"ab,abc,bc\") == expected_result,\"test failed\"\n\n\n# 
next time i'll use mock way for testing my code, there is a lot more to test.\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"523441260","text":"import scrapy\n\n\nclass USYDSpider(scrapy.Spider):\n name = \"usyd\"\n start_urls = [\n \"http://sydney.edu.au/courses/a-z/A\",\n \"http://sydney.edu.au/courses/a-z/B\",\n \"http://sydney.edu.au/courses/a-z/C\",\n \"http://sydney.edu.au/courses/a-z/D\",\n \"http://sydney.edu.au/courses/a-z/E\",\n \"http://sydney.edu.au/courses/a-z/F\",\n \"http://sydney.edu.au/courses/a-z/G\",\n \"http://sydney.edu.au/courses/a-z/H\",\n \"http://sydney.edu.au/courses/a-z/I\",\n \"http://sydney.edu.au/courses/a-z/J\",\n \"http://sydney.edu.au/courses/a-z/L\",\n \"http://sydney.edu.au/courses/a-z/M\",\n \"http://sydney.edu.au/courses/a-z/N\",\n \"http://sydney.edu.au/courses/a-z/O\",\n \"http://sydney.edu.au/courses/a-z/P\",\n \"http://sydney.edu.au/courses/a-z/R\",\n \"http://sydney.edu.au/courses/a-z/S\",\n \"http://sydney.edu.au/courses/a-z/T\",\n \"http://sydney.edu.au/courses/a-z/U\",\n \"http://sydney.edu.au/courses/a-z/V\",\n \"http://sydney.edu.au/courses/a-z/W\",\n ]\n\n def parse(self, response):\n # Follow links to course pages\n for href in response.css(\".result-set a::attr(href)\").extract():\n yield response.follow(href, self.parse_course)\n\n def parse_course(self, response):\n assumed_knowledge_selector = response.css(\"p:nth-child(5)::text\").extract()\n commencing_semester_selector = response.css(\"p:nth-child(14) span::text\").extract()\n international_selector = response.css(\"p:nth-child(10) span::text\").extract()\n\n assumed_knowledge_list_length = len(assumed_knowledge_selector)\n commencing_semester_list_length = len(commencing_semester_selector)\n international_list_length = len(international_selector)\n\n yield {\n 'name': response.css(\"#w4 h1::text\").extract_first(),\n 'abbreviation': response.css(\"p:nth-child(5) span::text\").extract_first(),\n 'description': response.css(\"br+ p::text , #w4 b::text\").extract(),\n 'duration': response.css(\"p:nth-child(2) span::text\").extract_first(),\n 'mode': response.css(\"p:nth-child(15) span::text\").extract_first(),\n 'course_code': response.css(\"p:nth-child(1) span::text\").extract_first(),\n 'uac_code': response.css(\"p:nth-child(6) span::text\").extract_first(),\n 'cricos_code': response.css(\".no-print+ p span::text\").extract_first(),\n 'atar': response.css(\"p:nth-child(7) span::text\").extract_first(),\n 'faculty': response.css(\"p:nth-child(18) span > a::text\").extract_first(),\n 'international': response.css(\"p:nth-child(10) span::text\")[international_list_length - 1].extract(),\n 'commencing_semester': response.css(\"p:nth-child(14) span::text\")[\n commencing_semester_list_length - 1].extract(),\n 'credit_points_required': response.css(\"p:nth-child(13) span::text\").extract_first(),\n 'apply_now_url': response.css(\".center:nth-child(1) .button:nth-child(1)::attr(href)\").extract_first(),\n 'english_language_requirements': response.css(\"p:nth-child(11) span::text\").extract_first(),\n 'application_closing_dates': response.css(\"p:nth-child(12) span::text\").extract_first(),\n 'assumed_knowledge': response.css(\"p:nth-child(5)::text\")[assumed_knowledge_list_length - 1].extract(),\n 'url': response.url,\n 
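# Caveat: the nth-child selectors above are purely positional, so any reordering\n        # of the course page will silently shift these fields.\n        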
}\n","sub_path":"crawler/spiders/USYDSpider.py","file_name":"USYDSpider.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"67481422","text":"import os\nimport pickle\nfrom connect import session\n\ndef gen(name):\n data = session()\n with open(os.path.join(\"test\", name+\".pickle\"), \"wb\") as f:\n pickle.dump(data, f)\n\ndef load(name):\n data_path = os.path.join(\"test\", name + \".pickle\")\n if not os.path.exists(data_path):\n raise Exception(\"ファイルがありません\")\n with open(data_path, \"rb\") as f:\n data = pickle.load(f)\n return data\n\n\n","sub_path":"fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"431891230","text":"#\n# In linear algebra, a Toeplitz matrix is one in which the elements on any given diagonal from\n# top left to bottom right are identical.\n#\n# For example:\n#\n# 1 2 3 4 8\n# 5 1 2 3 4\n# 4 5 1 2 3\n# 7 4 5 1 2\n#\n# Write a program to determine whether a given input is a Toeplitz matrix.\n\ninput = [[1, 2, 3, 4, 8], [5, 1, 2, 3, 4], [4, 6, 1, 2, 3], [7, 4, 5, 1, 2]]\n\ndef is_input_toeplitz(matrix):\n expected_row_length = len(matrix[0])\n row = 0\n while(len(matrix) > 0):\n row+=1\n if (len(matrix[0]) != expected_row_length):\n actual_row_length = len(matrix[0])\n print(f'MATRIX MALFORMED - EXPECTED {expected_row_length} ITEMS PER ROW (FOUND {actual_row_length} ON ROW {row})')\n return\n i = 0\n while(i < len(matrix[0])):\n target = matrix[0][i]\n max_length = min(len(matrix), len(matrix[0]) - i)\n z = 0\n while(z < max_length):\n if (matrix[z][i + z] != target):\n print('Not a Toeplitz Matrix')\n print(f'Expected [{matrix[z][i + z]}] at ({i + z + 1}, {z + 1 + row}) to be [{target}]')\n return False\n z+=1\n i+=1\n matrix = matrix[1:]\n if (len(matrix) == 0):\n print('Yay, a Toeplitz Matrix')\n\nis_input_toeplitz(input)\n","sub_path":"python/toeplitz-matrix.py","file_name":"toeplitz-matrix.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"472600394","text":"import time\n\nfrom datetime import datetime as datef\n\nimport asyncio\n\nfrom main import bot\n\nimport groups\n\nimport chats\n\nfrom config import *\n\nimport schedule_func\n\n#from telethon.sync import TelegramClient, events\n\nasync def check(sleep):\n while True:\n await asyncio.sleep(sleep)\n\n #async with TelegramClient('name', api_id, api_hash) as client:\n # print('check!')\n # user = await client.get_entity('vladyslavbezruk')\n # print(user.status)\n\n if datef.today().strftime(\"%A\") not in ['Wednesday', 'Friday', 'Saturday', 'Sunday']:\n\n date = schedule_func.get_current_date()\n time = schedule_func.get_current_time()\n\n for code in schedule_func.schedules.keys():\n schedule = schedule_func.get_subj_list(code)\n name = groups.getName(code)\n\n max = 0\n\n for subject in schedule:\n if subject['date'] == date and schedule_func.get_int_time(subject['time_begin']) - time == time_before:\n answer = f\"⁉Заняття для групи {name} відбудеться через {time_before} хв:\\n📢{subject['name']}\\n🗓{subject['date']}\\n👤{subject['teacher']}\\n🕐{subject['time_begin']}-{subject['time_end']}\\n⏩{subject['url']}\"\n for chat_id in chats.chats.keys():\n if name in chats.chats[chat_id]:\n await bot.send_message(chat_id=chat_id, text=answer)\n\n if subject['date'] == date and 
schedule_func.get_int_time(subject['time_end']) > max:\n max = schedule_func.get_int_time(subject['time_end'])\n\n if max == time and max > 0:\n for chat_id in chats.chats.keys():\n if name in chats.chats[chat_id]:\n await bot.send_message(chat_id=chat_id, text=f\"⁉Заняття для групи {name} закінчились!\")\n elif max == time and max == 0:\n for chat_id in chats.chats.keys():\n if name in chats.chats[chat_id]:\n await bot.send_message(chat_id=chat_id, text=f\"⁉Сьогодні немає занять для групи {name}!\")\n","sub_path":"sources/py/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"152853605","text":"\"\"\"Allauth overrides\"\"\"\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom django.template.loader import render_to_string\n\nfrom readthedocs.core.utils import send_email\n\ntry:\n from django.utils.encoding import force_text\nexcept ImportError:\n from django.utils.encoding import force_unicode as force_text\n\n\nclass AccountAdapter(DefaultAccountAdapter):\n\n \"\"\"Customize Allauth emails to match our current patterns\"\"\"\n\n def format_email_subject(self, subject):\n return force_text(subject)\n\n def send_mail(self, template_prefix, email, context):\n subject = render_to_string(\n '{0}_subject.txt'.format(template_prefix), context\n )\n subject = \" \".join(subject.splitlines()).strip()\n subject = self.format_email_subject(subject)\n\n send_email(\n recipient=email,\n subject=subject,\n template='{0}_message.txt'.format(template_prefix),\n template_html='{0}_message.html'.format(template_prefix),\n context=context\n )\n","sub_path":"readthedocs/core/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"38261100","text":"#Import the algorithms from the other files\nfrom InsertionSort import InsertionSorter\nfrom SelectionSort import SelectionSorter\nfrom MergeSort import MergeSorter\n\nfrom random import sample\n\n#Make a new array of random integers for us to sort\nunsortedList = sample(range(1000), 1000)\n\n#Make some copies of the list\nlistCopy1 = list(unsortedList)\nlistCopy2 = list(unsortedList)\nlistCopy3 = list(unsortedList)\nlistCopy4 = list(unsortedList)\n\n#Sort the lists\n#Built-in TomSort\nlistCopy1.sort()\n\n#Insertion Sort\ninsertionSorter = InsertionSorter() \ninsertionSorter.sort(listCopy2)\n\n#Selection Sort\nselectionSorter = SelectionSorter()\nselectionSorter.sort(listCopy3)\n\n#Merge Sort\nmergeSorter = MergeSorter()\nmergeSorter.sort(listCopy4)\n\n#Check and see if the sorted lists are all the same\nif listCopy1 == listCopy2 and listCopy2 == listCopy3 and listCopy3 == listCopy4:\n\tprint(\"Success\")\n\nelse:\n\tprint(\"Failure\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"275363076","text":"# Copyright 2017 ChinaNetCenter\n# All Rights Reserved.\n\nimport json\nimport mock\nimport uuid\n\n\nfrom django.test import TestCase\n\nfrom django.test import Client\n\nfrom billing_proxy.api import keystone\nfrom billing_proxy.api.views import APITokenView\nfrom billing_proxy.client import bss_api\nfrom billing_proxy.worker.order_handle import InstanceOrderHandle\n\n\nclass APIViewTestClient(Client):\n def __init__(self, 
enforce_csrf_checks=False, **defaults):\n import uuid\n token_id = str(uuid.uuid4())\n defaults.update({\"HTTP-X-Auth-Token\": token_id})\n super(APIViewTestClient, self).__init__(enforce_csrf_checks,\n **defaults)\n\n\nclass BPAPITestCase(TestCase):\n \"\"\"Test case base class for all unit tests.\n\n when we use the testcase, we need to first setup the test case\n the setup do http initialization\n initialize a bpclient and do some mock\n\n \"\"\"\n client_class = APIViewTestClient\n\n def setUp(self):\n super(BPAPITestCase, self).setUp()\n user_obj = mock.MagicMock()\n user_obj.name = \"wangb\"\n self.contract_id = \"934923cf3e1d44aab88620ccfa7ad4f5\"\n self.project_id = \"2260373daf4749b8aa0e42ecb221d3d0\"\n self.user_id = \"1146ebd09a79484facc0831c343cc90f\"\n user_obj.parent_user_id = str(uuid.uuid4())\n keystone.user_get = mock.Mock(return_value=user_obj)\n APITokenView.validate_token = mock.Mock(return_value=True)\n\n def mock_somthing(self, fake_order_id):\n bss_api.do_order = mock.Mock(return_value={\"orderId\": fake_order_id})\n\n def create_instance(self, data, fake_order_id):\n self.mock_somthing(fake_order_id)\n mock_get_flavor = mock.Mock(return_value=(\"2\", \"8G\"))\n InstanceOrderHandle._get_flavor = mock_get_flavor\n body = json.dumps(data)\n resp = self.client.post('/v1.0/orders.json', data=body,\n content_type=\"json\")\n resp_body = json.loads(resp.content)\n return resp_body\n\n def test_create_exclusive_host_order(self):\n fake_order_id = str(uuid.uuid4())\n\n data = {\n \"body\": {\n\n \"flavorRef\": 1,\n\n \"os_type\": \"linux\",\n\n },\n\n \"user_id\": self.user_id,\n \"resource_type\": \"instance\",\n \"tenant_id\": self.project_id,\n \"bss_args\": {\"contract_id\": self.contract_id,\n \"period\": \"year\",\n \"exclusive_host\": True}\n }\n self.create_instance(data, fake_order_id)\n\n def test_create_instance_order(self):\n fake_order_id = str(uuid.uuid4())\n\n data = {\n \"body\": {\n\n \"flavorRef\": 1,\n\n \"os_type\": \"linux\",\n\n },\n\n \"user_id\": self.user_id,\n\n \"resource_type\": \"instance\",\n\n \"tenant_id\": self.project_id,\n \"bss_args\": {\"contract_id\": self.contract_id,\n \"period\": \"year\",\n \"exclusive_host\": False}\n }\n resp_body = self.create_instance(data, fake_order_id)\n expected_resp_body = {\"orderId\": fake_order_id}\n self.assertEqual(resp_body, expected_resp_body)\n\n def test_create_mysql_instance_order(self):\n fake_order_id = str(uuid.uuid4())\n self.mock_somthing(fake_order_id)\n expected_resp_body = {\"orderId\": fake_order_id}\n data = {\n \"body\": {\n\n \"specification\": 2,\n\n \"MysqlCapability\": 600,\n\n },\n\n \"user_id\": self.user_id,\n\n \"resource_type\": \"MysqlDBInstance\",\n\n \"tenant_id\": self.project_id,\n \"bss_args\": {\"contract_id\": self.contract_id,\n \"period\": \"year\"}\n\n }\n\n body = json.dumps(data)\n resp = self.client.post('/v1.0/orders.json', data=body,\n content_type=\"json\")\n resp_body = json.loads(resp.content)\n self.assertEqual(resp_body, expected_resp_body)\n\n def test_create_systemsnapshot_order(self):\n fake_order_id = str(uuid.uuid4())\n self.mock_somthing(fake_order_id)\n expected_resp_body = {\"orderId\": fake_order_id}\n data = {\n \"body\": {},\n\n \"user_id\": self.user_id,\n\n \"resource_type\": \"systemSnapshot\",\n\n \"tenant_id\": self.project_id,\n \"bss_args\": {\"contract_id\": self.contract_id},\n }\n body = json.dumps(data)\n resp = self.client.post('/v1.0/orders.json', data=body,\n content_type=\"json\")\n resp_body = json.loads(resp.content)\n 
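# bss_api.do_order is replaced by a Mock in mock_somthing(), so the endpoint is\n        # expected to simply echo the fake order id back in its response body.\n        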
self.assertEqual(resp_body, expected_resp_body)\n","sub_path":"billing_proxy/tests/units/test_create_order.py","file_name":"test_create_order.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"600539670","text":"import buttons\nimport color\nimport display\nimport leds\nimport urandom\nimport utime\nimport os\n\n\nCOLORS = [(0, 153, 51), (0, 153, 0), (51, 204, 51), (0, 102, 0), (102, 255, 102)]\nLINE_AMOUNT = 50\nDELAY_SEC = 0.03\n\n\nclass Drawer:\n \"Drawer is a interface-like class with the draw method for the display.\"\n def draw(self, disp):\n pass\n\n\nclass Line:\n \"A simple matrix-like Line, is used by the LineDrawer.\"\n def __init__(self):\n self._x = urandom.randint(0, 160)\n self._y = urandom.randint(0, 20)\n self._speed = urandom.randint(1, 15)\n self._elems = []\n\n def update_check(self):\n self._y += self._speed\n self._elems.append(urandom.choice(COLORS))\n\n return self._y < 80\n\n def __iter__(self):\n self._iter_id = 0\n return self\n\n def __next__(self):\n if self._iter_id >= len(self._elems):\n raise StopIteration\n elif self._y + self._iter_id >= 80:\n raise StopIteration\n else:\n col = self._elems[self._iter_id]\n self._iter_id += 1\n return ((self._x, self._y + self._iter_id), col)\n\n\nclass LineDrawer(Drawer):\n \"LineDrawer draws multiple Lines for the matrix-like background.\"\n def __init__(self):\n self._lines = []\n for l in range(LINE_AMOUNT):\n self._lines.append(Line())\n\n def draw(self, disp):\n for i in range(LINE_AMOUNT):\n if not self._lines[i].update_check():\n self._lines[i] = Line()\n\n for p in self._lines[i]:\n disp.pixel(p[0][0], p[0][1],\n col=color.Color(p[1][0], p[1][1], p[1][2]))\n\n\nclass TextDrawer(Drawer):\n \"TextDrawer is an abstract class which draws the text from the _text method.\"\n def _text(self):\n pass\n\n def draw(self, disp):\n msg = self._text()\n disp.print(msg, fg=urandom.choice(COLORS),\n posx=80 - round(len(msg) / 2 * 14), posy=30)\n\n\nclass NickDrawer(TextDrawer):\n \"NickDrawer draws the nickname from the nickname.txt file.\"\n def __init__(self):\n nick = \"nickname.txt\"\n if nick in os.listdir(\".\"):\n f = open(nick, 'r')\n self._nick = f.read()\n f.close()\n else:\n self._nick = \"No {}\".format(nick)\n\n def _text(self):\n return self._nick\n\n\nclass TimeDrawer(TextDrawer):\n \"TimeDrawer draws the current time, like a digital clock.\"\n def _text(self):\n t = utime.localtime()\n h, m, s = t[3], t[4], t[5]\n return \"{:02}:{:02}:{:02}\".format(h, m, s)\n\n\nclass NoneDrawer(Drawer):\n \"NoneDrawer draws nothing to just show the background.\"\n def draw(self, disp):\n pass\n\n\ndef matrix_leds():\n for l in range(15):\n leds.set(l, urandom.choice(COLORS))\n\n\nbg = LineDrawer()\nfgs = [NoneDrawer(), NickDrawer(), TimeDrawer()]\nfg_no = 0\nleds_on = False\n\nwhile True:\n pressed = buttons.read(buttons.BOTTOM_LEFT | buttons.BOTTOM_RIGHT | buttons.TOP_RIGHT)\n if pressed & buttons.BOTTOM_LEFT != 0:\n fg_no = fg_no-1 if fg_no > 0 else len(fgs)-1\n elif pressed & buttons.BOTTOM_RIGHT != 0:\n fg_no = fg_no+1 if fg_no < len(fgs)-1 else 0\n elif pressed & buttons.TOP_RIGHT != 0:\n leds_on = not leds_on\n if not leds_on:\n leds.clear()\n\n with display.open() as disp:\n disp.clear()\n bg.draw(disp)\n fgs[fg_no].draw(disp)\n disp.update()\n\n if leds_on:\n matrix_leds()\n\n 
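# DELAY_SEC = 0.03 paces the loop at roughly 30 redraws per second, not counting\n    # the time spent drawing the frame itself.\n    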
utime.sleep(DELAY_SEC)\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"351185231","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# import torch\nimport os\nimport pickle\nimport argparse\n\n# from model.model import ResNetModel\n# from model.data import get_data_loader\n# from utils.plot_util import plot_keypoints\n# from run_forward import normalize_keypoints\n\n\nPATH = \"results/fuckme.pth\"\nOPATH = \"results/fuckyou.pth\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-m\", action = \"store_true\", dest=\"pull_model\", default = False)\nparser.add_argument(\"-s\", action = \"store_true\", dest=\"pull_samples\", default = False)\nargs = parser.parse_args()\n\ntraining_errors = []\nvalidation_errors = []\nmean_pixel_errors_val = []\nmean_pixel_errors_train = []\n\nif args.pull_samples:\n    os.system(\"DEL /F/Q/S results\\*.*\")\n    os.system(\"scp bahadorm@login1.informatik.uni-freiburg.de:~/Dokumente/dl-lab-ss19/exercise1_CV/code/results/*.png results/\")\nif args.pull_model:\n    os.system(\"scp bahadorm@login1.informatik.uni-freiburg.de:~/Dokumente/dl-lab-ss19/exercise1_CV/code/model/*.pth results/\")\nos.system(\"scp bahadorm@login1.informatik.uni-freiburg.de:~/Dokumente/dl-lab-ss19/exercise1_CV/code/results/*.errors results/\")\n\ntry:\n    with open('results/training.errors', 'rb') as filehandle:\n        training_errors = pickle.load(filehandle)\n    with open('results/validation.errors', 'rb') as filehandle:\n        validation_errors = pickle.load(filehandle)\n    with open('results/training_pixel.errors', 'rb') as filehandle:\n        mean_pixel_errors_train = pickle.load(filehandle)\n    with open('results/validation_pixel.errors', 'rb') as filehandle:\n        mean_pixel_errors_val = pickle.load(filehandle)\nexcept FileNotFoundError:\n    print(\"error file(s) not found\")\n\n\nepoch_eval = len(training_errors)/len(validation_errors)\ntraining_errors = np.array(training_errors)\nprint(\"training error : {}\\t last :{}\".format(training_errors.shape,training_errors[-1]))\nvalidation_errors = np.array(validation_errors).repeat(epoch_eval)\nprint(\"validation error : {}\\t last: {}\".format(validation_errors.shape,validation_errors[-1]))\nmean_pixel_errors_val = np.array(mean_pixel_errors_val).repeat(epoch_eval)\nprint(\"MPJPE : {}\\t last: {}\".format(mean_pixel_errors_val.shape,mean_pixel_errors_val[-1]))\nmean_pixel_errors_train = np.array(mean_pixel_errors_train)\n\nplt.figure()\nplt.plot(training_errors, label='training')\nplt.plot(validation_errors, label='validation')\nplt.ylabel('loss')\nplt.xlabel('epochs')\nplt.title('HPE')\nplt.legend()\nplt.show()\n\nplt.figure()\nplt.plot(mean_pixel_errors_train, label='train.')\nplt.plot(mean_pixel_errors_val, label='valid.')\nplt.ylabel('mean pixel loss')\nplt.xlabel('epochs')\nplt.title('MPJPE')\nplt.legend()\nplt.show()","sub_path":"exercise1_CV/code/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"264793103","text":"#Student class\nclass Student:\n    def __init__(self, name, surname, gender):\n        self.name = name\n        self.surname = surname\n        self.gender = gender\n        self.finished_courses = []\n        self.courses_in_progress = []\n        self.grades = {}\n\n    #method for grading lecturers\n    def rate_for_lecturer(self, lecturer, course, grade):\n        if grade <= 0 or grade > 10:\n            print(\"Please give a grade on a 10-point scale (from 1 to 10, where 1 is the lowest and 10 the highest)\")\n        if isinstance(lecturer, Lecturer) and course in lecturer.courses_attached and course in self.courses_in_progress:\n            if course in lecturer.grades:\n                lecturer.grades[course] += [grade]\n            else:\n                lecturer.grades[course] = [grade]\n        else:\n            return 'No data found for this lecturer or course'\n\n    #method that computes the average homework grade\n    def get_avg_grade(self):\n        sum_lect = 0\n        count = 0\n        for course in self.grades.values():\n            sum_lect += sum(course)\n            count += len(course)\n        if count == 0:\n            print(\" no grades\")\n        else: \n            return round(sum_lect / count, 2) \n\n    #override __str__, the string representation that print receives\n    def __str__(self):\n        res = f'\\nName: {self.name}\\n'f'Surname: {self.surname} \\n'f'Average homework grade: {self.get_avg_grade()}\\n'f'Courses in progress: {self.courses_in_progress}\\n'f'Finished courses: {self.finished_courses}'\n        return res \n\n    #override the less-than method so objects are compared by average grade\n    def __lt__(self, other_student):\n        if not isinstance(other_student, Student):\n            print('Student not found')\n            return\n        else:\n            if self.get_avg_grade() < other_student.get_avg_grade():\n                print(f'{self.name} {self.surname} studies worse than {other_student.name} {other_student.surname}')\n                return True\n            else:\n                print(f'{self.name} {self.surname} studies better than {other_student.name} {other_student.surname}')\n                return False\n\n    #method that enrols the student in courses\n    def attach_course(self, courses):\n        for course in courses:\n            self.courses_in_progress.append(course)\n    \n    #method that marks courses as finished\n    def finished_course(self, courses):\n        for course in courses:\n            if course in self.courses_in_progress:\n                self.courses_in_progress.remove(course)\n                self.finished_courses.append(course)\n\n#Mentor class\nclass Mentor:\n    def __init__(self, name, surname):\n        self.name = name\n        self.surname = surname\n\n#Lecturer class (inherits from Mentor)\nclass Lecturer(Mentor):\n    def __init__(self, name, surname):\n        super().__init__(name, surname)\n        self.grades = {} \n        self.courses_attached = [] \n\n    #override __str__, the string representation that print receives\n    def __str__(self):\n        res = f'\\nName: {self.name}\\n'f'Surname: {self.surname} \\n'f'Average lecture grade: {self.get_avg_grade()}'\n        return res \n\n    #method that computes the average lecture grade\n    def get_avg_grade(self):\n        sum_lect = 0\n        count = 0\n        for course in self.grades.values():\n            sum_lect += sum(course)\n            count += len(course)\n        if count == 0:\n            print(\" no grades\")\n        else: \n            return round(sum_lect / count, 2) \n\n    #override the less-than (<) method so objects are compared by average grade \n    def __lt__(self, other_lecturer):\n        if not isinstance(other_lecturer, Lecturer):\n            print('No such lecturer!')\n            return\n        else:\n            if self.get_avg_grade() < other_lecturer.get_avg_grade():\n                print(f'{self.name} {self.surname} teaches worse than {other_lecturer.name} {other_lecturer.surname}')\n                return True\n            else:\n                print(f'{self.name} {self.surname} teaches better than {other_lecturer.name} {other_lecturer.surname}')\n                return False\n\n    #method that assigns the lecturer to courses\n    def attach_course(self, courses):\n        for course in courses:\n            self.courses_attached.append(course)\n\nclass Reviewer(Mentor):\n    def __init__(self, name, surname):\n        super().__init__(name, surname)\n        self.courses_attached = [] \n\n    #method for grading students\n    def rate_hw(self, student, course, grade):\n        if grade <= 0 or grade > 10:\n            print(\"Please give a grade on a 10-point scale (from 1 to 10, where 1 is the lowest and 10 the highest)\")\n        if isinstance(student, Student) and course in self.courses_attached and course in student.courses_in_progress:\n            if course in student.grades:\n                student.grades[course] += [grade]\n            else:\n                student.grades[course] = [grade]\n        else:\n            return 'Error'\n\n    #method that assigns the reviewer to courses\n    def attach_course(self, courses):\n        for course in courses:\n            self.courses_attached.append(course)\n\n    def __str__(self):\n        res = f'\\nName: {self.name}\\n'f'Surname: {self.surname}' \n        return res \n\n\n#create instances\nstudent1 = Student(\"Наталья\",\"Мухина\",\"ж\")\nstudent2 = Student(\"Виталий\",\"Жуков\",\"м\")\n\nlector1 = Lecturer(\"Лектор\",\"Лекторович\")\nlector2 = Lecturer(\"СекондЛектор\",\"Секондоривич\")\n\nreviewer1 = Reviewer(\"Провер\",\"Проверин\")\nreviewer2 = Reviewer(\"Ревьер\",\"Ревьюерин\")\n\n#assign courses to everyone\nstudent1.attach_course([\"Курс1\",\"Курс2\",\"Курс3\"])\nstudent1.finished_course([\"Курс3\"])\nstudent2.attach_course([\"Курс1\",\"Курс2\",\"Курс3\"])\n\nlector1.attach_course([\"Курс1\",\"Курс2\",\"Курс3\"])\nlector2.attach_course([\"Курс1\",\"Курс2\",\"Курс3\"])\n\nreviewer1.attach_course([\"Курс1\",\"Курс2\",\"Курс3\"])\nreviewer2.attach_course([\"Курс1\",\"Курс2\",\"Курс3\"])\n\n#students grade the lecturers\nstudent1.rate_for_lecturer(lector1, 'Курс1', 6)\nstudent1.rate_for_lecturer(lector1, 'Курс2', 9)\nstudent1.rate_for_lecturer(lector1, 'Курс2', 7)\nstudent2.rate_for_lecturer(lector1, 'Курс1', 9)\nstudent2.rate_for_lecturer(lector1, 'Курс2', 9)\nstudent2.rate_for_lecturer(lector2, 'Курс3', 0)\nstudent2.rate_for_lecturer(lector2, 'Курс2', 9)\nstudent2.rate_for_lecturer(lector2, 'Курс1', 5)\n\n#reviewers grade the students' homework\nreviewer1.rate_hw(student1, 'Курс1', 8)\nreviewer1.rate_hw(student2, 'Курс1', 9)\nreviewer1.rate_hw(student1, 'Курс1', 5)\nreviewer1.rate_hw(student1, 'Курс2', 8)\nreviewer2.rate_hw(student1, 'Курс2', 13)\nreviewer2.rate_hw(student1, 'Курс3', 7)\n\n#print info about the objects\nprint(student1)\nprint(lector1)\nprint(reviewer1)\nprint('\\n')\n\n#compare students\nstudent1 > student2\n\n#compare lecturers\nlector1 < lector2\n\n# average homework grade for a course\ndef get_avg_hw_grade(student_list, course):\n    total_sum = 0\n    for student in student_list:\n        for c, grades in student.grades.items():\n            if c == course:\n                total_sum += sum(grades) / len(grades)\n    return round(total_sum / len(student_list), 2)\n\nstudent_list = [student1, student2]\nhm_course = \"Курс1\"\nprint(f'Average homework grade for course \"{hm_course}\": {get_avg_hw_grade(student_list, hm_course)}')\n\n# average grade of the lecturers on a course\ndef get_avg_lector_grade(lector_list, course):\n    total_sum = 0\n    for student in lector_list:\n        for c, grades in student.grades.items():\n            if c == course:\n                total_sum += sum(grades) / len(grades)\n    return round(total_sum / len(lector_list), 2)\n\nlector_list = [lector1, lector2]\nlc_course = \"Курс2\"\nprint(f'Average lecturer grade for course \"{lc_course}\": {get_avg_lector_grade(lector_list, lc_course)}')\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"241236510","text":"#\tsearch.py\r\n# Searches for a file in the local shared folder\r\n\r\nimport os\r\nimport frozen_dir\r\n\r\ndef search(filename):\r\n\t#path 
= os.path.abspath(os.path.dirname(__file__)) + '/share'\r\n\tpath = os.path.abspath(frozen_dir.app_path()) + '/share'\r\n\t#print(path)\r\n\tfor root, dirs, files in os.walk(path):\r\n\t\tfor file in files:\r\n\t\t\t#print(file)\r\n\t\t\tif str(file) == filename:\r\n\t\t\t\treturn os.path.join(root,file)\r\n\t# if the file is not found, return None\r\n\treturn None\r\n\r\n\r\n","sub_path":"2019141460505/陈蓝玉_2019141460505_计网课设/陈蓝玉_2019141460505_计网课设/source/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"168898608","text":"import logging\nimport json\nimport os\n\nfrom constants import data_directory\n# logging.basicConfig(level=logging.DEBUG,\n#                     filename=\"app.log\",\n#                     filemode=\"w\",\n#                     format='%(asctime)s - %(levelname)s - %(message)s')\nlogging.basicConfig(level=logging.DEBUG)\n\n# while True:\n#     _list = input(\"Take your list or create new list: \")\n#     if _list == \"q\" or _list == \"Q\":\n#         exit(0)\n#     if _list.endswith(\".json\"): # if the file json exist\n#         print(f\"{_list} selected.\")\n#         path = os.path.join(data_directory, _list)\n#         with open(path, \"r\") as data:\n#             json = json.load(data)\n    \n#     oldext = os.path.splitext(_list)[0]\n#     _list = oldext\n#     break\n#     try:\n#         with open(_list, \"r\") as f:\n#             print(f\"{_list} selected.\")\n#     except FileNotFoundError:\n#         print(\"Wrong file or file path\")\n#     except UnicodeDecodeError:\n#         print(f\"Impossible open to file {file}\")\n\n    # if os.path.isfile(_list): # if the file is not a list .json\n    #     with open(_list, 'r') as f:\n    #         file = f.read()\n    #     print(f\"{_list} selected.\")\n    #     os.rename(_list, _list + \".json\")\n    #     with open(_list + \".json\", \"w\") as f:\n    #         json.dump(list(file), f, indent = 4) \n    #     print(f\"{_list}.json created and has been updated.\")\n    #     _list = _list + \".json\"\n    #     break\n    # else: os.mknod(_list + \".json\")\n    #     with open(_list + \".json\", \"w\") as f:\n    #         json.dump([], f) \n    #     print(f\"{_list} create.\")\n    #     _list = _list + \".json\"\n    #     break\n\n\nclass List(list):\n    def __init__(self, nom):\n        self.nom = nom\n    def add(self, element):\n        if not isinstance(element, str):\n            raise ValueError(\"Please insert a character string\")\n        if element in self:\n            logging.debug(f\"{element} is already in the list.\")\n            return False\n        self.append(element)\n        return True\n    \n    def delete(self, element):\n        if element not in self:\n            logging.error(f\"{element} does not exist\")\n            return False\n        self.remove(element)\n        return True\n    def show(self):\n        print(f\"List {self.nom}\")\n        for element in self:\n            print(f\" - {element}\")\n    def save(self):\n        path = os.path.join(data_directory, f\"{self.nom}.json\")\n        try:\n            if not os.path.exists(data_directory):\n                os.makedirs(data_directory)\n            with open(path, \"w\") as f: # create the json file in path\n                json.dump(self, f, indent=4)\n            return True\n        except FileNotFoundError:\n            print(\"Wrong file or file path\")\n            return False\n        except UnicodeDecodeError:\n            print(f\"Impossible to open file {path}\")\n            return False\n\nif __name__ == \"__main__\":\n    list_ = List(\"Shopping list\")\n    list_.add(\"json\")\n    list_.show()\n    list_.save()\n    # resultat = list_.add(element)\n    # if resultat:\n    #     #add in graphic interface","sub_path":"Python/Udemy/LaListeDeCourses/LaListeDeCoursesClasse/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"416797307","text":"import json\nfrom datetime import datetime\n\nfrom django.http import HttpResponse\n# NOTE: the original fragment never imports Article; it presumably comes from the app's models\n\n\ndef 
index(request):\n    # latest_question_list = Article.objects.order_by('id')[:5]\n    typeToId = {\n        '001_001': 1001,\n        '001_002': 1002,\n        '001_003': 1003,\n        '001_004': 1004,\n        '001_006': 1006,\n        '001_011': 1011,\n        '001_012': 1012,\n        '001_013': 1013,\n        '001_015': 1015,\n        '001_016': 1016,\n        '001_017': 1017,\n        '001_014': 1014,\n        '001_110': 1110\n    }\n    data = []\n    with open('./art.json') as f:\n        js = json.load(f)  # js is the decoded dict\n        for item in js:\n\n            type = 1001\n            views = 0\n            created_at = None\n            updated_at = None\n            aid = 1\n            title = ''\n            description = ''\n            content = ''\n            for i in item:\n                if i == 'type':\n                    type = typeToId[item[i][0]]\n                if i == 'id':\n                    aid = item[i]\n                if i == 'views':\n                    views = item[i]\n                if i == 'created_at':\n                    created_at = datetime.fromtimestamp((item[i]/1000))\n                if i == 'updated_at':\n                    if item[i]:\n                        updated_at = datetime.fromtimestamp((item[i]/1000))\n                if i == 'title':\n                    title = item[i]\n                if i == 'description':\n                    description = item[i]\n                if i == 'content':\n                    content = item[i]\n            \n            data.append(Article(\n                author_id=1,\n                aid=aid,\n                type=type,\n                views=views,\n                updated_at=updated_at,\n                created_at=created_at,\n                title=title,\n                description=description,\n                content=content,\n            ))\n\n    print(len(data))\n\n    Article.objects.bulk_create(data,5) \n\n    # list = Article.objects.values()\n    # for j in list:\n    #     print(j)\n    # pass\n    return HttpResponse('list')","sub_path":"server/sql/art.py","file_name":"art.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"257870195","text":"#!/usr/bin/python3.7\n#pip3 install --upgrade google-api-python-client oauth2client google-auth-httplib2 google-auth-oauthlib pymongo\nfrom os import name\nfrom weakref import ProxyTypes\nfrom pymongo import MongoClient\nimport httplib2\nimport googleapiclient.discovery\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport requests\nimport json\n\n\n# File obtained from the Google Developer Console\nCREDENTIALS_FILE = 'credentials.json'\n# SHAO MANAGERS\nspreadsheet_id = '1Jz9UN60UUdhcmfwgkKX3HcfG8YX9zMZkFeLgch0WKXw'\n# column addresses\ncolumn_proxy = 'DC ip!B:B'\ncolumn_gps = 'DC ip!G:G'\ncolumn_host = 'DC ip!H:H'\ncolumn_port = 'DC ip!I:I'\ncolumn_login = 'DC ip!J:J'\ncolumn_passwd = 'DC ip!K:K'\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n    CREDENTIALS_FILE,\n    ['https://www.googleapis.com/auth/spreadsheets.readonly',\n     'https://www.googleapis.com/auth/drive.readonly'])\nhttpAuth = credentials.authorize(httplib2.Http())\nservice = googleapiclient.discovery.build('sheets', 'v4', http = httpAuth)\n\n# function that reads one column from Google Sheets\ndef sheets_read(spreadsheet_id, cell):\n    values = service.spreadsheets().values().get(\n        spreadsheetId=spreadsheet_id,\n        range=cell,\n        majorDimension='COLUMNS'\n    ).execute()\n    raw = values.get('values')\n    raw = raw[0]\n    del raw[0]\n    return raw\n\nproxy = sheets_read(spreadsheet_id, column_proxy)\ngps = sheets_read(spreadsheet_id, column_gps)\nhost = sheets_read(spreadsheet_id, column_host)\nport = sheets_read(spreadsheet_id, column_port)\nlogin = sheets_read(spreadsheet_id, column_login)\npasswd = sheets_read(spreadsheet_id, column_passwd)\n\n# write one JSON document per proxy into MongoDB\nclient = MongoClient('mongodb://localhost:27017')\ndb = client.astrid\ni = 0\nfor n in proxy:\n    if n != '':\n        item = {\n            \"document\": {\n                \"id\": proxy[i],\n                \"jsonField\": {\n                    \"host\": host[i],\n                    \"port\": port[i],\n                    \"login\": login[i],\n                    
\"passwd\": passwd[i],\n \"gps\": gps[i]\n }\n },\n \"version\": 1637056751,\n \"author\": \"api|api\",\n \"prevVersion\": 0,\n \"current\": True,\n \"deleted\": False\n }\n\n check = db.GMCModels.GMCLicense.find_one({'document.id': proxy[i]})\n if check != None:\n id = check.get('_id')\n results = db.GMCModels.GMCLicense.replace_one({'_id' : id }, item )\n print(proxy[i], 'updated')\n else:\n results = db.GMCModels.GMCLicense.insert_one(item)\n print(proxy[i], 'recorded')\n i += 1\n\n# удаление старых ip из астрида\nallproxy = db.GMCModels.GMCLicense.find({},{\"_id\": 0, \"document.id\": 1,})\nfor x in allproxy:\n x = x.get('document')\n x = x.get('id')\n if x not in proxy:\n db.GMCModels.GMCLicense.delete_one({'document.id': x})\n print(x,'deleted')\n","sub_path":"python/scripts/Channels_sinc/push_proxy_list.py","file_name":"push_proxy_list.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"101282385","text":"# -*- coding : utf-8 -*-\n# 一个简单的爬取京东上iPhone商品信息的小爬虫\n\nimport requests\n\ndef getHTMLText(url):\n try:\n r = requests.get(url, timeout = 30)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text[:1000]\n except:\n return '爬取失败'\n\nif __name__ == \"__main__\":\n url = 'https://item.jd.com/3995643.html?cpdad=1DLSUE'\n print(getHTMLText(url))\n","sub_path":"Requests/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"517222402","text":"import sys\nimport os\nimport unittest\n# добавляем путь, чтобы видеть внешние папки, например, дойти до common\nsys.path.append(os.path.join(os.getcwd(), '../..'))\nfrom common.default_conf import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, \\\n RESPONSE, ERROR\nfrom server import process_client_message\n\n\nclass TestServer(unittest.TestCase):\n # тестовые данные\n ok_dict = {RESPONSE: 200}\n error_dict = {\n RESPONSE: 400,\n ERROR: 'Bad Request'\n }\n\n def test_ok_message(self):\n self.assertEqual(process_client_message(\n {ACTION: PRESENCE, TIME: '123.123', USER: {ACCOUNT_NAME: 'Demo'}}), self.ok_dict)\n\n def test_without_action(self):\n self.assertEqual(process_client_message(\n {TIME: '123.123', USER: {ACCOUNT_NAME: 'Demo'}}), self.error_dict)\n\n def test_wrong_action(self):\n self.assertEqual(process_client_message(\n {ACTION: 'test_not_ok_action', TIME: '123.123', USER: {ACCOUNT_NAME: 'Demo'}}), self.error_dict)\n\n def test_without_time(self):\n self.assertEqual(process_client_message(\n {ACTION: PRESENCE, USER: {ACCOUNT_NAME: 'Demo'}}), self.error_dict)\n\n def test_without_user(self):\n self.assertEqual(process_client_message(\n {ACTION: PRESENCE, TIME: '123.123'}), self.error_dict)\n\n def test_not_ok_user(self):\n self.assertEqual(process_client_message(\n {ACTION: PRESENCE, TIME: '123.123', USER: {ACCOUNT_NAME: 'TestNotOkUser'}}), self.error_dict)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"homework/hw02/tests/unittests/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"415381288","text":"# import pdb; pdb.set_trace()\nimport argparse\nimport os\nimport pickle\nimport glob\nfrom os.path import join\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom 
submit_small_experiments import get_method_info\n\n# SORT_ORDER = {\n# 'None': 0,\n# 'MNN': 1,\n# 'SeuratV3': 2,\n# 'ScAlign': 3,\n# 'closest': 4,\n# 'greedy_thresh_0.25_limit_02': 5,\n# 'hungarian_thresh_0.25': 6,\n# 'greedy_thresh_0.50_limit_02': 7,\n# 'hungarian_thresh_0.50': 8,\n# 'greedy_thresh_0.75_limit_02': 9,\n# 'hungarian_thresh_0.75': 10,\n# 'greedy_thresh_0.50_limit_01': 11,\n# 'greedy_thresh_0.50_limit_05': 12\n# }\n\ndef get_sort_order():\n method_list = get_method_info()\n order = {}\n for i, method in enumerate(method_list):\n order[method['name']] = i\n return order\n\ndef plot_clf(df, alignment_task, output_folder):\n sort_order = get_sort_order()\n df['ord'] = df.apply(lambda row: sort_order[row['method']], axis=1)\n df.sort_values('ord', inplace=True)\n sns.set(style=\"whitegrid\")\n ax = sns.barplot(x='data', y='acc', hue='method', data=df)\n ax.set_title('Classifier Accuracy on: {}'.format(alignment_task.as_plot_string()))\n\n plt.savefig(output_folder / '{}_acc.png'.format(alignment_task.as_path()), bbox_inches='tight')\n plt.savefig(output_folder / '{}_acc.svg'.format(alignment_task.as_path()), bbox_inches='tight')\n plt.savefig(output_folder / '{}_acc.pdf'.format(alignment_task.as_path()), bbox_inches='tight')\n plt.close()\n\n # ax = sns.barplot(x='data', y='auc', hue='method', data=df)\n # ax.set_title('Classifier AUC on: {}'.format(alignment_task.as_plot_string()))\n\n # plt.savefig(output_folder / '{}_auc.png'.format(alignment_task.as_path()), bbox_inches='tight')\n # plt.savefig(output_folder / '{}_auc.svg'.format(alignment_task.as_path()), bbox_inches='tight')\n # plt.savefig(output_folder / '{}_auc.pdf'.format(alignment_task.as_path()), bbox_inches='tight')\n # plt.close()\n\ndef plot_lisi(df, alignment_task, output_folder):\n sort_order = get_sort_order()\n df['ord'] = df.apply(lambda row: sort_order[row['method']], axis=1)\n df.sort_values('ord', inplace=True)\n sns.set(style=\"whitegrid\")\n\n ax = sns.boxplot(x=\"score\", y=\"method\", hue=\"metric\", data=df, palette=\"Set3\", orient=\"h\", showfliers=False, hue_order=[alignment_task.batch_key, alignment_task.ct_key])\n\n groups = df.groupby(['method', 'metric'], sort=True)\n means = groups['score'].mean()\n medians = groups['score'].median()\n q1 = groups['score'].quantile(0.25)\n q2 = groups['score'].quantile(0.75)\n iqr = q2.max() - q1.min()\n whisker = q2.mean() + 1.5 * iqr\n print('whisker: {}'.format(whisker))\n stds = groups['score'].std()\n \n print(groups)\n print()\n print(medians)\n # new_left = df['score'].min()\n max_point = df['score'].max()\n print('max_point: {}'.format(max_point))\n\n # new_right = max_point + (max_point - new_left) / 5\n\n for tick, label in enumerate(ax.get_yticklabels()):\n print(tick)\n label = label.get_text()\n print(label)\n\n\n dataset_lisi_median = medians[label][alignment_task.batch_key]\n celltype_lisi_median = medians[label][alignment_task.ct_key]\n print(dataset_lisi_median)\n print(celltype_lisi_median)\n print()\n ax.text(whisker, tick - 0.19, '{:.3f}'.format(dataset_lisi_median), horizontalalignment='left', size='x-small', color='r', weight='semibold')\n ax.text(whisker, tick + 0.19, '{:.3f}'.format(celltype_lisi_median), horizontalalignment='left', size='x-small', color='r', weight='semibold')\n\n # print(new_left)\n # print(new_right)\n #ax.set_xlim(left=left, right=right)\n #plt.legend(bbox_to_anchor=(1.1, 1))\n plt.legend(loc='upper left', bbox_to_anchor=(1,1))\n\n ax.set_title('Scores on Task: {}'.format(alignment_task.as_plot_string()))\n\n 
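# A small optional sketch: widen the x-axis so the median annotations drawn\n    # at x = whisker stay inside the axes (assumes the default margins would\n    # otherwise clip them; whisker and max_point are computed above).\n    ax.set_xlim(right=max(whisker, max_point) * 1.05)\n\n    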
plt.savefig(output_folder / '{}_sns.png'.format(alignment_task.as_path()), bbox_inches='tight')\n    plt.savefig(output_folder / '{}_sns.svg'.format(alignment_task.as_path()), bbox_inches='tight')\n    plt.savefig(output_folder / '{}_sns.pdf'.format(alignment_task.as_path()), bbox_inches='tight')\n    plt.close()\n\ndef plot_kBET(df, alignment_task, output_folder):\n    sort_order = get_sort_order()\n    df['ord'] = df.apply(lambda row: sort_order[row['method']], axis=1)\n    df.sort_values('ord', inplace=True)\n    sns.set(style=\"whitegrid\")\n\n    ax = sns.boxplot(x=\"value\", y=\"method\", hue=\"metric\", data=df, showfliers=True, hue_order=['kBET.expected', 'kBET.observed', 'kBET.signif'])\n\n    plt.legend(loc='upper left', bbox_to_anchor=(1,1))\n\n    ax.set_title('kBET Rejection Rates on Task: {}'.format(alignment_task.as_plot_string()))\n\n    plt.savefig(output_folder / '{}_kbet.png'.format(alignment_task.as_path()), bbox_inches='tight')\n    plt.savefig(output_folder / '{}_kbet.svg'.format(alignment_task.as_path()), bbox_inches='tight')\n    plt.savefig(output_folder / '{}_kbet.pdf'.format(alignment_task.as_path()), bbox_inches='tight')\n    plt.close()\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser('compile-results', description='Combine LISI scores from multiple experiments into summarizing plots.')\n    parser.add_argument('root_folder', help='Root folder to search for result files.')\n    parser.add_argument('output_folder', help='Path of output folder (created if not exists) to store plots in.')\n\n    args = parser.parse_args()\n    lisi_folder = Path(args.output_folder) / 'LISI'\n    clf_folder = Path(args.output_folder) / 'classification'\n    kbet_folder = Path(args.output_folder) / 'kBET'\n    for path in [args.output_folder, lisi_folder, clf_folder, kbet_folder]:\n        if not os.path.exists(path):\n            os.makedirs(path)\n\n    results_by_task = defaultdict(list)\n    for filename in glob.iglob(join(args.root_folder, '**/results.pickle'), recursive=True):\n        print(filename)\n        with open(filename, 'rb') as f:\n            result = pickle.load(f)\n        results_by_task[str(result['alignment_task'])].append(result)\n\n    for task, results in results_by_task.items():\n        print(task)\n        method = []\n        metric = []\n        score = []\n        for r in results:\n            method.extend([r['method']]*(2*r['lisi'].shape[0]))\n            for col in r['lisi'].columns:\n                metric.extend([col]*r['lisi'].shape[0])\n                score.extend(r['lisi'][col])\n        df = pd.DataFrame(data={'method': method, 'metric': metric, 'score': score})\n        plot_lisi(df, results[0]['alignment_task'], lisi_folder)\n\n    for task, results in results_by_task.items():\n        method = []\n        data = []\n        acc = []\n        # auc = []\n        for r in results:\n            if r['clf'] is None:\n                continue\n            method.extend([r['method']]*2)\n            for dataset in ['source', 'source_aligned']:\n                data.append(dataset)\n                acc.append(r['clf']['{}_acc'.format(dataset)])\n                # auc.append(r['clf']['{}_auc'.format(dataset)])\n        # df = pd.DataFrame(data={'method': method, 'data': data, 'acc': acc, 'auc': auc})\n        df = pd.DataFrame(data={'method': method, 'data': data, 'acc': acc})\n        plot_clf(df, results[0]['alignment_task'], clf_folder)\n\n    for task, results in results_by_task.items():\n        print(task)\n        print(len(results))\n        # scores = [r['kbet_stats'] for r in results]\n        methods = [r['method'] for r in results]\n        print(methods)\n        method = []\n        metric = []\n        value = []\n        for r in results:\n            kbet = r['kbet_stats']\n            method.extend([r['method']]*kbet.size)\n            for col in kbet.columns:\n                metric.extend([col]*kbet.shape[0])\n                value.extend(kbet[col])\n        df = pd.DataFrame(data={'method': method, 
'metric': metric, 'value': value})\n        plot_kBET(df, results[0]['alignment_task'], kbet_folder)\n","sub_path":"compile_results.py","file_name":"compile_results.py","file_ext":"py","file_size_in_byte":8060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"167329109","text":"#!/usr/bin/env python3\n\nfrom glob import iglob\nfrom os.path import relpath\nimport hashlib\nimport os\nimport json\n\n\ndef parse_count(lines):\n    for line in lines:\n        [count, lno] = line[:15].lstrip().split(':')\n        lno = int(lno.lstrip())\n        if lno == 0:\n            continue\n        if count == '-':\n            count = 0\n        elif count.startswith('#'):\n            count = len(count)\n        else:\n            count = int(count)\n        yield (lno, count)\n\n\ndef parse_file(filename):\n    with open(filename, 'r') as f:\n        lines = f.read().splitlines()\n    assert lines[0].startswith(\"        -:    0:Source:\")\n    name = relpath(lines[0][23:])\n    return (name, list(parse_count(lines[1:])))\n\n\ndef parse():\n    d = {}\n    for filename in iglob(\"*##*porus*.gcov\"):\n        name, line_counts = parse_file(filename)\n        f = d.get(name, {})\n        for l, c in line_counts:\n            f[l] = f.get(l, 0) + c\n        d[name] = f\n    return d\n\n\ndef coverage(line_counts):\n    for i, (l, c) in enumerate(sorted(line_counts.items())):\n        assert i+1 == l\n        yield c\n\n\ndef source_digest(name):\n    md5 = hashlib.md5()\n    with open(name, 'rb') as f:\n        md5.update(f.read())\n    return md5.hexdigest()\n\n\ndef source_files():\n    for name, line_counts in parse().items():\n        yield {\"name\": name,\n               \"source_digest\": source_digest(name),\n               \"coverage\": list(coverage(line_counts))}\n\ndef json_file():\n    return json.dumps({\n        'service_name': 'travis-ci',\n        'service_job_id': os.environ['TRAVIS_JOB_ID'],\n        'source_files': list(source_files())}).encode('utf-8')\n\ndef post(json_file):\n    import requests\n    r = requests.post('https://coveralls.io/api/v1/jobs', files={'json_file': json_file})\n    print(r.status_code)\n    print(r.headers)\n    print(r.content)\n    assert r.status_code == 200\n\nif __name__ == '__main__':\n    post(json_file())\n","sub_path":"coveralls.py","file_name":"coveralls.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"428806845","text":"#coding:utf-8\n\n\ndef get_data(year,month,var):\n\timport netCDF4\n\timport numpy as np\n\timport subroutine\n\timport datetime as dt\n\ta=subroutine.read_meta_data('var')\n\tfilename=subroutine.celldata(a,'var',var,'ERA_fn')\n\tif year <= 1967:\n\t\tncfile=subroutine.dat_dir()+'ERA/NetCDF/ERA-40_1958-1967.nc'\n\telif year >= 1968 and year <= 1977:\n\t\tncfile=subroutine.dat_dir()+'ERA/NetCDF/ERA-40_1968-1977.nc'\n\telif year >= 1978 and year<=1988:\n\t\tncfile=subroutine.dat_dir()+'ERA/NetCDF/ERA-40_1978-1988.nc'\n\telse:\n\t\tncfile=subroutine.dat_dir()+'ERA/NetCDF/ERA-Interim.nc'\n\n\tnc=netCDF4.Dataset(ncfile,'r')\n\ttime=nc.variables['time'][:]\n\tunits=nc.variables['time'].units\n\tdtime=netCDF4.num2date(time,units=units)\n\tfor i in range(0,dtime.size):\n\t\tif getattr(dtime[i],'year')==year and getattr(dtime[i],'month')==month:\n\t\t\tif year <=1988 and filename=='tp':\n\t\t\t\t# For 1988 and earlier, precipitation is computed as the sum of the convective and large-scale components\n\t\t\t\tdata=nc.variables['lsp'][i,:,:]+nc.variables['cp'][i,:,:]\n\t\t\telif year<=1988 and filename=='sff':\n\t\t\t\tdata=(nc.variables['e'][i,:,:]+nc.variables['ro'][i,:,:]+\\\n\t\t\t\t\t nc.variables['lsp'][i,:,:]+nc.variables['cp'][i,:,:])*2 # doubling happens to give the right values (reason unknown)\n\t\t\telif filename=='sff':\n\t\t\t\t# 
freshwater flux\n\t\t\t\tdata=nc.variables['e'][i,:,:]+nc.variables['ro'][i,:,:]+\\\n\t\t\t\t\t nc.variables['tp'][i,:,:]\n\t\t\telif filename=='q':\n\t\t\t\t# heat flux\n\t\t\t\tdata=nc.variables['sshf'][i,:,:]+nc.variables['slhf'][i,:,:]+\\\n\t\t\t\t\t nc.variables['ssr'][i,:,:]+nc.variables['str'][i,:,:]\n\t\t\telse:\n\t\t\t\tdata=nc.variables[filename][i,:,:]\n\t\t\t# print i,dtime[i]\n\t\t\t# add_offset=nc.variables[filename].add_offset\n\t\t\t# scale_factor=nc.variables[filename].scale_factor\n\t\t\t# data=data*scale_factor+add_offset\n\t\t\t# print 'scale_factor=',scale_factor\n\t\t\t# print 'add_offset=',add_offset\n\t\t\tif filename=='sff':\t# multiplying by 100 brings the order of magnitude into line (reason unknown)\n\t\t\t\tdata=data*100\n\n\t\t\tbreak\n\n\tnc.close()\n\treturn data[::-1,:]\n\n\ndef get_grid_value(var):\n\timport netCDF4\n\timport subroutine\n\timport numpy as np\n\tncfile=subroutine.dat_dir()+'ERA/NetCDF/ERA-40_1958-1967.nc'\n\tnc=netCDF4.Dataset(ncfile,'r')\n\txgrid=nc.variables['longitude'][:]\n\tygrid=nc.variables['latitude'][:]\n\tnc.close()\n\treturn [xgrid,ygrid[::-1],np.array([0])]\n","sub_path":"ERA.py","file_name":"ERA.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"252751138","text":"from django.shortcuts import render, redirect\nfrom django.template import loader\nfrom .forms import *\n\n# Create your views here.\n\ndef home(request):\n\treturn render(request, 'football/index.html')\n\n\ndef login(request):\n\treturn render(request, 'football/login.html')\n\n\ndef register_player(request):\n    if request.method == 'POST':\n        player_form = register_player_form(request.POST)\n\n        if player_form.is_valid():\n            # allow when testing is finished\n            player_form.save()\n            return redirect('home')\n\n    else:\n        player_form = register_player_form()\n\n    context = {'form': player_form}\n    return render(request, 'football/registerplayer.html', context)\n\n\ndef register_team(request):\n\tif request.method == 'POST':\n\t\tteam_form = register_team_form(request.POST)\n\n\t\tif team_form.is_valid():\n\t\t\t# allow when testing is finished\n\t\t\tteam_form.save()\n\t\t\treturn redirect('home')\n\n\telse:\n\t\tteam_form = register_team_form()\n\n\tcontext = {'form': team_form}\n\treturn render(request, 'football/registerteam.html', context)\n\n\ndef register_stadium(request):\n\tif request.method == 'POST':\n\t\tstadium_form = register_stadium_form(request.POST)\n\n\t\tif stadium_form.is_valid():\n\t\t\t# allow when testing is finished\n\t\t\tstadium_form.save()\n\t\t\treturn redirect('home')\n\n\telse:\n\t\tstadium_form = register_stadium_form()\n\n\tcontext = {'form': stadium_form}\n\treturn render(request, 'football/registerstadium.html', context)\n\n\ndef register_match(request):\n\tif request.method == 'POST':\n\t\tmatch_form = register_match_form(request.POST)\n\n\t\tif match_form.is_valid():\n\t\t\t# allow when testing is finished\n\t\t\tmatch_form.save()\n\t\t\treturn redirect('home')\n\n\telse:\n\t\tmatch_form = register_match_form()\n\n\tcontext = {'form': match_form}\n\treturn render(request, 'football/registermatch.html', context)\n\n\ndef register_roster(request):\n\tif request.method == 'POST':\n\t\troster_form = register_roster_form(request.POST)\n\n\t\tif roster_form.is_valid():\n\t\t\t# allow when testing is finished\n\t\t\troster_form.save()\n\t\t\treturn redirect('home')\n\n\telse:\n\t\troster_form = register_roster_form()\n\n\tcontext = {'form': roster_form}\n\treturn render(request, 'football/registerroster.html', context)\n\n\ndef 
register_stat(request):\n\tif request.method == 'POST':\n\t\tstat_form = register_stat_form(request.POST)\n\n\t\tif stat_form.is_valid():\n\t\t\t# allow when testing is finished\n\t\t\tstat_form.save()\n\t\t\treturn redirect('home')\n\n\telse:\n\t\tstat_form = register_stat_form()\n\n\tcontext = {'form': stat_form}\n\treturn render(request, 'football/registerstat.html', context)\n\n\ndef register_score(request):\n\tif request.method == 'POST':\n\t\tscore_form = register_score_form(request.POST)\n\n\t\tif score_form.is_valid():\n\t\t\t# allow when testing is finished\n\t\t\tscore_form.save()\n\t\t\treturn redirect('home')\n\n\telse:\n\t\tscore_form = register_score_form()\n\n\tcontext = {'form': score_form}\n\treturn render(request, 'football/registerscore.html', context)\n\n\ndef view_player(request):\n\tplayers = Player.objects.all()\n\n\ttemplate = loader.get_template('football/viewplayer.html')\n\tcontext = {\n\t\t'players': players,\n\t}\n\treturn render(request, 'football/viewplayer.html', context)\n\n\ndef view_team(request):\n\tteams = Team.objects.all()\n\n\ttemplate = loader.get_template('football/viewteam.html')\n\n\tcontext = {\n\t\t'teams': teams,\n\t}\n\treturn render(request,'football/viewteam.html', context)\n\n\ndef view_stadium(request):\n\tstadiums = Stadium.objects.all()\n\n\ttemplate = loader.get_template('football/viewstadium.html')\n\n\tcontext = {\n\t\t'stadiums': stadiums,\n\t}\n\treturn render(request,'football/viewstadium.html', context)\n\n\ndef view_match(request):\n\tmatches = Matchup.objects.all()\n\n\ttemplate = loader.get_template('football/viewmatchup.html')\n\n\tcontext = {\n\t\t'matches': matches,\n\t}\n\treturn render(request,'football/viewmatchup.html', context)\n\n\ndef view_roster(request):\n\trosters = Roster.objects.all()\n\n\ttemplate = loader.get_template('football/viewroster.html')\n\n\tcontext = {\n\t\t'rosters': rosters,\n\t}\n\treturn render(request,'football/viewroster.html', context)\n\n\ndef view_stat(request):\n\tstats = Stat.objects.all()\n\n\ttemplate = loader.get_template('football/viewstat.html')\n\n\tcontext = {\n\t\t'stats': stats,\n\t}\n\treturn render(request,'football/viewstat.html', context)\n\n\ndef view_score(request):\n\tscores = Score.objects.all()\n\n\ttemplate = loader.get_template('football/viewscore.html')\n\n\tcontext = {\n\t\t'scores': scores,\n\t}\n\treturn render(request,'football/viewscore.html', context)\n","sub_path":"ift530Project/football/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"39321782","text":"from __main__ import Plugin\nimport requests\n\nuser_agent=\"archive plugin for /u/captainmeta4's IRC interface\"\n\nclass Main(Plugin):\n\n    def helptext(self):\n\n        yield \"$archive <url>\"\n        yield \"Obtain the most recent archive of <url> if it exists, or begin archiving if it does not.\"\n\n    def exe(self, message):\n\n        url=self.args[1]\n\n        #apis\n        check='http://archive.org/wayback/available?url={}'\n        save = 'https://web.archive.org/save/{}'\n        headers={'User-Agent':user_agent}\n\n        #check to see if it is already archived\n\n        x=requests.get(check.format(url), headers=headers)\n        j=x.json()\n        if 'closest' in j['archived_snapshots']:\n            yield j['archived_snapshots']['closest']['url']\n            return\n\n        #archive it\n\n        x=requests.get(save.format(url), headers=headers)\n        if x.status_code == 403:\n            yield \"Internet Wayback Machine does not archive that site.\"\n            return\n        elif x.status_code != 
200:\n yield \"There was an error archiving your url\"\n return\n\n yield \"Archive started. Try again in a minute or two to see if there's a saved archive page.\"\n\n \n\n \n","sub_path":"plugins/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"137786905","text":"import numpy as np\nfrom .classifiers import (\n BoxPlot,\n EqualInterval,\n FisherJenks,\n FisherJenksSampled,\n Quantiles,\n UserDefined,\n NaturalBreaks,\n MaximumBreaks,\n MaxP,\n StdMean,\n)\n\n__all__ = [\"Pooled\"]\n\ndispatcher = {\n \"boxplot\": BoxPlot,\n \"equalinterval\": EqualInterval,\n \"fisherjenks\": FisherJenks,\n \"fisherjenkssampled\": FisherJenksSampled,\n \"quantiles\": Quantiles,\n \"maximumbreaks\": MaximumBreaks,\n \"stdmean\": StdMean,\n \"userdefined\": UserDefined,\n}\n\n\nclass Pooled(object):\n \"\"\"Applying global binning across columns\n\n Parameters\n ----------\n\n Y : array\n (n, m), values to classify, with m>1\n\n classifier : string\n Name of mapclassify.classifier to apply\n\n **kwargs : dict\n additional keyword arguments for classifier\n\n Attributes\n ----------\n\n global_classifier : MapClassifier\n Instance of the pooled classifier defined as the classifier\n applied to the union of the columns.\n\n col_classifier : list\n Elements are MapClassifier instances with the pooled classifier\n applied to the associated column of Y.\n\n Examples\n --------\n >>> import numpy as np\n >>> import mapclassify as mc\n >>> n = 20\n >>> data = np.array([np.arange(n)+i*n for i in range(1,4)]).T\n >>> res = mc.Pooled(data)\n >>> res.col_classifiers[0].counts\n array([12, 8, 0, 0, 0])\n >>> res.col_classifiers[1].counts\n array([ 0, 4, 12, 4, 0])\n >>> res.col_classifiers[2].counts\n array([ 0, 0, 0, 8, 12])\n >>> res.global_classifier.counts\n array([12, 12, 12, 12, 12])\n >>> res.global_classifier.bins == res.col_classifiers[0].bins\n array([ True, True, True, True, True])\n >>> res.global_classifier.bins\n array([31.8, 43.6, 55.4, 67.2, 79. 
])\n \"\"\"\n\n def __init__(self, Y, classifier=\"Quantiles\", **kwargs):\n self.__dict__.update(kwargs)\n Y = np.asarray(Y)\n n, cols = Y.shape\n y = np.reshape(Y, (-1, 1), order=\"f\")\n method = classifier.lower()\n if method not in dispatcher:\n print(f\"{method} not a valid classifier.\")\n return None\n global_classifier = dispatcher[method](y, **kwargs)\n # self.k = global_classifier.k\n col_classifiers = []\n name = f\"Pooled {classifier}\"\n for c in range(cols):\n res = UserDefined(Y[:, c], bins=global_classifier.bins)\n res.name = name\n col_classifiers.append(res)\n self.col_classifiers = col_classifiers\n self.global_classifier = global_classifier\n self._summary()\n\n def _summary(self):\n yb = self.global_classifier.yb\n self.classes = self.global_classifier.classes\n self.tss = self.global_classifier.tss\n self.adcm = self.global_classifier.adcm\n self.gadf = self.global_classifier.gadf\n\n def __str__(self):\n s = \"Pooled Classifier\"\n rows = [s]\n for c in self.col_classifiers:\n rows.append(c.table())\n return \"\\n\\n\".join(rows)\n\n def __repr__(self):\n return self.__str__()\n","sub_path":"mapclassify/pooling.py","file_name":"pooling.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"552441077","text":"def class_extensions():\n def rule_importance(self):\n \"\"\"\n Retrieve rule importances for a Rulefit model\n\n :return: H2OTwoDimTable\n \"\"\"\n if self._model_json[\"algo\"] != \"rulefit\":\n raise H2OValueError(\"This function is available for Rulefit models only\")\n return self._model_json[\"output\"]['rule_importance']\n\nextensions = dict(\n __class__=class_extensions,\n)\n\ndoc = dict(\n __class__=\"\"\"\nBuilds a RuleFit on a parsed dataset, for regression or \nclassification. 
\n\"\"\"\n)\n","sub_path":"h2o-bindings/bin/custom/python/gen_rulefit.py","file_name":"gen_rulefit.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"216868268","text":"# coding=utf-8\nfrom page_object.cabinet.registration_and_login import RegisterPage\nfrom page_object.cabinet.cabinet_main import ProfilePage, MainPage, TransactionPage\nfrom selftf import *\nfrom datetime import datetime\nfrom ddt import ddt, data\n\n\n@ddt\nclass TestTransactionsHistory(SelfieCase):\n\n    @classmethod\n    def setUpClass(cls):\n        cls.browser = Browser()\n\n        cls.browser.open(Config().SITE)\n        login = RegisterPage(cls)\n        login.check_login_load()\n        login_page = RegisterPage(cls)\n        login_page.email_inp.typing('ahodkevich@trademux.net')\n        login_page.pass_inp.typing('1081802aa')\n        login_page.register_btn.click()\n\n        log('Check that the page loads after authorization')\n        cabinet = ProfilePage(cls)\n        cabinet.check_load()\n\n    @catcher_func\n    def setUp(self):\n        self.browser.refresh()\n        main = MainPage(self)\n        main.section_menu.item(1).click()\n        cabinet = ProfilePage(self)\n        cabinet.check_load()\n\n    @data(0, 1, 2, 3)\n    @catcher_func\n    def test_01_check_periods(self, arg):\n        \"\"\"Check that the period presets work\"\"\"\n        main = MainPage(self)\n        main.section_menu.item(4).click()\n\n        page = TransactionPage(self)\n        page.page_load()\n        page.period_slc.select_by_index(arg)\n        page.load_report_btn.click()\n        delay(10, reason='Wait just in case: loading a large amount of data can be slow')\n        page.transaction_tbl.may_conditions(Displayed)\n\n    @catcher_func\n    def test_02_check_custom_period(self):\n        \"\"\"Check setting a custom period\"\"\"\n\n        main = MainPage(self)\n        main.section_menu.item(4).click()\n\n        page = TransactionPage(self)\n        page.page_load()\n        page.period_slc.select_by_index(4)\n        page.data_picker.click()\n        page.start_picker.may_conditions(Displayed)\n        page.end_picker.may_conditions(Displayed)\n        page.start_picker.clear_input()\n        page.start_picker.typing('02282010')\n        page.end_picker.click()\n        page.access_date.click()\n        assert '02-28-2010' in page.data_picker.get_attribute('value'), \"The date does not match the one that was set\"\n\n        page.load_report_btn.click()\n        delay(10, reason='Wait just in case: loading a large amount of data can be slow')\n        page.transaction_tbl.may_conditions(Displayed)\n\n    @data(0, 1, 2, 3, 4)\n    @catcher_func\n    def test_03_undata(self, arg):\n        \"\"\"Check an account with an empty history\"\"\"\n        main = MainPage(self)\n        main.section_menu.item(4).click()\n\n        page = TransactionPage(self)\n        page.page_load()\n        page.account_slc.select_by_value(Config().CLEAR_ACCOUNT)\n        page.period_slc.select_by_index(arg)\n        page.load_report_btn.click()\n        delay(10, reason='Wait just in case: loading a large amount of data can be slow')\n        page.transaction_tbl.row(1).may_conditions(NotDisplayed)\n\n    @catcher_func\n    def test_04_check_cells(self):\n        \"\"\"Check the cell formats\"\"\"\n\n        main = MainPage(self)\n        main.section_menu.item(4).click()\n\n        page = TransactionPage(self)\n        page.page_load()\n        page.account_slc.select_by_value(Config().BASE_ACCOUNT)\n        page.period_slc.select_by_index(3)\n        page.load_report_btn.click()\n        delay(10, reason='Wait just in case: loading a large amount of data can be slow')\n        page.transaction_tbl.may_conditions(Displayed)\n        cols_text = page.transaction_tbl.get_row_text(1)\n        datetime.strptime(cols_text['cell_1'], '%m-%d-%Y 
%H:%M.%S')\n        log('The date matches the pattern')\n\n        operations_type = ['Deposit to credit', 'Deposit', 'Withdraw']\n        assertion(cols_text['cell_2'], IsIn(operations_type),\n                  'Unknown value {} in the operation type cell!'.format(cols_text['cell_2']))\n\n        status = [\"In progress\", \"Complete\", \"Rejected\"]\n        assertion(cols_text['cell_3'], IsIn(status),\n                  'Unknown value {} in the status cell!'.format(cols_text['cell_3']))\n\n        digit = cols_text['cell_4']\n        assert digit.isdigit() if '-' not in digit else digit.lstrip('-').isdigit()\n        log('The amount cell value is numeric')\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.browser.exit()\n\n\nif __name__ == '__main__':\n    run_tests()\n","sub_path":"test_transaction_history.py","file_name":"test_transaction_history.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"247359724","text":"#!/usr/bin/env python3\nfrom common import *\n\nfrom ship import Ship\nfrom fragment import Fragments\nfrom atlas import Atlas\nfrom screen import Screen\n\nclass Game():\n    def __init__(self):\n        pg.init()\n        self.fps = 60\n        self.clock = pg.time.Clock()\n        self.ship = Ship()\n        self.screen = Screen(self.ship, self.clock)\n        self.atlas = Atlas()\n        self.fragments = Fragments(Vector(0, 0), self.atlas.dim, int(abs(self.atlas.dim) / 10))\n\n        self.all = [self.screen, self.fragments, self.ship, self.atlas]\n\n    def update(self):\n        for item in self.all:\n            item.update(self.atlas)\n\n    def draw(self):\n        for item in self.all:\n            item.draw(self.screen)\n\n    def eventhandler(self):\n        keys = pg.key.get_pressed()\n\n        userquit = any([x.type == pg.QUIT for x in pg.event.get()])\n        if keys[pg.K_q] or keys[pg.K_ESCAPE] or userquit:\n            exit()\n\n    def run(self):\n        while True:\n            self.eventhandler()\n            self.update()\n            self.draw()\n            pg.display.update()\n            self.clock.tick(self.fps)\n\nif __name__ == '__main__':\n    game = Game()\n    game.run()\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"432359236","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 12 15:56:24 2016\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nfrom pandas import DataFrame\r\n\r\nurl = 'https://www.baiji.com.cn'\r\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:50.0) Gecko/20100101 Firefox/50.0'}\r\nr = requests.get(url,headers = headers)\r\nr.encoding = r.apparent_encoding\r\nsoup = BeautifulSoup(r.text,'lxml')\r\n\r\nnames = soup.find('ul',attrs={'class':'activeTab'})\r\ntels = soup.find_all('span',attrs={'class':'phone'})\r\naddrs = soup.find_all('em',attrs={'class':'addr150721'})\r\n\r\nresult=[]\r\nfor name,addr,tel in zip(names,addrs,tels):\r\n    result.append((name.get_text(),addr.get_text(),tel.get_text()))\r\nDataFrame(result).to_csv('C:\\\\Users\\\\Administrator\\\\Desktop\\\\1.csv')\r\n\r\n\r\n","sub_path":"baiji store info.py","file_name":"baiji store info.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"388108892","text":"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nCreated on Sun May 15 \n\n@author: Adrianacmy\n\nCreate a function reverse that takes in a dictionary and reverses it, such that \nall of the values become keys and all of the keys become values. 
Be careful: we \ndo not wish to lose any information. Consider what to do if the original \ndictionary has lists of values for a particular key, or has duplicate values \nfor some keys.\n\n'''\n\n# def format_dic_value(dict):\n# '''format the single elements value list if it is necessary'''\n\n# nw_dict = {}\n# for k, v in dict.items():\n# if len(v) == 1 and type(v) == list:\n# nw_dict[k] = ''.join(v)\n# else:\n# nw_dict[k] = v\n\n# return nw_dict\n\n\ndef convert_to_simple_list(lst, nw_list=[]):\n '''\n Convert a muti-dimentinal list to one dimention list.\n lst: any list \n nw_list: one dimentiona list, could start as empty \n return: a one dimention list\n '''\n for a in lst:\n if type(a) == list:\n convert_to_simple_list(a)\n else:\n nw_list.append(a)\n return nw_list\n\n# lst = ['a', 'b', 'c', [1,2,3], 'abc']\n# print(convert_to_simple_list(lst))\n\n\ndef add_dic_val(dic, k, v):\n '''\n add elements or values to a dictionary. \n dic: an empty dictionary \n k: a key \n v: a value \n '''\n dic[k] = dic.get(k, [])\n if not v in dic[k]:\n dic[k].append(v)\n\n\ndef reverse_dict(d):\n '''reverse keys and values in a dictionary'''\n \n r = {} #reversed dictionary\n\n for k, v in d.items():\n nw_lst = []\n if type(v) == list:\n value_list = convert_to_simple_list(v, nw_lst)\n # if value_list:\n for val in value_list:\n add_dic_val(r, val, k) \n else:\n add_dic_val(r, v, k)\n \n return r\n\n\ndef main():\n d = {1: 'a', 4: ['abc', 'egf'], 5: '',(1, 6): 'abc', 2:[1, 2, 3, [1, 2]], 8: ['', 2]}\n print(reverse_dict(d))\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"old/reverse_dict.py","file_name":"reverse_dict.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"495226177","text":"import csv\nfrom datetime import datetime\n\nfrom peewee import *\n\ndb = SqliteDatabase('test.sqlite')\n\n\ndef timer(func):\n \"\"\"Чтобы не потерять имя функции и документации\n или же можно воспользоваться декоратором wraps\n from functools import wraps\n \"\"\"\n\n def inner(*args, **kwargs):\n start = datetime.now()\n func(*args, **kwargs)\n return datetime.now() - start\n\n inner.__name__ = func.__name__\n inner.__doc__ = func.__doc__\n return inner\n\n\nclass Coin(Model):\n name = CharField(max_length=50)\n salary = CharField(max_length=50)\n\n class Meta:\n database = db\n\n\n@timer\ndef way_to_write_db1(reader, order):\n for row in reader:\n coin = Coin(name=row[order[0]], salary=row[order[1]])\n coin.save()\n print(row)\n\n\n@timer\ndef way_to_write_db2(coins):\n with db.atomic():\n for row in coins:\n Coin.create(**row)\n\n\ndef read_csv(file_name):\n \"\"\"\n Не забудь установить библиотеки\n pip install peewee\n pip install psycopg2\n pip install psycopg2.binary\n \"\"\"\n with open(file_name, 'r') as file:\n order = ('name', 'salary')\n reader = csv.DictReader(file, delimiter=';', fieldnames=order)\n coins = list(reader)\n\n # этот способ получиля быстрее\n print(way_to_write_db1(reader, order))\n\n # этот способ медленее\n print(way_to_write_db2(coins))\n\n\ndef main():\n db.connect()\n db.create_tables([Coin])\n read_csv('text.csv')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"course/read_from_csv_to_db/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"577316663","text":"\"\"\"\n\n\n\"\"\"\nfrom pyspark.sql.functions import concat_ws, collect_set, col, 
lower, when, current_timestamp\nfrom pyspark.sql.window import Window\nfrom connectors.TargetConnector import createDatasetFromCSVFile, writeIntoServingLayer, writeIntoHiveServingLayer\nfrom pyspark.sql import DataFrame\n\n\ndef getAdditionalInfo(config, ds: DataFrame):\n    bookPath = config[\"Master\"][\"bookPath\"]\n\n    billPath = config[\"Master\"][\"billPath\"]\n\n    # get book info\n    dataSet = bookInfo(ds, bookPath)\n\n    # get bill type\n    dataSet = billTypeInfo(dataSet, billPath)\n\n    # get record status\n    dataSet = statusOfRec(dataSet)\n\n    # selected columns from a DataSet\n    selected_Columns = config[\"Columns\"][\"selectedColumns\"]\n\n    # choosing required columns\n    dataSet = dataSet.select(*selected_Columns)\n\n    # columns to rename\n    summary_Columns = config[\"Columns\"][\"summaryColumns\"]\n\n    # renaming the columns\n    dataSet = dataSet.toDF(*summary_Columns)\n\n    # required file paths\n    servingTempPath = config['InvoicePath']['servingTempPath']\n\n    deltaTable = config['ServingTable']['deltaTable']\n\n    # dataSet.printSchema() #.filter(col(\"AmountDue._nil\") == True)\n    # dataSet.show(500, truncate=False)\n    # import sys\n    # sys.exit(5)\n\n    # writing into servingLayer\n    writeIntoServingLayer(dataSet, servingTempPath)\n    # writeIntoHiveServingLayer(dataSet, deltaTable, \"OverWrite\")\n    return None\n\n\ndef bookInfo(ds, bookPath):\n    # read the master CSV, collect the books for each trip as a set, and join them into a comma-separated string so they can be stored in CSV\n    bookDS = createDatasetFromCSVFile(\"\", path=bookPath) \\\n        .withColumn(\"BOOKS\", concat_ws(\",\", collect_set(col(\"BOOK\")).over(Window.partitionBy(col(\"TRIP\"))))) \\\n        .select(\"TRIP\", \"BOOKS\").distinct()\n\n    # joining invoice DS with masterBook DS to get the Book column\n    DS = ds.join(bookDS, col(\"_ShellTripID\") == col(\"TRIP\"), \"left_outer\").drop(\"TRIP\", \"BOOK\")\n\n    return DS\n\n\ndef billTypeInfo(ds, billPath):\n    DS = ds\n\n    # reading billType CSV\n    billDS = createDatasetFromCSVFile(\"\", billPath)\n\n    # joining invoice DS with billing DS to get the BillType column; renaming the columns before the join would avoid ambiguity warnings\n    DS = DS.join(billDS,\n                 (lower(DS._Name) == lower(billDS.VENDOR_NAME)) &\n                 (lower(DS.TypeOfService) == lower(billDS.COST_LINE_ITEM_TYPE)),\n                 \"left_outer\") \\\n        .drop(\"VENDOR_NAME\", \"COST_LINE_ITEM_TYPE\")\n\n    return DS\n\n\ndef statusOfRec(ds):\n    DS = ds.withColumn(\"row_status\", when(col(\"BOOKS\").contains(\",\")\n                                          | col(\"Origin\").rlike(\"~|/\")\n                                          | col(\"Destination\").rlike(\"~|/\")\n                                          | col(\"DestinationCity\").contains(\".\")\n                                          | col(\"OriginCity\").contains(\".\")\n                                          | col(\"COST_TYPE\").isNull()\n                                          | (col(\"DestinationState\") == \"\")\n                                          | (col(\"OriginState\") == \"\")\n                                          , \"Bad_Rec\").otherwise(\"Good_Rec\")) \\\n        .withColumn(\"ProcessingTime\", current_timestamp())\n\n    DS = DS.withColumn(\"status_reason\", when(col(\"BOOKS\").contains(\",\"), \"Multiple Books\")\n                       .when(col(\"Origin\").rlike(\"~|/\") | col(\"Destination\").rlike(\"~|/\")\n                             | col(\"DestinationCity\").contains(\".\") | col(\"OriginCity\").contains(\".\")\n                             | (col(\"DestinationState\") == \"\") | (col(\"OriginState\") == \"\"),\n                             \"Unexpected special characters or empty state/city in Origins/Destinations\")\n                       .when(col(\"COST_TYPE\").isNull(), \"No Bill Type\").otherwise(\"-\"))\n\n    return DS\n","sub_path":"transform/AdditionalDetails.py","file_name":"AdditionalDetails.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"415671483","text":"import cv2\nimport numpy as np\nimport ransac\n\n# STEP 4 - Stitch the images together using the computed homography and inverse homography\n# stitches two images together by projecting all points from the new image\n# and checking whether they are contained in image2; then they are added/blended\ndef stitch(image1, image2, hom, homInv, stitchedImage):\n    print(\"Stitching image\")\n    height1, width1, c = image1.shape\n    height2, width2, c = image2.shape\n\n    # calculates the dimensions of combining the two images\n    stitchedImage, minheight, minwidth = resizeimage(image1, image2, homInv)\n    stitchheight, stitchwidth, c = stitchedImage.shape\n\n    # Copy image1 into new image at proper position\n    stitchedImage[minheight:height1+minheight, minwidth:width1+minwidth] = image1\n    #cv2.imwrite('results_images/4.png', stitchedImage)\n\n    # project each point in the new image; if the point lies within image2 boundaries, add or blend the pixel value\n    for x in range(stitchwidth):\n        for y in range(stitchheight):\n            newx, newy = ransac.project(x-minwidth, y-minheight, hom)\n            if newx<=width2 and newy<=height2 and newx>=0 and newy>=0:\n                val = cv2.getRectSubPix(image2, (1, 1), (newx, newy))\n                stitchedImage[y, x] = val\n\n    #cv2.imwrite('results_images/4.png', stitchedImage)\n    return stitchedImage\n\n# finds the needed size for stitching the two images together\ndef resizeimage(image1, image2, homInv):\n    height1, width1, c = image1.shape\n    height2, width2, c = image2.shape\n\n    stitchheight = height1\n    stitchwidth = width1\n\n    # x is height, y is width\n    # Find the projected 4 corners of image 2 onto image1\n    x1, y1 = ransac.project(0, 0, homInv) # top left corner\n    x2, y2 = ransac.project(width2, height2, homInv) # upper right corner\n    x3, y3 = ransac.project(width2, 0, homInv)\n    x4, y4 = ransac.project(0, height2, homInv)\n\n    minheight = min(y1, y3) # find the minheight (if a corner goes negative)\n    maxheight = max(y2, y4)\n    maxwidth = max(x2, x3)\n    minwidth = min(x1, x4)\n    print(\"minheight: \", minheight)\n    print(\"minwidth: \", minwidth)\n    print(\"maxheight: \", maxheight)\n    print(\"maxwidth: \", maxwidth)\n\n    if maxheight > height1:\n        stitchheight = maxheight\n\n    if minheight < 0:\n        stitchheight -= minheight # add height to the new image\n        minheight = -minheight\n    elif minheight > 0:\n        minheight = 0\n\n    if maxwidth > width1:\n        stitchwidth = maxwidth\n\n    if minwidth < 0:\n        stitchwidth -= minwidth # add width to the new image\n        minwidth = -minwidth\n    elif minwidth > 0:\n        minwidth = 0\n\n    stitchedImage = np.zeros(shape=[stitchheight, stitchwidth, 3], dtype=np.uint8)\n    print(stitchwidth, stitchheight)\n    return stitchedImage, minheight, minwidth\n\n","sub_path":"stitched.py","file_name":"stitched.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"532221337","text":"import tensorflow as tf\nimport numpy as np\n\n#file = open('/Users/Hamid/Documents/MachineLearning/NewCIFABX.csv', mode = 'r')\nfile = '/Users/Hamid/Documents/MachineLearning/NewCIFABX.csv'\nData_ABX3 = np.loadtxt(file, delimiter = ',', skiprows = 1, usecols = (1, 2, 3, 4, 5, 6, 10, 11, 12, 14, 15, 16, 18, 19, 20, 22, 23, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34))\n\n\nx_data = Data_ABX3[:, 0:-1]\ny_data = Data_ABX3[:, [-1]]\n\n# placeholders for tensors that will always be fed\nX = tf.placeholder(tf.float32, shape=[None, 26])\nY = tf.placeholder(tf.float32, shape=[None, 1])\n\nW = tf.Variable(tf.random_normal([26, 1]), 
name='weight')\nb = tf.Variable(tf.random_normal([1]), name='bias')\n\n# Hypothesis\nhypothesis = tf.matmul(X, W) + b\n\n# Simplified cost/loss function\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n\n# Minimize\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)\ntrain = optimizer.minimize(cost)\n\n# Launch the graph in a session.\nsess = tf.Session()\n# Initializes global variables in the graph.\nsess.run(tf.global_variables_initializer())\n\nfor step in range(20):\n    cost_val, hy_val, _ = sess.run(\n        [cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})\n    print(step, \"Cost: \", cost_val, \"\\nPrediction:\\n\", hy_val)","sub_path":"hahaha.py","file_name":"hahaha.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"297192301","text":"import codecs\nimport datetime\nimport glob\nimport os\nimport re\n\nimport bottle\nimport bottle as app\nfrom bottle import static_file\nimport misaka as m\nimport pystache\n\nSTATIC_PATH = os.path.abspath(\n    os.path.join(os.path.abspath(__file__), '../../assets')\n)\nBUILD_STATIC_PATH = os.path.abspath(\n    os.path.join(os.path.abspath(__file__), '../../build')\n)\nNOTES_PATH = os.path.abspath(\n    os.path.join(os.path.abspath(__file__), '../../notes')\n)\nTEMPLATE_PATH = os.path.abspath(\n    os.path.join(os.path.abspath(__file__),'../../html')\n)\n\nloader = pystache.loader.Loader(search_dirs=[TEMPLATE_PATH], extension='html')\nrenderer = pystache.renderer.Renderer(\n    search_dirs=[TEMPLATE_PATH],\n    file_extension='html',\n    file_encoding='utf8'\n)\n\n\ndef yield_notes_by_mtime():\n    notes_match = os.path.join(NOTES_PATH, '*.md')\n    notes = glob.glob(notes_match)\n\n    cwd = os.getcwd()\n    for x in notes:\n        if 'conflicted copy' not in x:\n            yield os.path.getmtime(os.path.abspath(os.path.join(cwd, x))), x\n\n\ndef get_notes_by_mtime():\n    items = reversed(sorted(yield_notes_by_mtime(), key= lambda x: x[0]))\n\n    items = [\n        (datetime.datetime.fromtimestamp(int(x)).strftime('%Y-%m-%d %H:%M:%S'), y)\n        for x, y in items\n    ]\n    for x, y in items:\n        y = y.replace(NOTES_PATH, '')\n        y = y.lower()\n        y = y.replace('.md', '')\n        y = 'http://localhost:9999/notes' + y\n        yield x, y\n\ndef render(filename, template, edit=False):\n    filename = filename.replace('.', '')  # remove periods\n    filename = filename.replace(' ', '-')\n    filename = filename.replace('/', '-')\n    filepath = os.path.join(NOTES_PATH, '%s.md' % (filename, ))\n\n    if not os.path.exists(filepath):\n        if edit:\n            note = ''\n        else:\n            bottle.redirect('/edit/%s' % (filename, ))\n    else:\n        with codecs.open(filepath, 'r', 'utf8') as file_obj:\n            note = file_obj.read()\n\n    def replacement(match):\n        match_str = match.groups()[0]\n        replace_match = match_str.replace(' ', '-').replace('/', '-')\n        replace_match = replace_match.replace('.', '')\n        return \"<a href='/notes/%s'>%s</a>\" % (replace_match, match_str)\n\n    if not edit:\n        note = re.sub(\n            r'\\[\\[([A-Z \\/\\.a-z0-9@]+)\\]\\]',\n            replacement,\n            m.html(note)\n        )\n\n    return pystache.render(loader.load_name(template), {\n        'title': ' '.join(x.capitalize() for x in filename.split('-')),\n        'filename': filename,\n        'content': note\n    })\n\n@app.route('/')\ndef route_home():\n    bottle.redirect('/notes/index')\n\n@app.route('/assets/<path:path>')\ndef serve_static(path):\n    return static_file(path, root=STATIC_PATH)\n\n@app.route('/build/<path:path>')\ndef serve_build_static(path):\n    return static_file(path, root=BUILD_STATIC_PATH)\n\n\n@app.route('/api/dates')\ndef render_api_dates():\n    
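# Hand back the newest-first (formatted mtime, note URL) pairs produced by\n    # get_notes_by_mtime() above for the client to render.\n    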
return {\n        'items': list(get_notes_by_mtime())\n    }\n\n\n@app.route('/notes/<filename>')\ndef render_note(filename):\n    return render(filename, 'entry')\n\n\n@app.route('/edit/<filename>')\ndef render_note_edit(filename):\n    return render(filename, 'entry-edit', edit=True)\n\n\n@app.route('/edit/<filename>', method='POST')\ndef handle_note_post(filename):\n    data = bottle.request.forms.get('string')\n    #import pdb; pdb.set_trace()\n\n    with codecs.open(os.path.join(NOTES_PATH, '%s.md' % (filename, )), 'w', 'utf8') as file_obj:\n        file_obj.write(data.decode('utf8'))\n    return {'success': True}\n\n\nwebapp = bottle.default_app()\nwebapp.run(host='0.0.0.0', port=8080)\n","sub_path":"app/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"101855924","text":"import unittest\nfrom scikits.lazy import function, Symbol, Impl\n\n@Impl.allow_lazy()\ndef dummy_fn(a):\n    return a + 5\n\ndef func(*args, **kwargs):\n    return function(\n        transform_policy_ctor=lambda :(lambda x:x),\n        *args,**kwargs) \n\nclass Test_FunctionClosure_new_givens(unittest.TestCase):\n    \"\"\"Test the givens argument to FunctionClosure.new \"\"\"\n\n    # Behavioural\n    # ===========\n    # test that you can create an unattached symbol as a value, give it attributes\n    # like a shape, and an allocation_target, and a name, and a 'strict=True' and those values\n    # will be respected by the FunctionClosure\n    def test_behaviour_0(self):\n        s = Symbol.new(name='s')\n\n        f = func([s], dummy_fn(s))\n        assert f[s].name=='s'\n\n        f = func([s], dummy_fn(s), givens={s:Symbol.new_kwargs(\n            name='u', strict=True, shape=(4,5,6), opencl_ctx='blah')})\n        assert f[s].name == 'u'\n        assert f[s].strict==True\n        assert f[s].shape == (4,5,6)\n        assert f[s].opencl_ctx=='blah'\n\n\n    # Unit tests\n    # ==========\n\n    # test that givens arg can be list, tuple or dict\n\n    # test that it works recursively\n    # .. test that you can replace a leaf or internal node\n    # .. 
with a leaf or internal node\n\n # test that attributes are transferred properly\n # test that mutable and strict are transferred correctly\n\n # test that givens can replace inputs and outputs\n\n # test that it fails if you try to replace something twice\n\n # test that using a non-symbol key fails\n\n # test that using a non-symbol value fails w NotImplementedError\n\n","sub_path":"scikits/lazy/tests/test_lazy.py","file_name":"test_lazy.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"184877407","text":"from .models import Point, Line, Polygon\nfrom django.core.cache import cache, caches\nimport networkx as nx\n\n\ndef getAllDateDistances(minDist=14):\n\n collisionGraph = nx.Graph()\n points = Point.objects.prefetch_related('sourceRef')\n lines = Line.objects.prefetch_related('sourceRef')\n print('pointslen', len(points))\n for idx, p1 in enumerate(points):\n print('idx pvp', idx)\n for p2 in points[idx+1:]:\n if not collisionGraph.has_node(p1) or p2 not in collisionGraph.neighbors(p1):\n addNodes(collisionGraph, p1, p2)\n\n for idx, point in enumerate(points):\n print('idx pvl', idx)\n for line in lines:\n addNodes(collisionGraph, point, line)\n\n for idx, l1 in enumerate(lines):\n print('idx lvl', idx)\n for l2 in lines[idx+1:]:\n if not collisionGraph.has_node(l1) or l2 not in collisionGraph.neighbors(l1):\n addNodes(collisionGraph, l1, l2)\n\n print('nodecount', nx.number_of_nodes(collisionGraph))\n print('edgecount', nx.number_of_edges(collisionGraph))\n cache.set('dateGraph', collisionGraph, None)\n\n\ndef addNodes(graph, obj1, obj2):\n obj1s = obj1.dateRange.lower\n obj1e = obj1.dateRange.upper\n obj2s = obj2.dateRange.lower\n obj2e = obj2.dateRange.upper\n if all([obj1s, obj1e, obj2s, obj2e]):\n\n distance = (max(obj1s, obj2s) - min(obj1e, obj2e)).days\n if distance < 0:\n distance = 0\n # print('dist=', distance)\n graph.add_nodes_from([obj1, obj2])\n graph.add_edge(obj1, obj2, weight=distance)\n","sub_path":"transDjango/APIimports/buildNetwork.py","file_name":"buildNetwork.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"527559702","text":"#-*- coding:utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template.context import RequestContext\nfrom UserClass import UserLogin\nfrom django.shortcuts import render_to_response\n\nfrom com.CommonFuncs import loadClass\nfrom com.user.userloginController import userlogin\nimport sys\n\ndef route(request,arg1,arg2):\n\n getClass=loadClass(\"hcops.com.user.userloginController\",\"userlogin\")\n return getClass.run()\n\n if arg1 == \"index\":\n #print arg1\n return index(request)\n if arg1 == \"login\":\n #print arg1\n return Login(request)\n\n if arg1 == \"unlogin\":\n return unlogin(request)\n\n return HttpResponse(\"404 Not Found!\")\n\ndef index(request):\n data={\"result\":\"\",\"currentuser\":\"guest\"}\n if request.COOKIES.get(\"UserLogin_username\") != None:\n data[\"currentuser\"]=request.COOKIES.get(\"UserLogin_username\")\n\n myResponse = render_to_response(\"hcops/index.html\",data)\n return myResponse\n\ndef Login(request):\n\n beijing={\"id\":\"1\",\"name\":\"北京\"}\n shanghai = {\"id\":\"2\", \"name\": \"上海\"}\n guangzhou = {\"id\":\"3\", \"name\": \"广州\"}\n userAreas=[beijing,shanghai,guangzhou]\n data = {\"result\":\"sorry,please login ^_^!\",\"areas\":userAreas}\n\n if 
request.method == \"POST\":\n        getUserName=request.POST.get(\"UserName\")\n        getPassword=request.POST.get(\"UserPass\")\n        uLogin = UserLogin(getUserName,getPassword)\n\n        if uLogin.login_models():\n            myResponse=HttpResponse(\"\")\n            myResponse.set_cookie(\"UserLogin_username\", getUserName, 3600)\n            return myResponse\n        else:\n            data[\"result\"]='Login failed: wrong username or password'\n\n    myResponse = render_to_response(\"hcops/login.html\", data)\n    return myResponse\n\ndef unlogin(request):\n    res = HttpResponse()\n    res.delete_cookie(\"UserLogin_username\")\n    res.write(\"\")\n    return res\n\n\ndef isMyNumber(num):\n    if num.isnumeric() and int(num) >1:\n        return True\n    else:\n        return False\n","sub_path":"DjangoProject/hcops/dropFile/views_ori_4.py","file_name":"views_ori_4.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"568395623","text":"import requests\nimport hashlib\nimport time\nimport random\n\n\ndef get_salt():\n    salt = (int(time.time()*1000)) + (random.randint(0, 10))\n    return salt\n\n\ndef get_md5(v):\n    md5 = hashlib.md5()\n    md5.update(v.encode('utf-8'))\n    sign = md5.hexdigest()\n    return sign\n\n\ndef get_sign(keyy, sal):\n    sign = 'fanyideskweb' + keyy + str(sal) + '6x(ZHw]mwzX#u0V7@yfwK'\n    sign = get_md5(sign)\n    return sign\n\n\ndef youdao(key):\n\n    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\n    # url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'\n    # with the _o suffix the request returns an error\n    # removing it makes the request work normally\n\n    sal = get_salt()\n\n    data = {\n        'action': 'FY_BY_REALTIME',\n        'client': 'fanyideskweb',\n        'doctype': 'json',\n        'from': 'AUTO',\n        'i': key,\n        'keyfrom': 'fanyi.web',\n        'salt': str(sal),\n        'sign': get_sign(key, sal),\n        'smartresult': 'dict',\n        'to': 'AUTO',\n        'typoResult': 'false',\n        'version': '2.1'\n    }\n\n    # data = urllib.parse.urlencode(data).encode()\n    head = {\n        'Accept': 'application / json, text / javascript, * / *; q = 0.01',\n        'Accept - Encoding': 'gzip, deflate',\n        'Accept - Language': 'zh - CN, zh;q = 0.8, zh - TW;q = 0.7, zh - HK;q = 0.5, en - US;q = 0.3, en;q = 0.2',\n        'Connection': 'keep - alive',\n        'Content - Length': str(len(data)),\n        'Content - Type': 'application/x-www-form-urlencoded;charset = UTF - 8',\n        'Cookie': 'YOUDAO_MOBILE_ACCESS_TYPE=1; OUTFOX_SEARCH_USER_ID=-1087170847@10.169.0.84; JSESSIONID=aaa6'\n                  '5Zh4PIUybhDi8iKBw; ___rl__test__cookies=1541425927360; OUTFOX_SEARCH_USER_ID_NCOO=18609637'\n                  '02.1775036; fanyi-ad-id=52077; fanyi-ad-closed=1',\n        'Host': 'fanyi.youdao.com',\n        'Referer': 'http: // fanyi.youdao.com /',\n        'User - Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',\n        'X - Requested - With': 'XMLHttpRequest'\n    }\n\n    resp = requests.post(url, headers=head, data=data)\n    resp.encoding = 'utf-8'\n    # res = resp.json()\n    print(resp.text)\n\n\nyoudao('crawl')\n","sub_path":"codes.py","file_name":"codes.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"240223745","text":"# -*- coding:utf-8 -*-\n# edit by fuzongfei\n\nimport json\nfrom ast import literal_eval\n\nfrom channels.layers import get_channel_layer\nfrom django.db import transaction\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom apps.project_manager.inception.inception_api import GetBackupApi, IncepSqlCheck\nfrom 
project_manager.models import IncepMakeExecTask\nfrom project_manager.tasks import incep_async_tasks, \\\n    stop_incep_osc, get_osc_percent\nfrom project_manager.utils import check_incep_alive\nfrom user_manager.permissions import perform_tasks_permission_required\nfrom utils.tools import format_request\n\nchannel_layer = get_channel_layer()\n\n\nclass PerformRecordsView(View):\n    \"\"\"Render the execution task list page\"\"\"\n\n    def get(self, request):\n        return render(request, 'perform_records.html')\n\n\nclass PerformRecordsListView(View):\n    \"\"\"Return the table data for the execution task list page\"\"\"\n\n    def get(self, request):\n        exec_tasks = []\n        user_in_group = '(' + str(request.session['groups'][0]) + ')' if len(request.session['groups']) == 1 else tuple(\n            request.session['groups'])\n        query = f\"select a.id,a.user,a.taskid,a.dst_host,a.dst_database,a.make_time, b.group_name,\" \\\n                f\"case a.category when '0' then 'Offline task' when '1' then 'Online task' end as category \" \\\n                f\"from auditsql_incep_tasks as a join auditsql_groups as b \" \\\n                f\"on a.group_id = b.group_id where b.group_id in {user_in_group} group by a.taskid \" \\\n                f\"order by a.make_time desc\"\n        for row in IncepMakeExecTask.objects.raw(query):\n            exec_tasks.append({'user': row.user,\n                               'taskid': row.taskid,\n                               'group_name': row.group_name,\n                               'category': row.category,\n                               'dst_host': row.dst_host,\n                               'dst_database': row.dst_database,\n                               'make_time': row.make_time})\n        return JsonResponse(list(exec_tasks), safe=False)\n\n\nclass PerformResultsView(View):\n    \"\"\"Return the execution result and backup info for a task\"\"\"\n\n    def get(self, request):\n        id = request.GET.get('id')\n        if IncepMakeExecTask.objects.get(id=id).exec_status in ('1', '4'):\n            sql_detail = IncepMakeExecTask.objects.get(id=id)\n            sequence_result = {'backupdbName': sql_detail.backup_dbname, 'sequence': sql_detail.sequence}\n            rollback_sql = GetBackupApi(sequence_result).get_rollback_statement()\n\n            exec_log = sql_detail.exec_log if sql_detail.exec_log else ''\n\n            # exec_log is stored as a string, so literal_eval it here; otherwise it cannot be converted to JSON\n            data = {'rollback_log': rollback_sql, 'exec_log': literal_eval(exec_log)}\n            context = {'status': 0, 'msg': '', 'data': data}\n        else:\n            context = {'status': 2, 'msg': 'This SQL has not been executed; no status info is available'}\n\n        return HttpResponse(json.dumps(context))\n\n\nclass PerformDetailsView(View):\n    \"\"\"Render the detail page for a given execution task\"\"\"\n\n    def get(self, request, taskid):\n        return render(request, 'perform_details.html', {'taskid': taskid})\n\n\nclass PerformDetailsListView(View):\n    \"\"\"Return the page data for a given execution task\"\"\"\n\n    def get(self, request):\n        taskid = request.GET.get('taskid')\n\n        query = f\"select id,user,sqlsha1,sql_content,taskid,case exec_status \" \\\n                f\"when '0' then 'Not executed' when '1' then 'Completed' when '2' then 'Processing' when '3' then 'Rolling back' \" \\\n                f\"when '4' then 'Rolled back' end as exec_status,\" \\\n                f\"case category when '0' then 'Offline task' when '1' then 'Online task' end as category\" \\\n                f\" from auditsql_incep_tasks where taskid={taskid}\".format(taskid=taskid)\n        i = 0\n        task_details = []\n        for row in IncepMakeExecTask.objects.raw(query):\n            task_details.append({\n                'sid': i,\n                'id': row.id,\n                'user': row.user,\n                'category': row.category,\n                'sqlsha1': row.sqlsha1,\n                'sql_content': row.sql_content,\n                'taskid': row.taskid,\n                'exec_status': row.exec_status\n            })\n            i += 1\n        del task_details[0]\n        return HttpResponse(json.dumps(task_details))\n\n\nclass PerformExecView(View):\n    \"\"\"Execution task - start execution\"\"\"\n\n    @method_decorator(check_incep_alive)\n    @perform_tasks_permission_required('can_execute')\n    @transaction.atomic\n    def post(self, request):\n        data = format_request(request)\n        id = data.get('id')\n        obj = IncepMakeExecTask.objects.get(id=id)\n        
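# The task row carries the execution target (host/database) and the audited SQL to run.\n        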
host = obj.dst_host\n        database = obj.dst_database\n        sql = obj.sql_content + ';'\n\n        status = ''\n        query = f\"select id,group_concat(exec_status) as exec_status from auditsql_incep_tasks \" \\\n                f\"where taskid={obj.taskid} group by taskid\"\n        for row in IncepMakeExecTask.objects.raw(query):\n            status = row.exec_status.split(',')\n\n        # Only one task may run at a time, to avoid putting pressure on the database\n        if '2' in status or '3' in status:\n            context = {'status': 2, 'msg': 'Please wait for the current task to finish'}\n        else:\n            # Prevent a task from being executed twice\n            if obj.exec_status != '0':\n                context = {'status': 2, 'msg': 'Please do not repeat this operation'}\n            else:\n                # Set the task progress to: processing\n                obj.exec_status = 2\n                obj.save()\n\n                # If sqlsha1 exists, execute with pt-online-schema-change\n                if obj.sqlsha1:\n                    # Execute the SQL task asynchronously\n                    r = incep_async_tasks.delay(user=request.user.username,\n                                                id=id,\n                                                sql=sql,\n                                                host=host,\n                                                database=database,\n                                                sqlsha1=obj.sqlsha1,\n                                                backup='yes',\n                                                exec_status=1)\n                    task_id = r.task_id\n                    # Write the celery task_id to the table\n                    obj.celery_task_id = task_id\n                    obj.save()\n                    # Fetch the OSC execution progress\n                    get_osc_percent.delay(task_id=task_id)\n\n                    context = {'status': 0, 'msg': 'Submitted for processing, please check the output'}\n\n                else:\n                    # When affected_row > 2000, execute without backing up\n                    if obj.affected_row > 2000:\n                        incep_async_tasks.delay(user=request.user.username,\n                                                id=id,\n                                                sql=sql,\n                                                host=host,\n                                                database=database,\n                                                exec_status=1)\n                    else:\n                        # When affected_row <= 2000, execute and back up\n                        incep_async_tasks.delay(user=request.user.username,\n                                                id=id,\n                                                backup='yes',\n                                                sql=sql,\n                                                host=host,\n                                                database=database,\n                                                exec_status=1)\n\n                    context = {'status': 0, 'msg': 'Submitted for processing, please check the output'}\n        return HttpResponse(json.dumps(context))\n\n\nclass PerformStopView(View):\n    \"\"\"\n    Execution task - stop OSC execution\n    Only operations that alter table structure can be stopped\n    \"\"\"\n\n    @method_decorator(check_incep_alive)\n    @perform_tasks_permission_required('can_execute')\n    @transaction.atomic\n    def post(self, request):\n        id = request.POST.get('id')\n        obj = IncepMakeExecTask.objects.get(id=id)\n        celery_task_id = obj.celery_task_id\n\n        if obj.exec_status in ('0', '1', '4'):\n            context = {'status': 2, 'msg': 'Please do not repeat this operation'}\n        else:\n            # Stop the task that is currently executing\n            stop_incep_osc.delay(user=request.user.username,\n                                 id=id,\n                                 celery_task_id=celery_task_id)\n            context = {'status': 0, 'msg': 'Submitted for processing, please check the output'}\n        return HttpResponse(json.dumps(context))\n\n\nclass PerformRollbackView(View):\n    \"\"\"\n    Execution task - rollback\n    A rollback does not create another backup\n    \"\"\"\n\n    @method_decorator(check_incep_alive)\n    @perform_tasks_permission_required('can_execute')\n    @transaction.atomic\n    def post(self, request):\n        data = format_request(request)\n        id = data.get('id')\n        obj = IncepMakeExecTask.objects.get(id=id)\n        host = obj.dst_host\n        database = obj.dst_database\n\n        if obj.exec_status in ('0', '3', '4'):\n            context = {'status': 2, 'msg': 'Please do not repeat this operation'}\n        else:\n            # Fetch the rollback statement\n            rollback_sql = GetBackupApi(\n                {'backupdbName': obj.backup_dbname, 'sequence': obj.sequence}).get_rollback_statement()\n            if rollback_sql is None:\n                context = {'status': 2, 'msg': 'No backup record found, rollback failed'}\n            else:\n                of_audit = IncepSqlCheck(rollback_sql, obj.dst_host, obj.dst_database, request.user.username)\n                result = of_audit.make_sqlsha1()[1]\n\n                rollback_sql = result['SQL'] + ';'\n                rollback_sqlsha1 = result['sqlsha1']\n\n                # Set the task progress to: rolling back\n                obj.exec_status = 3\n                obj.rollback_sqlsha1 = rollback_sqlsha1\n                obj.save()\n\n                if result['sqlsha1']:\n                    # Execute the SQL task asynchronously\n                    r = incep_async_tasks.delay(user=request.user.username,\n                                                id=id,\n                                                host=host,\n                                                database=database,\n                                                sql=rollback_sql,\n                                                sqlsha1=rollback_sqlsha1,\n                                                exec_status=4)\n                    task_id = r.task_id\n                    # Write the celery task_id to the table\n                    obj.celery_task_id = task_id\n                    obj.save()\n                    # Fetch the OSC execution progress\n                    get_osc_percent.delay(task_id=task_id)\n\n                    context = {'status': 0, 'msg': 'Submitted for processing, please check the output'}\n                else:\n                    
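# No sqlsha1 means there is no OSC process to track, so the rollback\n                    # statement is executed directly and only its final status is recorded.\n                    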
incep_async_tasks.delay(user=request.user.username,\n                                            id=id,\n                                            sql=rollback_sql,\n                                            host=host,\n                                            database=database,\n                                            exec_status=4)\n\n                    context = {'status': 0, 'msg': 'Submitted for processing, please check the output'}\n        return HttpResponse(json.dumps(context))\n","sub_path":"apps/project_manager/pt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"228838206","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        ('cs2450', '0002_auto_20150127_1658'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='UserShift',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('start_time', models.DateTimeField()),\n                ('end_time', models.DateTimeField()),\n                ('roles', models.ManyToManyField(to='cs2450.Role', null=True, blank=True)),\n                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.RemoveField(\n            model_name='userscheduleevent',\n            name='roles',\n        ),\n        migrations.RemoveField(\n            model_name='userscheduleevent',\n            name='user',\n        ),\n        migrations.DeleteModel(\n            name='UserScheduleEvent',\n        ),\n    ]\n","sub_path":"project/cs2450/migrations/0003_auto_20150127_1706.py","file_name":"0003_auto_20150127_1706.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"150440399","text":"\"\"\"\nMetview Python use case\n\nUC-04. The Analyst retrieves, for a given time interval, the values of\ntwo parameters and combines their values on the same map\n\n--------------------------------------------------------------------------------\n1. Analyst retrieves, for a given time interval, the values of two chosen\n   parameters (e.g. temperature, and geopotential) from a given source (i.e. MARS,\n   files, observation databases)\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n2. Analyst customises many features of his map for each field he wants to plot\n   (e.g. temperature field as shaded areas and geopotential field as isolines)\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n3. 
Analyst plots the data\n--------------------------------------------------------------------------------\nAnalyst plots data variable t2 with contouring definition t_shade_c, and data\nvariable z with contouring definition z_isolines.\nThe fields will be plotted in the order they appear in the mv.plot() command,\nwith the shaded temperature at the bottom, and the geopotential on top.\n\"\"\"\n\nimport metview as mv\n\n\n# read 2m temperature\nt2 = mv.read('./t2_for_UC-04.grib')\n\n# read geopotential\nz = mv.read('./z_for_UC-04.grib')\n\nt_shade_c = mv.mcont(\n    legend = True,\n    contour_highlight = False,\n    contour_level_selection_type = \"interval\",\n    contour_interval = 10,\n    contour_shade = True,\n    contour_shade_max_level = 60,\n    contour_shade_min_level = -60,\n    contour_shade_method = \"area_fill\",\n    contour_shade_max_level_colour = \"red\",\n    contour_shade_min_level_colour = \"blue\",\n    contour_shade_colour_direction = \"clockwise\"\n    )\n\nz_isolines = mv.mcont(\n    legend = True,\n    contour_line_thickness = 2,\n    contour_line_colour = 'black',\n    contour_highlight_colour = 'black',\n    contour_highlight_thickness = 4,\n    contour_level_selection_type = 'interval',\n    contour_interval = 5,\n    contour_legend_text = 'Geopotential',\n)\n\nmv.setoutput(mv.png_output(output_width = 1000, output_name = './gribplot'))\nmv.plot(t2, t_shade_c, z, z_isolines)\n","sub_path":"examples/UC-04-grib.py","file_name":"UC-04-grib.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"390055759","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, find_packages\n\n\n_VERSION = '0.0.1'\n\nREQUIRED_PACKAGES = [\n    'numpy >= 1.18.2',\n    'matplotlib >= 3.2.1',\n    'psutil >= 5.4.5',\n    'opencv-python >= 4.2.0',\n    'scipy >= 1.4.1',\n    'scikit-image >= 0.16.2',\n    'tqdm >= 4.43.0',\n]\n\nsetup(\n    name='pytorch_openpose',\n    version=_VERSION,\n    description=\n    'Deep Pose Estimation implemented using pytorch',\n    install_requires=REQUIRED_PACKAGES,\n    url='https://github.com/alexus37/pytorch-openpose',\n    author='Alexander Lelidis',\n    author_email='alelidis@student.ethz.ch',\n    license='Apache License 2.0',\n    # check this\n    package_dir={\n        'torch_pose_data': 'model'\n    },\n    packages=['torch_pose_data'] + [pkg_name for pkg_name in find_packages()  # main package\n                                    if 'torch_openpose' in pkg_name],\n    package_data={\n        'torch_pose_data': ['model/body_pose_model.pth', 'model/hand_pose_model.pth']\n    },\n    zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"577233159","text":"birthdays = {'Rebecca': 'July 1', 'Michael': 'July 22', 'Mom': 'August 22'}\n\nwhile True:\n    print('Enter a name: (blank to quit)')\n    name = input()\n    if name == '':\n        break\n\n    if name in birthdays:\n        print(birthdays[name] + ' is ' + name + '\\'s birthday.')\n    else:\n        print('I do not have birthday information for ' + name)\n        print('What is their birthday?')\n        bday = input()\n        birthdays[name] = bday\n        print('Birthday database updated.')\n","sub_path":"Birthdays.py","file_name":"Birthdays.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"70343843","text":"#\n# Thierry Parmentelat - INRIA\n#\nfrom PLC.Faults 
import *\nfrom PLC.Method import Method\nfrom PLC.Parameter import Parameter, Mixed\nfrom PLC.Filter import Filter\nfrom PLC.Auth import Auth\n\nfrom PLC.Persons import Person, Persons\nfrom PLC.PersonTags import PersonTag, PersonTags\nfrom PLC.Sites import Sites, Site\n\nclass GetPersonTags(Method):\n \"\"\"\n Returns an array of structs containing details about\n persons and related settings.\n\n If person_tag_filter is specified and is an array of\n person setting identifiers, only person settings matching\n the filter will be returned. If return_fields is specified, only\n the specified details will be returned.\n \"\"\"\n\n roles = ['admin', 'pi', 'user', 'tech']\n\n accepts = [\n Auth(),\n Mixed([PersonTag.fields['person_tag_id']],\n Parameter(int,\"Person setting id\"),\n Filter(PersonTag.fields)),\n Parameter([str], \"List of fields to return\", nullok = True)\n ]\n\n returns = [PersonTag.fields]\n\n\n def call(self, auth, person_tag_filter = None, return_fields = None):\n\n # only persons can call this (as per roles, but..)\n if not isinstance(self.caller,Person):\n return []\n\n # If we are not admin, make sure to only return viewable accounts\n valid_person_ids=None\n added_fields=[]\n if 'admin' not in self.caller['roles']:\n # Get accounts that we are able to view\n valid_person_ids = [self.caller['person_id']]\n if 'pi' in self.caller['roles'] and self.caller['site_ids']:\n sites = Sites(self.api, self.caller['site_ids'])\n for site in sites:\n valid_person_ids += site['person_ids']\n\n if not valid_person_ids:\n return []\n \n # if we have to filter out on person_id, make sure this is returned from db\n if return_fields:\n added_fields = set(['person_id']).difference(return_fields)\n return_fields += added_fields\n\n person_tags = PersonTags(self.api, person_tag_filter, return_fields)\n \n if valid_person_ids is not None:\n person_tags = [ person_tag for person_tag in person_tags \n if person_tag['person_id'] in valid_person_ids]\n\n # Remove added fields if not initially specified\n if added_fields:\n for person_tag in person_tags:\n for field in added_fields:\n if field in person_tag:\n del person_tag[field]\n return person_tags\n","sub_path":"PLC/Methods/GetPersonTags.py","file_name":"GetPersonTags.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"614782018","text":"import tensorflow as tf\nimport numpy as np\nimport gym\nimport time\n\nenv = gym.make(\"CartPole-v1\")\ninitializer = tf.variance_scaling_initializer()\n\nx = tf.placeholder(tf.float32, shape=[None, 4])\nhidden1 = tf.layers.dense(x, 4, activation=tf.nn.elu, kernel_initializer=initializer)\nhidden2 = tf.layers.dense(hidden1, 4, activation=tf.nn.elu, kernel_initializer=initializer)\nlogits = tf.layers.dense(hidden2, 1, kernel_initializer=initializer)\noutputs = tf.nn.sigmoid(logits)\nprob_left_right = tf.concat(axis=1, values=[outputs, 1 - outputs])\naction = tf.multinomial(tf.log(prob_left_right), num_samples=1)\ny_pred = 1 - tf.to_float(action)\n\ncross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_pred, logits=logits)\noptimizer = tf.train.AdamOptimizer(0.05)\ngrads_and_vars = optimizer.compute_gradients(cross_entropy)\n\ngradients = [grad for grad, _ in grads_and_vars]\n\ngradients_placeholders = []\ngrads_and_vars_feed = []\nfor grad, variable in grads_and_vars:\n gradients_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())\n gradients_placeholders.append(gradients_placeholder)\n 
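# Pairing each placeholder with its variable lets apply_gradients consume\n    # gradients computed outside the graph: in training() below, every step's\n    # gradients are scaled by its discounted reward, averaged over the batch,\n    # and fed back through these placeholders via\n    # sess.run(training_op, feed_dict=feed_dict).\n    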
grads_and_vars_feed.append((gradients_placeholder, variable))\n\ntraining_op = optimizer.apply_gradients(grads_and_vars_feed)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\n\ndef discount_rewards(rewards, df=0.95):\n discounted_rewards = np.empty(len(rewards))\n cumulative_rewards = 0\n for step in reversed(range(len(rewards))):\n cumulative_rewards = rewards[step] + cumulative_rewards * df\n discounted_rewards[step] = cumulative_rewards\n\n return discounted_rewards\n\n\ndef discount_and_normalize_rewards(all_rewards, discount_factor=0.95):\n all_discounted_rewards = [discount_rewards(rewards, discount_factor) for rewards in all_rewards]\n flat_rewards = np.concatenate(all_discounted_rewards)\n rewards_mean = flat_rewards.mean()\n rewards_std = flat_rewards.std()\n temp = []\n for discounted_rewards in all_discounted_rewards:\n temp.append((discounted_rewards - rewards_mean) / rewards_std)\n return temp\n\n\ndef training(iterations=30, load=True):\n with tf.Session() as sess:\n init.run()\n if load:\n saver.restore(sess, \"PG_w.ckpt\")\n\n for iteration in range(iterations):\n print(\"iterations\", iteration)\n all_rewards = []\n all_gradients = []\n\n sum_step = 0\n for game in range(10):\n current_rewards = []\n current_gradients = []\n\n obs = env.reset()\n for step in range(500):\n action_val, gradients_val = sess.run([action, gradients], feed_dict={x: obs.reshape(1, 4)})\n obs, reward, done, _ = env.step(action_val[0, 0])\n current_rewards.append(reward)\n current_gradients.append(gradients_val)\n\n if done:\n sum_step += step\n break\n\n all_rewards.append(current_rewards)\n all_gradients.append(current_gradients)\n\n print(\"step\", sum_step / 10)\n all_rewards = discount_and_normalize_rewards(all_rewards)\n feed_dict = {}\n for var_index, grad_placeholder in enumerate(gradients_placeholders):\n temp_list = []\n for game_index, rewards in enumerate(all_rewards):\n for step, reward in enumerate(rewards):\n temp_list.append(reward * all_gradients[game_index][step][var_index])\n mean_gradients = np.mean(temp_list, axis=0)\n\n feed_dict[grad_placeholder] = mean_gradients\n sess.run(training_op, feed_dict=feed_dict)\n\n saver.save(sess, \"PG_w.ckpt\")\n\n\ndef play():\n with tf.Session() as sess:\n init.run()\n saver.restore(sess, \"PG_w.ckpt\")\n\n sum_step = 0\n obs = env.reset()\n for step in range(500):\n action_val, gradients_val = sess.run([action, gradients], feed_dict={x: obs.reshape(1, 4)})\n obs, reward, done, _ = env.step(action_val[0, 0])\n # env.render()\n sum_step += 1\n time.sleep(0.05)\n if done:\n print(sum_step)\n env.close()\n break\n\n\nif __name__ == '__main__':\n # training(iterations=10, load=True)\n play()\n","sub_path":"RL/Policy_Gradient.py","file_name":"Policy_Gradient.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"235294552","text":"# -*- coding: utf-8 -*-\nimport os\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nversion = '0.1.dev0'\n\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\n\nsetup(\n name='pycone',\n version=version,\n description=\"pycone package\",\n long_description=read('README.rst'),\n classifiers=[\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='',\n author='Gael Pasgrimaud',\n author_email='gael@gawel.org',\n 
url='https://github.com/gawel/pycone/',\n license='MIT',\n packages=find_packages(exclude=['docs', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'pyramid',\n 'waitress',\n 'pyramid_jinja2',\n ],\n extras_require={\n 'test': [\n 'nose', 'webtest', 'coverage'\n ],\n },\n entry_points=\"\"\"\n [console_scripts]\n #pycone = pycone.scripts:main\n \"\"\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"549684722","text":"import socket \nimport time\nimport datetime\nimport pymongo\nfrom pymongo import MongoClient\nimport cloudinary\nimport cloudinary.uploader\nimport os\nimport webbrowser\nimport RPi.GPIO as GPIO\nfrom picamera import PiCamera\n\n\n# Globals:\nin_connection = False\n\n# DB Config:\nconnection_params = {\n 'user': 'cinv',\n 'password': \"cinv123\",\n 'host': 'ds037175.mlab.com',\n 'port': 37175,\n 'namespace': 'cinvestav',\n}\n\nconnection = MongoClient(\n 'mongodb://{user}:{password}@{host}:'\n '{port}/{namespace}?retryWrites=false'.format(**connection_params)\n)\n\ndb = connection.cinvestav\ndb = db[\"schedule\"]\n\ncloudinary.config(\n cloud_name = 'dv0bco9rw', \n api_key = '621813665726586', \n api_secret = '5zNWkLE3ii-a1MrmC7jziLv6WOE' \n)\n\ndef upload_img_to_cloudinary(img_name):\n\tcloudinary.uploader.upload(img_name, public_id = \"oscar\")\n\ndef get_img_from_cloudinary(img_name):\n\treturn cloudinary.utils.cloudinary_url(img_name)\n\n\ndef get_schedule():\n\tschedules = db.find()\n\treturn schedules\n\ndef insert_data_to_db():\n\tdb.insert_many([\n\t\t{\"start\": \"8.50\", \"finish\": \"11\", \"day\": \"Monday\", \"class\": \"Distributed Systems\", \"professor\": \"Dr. Felix Corchado\"},\n\t\t{\"start\": \"8.50\", \"finish\": \"11\", \"day\": \"Wednesday\", \"class\": \"Distributed Systems\", \"professor\": \"Dr. Felix Corchado\"},\n\t\t{\"start\": \"8.50\", \"finish\": \"11\", \"day\": \"Friday\", \"class\": \"Distributed Systems\", \"professor\": \"Dr. 
Felix Corchado\"},\n\t])\n\n# Function to create the client message\ndef create_client_msg(HOST, PORT, msg):\t\n\tprint(f\"Connecting to server: {HOST} on port: {PORT}\")\n\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sck:\n\t\ttry:\n\t\t\tsck.connect((HOST, PORT))\n\t\texcept Exception as e:\n\t\t\traise SystemExit(f\"We have failed to connect to host: {HOST} on port: {PORT}, because: {e}\")\n\n\t\tsck.sendall(msg.encode('utf-8'))\n\t\t\n\t\tdata = sck.recv(1024)\n\t\tif data:\n\t\t\tprint(f\"RES: {data.decode()}\")\n\n# Function send command of open PDF\ndef open_pdf(pdf_name):\n\treturn f\"open pdf {pdf_name}\"\n\ndef is_time_of_assignement():\n\tnow = datetime.datetime.now()\n\tassignatures_schedule = get_schedule()\n\tfor a in assignatures_schedule:\n\t\tif now.today().strftime(\"%A\") == a[\"day\"] and now.hour >= float(a[\"start\"]) and now.hour <= float(a[\"finish\"]):\n\t\t# if now.today().strftime(\"%A\") == a[\"day\"]:\n\t\t\tprint(f\"Assignature: {a['class']}\" )\n\t\t\treturn True\n\treturn False\n\nwhile False:\n\ttime.sleep(3)\n\n\t# If its time of a assignature, then show de PDF presentation\n\tif not in_connection and is_time_of_assignement() :\n\t\tin_connection = True\n\t\tcreate_client_msg(socket.gethostname(), 4000, open_pdf(\"present\"))\n\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(16, GPIO.IN)\ncamera = PiCamera()\ncamera.vflip = None\na=0\nwhile True:\n\ti = GPIO.input(16)\n\tif i == 0:\n\t\tprint(\"No intruders\", i)\n\t\t#camera.stop_preview()\n\t\ttime.sleep(.3)\n\t\ta=0\n\telif i == 1:\n\t\tprint(\"Intruders detected\", i)\n\t\t \n\t\ttime.sleep(.3)\n\t\ta=a+1\n\t\tif a==1:\n\t\t\t#camera.start_preview() \n\t\t\tcamera.capture('/home/pi/Desktop/image.jpg')\n\t\t\t#upload_img_to_cloudinary('/home/pi/Desktop/image.jpg')\n\n\t\t\n\n#img = get_img_from_cloudinary(\"anime.jpeg\")\n#img = f'{img[0]}.jpeg'\n#webbrowser.open(img) # Go to example.com\n\n\n\n\n\n","sub_path":"sockets2.0/client_rasp.py","file_name":"client_rasp.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"152441453","text":"import numpy as np\nfrom math import exp\n# from sklearn.linear_model import LogisticRegression\n\n\ndef sigmoid(x):\n return np.array([[1 / (1 + exp(-1 * k[0]))] for k in x])\n\n\ndef gradientDescent(X, y, theta, alpha=0.1, lmda=0, num_iters=1000):\n '''\n GRADIENTDESCENT Performs gradient descent to learn theta\n \n Updates theta by taking num_iters gradient steps with learning rate alpha and lmda as regularization parameter\n\n Lambda is by default 0, i.e. 
no regularization\n '''\n\n # Number of training examples\n m = len(y) \n\n # Add column of ones\n X = np.append(np.ones((m,1)), X, axis=1)\n\n for _ in range(num_iters):\n # Vectorized gradient descent update with regularization\n newtheta = theta * (1 - alpha * lmda / m) - X.transpose().dot(sigmoid(X.dot(theta)) - y) * alpha / m\n\n # Update theta0 without regularization\n newtheta[0] = np.array([theta[0,:]]) - np.array([X[:,0]]).dot(sigmoid(X.dot(theta)) - y) * alpha / m\n\n theta = newtheta\n\n return theta\n\n\nclass LogisticRegression:\n def __init__(self, alpha=0.01, iterations=1000):\n 'Logistic Regression using minimum squares and gradient descent'\n self.alpha = alpha\n self.iterations = iterations\n\n\n def fit(self, X, y):\n # Use Gradient descent to fit model\n self.theta = gradientDescent(X, y, theta=np.zeros((len(X[0])+1, 1)), alpha=self.alpha, num_iters=self.iterations)\n\n\n def predict(self, data):\n # sigmoid(data * theta) => vectorized predictions\n predictions = []\n for x in sigmoid(np.append(np.ones((len(data),1)), data, axis=1).dot(self.theta)):\n predictions += [[int(x>=0.5)]]\n return predictions\n\n\nif __name__ == \"__main__\":\n X = []\n y = []\n\n for i in [1,2,3,4]:\n # real fx --> y = 1 if x > 2 else y = 0 \n X += [[i]]\n y += [[i>2]]\n\n X = np.array(X)\n y = np.array(y)\n\n m = LogisticRegression()\n m.fit(X, y)\n print(m.predict([[2]]))","sub_path":"logistic_regression/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"493267866","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\carbonui\\ui3d.py\nimport trinity\nimport geo2\nfrom carbonui.primitives.desktop import UIRoot\n\nclass InSceneContainer(UIRoot):\n __guid__ = 'ui3d.InSceneContainer'\n TRACKTYPE_BALL = 1\n TRACKTYPE_TRANSFORM = 2\n default_trackType = TRACKTYPE_BALL\n default_faceCamera = False\n\n def __init__(self, *args, **kwargs):\n trinity.device.RegisterResource(self)\n self.renderScene = kwargs['scene']\n self.sceneParent = kwargs.get('sceneParent', self.renderScene.objects)\n self.trackType = kwargs.get('trackType', self.default_trackType)\n self.initialized = False\n self.sceneManager = sm.GetService('sceneManager')\n self.name = kwargs['name']\n self.faceCamera = kwargs.get('faceCamera', self.default_faceCamera)\n try:\n UIRoot.__init__(self, *args, **kwargs)\n self.Create3DRender()\n uicore.uilib.AddRootObject(self)\n self.initialized = True\n finally:\n if not self.initialized:\n self.Close()\n\n def Create3DRender(self):\n self.renderTexture = trinity.TriTextureParameter()\n self.renderTexture.name = 'DiffuseMap'\n self.renderColor = trinity.Tr2Vector4Parameter()\n self.renderColor.name = 'DiffuseColor'\n self.renderColor.value = (1, 1, 1, 1)\n self.renderEffect = trinity.Tr2Effect()\n self.renderEffect.effectFilePath = 'res:/Graphics/Effect/Managed/Space/SpecialFX/TextureColor.fx'\n self.renderEffect.resources.append(self.renderTexture)\n self.renderEffect.parameters.append(self.renderColor)\n self.renderArea = trinity.Tr2MeshArea()\n self.renderArea.effect = self.renderEffect\n self.renderMesh = trinity.Tr2Mesh()\n self.renderMesh.name = 'orbitalBombardmentTarget'\n self.renderMesh.geometryResPath = 'res:/Graphics/Generic/UnitPlane/UnitPlane.gr2'\n self.renderMesh.transparentAreas.append(self.renderArea)\n if 
self.trackType == self.TRACKTYPE_BALL:\n self.transform = trinity.EveRootTransform()\n else:\n self.transform = trinity.EveTransform()\n if self.faceCamera:\n self.transform.modifier = 1\n self.transform.mesh = self.renderMesh\n self.sceneParent.append(self.transform)\n self.renderJob = trinity.CreateRenderJob()\n self.renderJob.Update(self.renderScene)\n self.renderObject = self.GetRenderObject()\n self.renderObject.is2dPick = False\n self.renderTarget = trinity.Tr2RenderTarget(self.width, self.height, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)\n self.renderJob.PushRenderTarget(self.renderTarget)\n self.renderJob.RenderScene(self.renderObject)\n self.renderJob.PopRenderTarget()\n self.renderJob.ScheduleRecurring(insertFront=True)\n self.renderTexture.SetResource(trinity.TriTextureRes(self.renderTarget))\n self.renderSteps[-1].enabled = False\n return self.transform\n\n def Close(self):\n if getattr(self, 'renderJob', None) is not None:\n self.renderJob.UnscheduleRecurring()\n if getattr(self, 'transform', None) is not None and getattr(self, 'renderScene', None) is not None:\n if self.transform in self.sceneParent:\n self.sceneParent.remove(self.transform)\n UIRoot.Close(self)\n uicore.uilib.RemoveRootObject(self)\n return\n\n def OnInvalidate(self, device):\n pass\n\n def OnCreate(self, level):\n self.renderTexture.SetResource(trinity.TriTextureRes(self.renderTarget))\n\n def PickObject(self, x, y):\n if self.sceneManager.GetActiveScene() != self.renderScene:\n return\n else:\n rescale = 1.0 / 10000.0\n projection = trinity.TriProjection()\n projection.PerspectiveFov(trinity.GetFieldOfView(), trinity.GetAspectRatio(), trinity.GetFrontClip(), trinity.GetBackClip())\n view = trinity.TriView()\n view.transform = trinity.GetViewTransform()\n scaling, rotation, translation = geo2.MatrixDecompose(self.transform.worldTransform)\n pZ = geo2.Vec3Transform((0, 0, 1), self.transform.worldTransform)\n surfaceNormal = geo2.Subtract(pZ, translation)\n cameraZ = geo2.Vector(view.transform[0][2], view.transform[1][2], view.transform[2][2])\n if geo2.Vec3Dot(surfaceNormal, cameraZ) < 0:\n return\n self.renderObject.translation = geo2.Vec3Scale(translation, rescale)\n self.renderObject.rotation = rotation\n self.renderObject.scaling = geo2.Vec3Scale(scaling, rescale)\n scaling, rotation, translation = geo2.MatrixDecompose(view.transform)\n translation = geo2.Vec3Scale(translation, rescale)\n view.transform = geo2.MatrixTransformation(None, None, scaling, None, rotation, translation)\n return self.renderObject.PickObject(x, y, projection, view, trinity.device.viewport)\n\n def _GetColor(self):\n return self.renderColor.value\n\n def _SetColor(self, value):\n self.renderColor.value = value\n\n color = property(_GetColor, _SetColor)\n\n def _GetRed(self):\n return self.renderColor.value[0]\n\n def _SetRed(self, value):\n self.renderColor.value = (\n value,\n self.renderColor.value[1], self.renderColor.value[2], self.renderColor.value[3])\n\n red = property(_GetRed, _SetRed)\n\n def _GetBlue(self):\n return self.renderColor.value[1]\n\n def _SetBlue(self, value):\n self.renderColor.value = (\n self.renderColor.value[0], value, self.renderColor.value[2], self.renderColor.value[3])\n\n blue = property(_GetBlue, _SetBlue)\n\n def _GetGreen(self):\n return self.renderColor.value[2]\n\n def _SetGreen(self, value):\n self.renderColor.value = (\n self.renderColor.value[0], self.renderColor.value[1], value, self.renderColor.value[3])\n\n green = property(_GetGreen, _SetGreen)\n\n def _GetAlpha(self):\n return 
self.renderColor.value[3]\n\n    def _SetAlpha(self, value):\n        self.renderColor.value = (\n            self.renderColor.value[0], self.renderColor.value[1], self.renderColor.value[2], value)\n\n    alpha = property(_GetAlpha, _SetAlpha)","sub_path":"client/carbonui/ui3d.py","file_name":"ui3d.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"60678089","text":"# Conway's Game of Life\r\nimport random, time, copy\r\n\r\nWIDTH = 60\r\nHEIGHT = 20\r\n\r\n# Create a list of lists for the cells:\r\nnext_Cells = []\r\nfor x in range(WIDTH):\r\n    column = [] # Create a new column\r\n    for y in range(HEIGHT):\r\n        if random.randint(0,1) == 0:\r\n            column.append('#') # Add a living cell\r\n        else:\r\n            column.append(' ') # Add a dead cell\r\n    next_Cells.append(column) # next_Cells is a list of column lists\r\n\r\nwhile True: # main program loop\r\n    print('\\n\\n\\n\\n\\n') # separates each step with new lines\r\n    current_Cells = copy.deepcopy(next_Cells)\r\n    # Print current_Cells on screen\r\n    for y in range(HEIGHT):\r\n        for x in range(WIDTH):\r\n            print(current_Cells[x][y], end='') # Print the # or space.\r\n        print() # Print a newline at the end of the row\r\n    # Calculate the next step's cells based on the current step's cells:\r\n    for x in range(WIDTH):\r\n        for y in range(HEIGHT):\r\n            # Get neighboring coordinates\r\n            # '% WIDTH' ensures left_Coord is always between 0 and WIDTH - 1\r\n            left_Coord = (x - 1) % WIDTH\r\n            right_Coord = (x + 1) % WIDTH\r\n            above_Coord = (y - 1) % HEIGHT\r\n            below_Coord = (y + 1) % HEIGHT\r\n\r\n            # Count the number of living neighbors (there has to be a way for this to be more efficient):\r\n            num_Neighbors = 0\r\n            if current_Cells[left_Coord][above_Coord] == '#':\r\n                num_Neighbors += 1 # Top left neighbor is alive\r\n            if current_Cells[x][above_Coord] == '#':\r\n                num_Neighbors += 1 # Top neighbor is alive\r\n            if current_Cells[right_Coord][above_Coord] == '#':\r\n                num_Neighbors += 1 # Top right neighbor is alive\r\n            if current_Cells[left_Coord][y] == '#':\r\n                num_Neighbors += 1 # Left neighbor is alive\r\n            if current_Cells[right_Coord][y] == '#':\r\n                num_Neighbors += 1 # Right neighbor is alive\r\n            if current_Cells[left_Coord][below_Coord] == '#':\r\n                num_Neighbors += 1 # Bottom left neighbor is alive\r\n            if current_Cells[x][below_Coord] == '#':\r\n                num_Neighbors += 1 # Bottom neighbor is alive\r\n            if current_Cells[right_Coord][below_Coord] == '#':\r\n                num_Neighbors += 1 # Bottom right neighbor is alive\r\n\r\n            # Set cell based on game rules:\r\n            if current_Cells[x][y] == '#' and (num_Neighbors == 2 or num_Neighbors == 3):\r\n                # Living cells with 2 or 3 neighbors stay alive:\r\n                next_Cells[x][y] = '#'\r\n            elif current_Cells[x][y] == ' ' and num_Neighbors == 3:\r\n                # Dead cells with 3 neighbors become alive:\r\n                next_Cells[x][y] = '#'\r\n            else:\r\n                # Everything else dies or stays dead:\r\n                next_Cells[x][y] = ' '\r\n    time.sleep(1) # Adds a 1 second pause to reduce flickering\r\n\r\n\r\n\r\n","sub_path":"conway.py","file_name":"conway.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"171128668","text":"from flask import Flask, redirect, url_for, render_template, request\nimport webFunctions\napp = Flask(__name__)\n\n'''\nThis file contains the back-end functions for the website.\n'''\n\n@app.route(\"/\", methods=[\"POST\", \"GET\", \"SWAP\"])\ndef home():\n    if request.method == \"POST\":\n        inputSentence = 
request.form[\"inputText\"]\n method = request.form[\"algorithm\"]\n language = request.form[\"language\"]\n translation = webFunctions.translate(inputSentence, method, language)\n\n return render_template(\"homePage.html\", inputText = inputSentence, translatedText = translation )\n else:\n return render_template(\"homePage.html\", inputText = '', translatedText = '')\n\nif __name__ == \"__main__\":\n app.static_folder = 'static'\n app.run(debug=True)\n\n","sub_path":"src/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"609598855","text":"from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom asl_data import create_hmmlearn_data\nfrom my_training import *\n\n# Algorithm 1 The Segmentally-Boosted Hidden Markov Models (SBHMMs) Algorithm\n# 1: Train HMMs by Expectation Maximization (EM) using the time sequence training data.\n# 2: Find the optimal state transition path by the Viterbi decoding algorithm.\n# 3: Label every sample with its most likely hidden state.\n# 4: Train AdaBoost ensembles for this labeling.\n# 5: Project the data to a new feature space using the ensembles.\n# 6: Train HMMs by EM in the new feature space.\n# 7: In testing, project the test data to the same new feature space and predict their label\n# using the HMMs computed in Step 6.\n\n\n# Note, since the EM algorithm is a gradient-based optimization method,\n# it will generally get stuck in local optima.\n# TODO You should in general try to run fit with various initializations and select the highest scored model.\n\ndef train_hmms(words_data, model_selector):\n sequences = words_data.get_all_sequences()\n Xlengths = words_data.get_all_Xlengths()\n num_features = len(next(iter(sequences.values()))[0][0])\n mapping_observation_state = np.empty((0, num_features + 1))\n for word in words_data.words:\n model = model_selector(sequences, Xlengths, word,\n n_constant=3).select()\n X, lengths = Xlengths[word]\n word_index = words_data.words.index(word)\n try:\n logp, y = model.decode(X, lengths)\n # print(\"decoded {}\".format(np.array_str(y.reshape(-1, 1))))\n y += (1000 * word_index)\n except Exception as e:\n print(e)\n print(\"model is none or predict error for word {} with {} sequences\".format(word, X.shape[0]))\n y = np.zeros(X.shape[0])\n y += (1000 * word_index)\n\n a = np.concatenate((X, y.reshape(-1, 1)), axis=1)\n mapping_observation_state = np.concatenate((mapping_observation_state, a))\n return mapping_observation_state\n\nn_estimators = 600\n\ndef train_adaboost_ensembles(X,y):\n ensemble_list = []\n le = LabelEncoder()\n y_new = le.fit_transform(y)\n for class_label in le.classes_:\n y_class_label = y_new == le.transform([class_label])\n y_class_label = y_class_label.astype(int) # hack to convert boolean array into numeric array\n ensemble = AdaBoostClassifier(n_estimators=n_estimators, base_estimator=DecisionTreeClassifier(max_depth=2), learning_rate=1)\n ensemble.fit(X, y_class_label)\n ensemble_list.append(ensemble)\n return ensemble_list\n\n\ndef ensemble_scores(ensemble_list, X):\n scores = [] #np.zeros((X.shape[0], len(ensemble_list)))\n for i, ensemble in enumerate(ensemble_list):\n scores.append(ensemble.decision_function(X).tolist())\n return scores\n\n\ndef project_data_to_new_feature_space(ensembles, data):\n new_data = {}\n # data = {\"BOOK\":data[\"BOOK\"]}\n for key, sequences in 
data.items():\n # print(\"getting ensembles scores for key {} with {} sequences\".format(key,len(sequences)))\n new_data[key] = []\n for sequence in sequences:\n scores = ensemble_scores(ensembles, np.asarray(sequence))\n # print(\"adding ensemble_scores shape {} from sequence of {}\".format(scores.shape, len(sequence)))\n new_data[key].append(scores)\n return new_data\n\n\ndef train_sbhmms(new_sequences, model_selector):\n new_XLengths = create_hmmlearn_data(new_sequences)\n model_dict = {}\n for word in new_sequences.keys():\n model = model_selector(new_sequences, new_XLengths, word,\n n_constant=3).select()\n model_dict[word] = model\n return model_dict\n\n\ndef sbhmms():\n words_data = asl.build_training(features_polar)\n print(\"training HMMs and adding labeling sequences with predictions\")\n labeled_data = train_hmms(words_data, SelectorDIC)\n print(\"training adaboost ensembles on state transition labeled data\")\n ensembles = train_adaboost_ensembles(labeled_data[:,:-1], labeled_data[:,-1])\n pickle.dump(ensembles, open(\"data/sbhmm_ensembles.pkl\", \"wb\"))\n part2(words_data,ensembles)\n\ndef part2(words_data = None,ensembles = None):\n if not ensembles:\n ensembles = pickle.load(open(\"data/sbhmm_ensembles.pkl\", \"rb\"))\n print(\"ensembles loaded from pickle dump\")\n if not words_data:\n words_data = asl.build_training(features_polar)\n new_sequences = project_data_to_new_feature_space(ensembles, words_data._data)\n pickle.dump(new_sequences, open(\"data/projected_data.pkl\", \"wb\"))\n part3(new_sequences,ensembles)\n\n\ndef part3(new_sequences=None, ensembles=None):\n if not new_sequences:\n new_sequences = pickle.load(open(\"data/projected_data.pkl\", \"rb\"))\n print(\"ensembles projected data loaded from pickle dump\")\n if not ensembles:\n ensembles = pickle.load(open(\"data/sbhmm_ensembles.pkl\", \"rb\"))\n print(\"ensembles loaded from pickle dump\")\n\n print(\"training SBHMMs on data projected to ensembles feature space\")\n sbhmm_models = train_sbhmms(new_sequences, SelectorDIC)\n pickle.dump({\"models\": sbhmm_models, \"ensembles\": ensembles}, open(\"data/sbhmm_models.pkl\", \"wb\"))\n part4(ensembles,sbhmm_models)\n\ndef part4(ensembles = None,sbhmm_models= None):\n if not ensembles or not sbhmm_models:\n d = pickle.load(open(\"data/sbhmm_models.pkl\", \"rb\"))\n ensembles = d[\"ensembles\"]\n sbhmm_models = d[\"models\"]\n print(\"ensembles and sbhmm models loaded from pickle dump\")\n\n test_set = asl.build_test(features_polar)\n print(\"projecting test data to new feature space\")\n new_sequences = project_data_to_new_feature_space(ensembles, test_set._data)\n new_XLengths = create_hmmlearn_data(new_sequences)\n print(\"using SBHMMs to guess test data\")\n probabilities, guesses = recognize(sbhmm_models, new_sequences, new_XLengths)\n wer = show_errors(guesses, test_set)\n\n# part2()\nsbhmms()\n\n# part4()","sub_path":"segmentally_boosted_hmms.py","file_name":"segmentally_boosted_hmms.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"88902657","text":"import better_exceptions # noqa\n\nfrom flask import abort, jsonify\nfrom flask import Blueprint\nfrom flask_restful import Api, Resource, reqparse\nfrom flask import Flask\nimport sqlalchemy as sa\n\nfrom aardvark.model import AWSIAMObject\n\n\nmod = Blueprint('advisor', __name__)\napi = Api(mod)\napp = Flask(__name__)\n\n\nclass RoleSearch(Resource):\n \"\"\"\n Search for roles by phrase, regex, or by ARN.\n 
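For example, a hypothetical request body of {\"phrase\": \"admin\"} would\n    return access advisor data for every ARN containing \"admin\".\n    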
\"\"\"\n def __init__(self):\n super(RoleSearch, self).__init__()\n self.reqparse = reqparse.RequestParser()\n\n # undocumented convenience pass-through so we can query directly from browser\n @app.route('/advisors')\n def get(self):\n return(self.post())\n\n @app.route('/advisors')\n def post(self):\n \"\"\"Get access advisor data for role(s)\n Returns access advisor information for role(s) that match filters\n ---\n consumes:\n - 'application/json'\n produces:\n - 'application/json'\n\n parameters:\n - name: page\n in: query\n type: integer\n description: return results from given page of total results\n required: false\n - name: count\n in: query\n type: integer\n description: specifies how many results should be return per page\n required: false\n - name: query\n in: body\n schema:\n $ref: '#/definitions/QueryBody'\n description: |\n one or more query parameters in a JSON blob. Filter\n parameters build on eachother.\n\n Options are:\n\n 1) arn list - a list of one or more specific arns\n\n 2) phrase matching - search for ARNs like the one supplied\n\n 3) regex - match a supplied regular expression.\n\n definitions:\n AdvisorData:\n type: object\n properties:\n lastAuthenticated:\n type: number\n lastAuthenticatedEntity:\n type: string\n lastUpdated:\n type: string\n serviceName:\n type: string\n serviceNamespace:\n type: string\n totalAuthenticatedEntities:\n type: number\n QueryBody:\n type: object\n properties:\n phrase:\n type: string\n regex:\n type: string\n arn:\n type: array\n items: string\n Results:\n type: array\n items:\n $ref: '#/definitions/AdvisorData'\n\n responses:\n 200:\n description: Query successful, results in body\n schema:\n $ref: '#/definitions/AdvisorData'\n 400:\n description: Bad request - error message in body\n \"\"\"\n self.reqparse.add_argument('page', type=int, default=1)\n self.reqparse.add_argument('count', type=int, default=30)\n self.reqparse.add_argument('phrase', default=None)\n self.reqparse.add_argument('regex', default=None)\n self.reqparse.add_argument('arn', default=None, action='append')\n try:\n args = self.reqparse.parse_args()\n except Exception as e:\n abort(400, str(e))\n\n page = args.pop('page')\n count = args.pop('count')\n phrase = args.pop('phrase', '')\n arns = args.pop('arn', [])\n regex = args.pop('regex', '')\n items = None\n\n # default unfiltered query\n query = AWSIAMObject.query\n\n try:\n if phrase:\n query = query.filter(AWSIAMObject.arn.ilike('%' + phrase + '%'))\n\n if arns:\n query = query.filter(\n sa.func.lower(AWSIAMObject.arn).in_([arn.lower() for arn in arns]))\n\n if regex:\n query = query.filter(AWSIAMObject.arn.regexp(regex))\n\n items = query.paginate(page, count)\n except Exception as e:\n abort(400, str(e))\n\n if not items:\n items = AWSIAMObject.query.paginate(page, count)\n\n values = dict(page=items.page, total=items.total, count=len(items.items))\n for item in items.items:\n item_values = []\n for advisor_data in item.usage:\n item_values.append(dict(\n lastAuthenticated=advisor_data.lastAuthenticated,\n serviceName=advisor_data.serviceName,\n serviceNamespace=advisor_data.serviceNamespace,\n lastAuthenticatedEntity=advisor_data.lastAuthenticatedEntity,\n totalAuthenticatedEntities=advisor_data.totalAuthenticatedEntities,\n lastUpdated=item.lastUpdated\n ))\n values[item.arn] = item_values\n\n return jsonify(values)\n\n\napi.add_resource(RoleSearch, 
'/advisors')\n","sub_path":"aardvark/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"108277948","text":"from PySide import *\n\ndef create_button(text, member, args=None):\n    button = QtGui.QPushButton(text)\n    button.clicked.connect(member)\n    return button\n\ndef create_text_edit():\n    text_area = QtGui.QTextEdit()\n    text_area.setReadOnly(False)\n    text_area.setLineWrapMode(QtGui.QTextEdit.NoWrap)\n    text_area.moveCursor(QtGui.QTextCursor.End)\n\n    font = text_area.font()\n    font.setFamily(\"Courier\")\n    font.setPointSize(10)\n\n    text_area.setCurrentFont(font)\n\n    sb = text_area.verticalScrollBar()\n    sb.setValue(sb.maximum())\n\n    return text_area\n\n\ndef create_text_box(msg):\n    tb = QtGui.QLineEdit()\n    tb.setFont(QtGui.QFont(\"Arial\", 10))\n    tb.setText(msg)\n    return tb\n\ndef show_error(err):\n    msg = QtGui.QMessageBox()\n    msg.setIcon(QtGui.QMessageBox.Warning)\n    msg.setText(err)\n    msg.setWindowTitle(\"Error\")\n    msg.setStandardButtons(QtGui.QMessageBox.Ok)\n    retval = msg.exec_()\n\ndef show_yesNo_dialog(msg, submsg, yes_event=None):\n    from PySide.QtGui import QMessageBox\n\n    msgBox = QMessageBox()\n    msgBox.setWindowTitle(\"Error\")\n    msgBox.setText(msg)\n    msgBox.setInformativeText(submsg)\n    msgBox.setStandardButtons(QMessageBox.No | QMessageBox.Yes)\n    msgBox.setDefaultButton(QMessageBox.Yes)\n    ret = msgBox.exec_()\n\n    if yes_event is not None and ret == QMessageBox.Yes:\n        yes_event()\n    else:\n        pass\n\n","sub_path":"UI_components.py","file_name":"UI_components.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"89436190","text":"class Imposter:\n    def __init__(self, Name, protocol, port, req, mode):\n        self.Name = Name\n        self.protocol = protocol\n        self.port = port\n        self.req = req\n        self.mode = mode\n\n    def toJSON(self):\n        return {'Name': self.Name,\n                'protocol': self.protocol,\n                'port': self.port,\n                'req': self.req,\n                'mode': self.mode}\n\n","sub_path":"getImpModel.py","file_name":"getImpModel.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"192628430","text":"'''\n• Complexity:\n    ○ O(n); O(n)\n• Topics:\n    ○ dp\nWater trapped at a cell equals the smaller of the tallest walls to its left\nand right, minus the cell's own height. The hard part of this problem is\nreducing the extra space to O(1). The naive approach is 3 passes, storing two\nequal-length LHS and RHS arrays. In fact each cell only needs the smaller of\nthe two tallest walls on either side, which can be computed with two pointers\n(O(1)-space solutions are usually pointer-based). Just move the pointer on the\nshorter side each time (why this works is easiest to see from a quick sketch).\n'''\n\nclass Solution(object):\n    def trap(self, height):\n        \"\"\"\n        :type height: List[int]\n        :rtype: int\n        \"\"\"\n        # LHS\n        LHS = [0] * len(height)\n        for i in range(len(height)):\n            if i == 0:\n                LHS[i] = 0\n            else:\n                LHS[i] = max(height[i - 1], LHS[i - 1])\n\n        # RHS\n        RHS = [0] * len(height)\n        for i in range(len(height) - 1, -1, -1):\n            if i == len(height) - 1:\n                RHS[i] = 0\n            else:\n                RHS[i] = max(height[i + 1], RHS[i + 1])\n\n        count = 0\n        for i in range(len(height)):\n            diff = min(LHS[i], RHS[i]) - height[i]\n            if diff > 0: count += diff\n        return count\n\n    def betterTrap(self, height):\n        \"\"\"\n        :type height: List[int]\n        :rtype: int\n        \"\"\"\n        left, right = 0, len(height) - 1\n        ans, left_max, right_max = 0, 0, 0\n        while left < right:\n            if height[left] < height[right]:\n                if left_max > height[left]:\n                    ans += left_max - height[left]\n                else:\n                    left_max = height[left]\n                left += 1\n            else:\n                if right_max > height[right]:\n                    ans += right_max - height[right]\n                else:\n                    
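# Here height[right] is at least as tall as every bar already seen from\n                    # the right, so it becomes the new right-side maximum; always moving the\n                    # pointer on the shorter side keeps min(left_max, right_max) valid.\n                    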
right_max = height[right]\n right -= 1\n return ans\n\n\n","sub_path":"leetcode/42_trapping_rain_water.py","file_name":"42_trapping_rain_water.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"505733688","text":"import os, sys\nimport subprocess\nimport csv\nfrom RateControlUtil import cQualityMetricLine\n\nDEBUG = 1\n\nclass cVqmtTool(object):\n def __init__(self, path):\n self.path = path\n\n def read_results_from_csv(self, output_name, type):\n csv_name = output_name+'_' + type + '.csv'\n if DEBUG:\n dir = os.getcwd()\n sys.stdout.write(\"Reading %s\\n\" %(dir + os.sep + csv_name) )\n csv_file = open(csv_name, 'r')\n reader = csv.reader(csv_file, dialect='excel')\n current_metric_line = cQualityMetricLine(output_name, type)\n for row in reader:\n if row[1] == 'inf':\n current_metric_line.add_data_point(0)\n if row[0] != 'frame' and row[0] != 'average':\n current_metric_line.add_data_point(int(row[0]), float(row[1]))\n if row[0] == 'average':\n current_metric_line.add_average(float(row[1]))\n csv_file.close()\n return current_metric_line\n\n\n\n def compare(self, original, compare_object, width, height, frames, output_name):\n cmdline = str('%s %s %s %d %d %d 1 %s PSNR SSIM'\n % ('./vqmt', original, compare_object, height, width, frames,\n output_name))\n p = subprocess.Popen(cmdline, stderr=subprocess.PIPE, shell=True)\n print(p.communicate()[1])\n metric_line = \\\n self.read_results_from_csv(output_name, 'psnr')\n metric_line.plot_metric_line()\n return metric_line\n","sub_path":"ThirdpartyUtil.py","file_name":"ThirdpartyUtil.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"301114156","text":"def get_input():\n n = int(input())\n return n\n\n\ndef classes_dict(n):\n d = dict()\n for i in range(n):\n input_str = input()\n class_list = input_str.split()\n for item in class_list:\n if item != \":\" and item not in d:\n d[item] = []\n for parent in class_list[2:]:\n d[class_list[0]].append(parent)\n return d\n\n\ndef question(d, q):\n answers = []\n for i in range(q):\n input_str = input()\n question_list = input_str.split()\n parents = []\n parents_all = get_parents(d, question_list[1], parents)\n # print(parents_all)\n # answer = check(d, question_list[0], question_list[1])\n # answers.append(answer)\n if question_list[0] in parents_all or question_list[0] == question_list[1]:\n answers.append(\"Yes\")\n else:\n answers.append(\"No\")\n return answers\n\n\ndef get_parents(d, child, parents):\n for parent in d[child]:\n parents.append(parent)\n try:\n for parent_parent in d[parent]:\n parents.append(parent_parent)\n get_parents(d, parent_parent, parents)\n except KeyError:\n continue\n return parents\n\n\ndef main():\n n = get_input()\n d = classes_dict(n)\n #print(d)\n q = get_input()\n answers = question(d, q)\n #print(answers)\n # print(d[\"classD\"])\n # print(d[\"classG\"])\n # print(d[\"classF\"])\n # print(d[\"classH\"])\n for answer in answers:\n print(answer)\n\n\nmain()\n","sub_path":"stepic/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"188559374","text":"\"\"\"Check that there is enough disk space in predefined paths.\"\"\"\n\nimport tempfile\nimport os.path\n\nfrom openshift_checks import OpenShiftCheck, OpenShiftCheckException\n\n\nclass 
DiskAvailability(OpenShiftCheck):\n \"\"\"Check that recommended disk space is available before a first-time install.\"\"\"\n\n name = \"disk_availability\"\n tags = [\"preflight\"]\n\n # Values taken from the official installation documentation:\n # https://docs.okd.io/latest/install_config/install/prerequisites.html#system-requirements\n recommended_disk_space_bytes = {\n '/var': {\n 'oo_masters_to_config': 40 * 10**9,\n 'oo_nodes_to_config': 15 * 10**9,\n 'oo_etcd_to_config': 20 * 10**9,\n },\n # Used to copy client binaries into,\n # see roles/lib_utils/library/openshift_container_binary_sync.py.\n '/usr/local/bin': {\n 'oo_masters_to_config': 1 * 10**9,\n 'oo_nodes_to_config': 1 * 10**9,\n 'oo_etcd_to_config': 1 * 10**9,\n },\n # Used as temporary storage in several cases.\n tempfile.gettempdir(): {\n 'oo_masters_to_config': 1 * 10**9,\n 'oo_nodes_to_config': 1 * 10**9,\n 'oo_etcd_to_config': 1 * 10**9,\n },\n }\n\n # recommended disk space for each location under an upgrade context\n recommended_disk_upgrade_bytes = {\n '/var': {\n 'oo_masters_to_config': 10 * 10**9,\n 'oo_nodes_to_config': 5 * 10 ** 9,\n 'oo_etcd_to_config': 5 * 10 ** 9,\n },\n }\n\n def is_active(self):\n \"\"\"Skip hosts that do not have recommended disk space requirements.\"\"\"\n group_names = self.get_var(\"group_names\", default=[])\n active_groups = set()\n for recommendation in self.recommended_disk_space_bytes.values():\n active_groups.update(recommendation.keys())\n has_disk_space_recommendation = bool(active_groups.intersection(group_names))\n return super(DiskAvailability, self).is_active() and has_disk_space_recommendation\n\n def run(self):\n group_names = self.get_var(\"group_names\")\n user_config = self.get_var(\"openshift_check_min_host_disk_gb\", default={})\n try:\n # For backwards-compatibility, if openshift_check_min_host_disk_gb\n # is a number, then it overrides the required config for '/var'.\n number = float(user_config)\n user_config = {\n '/var': {\n 'oo_masters_to_config': number,\n 'oo_nodes_to_config': number,\n 'oo_etcd_to_config': number,\n },\n }\n except TypeError:\n # If it is not a number, then it should be a nested dict.\n pass\n\n self.register_log(\"recommended thresholds\", self.recommended_disk_space_bytes)\n if user_config:\n self.register_log(\"user-configured thresholds\", user_config)\n\n # TODO: as suggested in\n # https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,\n # maybe we could support checking disk availability in paths that are\n # not part of the official recommendation but present in the user\n # configuration.\n for path, recommendation in self.recommended_disk_space_bytes.items():\n free_bytes = self.free_bytes(path)\n recommended_bytes = max(recommendation.get(name, 0) for name in group_names)\n\n config = user_config.get(path, {})\n # NOTE: the user config is in GB, but we compare bytes, thus the\n # conversion.\n config_bytes = max(config.get(name, 0) for name in group_names) * 10**9\n recommended_bytes = config_bytes or recommended_bytes\n\n # if an \"upgrade\" context is set, update the minimum disk requirement\n # as this signifies an in-place upgrade - the node might have the\n # required total disk space, but some of that space may already be\n # in use by the existing OpenShift deployment.\n context = self.get_var(\"r_openshift_health_checker_playbook_context\", default=\"\")\n if context == \"upgrade\":\n recommended_upgrade_paths = self.recommended_disk_upgrade_bytes.get(path, {})\n if recommended_upgrade_paths:\n 
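# In-place upgrades use the smaller upgrade-time thresholds instead of\n                    # the fresh-install recommendation, but an explicit user override still\n                    # takes precedence through the config_bytes or ... short-circuit.\n                    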
recommended_bytes = config_bytes or max(recommended_upgrade_paths.get(name, 0)\n for name in group_names)\n\n if free_bytes < recommended_bytes:\n free_gb = float(free_bytes) / 10**9\n recommended_gb = float(recommended_bytes) / 10**9\n msg = (\n 'Available disk space in \"{}\" ({:.1f} GB) '\n 'is below minimum recommended ({:.1f} GB)'\n ).format(path, free_gb, recommended_gb)\n\n # warn if check failed under an \"upgrade\" context\n # due to limits imposed by the user config\n if config_bytes and context == \"upgrade\":\n msg += ('\\n\\nMake sure to account for decreased disk space during an upgrade\\n'\n 'due to an existing OpenShift deployment. Please check the value of\\n'\n ' openshift_check_min_host_disk_gb={}\\n'\n 'in your Ansible inventory, and lower the recommended disk space availability\\n'\n 'if necessary for this upgrade.').format(config_bytes)\n\n self.register_failure(msg)\n\n return {}\n\n def find_ansible_submounts(self, path):\n \"\"\"Return a list of ansible_mounts that are below the given path.\"\"\"\n base = os.path.join(path, \"\")\n return [\n mount\n for mount in self.get_var(\"ansible_mounts\")\n if mount[\"mount\"].startswith(base)\n ]\n\n def free_bytes(self, path):\n \"\"\"Return the size available in path based on ansible_mounts.\"\"\"\n submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path))\n mount = self.find_ansible_mount(path)\n try:\n return mount['size_available'] + submounts\n except KeyError:\n raise OpenShiftCheckException(\n 'Unable to retrieve disk availability for \"{path}\".\\n'\n 'Ansible facts included a matching mount point for this path:\\n'\n ' {mount}\\n'\n 'however it is missing the size_available field.\\n'\n 'To investigate, you can inspect the output of `ansible -m setup `'\n ''.format(path=path, mount=mount)\n )\n","sub_path":"openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/openshift_health_checker/openshift_checks/disk_availability.py","file_name":"disk_availability.py","file_ext":"py","file_size_in_byte":6692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"516661292","text":"# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #\n# Date: 03.09.2016 #\n# Author: Ole-Johan Skrede #\n# #\n# Solution proposal as part of the exercise program in #\n# INF4300 - Digital image analysis at the University of Oslo #\n# #\n# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #\n\"\"\"\nSolution proposal for exercise 1, task 1.\n\"\"\"\n\n# pylint: disable=expression-not-assigned\n# pylint: disable=bad-indentation\n# pylint: disable=redefined-outer-name\n\nimport os\n\nimport numpy as np\nfrom scipy import signal # pylint: disable=import-error\nimport cv2\nimport matplotlib.pyplot as plt\n\ndef plot_image(image, fig_num, name=None, colormap='gray', write_file=None):\n \"\"\"Plot image\"\"\"\n fig = plt.figure(fig_num)\n plt.imshow(image, cmap=colormap, interpolation='none')\n if name is not None:\n plt.title(name)\n plt.xticks([]), plt.yticks([])\n plt.tight_layout()\n if write_file:\n fig.savefig(write_file, bbox_inches='tight', pad_inches=0)\n fig_num += 1\n return fig_num\n\ndef main():\n \"\"\"main\"\"\"\n\nprint('='*80)\nprint('Solution to weekly exercises in INF4300')\nprint('Exercise 1')\nprint('Task 1')\nprint('-'*80)\n\nimage_dir = '../../images'\nwrite_dir = 'results/images'\nfig_num = 0\n\n# Read 2D graylevel image\nimg_filename = os.path.join(image_dir, 'football.jpg')\nimg = 
cv2.imread(img_filename, cv2.IMREAD_GRAYSCALE)\nfig_num = plot_image(img, fig_num,\n write_file=os.path.join(write_dir, 'football.png'))\nprint('Shape of original: ', img.shape)\n\n# Construct 5x5 mean kernel\nmean_kernel = np.ones((5, 5)) / (5*5)\n\n#### Full padding mode\nmean_img_full = signal.convolve2d(img, mean_kernel, mode='full')\nfig_num = plot_image(mean_img_full, fig_num,\n write_file=os.path.join(write_dir, 'mean_full.png'))\nprint('Shape of convolved, full mode: ', mean_img_full.shape)\n# Retrieve original: mean_img = mean_img[2:-2, 2:-2]\n\n#### Valid padding mode\nmean_img_valid = signal.convolve2d(img, mean_kernel, mode='valid')\nfig_num = plot_image(mean_img_valid, fig_num,\n write_file=os.path.join(write_dir, 'mean_valid.png'))\nprint('Shape of convolved, valid mode: ', mean_img_valid.shape)\n\n#### Same padding mode\nmean_img_same = signal.convolve2d(img, mean_kernel, mode='same', boundary='symm')\nfig_num = plot_image(mean_img_same, fig_num,\n write_file=os.path.join(write_dir, 'mean_same.png'))\nprint('Shape of convolved, same mode: ', mean_img_same.shape)\n\n\nplt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"solutions/week_01/inf4300_h16_ex01_t01.py","file_name":"inf4300_h16_ex01_t01.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"93969438","text":"'''\nupdated 04/05/2023\n\n@author: rduvalwa2\n'''\nfrom tkinter import *\nfrom Music_Get_Functions import musicGet_Functions\n\n\nclass Application(Frame):\n \"\"\"Application main window class.\"\"\"\n\n def __init__(self, master=None):\n \"\"\"Main frame initialization (mostly delegated)\"\"\"\n Frame.__init__(self, master)\n self.pack()\n self.updateArtistWidgets()\n \n def updateArtistWidgets(self):\n \"\"\"Add all the widgets to the main frame.\"\"\"\n tag_name = Frame(self)\n artist_name = Frame(self)\n newGenre_name = Frame(self)\n# result = Frame()\n self.labeltag = Label(tag_name, text=\"Update Artist Genre\")\n self.labelArtist = Label(artist_name, text=\"Artist Name\")\n self.labelNewGenre = Label(newGenre_name, text=\"New Genre Name\")\n self.labelResult = Label(artist_name, text=\"Result\")\n \n self.text_in_artist = Entry(artist_name)\n self.text_in_genre = Entry(newGenre_name) \n \n self.labeltag.pack()\n self.labelArtist.pack()\n self.labelNewGenre.pack() \n \n self.text_in_artist.pack()\n self.text_in_genre.pack()\n self.labelResult.pack() \n \n tag_name.pack(side=TOP)\n artist_name.pack(side=TOP)\n newGenre_name.pack(side=TOP)\n# result.pack(side=TOP) \n \n bottom_frame = Frame(self)\n bottom_frame.pack(side=TOP)\n# how to disable a button\n self.QUIT = Button(bottom_frame, text=\"Quit\", command=self.quit, state='active')\n self.QUIT.pack(side=LEFT)\n self.handleb = Button(bottom_frame, text=\"Submit\", command=self.handle)\n self.handleb.pack(side=LEFT)\n \n def handle(self):\n \"\"\"Handle a click of the button by processing any text the\n user has placed in the Entry widget according to the selected\n radio button.\"\"\"\n artist = self.text_in_artist.get()\n genre = self.text_in_genre.get()\n muxGet = musicGet_Functions()\n result = muxGet.update_artist(artist, genre)\n output = result\n self.labelResult.config(text=output)\n self.QUIT.config(state='active')\n self.QUIT.pack(side=TOP)\n\n\nroot = Tk()\napp = Application(master=root)\napp.mainloop() 
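\n\n# Hypothetical headless usage (sketch, not part of the GUI flow): the Submit\n# handler above reduces to a single call on musicGet_Functions, e.g.\n# print(musicGet_Functions().update_artist('Some Artist', 'Jazz'))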
\n","sub_path":"Mux_Gui/Update_Artist_Gui.py","file_name":"Update_Artist_Gui.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"372815943","text":"from collections import deque, namedtuple\nimport itertools\nimport os\nimport math\nimport random\n\nfrom moviepy.editor import ImageSequenceClip\nimport numpy as np\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.transforms import Transform\nfrom torch.nn.functional import softplus\n\nTransition = namedtuple('Transition', ('state', 'action', 'reward', 'nextstate', 'real_done'))\n\n\nclass MeanStdevFilter():\n def __init__(self, shape, clip=3.0):\n self.eps = 1e-4\n self.shape = shape\n self.clip = clip\n self._count = 0\n self._running_sum = np.zeros(shape)\n self._running_sum_sq = np.zeros(shape) + self.eps\n self.mean = np.zeros(shape)\n self.stdev = np.ones(shape) * self.eps\n\n def update(self, x):\n if len(x.shape) == 1:\n x = x.reshape(1,-1)\n self._running_sum += np.sum(x, axis=0)\n self._running_sum_sq += np.sum(np.square(x), axis=0)\n # assume 2D data\n self._count += x.shape[0]\n self.mean = self._running_sum / self._count\n self.stdev = np.sqrt(\n np.maximum(\n self._running_sum_sq / self._count - self.mean**2,\n self.eps\n ))\n \n def __call__(self, x):\n return np.clip(((x - self.mean) / self.stdev), -self.clip, self.clip)\n\n def invert(self, x):\n return (x * self.stdev) + self.mean\n\n\nclass ReplayPool:\n\n def __init__(self, capacity=1e6):\n self.capacity = int(capacity)\n self._memory = deque(maxlen=int(capacity))\n \n def push(self, transition: Transition):\n \"\"\" Saves a transition \"\"\"\n self._memory.append(transition)\n \n def sample(self, batch_size: int, unique: bool = True, dist=None) -> Transition:\n transitions = random.sample(self._memory, batch_size) if unique else random.choices(self._memory, k=batch_size)\n return Transition(*zip(*transitions))\n\n def get(self, start_idx: int, end_idx: int) -> Transition:\n transitions = list(itertools.islice(self._memory, start_idx, end_idx))\n return transitions\n\n def get_all(self) -> Transition:\n return self.get(0, len(self._memory))\n\n def __len__(self) -> int:\n return len(self._memory)\n\n def clear_pool(self):\n self._memory.clear()\n\n def initialise(self, old_pool: 'ReplayPool'):\n old_memory = old_pool.get_all()\n self._memory.extend(old_memory)\n\n\n# Code courtesy of JPH: https://github.com/jparkerholder\ndef make_gif(policy, env, step_count, state_filter, maxsteps=1000):\n envname = env.spec.id\n gif_name = '_'.join([envname, str(step_count)])\n state = env.reset()\n done = False\n steps = []\n rewards = []\n t = 0\n while (not done) & (t< maxsteps):\n s = env.render('rgb_array')\n steps.append(s)\n action = policy.get_action(state, state_filter=state_filter, deterministic=True)\n action = np.clip(action, env.action_space.low[0], env.action_space.high[0])\n action = action.reshape(len(action), )\n state, reward, done, _ = env.step(action)\n rewards.append(reward)\n t +=1\n print('Final reward :', np.sum(rewards))\n clip = ImageSequenceClip(steps, fps=30)\n if not os.path.isdir('gifs'):\n os.makedirs('gifs')\n clip.write_gif('gifs/{}.gif'.format(gif_name), fps=30)\n\n\ndef make_checkpoint(agent, step_count, env_name, save_replay_pool=False):\n\n save_dir = \"checkpoints/{}\".format(env_name)\n\n save_path = save_dir + \"/{}-{}steps-seed{}.pt\".format(agent.alg_name, step_count, agent._seed)\n \n if not 
os.path.isdir(save_dir):\n os.makedirs(save_dir)\n\n save_dict = {\n 'double_q_state_dict': agent.q_funcs.state_dict(),\n 'target_double_q_state_dict': agent.target_q_funcs.state_dict(),\n 'policy_state_dict': agent.policy.state_dict(),\n 'replay_pool': agent.replay_pool if save_replay_pool else None,\n 'num_updates': agent._update_counter,\n 'num_steps': step_count,\n 'alg_name': agent.alg_name,\n 'env_name': env_name\n }\n\n if agent.is_soft:\n save_dict['log_alpha'] = agent._log_alpha\n\n if hasattr(agent, \"target_policy\"):\n save_dict['target_policy_state_dict'] = agent.target_policy.state_dict()\n\n print(\"Saving {} Policy at {} Steps\".format(agent.alg_name, step_count))\n torch.save(save_dict, save_path)\n\n\n# Taken from: https://github.com/pytorch/pytorch/pull/19785/files\n# The composition of affine + sigmoid + affine transforms is numerically unstable\n# tanh transform is (2 * sigmoid(2x) - 1)\n# Old Code Below:\n# transforms = [AffineTransform(loc=0, scale=2), SigmoidTransform(), AffineTransform(loc=-1, scale=2)]\nclass TanhTransform(Transform):\n r\"\"\"\n Transform via the mapping :math:`y = \\tanh(x)`.\n It is equivalent to\n ```\n ComposeTransform([AffineTransform(0., 2.), SigmoidTransform(), AffineTransform(-1., 2.)])\n ```\n However this might not be numerically stable, thus it is recommended to use `TanhTransform`\n instead.\n Note that one should use `cache_size=1` when it comes to `NaN/Inf` values.\n \"\"\"\n domain = constraints.real\n codomain = constraints.interval(-1.0, 1.0)\n bijective = True\n sign = +1\n\n @staticmethod\n def atanh(x):\n return 0.5 * (x.log1p() - (-x).log1p())\n\n def __eq__(self, other):\n return isinstance(other, TanhTransform)\n\n def _call(self, x):\n return x.tanh()\n\n def _inverse(self, y):\n # We do not clamp to the boundary here as it may degrade the performance of certain algorithms.\n # one should use `cache_size=1` instead\n return self.atanh(y)\n\n def log_abs_det_jacobian(self, x, y):\n # We use a formula that is more numerically stable, see details in the following link\n # https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L69-L80\n return 2. * (math.log(2.) - x - softplus(-2. 
* x))","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"23842924","text":"from __future__ import print_function, division\nimport scipy\n\nfrom keras.datasets import mnist\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D, MaxPooling2D, AveragePooling3D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\nimport datetime\nimport matplotlib.pyplot as plt\nimport sys\nfrom data_loader_encoder import DataLoader\nfrom head_pose.head_pose_estimation import CnnHeadPoseEstimator\nimport tensorflow as tf\nfrom glob import glob\nimport pickle\nimport PIL.Image\nimport cv2\nimport config\nimport dnnlib\nimport dnnlib.tflib as tflib\nimport numpy as np\nimport os\n\nurl_ffhq = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl\n\nsynthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8)\n\n_Gs_cache = dict()\n\n\ndef load_Gs(url):\n print(\"loading GS\")\n if url not in _Gs_cache:\n with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:\n _G, _D, Gs = pickle.load(f)\n _Gs_cache[url] = Gs\n print(\"GS loaded\")\n return _Gs_cache[url]\n\nclass Encoder():\n def __init__(self):\n # Input shape\n self.img_rows = 1024\n self.img_cols = 1024\n self.channels = 3\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n\n # Configure data loader\n self.dataset_name = 'out'\n self.data_loader = DataLoader(dataset_name=self.dataset_name, data_path='../data/out/')\n\n\n optimizer = Adam(0.0002, 0.5)\n\n self.Gs = load_Gs(url_ffhq)\n\n # Build and compile the discriminators\n self.encoder = self.build_encoder()\n self.encoder.compile(loss='mse',\n optimizer=optimizer,\n metrics=['accuracy'])\n print(self.encoder.summary())\n i=4\n\n def build_encoder(self):\n # Image input\n d0 = Input(shape=self.img_shape)\n\n d = Conv2D(20, (5, 5), padding=\"same\")(d0)\n d=Activation(\"relu\")(d)\n d=MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(d)\n\n #d = Dense(36, activation='tanh')(d)\n\n d = Conv2D(20, (5, 5), padding=\"same\")(d)\n d=Activation(\"relu\")(d)\n d=MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(d)\n\n #d = Dense(36, activation='tanh')(d)\n\n d = Conv2D(20, (5, 5), padding=\"same\")(d)\n d = Activation(\"relu\")(d)\n d = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(d)\n\n #d = Dense(36, activation='tanh')(d)\n\n d = Conv2D(20, (5, 5), padding=\"same\")(d)\n d = Activation(\"relu\")(d)\n d = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(d)\n\n #d = Dense(36, activation='tanh')(d)\n\n d = Conv2D(20, (5, 5), padding=\"same\")(d)\n d = Activation(\"relu\")(d)\n d = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(d)\n\n #d = Dense(36, activation='tanh')(d)\n\n d = Conv2D(20, (5, 5), padding=\"same\")(d)\n d = Activation(\"relu\")(d)\n d = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(d)\n\n #d = Dense(1, activation='tanh')(d)\n\n d=Flatten()(d)\n\n d=Dense(18*512, activation='tanh')(d)\n\n d = Reshape((18, 512))(d)\n\n return Model(d0, d)\n\n\n def train(self, epochs, batch_size=1, sample_interval=50):\n\n for epoch in range(epochs):\n for batch_i, (imgs, latent_vector) in 
enumerate(self.data_loader.load_batch(batch_size)):\n\n # ----------------------\n # Train Encoder\n # ----------------------\n\n # Optionally inspect the encoder's current prediction for these images\n #encoder_prediction = self.encoder.predict(imgs)\n\n # Train the encoder to regress each image's latent vector\n loss = self.encoder.train_on_batch(imgs, latent_vector)\n\n\n\n # Plot the progress\n print (\"[Epoch %d/%d] [Batch %d/%d] [loss: %f]\" % ( epoch, epochs,\n batch_i, self.data_loader.n_batches,\n loss[0]))\n\n # If at save interval => save generated image samples\n if batch_i % sample_interval == 0:\n '''norm_images, images, latent_vectors = self.data_loader.load_test_data()\n for i in range(len(images)):\n np.save('../results/' +str(i)+'_orig_latent_vector.npy', latent_vectors[i])\n np.save('../results/'+str(i)+'_predicted_latent_vector.npy', self.encoder.predict(norm_images)[i])\n scipy.misc.imsave('../results/' + str(i) + '_image.png',images[i])'''\n\n path_images = glob(os.path.join('../data/out/test/images_cage/*'))\n imgs = []\n for img_path in path_images:\n img = scipy.misc.imread(img_path, mode='RGB').astype(np.float)\n imgs.append(img)\n\n imgs_norm = np.array(imgs) / 127.5 - 1.\n\n prediction = self.encoder.predict(imgs_norm)\n\n for i in range(len(imgs_norm)):\n '''r, p, y = self.estimate_head_pose(imgs[i])\n cv2.putText(imgs[i], str(r) + ', ' + str(p) + ', ' + str(y), (10, 50),\n cv2.FONT_HERSHEY_TRIPLEX, 1,\n color=(255, 0, 255))'''\n scipy.misc.imsave('../results/' + str(i) + '_image.png', imgs[i])\n np.save('../results/' + str(i) + '_predicted_latent_vector.npy',\n prediction[i])\n\n\n\n src_images = self.Gs.components.synthesis.run(prediction, randomize_noise=False,\n **synthesis_kwargs)\n i = 0\n for image in src_images:\n '''r, p, y = self.estimate_head_pose(image)\n cv2.putText(image, str(r)+', '+str(p)+', '+str(y), (10, 50),\n cv2.FONT_HERSHEY_TRIPLEX, 1,\n color=(255, 0, 255))'''\n #im = PIL.Image.fromarray(image)\n scipy.misc.imsave('../results/' + str(i) + '.png',image)\n i += 1\n print('done')\n\n #np.save('../results/'+str(batch_i)+'_prediction.npy',self.encoder.predict([im])[0])\n #np.save('../results/prediction.npy', self.encoder.predict(imgs)[0])\n #cv2.imwrite('../results/' + str(batch_i) + '.png',((imgs[0]+1)*127.5))\n #im.save('../results/' + str(batch_i) + '.png')\n\n '''def estimate_head_pose(self, face_img):\n sess = tf.Session() # Launch the graph in a session.\n my_head_pose_estimator = CnnHeadPoseEstimator(sess) # Head pose estimation object\n #\n # # Load the weights from the configuration folders\n my_head_pose_estimator.load_roll_variables(os.path.realpath(\"head_pose/model/roll/cnn_cccdd_30k.tf\"))\n my_head_pose_estimator.load_pitch_variables(\n os.path.realpath(\"head_pose/model/pitch/cnn_cccdd_30k.tf\"))\n my_head_pose_estimator.load_yaw_variables(os.path.realpath(\"head_pose/model/yaw/cnn_cccdd_30k.tf\"))\n #\n # # Get the angles for roll, pitch and yaw\n roll = my_head_pose_estimator.return_roll(face_img) # Evaluate the roll angle using a CNN\n pitch = my_head_pose_estimator.return_pitch(face_img) # Evaluate the pitch angle using a CNN\n yaw = my_head_pose_estimator.return_yaw(face_img) # Evaluate the yaw angle using a CNN\n return roll[0, 0, 0], pitch[0, 0, 0], yaw[0, 0, 0]'''\n\n\n\n\n\nif __name__ == '__main__':\n tflib.init_tf()\n os.makedirs(config.result_dir, exist_ok=True)\n encoder = Encoder()\n encoder.train(epochs=200, batch_size=1, 
sample_interval=200)","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":8241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"356270445","text":"import requests\nfrom airflow.hooks.base_hook import BaseHook\n\n\nclass LaunchHook(BaseHook):\n def __init__(self):\n super().__init__(source=None)\n\n \n def fetch(self, start_date, end_date, **kwargs):\n query = f\"https://launchlibrary.net/1.4/launch?startdate={start_date}&enddate={end_date}\"\n response = requests.get(query)\n print(query)\n return response.json()['launches']\n\n\n","sub_path":"dags/hooks/launch_hook.py","file_name":"launch_hook.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"170442249","text":"import numpy as np\n\ndef vff_load(filename):\n\n\t# Check reconstruction resolution and set image size\n\t# accordingly\n\tif 'LR' in filename:\n\t\tvol = 64\n\telif 'MR' in filename:\n\t\tvol = 128\n\telif 'HR' in filename:\n\t\tvol = 256\n\telif 'ER' in filename:\n\t\tvol = 512\n\n\t# Open file and read data\n\tvff = open(filename)\n\tdata = np.fromfile(vff,np.uint8)\n\tvff.close()\n\n\t# Calculate header offset\n\toffset = len(data) - vol**3 * 4\n\n\t# Read / convert data and shape into image\n\timgdata = np.fromstring(data[offset:],dtype='>f4')\n\timg = imgdata.reshape([vol,vol,vol])\n\n\t# Return image data\n\treturn img","sub_path":"pyvff.py","file_name":"pyvff.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"442062663","text":"'''\nCreated on 15 Sep 2016\n\n@author: richard\n'''\nimport numpy as np\nimport scipy.constants as constants\nimport matplotlib.pyplot as plt\nimport quant_mech.utils as utils\n\nk_B_eV = constants.k / utils.EV_TO_JOULES\n\ndef F2(voltage, E0, temperature):\n return (1. + np.exp(2. * (voltage - E0) / (k_B_eV*temperature))) / (1. + np.exp((voltage - E0) / (k_B_eV*temperature)))**2\n\ndef F2_2(voltage, E0, temperature):\n x = np.exp(-(voltage - E0) / (k_B_eV*temperature))\n return (x**2 + 1.) 
/ (x+1.)**2\n\nvoltage_values = np.linspace(1.2, 1.6, 1000)\nE0 = 1.4\ntemperature = 300.\n\nplt.plot(voltage_values, F2_2(voltage_values, E0, temperature))\nplt.xlim(1.2, 1.6)\nplt.ylim(0.4, 1.1)\nplt.show()","sub_path":"PSIIRC_photocell_counting_statistics/src/test/F2_voltage_analytic_test.py","file_name":"F2_voltage_analytic_test.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"367346518","text":"import os\nimport hangups\n\nfrom hangups.ui.utils import get_conv_name\n\nimport json\nimport requests\n\nclass sender():\n _bot = None\n _config = None\n\n def init():\n if \"HUBOT_URL\" not in sender._config:\n print(\"cannot initialise: config.hooks[].HUBOT_URL not provided\")\n return False\n\n return True\n\n def on_chat_message(event):\n if event.user.is_self:\n # don't send my own messages\n return\n\n event_timestamp = event.timestamp\n\n conversation_id = event.conv_id\n conversation_name = get_conv_name(event.conv)\n conversation_text = event.text\n\n user_full_name = event.user.full_name\n user_id = event.user_id\n\n url = sender._config[\"HUBOT_URL\"] + conversation_id\n payload = {\"from\" : str(user_id.chat_id), \"message\" : conversation_text}\n headers = {'content-type': 'application/json'}\n r = requests.post(url, data = json.dumps(payload), headers = headers, verify=False)","sub_path":"hangupsbot/hooks/hubotsend/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"593352624","text":"import heapq\n\n# Python's heapq is a min-heap, so we push the negated distance: the\n# farthest of the kept points sits at the top and is popped first once\n# the heap grows past K. O(N*logK) time and O(K) space.\nclass Solution:\n def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:\n euclid = lambda p:-(p[0]**2+p[1]**2)\n heap = []\n for point in points:\n heapq.heappush(heap, (euclid(point), point))\n if len(heap)>K:\n heapq.heappop(heap)\n res = [y for x,y in heap]\n return res\n \n # Naive solution\n # def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:\n # euclid = lambda p:p[0]**2+p[1]**2\n # res = []\n # for point in points:\n # res.append((point,euclid(point)))\n # res = sorted(res, key=lambda tup:tup[1])\n # output = [res[point][0] for point in range(K)]\n # return output\n","sub_path":"leetcode/973.kClosest_Points/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"189901148","text":"# Required Packages:\n# - py-serial\n# - requests\n\nimport serial\nimport requests\nimport re\n\nser = serial.Serial('/dev/tty.usbmodem1421', 9600)\n\nscanner_id = 1\napi_host = \"localhost:8000\"\n\n\napi_url = \"http://\" + api_host + \"/api/v1/checkin\"\n\nwhile True:\n line = ser.readline().rstrip()\n\n if line[0:4] == \"UID:\":\n uid = re.sub(r'^UID: *', '', line)\n\n # post uid to some server url\n requests.post(api_url, data={ 'uid': uid, 'scanner_id': scanner_id })\n\n\n\n# TV connected to Raspberry Pi, which has internet access\n# Raspberry Pi displays some sort of check-in screen\n# - when a user scans their card, it should show up on the screen, some beep should happen\n# - \n# \n#\n# New user with a new card scans their card\n# they 
","sub_path":"nfc_daemon.py","file_name":"nfc_daemon.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"477760193","text":"\"\"\"\nAuthor: Jing (https://github.com/gnijuohz)\n\n4Sum: https://oj.leetcode.com/problems/4sum \n\nGiven an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.\n\nNote:\n\nElements in a quadruplet (a,b,c,d) must be in non-descending order. (ie, a ≤ b ≤ c ≤ d)\nThe solution set must not contain duplicate quadruplets.\n\n\n\n\n For example, given array S = {1 0 -1 0 -2 2}, and target = 0.\n\n A solution set is:\n (-1, 0, 0, 1)\n (-2, -1, 1, 2)\n (-2, 0, 0, 2) \nTags\nArray, Hash Table, Two Pointers \n\"\"\"\n\nclass Solution:\n # @return a list of lists of length 4, [[val1,val2,val3,val4]]\n def fourSum(self, num, target):\n nums = sorted(num)\n result = set()\n cache = collections.defaultdict(set)\n\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n for half in cache[target - nums[i] - nums[j]]:\n result.add(tuple(list(half) + [nums[i], nums[j]]))\n\n for j in range(i):\n cache[nums[i] + nums[j]].add((nums[j], nums[i]))\n\n return map(list, result)","sub_path":"solutions/4Sum.py","file_name":"4Sum.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"415737489","text":"from tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport re\nfrom re import findall as fa\nimport sqlite3\nimport pymorphy2\nimport math\nimport importlib\nimport pickle\n\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier \nfrom sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\n\nfrom scipy.stats.stats import pearsonr as corr\n\nfrom nltk.tokenize import word_tokenize\n\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import precision_score, recall_score, confusion_matrix\n\n\n\nlib = {\n 'allpos': ['PRED', 'None', 'PRTS', 'ADJF', 'INFN', \n 'PRTF', 'NOUN', 'ADVB', 'VERB', 'NPRO', \n 'NUMR', 'CONJ', 'ADJS', 'PRCL', 'PREP', 'COMP', 'INTJ'],\n 'pos': ['ADJF', 'NOUN', 'ADVB', 'VERB', 'CONJ', 'PREP', 'INTJ', 'None'],\n 'uncert' : ['наверное?', 'может[\\s-]?быть', 'кажеть?ся', \n 'видимо', 'возможно', 'по[\\s-]?видимому', \n 'вероятно', 'должно[\\s-]?быть','пожалуй', 'как[\\s-]?видно'],\n 'cert' : ['очевидно','конечно','точно','совершенно',\n 'не\\s?сомненно','разумееть?ся', \n 'по[\\s-]?любому','сто[\\s-]?пудово?'],\n 'quan' : ['вс[её]x?','всегда','ни-?когда', 'постоянн?о', \n 'ник(?:то|ого|ому|ем)', \n 'кажд(?:ый|ая|ой|ому?|ое|ого|ую|ые|ою|ыми?|ых)',\n 'всяк(?:ий|ая|ое|ого|ую|ому?|ой|ою|ими?|их|ие)',\n 'люб(?:ой|ая|ое|ого|ому?|ую|ой|ыми?|ых|ые)'],\n 'imper' : ['долж(?:ен|на|ны|но)', 'обязан(?:а|ы|о|)', \n 'надо\\W', 'нуж(?:но|ен|на|ны)', \n 'требуеть?ся', 'необходим(?:а|ы|о|)\\W'],\n 'racio' : ['по\\s?этому', 'по\\s?тому,?\\s?что', 'следовательно', 
\n 'из[\\s-]?за\\s?того,?\\s?что', 'из[\\s-]?за\\s?этого', \n 'по\\s?причине', 'в\\s?следстви[ие]', 'так\\s?как', 'т\\.?к\\.?',\n 'поскольк[оу]', 'чтобы'],\n 'dimin' : ['\\w+[ое]ньк(?:ая|ий|ое|ие|ую|ого|ому|ой|ими|а|о|у|е)', \n '\\w+очек\\s', '\\w+[ие]к(?:ами?|ов|у|а|е|и|)\\s'],\n 'extrem' : ['че?резвычайно', 'слишком', 'чере[cз]чур', 'ужасно',\n 'безумно', 'крайне', 'предельно',\n 'исключительно', 'невероятно', 'в\\s?(?:наи|)вы[сш]шей (?:степени|мере)'],\n 'like' : ['люблю', '[оa]б[оa]жаю','восхища[ею]т',\n 'в\\s?восторге', 'нрав[ия]ть?ся'],\n 'dislike' : ['бес[ия]т', 'ненавижу', 'терпеть\\s?не\\s?могу', \n 'раздража[ею]т', 'зл[ия]т', 'выбешива[ею]т', ],\n 'polite' : ['пожалуй?ст[ао]', 'пожал[ао]ст[ао]', \n 'с?пасиб[оа]?', 'благ[оа]д[ао]рю'],\n 'obscene' : ['\\sбля\\s', '\\sбля[дт]ь\\s', '\\sсук(?:ами?|ax|у|а|е|и|)\\s', \n '\\sху(?:ями?|ем|ях|ю|е|я|й)\\s', '\\sна\\s?хуй\\s', \n '\\w*(?:подъ?|за|на|вы|по|при|от|у|)еб[лa]?\\w*',\n 'мудак\\w*', 'мудил\\w+\\s','пид[оa]р\\w*', \n 'пид[ао]рас\\w*\\s'],\n 'slang' : ['\\s\\w*хайп\\w*\\s', 'хейтер\\w*','\\sчилить','\\sизи\\w*\\s','зашквар\\w*',\n '\\sжиз\\w+','\\sкун\\w*','\\sтянк?\\w*','\\sлойс\\w?', '\\sсорян\\s'],\n}\n\n\ndef ct(x, co=0, steep=0, ec50=0.5, level='max', adb=True):\n def carryover(x, l=0):\n co = []\n co.append((1-l)*x[0])\n for i in range(1,len(x)):\n co.append((1-l)*x[i] + l*co[i-1])\n return co\n\n def adbudg(x, steep, ec50, level):\n cap = max(x) if level == 'max' else level\n adb = []\n for i in x:\n adb.append(0 if i == 0 else cap/(1 + (i/(cap*ec50))**(-steep))) \n return adb\n\n def logcurve(x, steep, ec50, level):\n cap = max(x) if level == 'max' else level*max(x)\n crv = []\n for i in x:\n crv.append(cap/(1+math.exp((-steep)*(i/cap-ec50))) - cap/(1+math.exp(steep*ec50)))\n return crv\n \n ct = x\n if steep > 0:\n ct = adbudg(x, steep, ec50, level) if adb else logcurve(x, steep, ec50, level)\n ct = carryover(ct, co) if co > 0 else ct\n return ct\n\ndef cleanse(s):\n rgxp = '[\\`\\)\\(\\|©~^<>/\\'\\\"\\«№#$&\\*.,;=+?!\\—_@:\\]\\[%\\{\\}\\\\n]'\n return re.sub(' +', ' ', re.sub(rgxp, ' ', s.lower()))\n\ndef set_groups(x, dev=1, M=50, SD=10):\n if x > M+dev*SD:\n return 'high'\n elif x < M-dev*SD:\n return 'low'\n else:\n return 'average'\n \ndef mape(y_true, y_pred): \n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\ndef extract_features(text, morph=pymorphy2.MorphAnalyzer(), \n pos_types=lib['pos'], \n uncert=lib['uncert'],\n cert=lib['cert'], \n quan=lib['quan'], \n imper=lib['imper'], \n racio=lib['racio'], \n dimin=lib['dimin'], \n extrem=lib['extrem'],\n like=lib['like'],\n dislike=lib['dislike'], \n polite=lib['polite'], \n obscene=lib['obscene'], \n slang=lib['slang']):\n \n from re import findall as fa\n #length in chars and words\n len_char = len(text)\n len_word = len(text.split())\n len_sent = len(fa('[^\\.\\!\\?]+[\\.\\!\\?]', text))\n len_sent = len_sent if len_sent else 1\n pun = fa('[\\.+,!\\?:-]',text)\n n_pun = len(pun)\n braсket_list = fa('[\\(\\)]',text)\n \n #POS & grammem \n def parse_text(text, morph=morph):\n tokens = cleanse(text).split()\n return [morph.parse(t) for t in tokens]\n \n parsed_text = parse_text(text)\n pos_list = [str(p[0].tag.POS) for p in parsed_text]\n n_nouns = len([t for t in pos_list if t=='NOUN'])\n n_verbs = len([t for t in pos_list if t=='VERB'])\n n_ad = len([t for t in pos_list if t in ['ADJF','ADVB']])\n anim_list = [str(p[0].tag.animacy) for p in parsed_text]\n pers_list = [str(p[0].tag.person) for p in 
parsed_text]\n tns_list = [str(p[0].tag.tense) for p in parsed_text]\n asp_list = [str(p[0].tag.aspect) for p in parsed_text]\n \n r = lambda x: round(x, 4)\n d = lambda x, y: x / y if y else 0.0\n \n features = {\n #surface features\n 'len_char': len_char, \n 'len_word': len_word,\n 'len_sent': len_sent,\n 'm_len_word': r(len_char / len_word),\n 'm_len_sent': r(len_word / len_sent),\n #punctuation\n 'p_pun': r(len(pun) / len_char),\n 'p_dot': r(d(len([i for i in pun if i=='.']), len(pun))),\n 'p_qm': r(d(len([i for i in pun if i=='?']), len(pun))),\n 'p_excl': r(d(len([i for i in pun if i=='!']), len(pun))),\n 'p_comma': r(d(len([i for i in pun if i==',']), len(pun))),\n 'p_brkt': r(len(braсket_list) / len_char),\n 'p_brkt_up': r(d(len([i for i in braсket_list if i==')']), len(braсket_list))),\n #POS form\n 'pos_form': ' '.join(pos_list),\n 'pos_richness': len(set(pos_list)),\n #grammem features\n 'p_anim': r(d(len([t for t in anim_list if t=='anim']), n_nouns)),\n 'p_1per': r(d(len([t for t in pers_list if t=='1per']), n_verbs)),\n 'p_3per': r(d(len([t for t in pers_list if t=='3per']), n_verbs)),\n 'p_past': r(d(len([t for t in tns_list if t=='past']), n_verbs)),\n 'p_fut': r(d(len([t for t in tns_list if t=='futr']), n_verbs)),\n 'p_pres': r(d(len([t for t in tns_list if t=='pres']), n_verbs)),\n 'p_perf': r(d(len([t for t in asp_list if t=='perf']), n_verbs)),\n 'p_conj': r(d(len(fa('\\sбы?\\s',text)), n_verbs)),\n #lexical features\n 'p_uncert': r(len(fa('|'.join(uncert), text.lower())) / len_word),\n 'p_cert': r(len(fa('|'.join(cert), text.lower())) / len_word),\n 'p_quan': r(len(fa('|'.join(quan), text.lower())) / len_word),\n 'p_imper': r(len(fa('|'.join(imper), text.lower())) / len_word),\n 'p_racio': r(len(fa('|'.join(racio), text.lower())) / len_word),\n 'p_dimin': r(len(fa('|'.join(dimin), text.lower())) / len_word), \n 'p_extrem': r(len(fa('|'.join(extrem), text.lower())) / len_word), \n 'p_like': r(len(fa('|'.join(like), text.lower())) / len_word), \n 'p_dislike': r(len(fa('|'.join(dislike), text.lower())) / len_word), \n 'p_polite': r(len(fa('|'.join(polite), text.lower())) / len_word), \n 'p_obscene': r(len(fa('|'.join(obscene), text.lower())) / len_word), \n 'p_slang': r(len(fa('|'.join(slang), text.lower())) / len_word)\n }\n \n for f in pos_types:\n features['p_'+f] = r(len([t for t in pos_list if t==f])/len(pos_list))\n \n return features\n\n\nclass TraitModel():\n def __init__(self, xname, traits, word_vectorizer, pos_vectorizer, \n library, test_size, morph, classifier, classifier_params={}, \n curves_params={'co':0, 'steep':0, 'ec50':0.5}):\n self.xname = xname\n self.traits = traits\n self.wv = word_vectorizer\n self.posv = pos_vectorizer\n self.lib = library\n self.test_size = test_size\n self.morph = morph\n self.models = {}\n self.quality = {'train':{}, 'test':{}}\n self.cl = classifier\n self.cl_params = classifier_params\n self.ct_params = curves_params\n \n \n def fit(self, data, mtype='n', summary=True):\n self.mtype = mtype \n #extract features\n df_feat = pd.DataFrame.from_records(list(data[self.xname].apply(\n extract_features, morph=self.morph)))\n df_feat.index = data.index\n data = pd.concat([data, df_feat], axis=1, join='inner')\n feat_names = list(extract_features('ы', morph=self.morph).keys())\n feat_names.remove('pos_form')\n #apply curve transformation\n for f in feat_names:\n data[f] = ct(data[f], **self.ct_params) \n #clean before vectorization\n data[self.xname] = data[self.xname].apply(cleanse)\n #train-test split \n train, test = 
train_test_split(data, test_size=self.test_size, random_state=42)\n #vectorize\n train_w_vec = self.wv.fit_transform(train.loc[:,'text']) #words tf:idf\n test_w_vec = self.wv.transform(test.loc[:,'text'])\n train_p_vec = self.posv.fit_transform(train.loc[:,'pos_form']) #pos tf:idf\n test_p_vec = self.posv.transform(test.loc[:,'pos_form'])\n X_train = np.hstack((train_w_vec.todense(), \n train_p_vec.todense(), \n train.loc[:,feat_names]))\n X_test = np.hstack((test_w_vec.todense(), \n test_p_vec.todense(), \n test.loc[:,feat_names]))\n \n self.fitted_features = self.wv.get_feature_names() \\\n + self.posv.get_feature_names() \\\n + feat_names \n self.data = data\n self.train = train\n self.test = test\n self.X_train = X_train\n self.X_test = X_test\n self.feat_names = feat_names\n \n # build feature models\n def build_model(X_train, X_test, y_train, y_test, model):\n model.fit(X_train, y_train)\n return model\n \n for trait in self.traits:\n lm = self.cl(**self.cl_params)\n trait = trait+'_nom' if self.mtype == 'n' else trait \n self.models[trait] = build_model(self.X_train, self.X_test, \n self.train.loc[:,trait], \n self.test.loc[:,trait], \n model=lm)\n \n if summary: self.summary(confusion=False, verbose=False)\n \n \n def summary(self, correlations=0.1, vec_tokens=15, coefs = 10,\n all_traits=True, clr=True, confusion=True, verbose=True):\n pr = (lambda x: print(x)) if verbose else (lambda x: None)\n def title(name, m='=', l=50, up=True):\n n = name.upper() if up else name\n return '\\n{}\\n{}\\n{}'.format(m*l, n, m*l)\n \n if correlations > 0:\n pr(title('CORRELATIONS'))\n for trait in self.traits:\n pr(title(trait, m='-', l=20, up=False))\n for feat in self.feat_names:\n cor = corr(self.data.loc[:,trait], self.data.loc[:,feat])\n if abs(cor[0]) > correlations:\n pr('{} | {} : r = {:.2}'.format(feat, trait, cor[0], cor[1]))\n \n def vect_summary(vectorizer, name, show_tokens):\n pr(title(name))\n pr('\\nIncluded tokens ({})'.format(len(vectorizer.get_feature_names())))\n pr(np.array(vectorizer.get_feature_names())\\\n [np.random.randint(0, len(vectorizer.get_feature_names()), show_tokens)])\n pr('\\nExcluded tokens ({})'.format(len(vectorizer.stop_words_)))\n pr(np.array(list(vectorizer.stop_words_))\\\n [np.random.randint(0, len(vectorizer.stop_words_), show_tokens)]) \n \n if vec_tokens > 0:\n vect_summary(self.wv, 'words', vec_tokens)\n vect_summary(self.posv, 'pos tags', vec_tokens) \n \n pr(title('prediction quality'))\n if self.mtype == 'n' and all_traits:\n for trait in self.traits:\n trait = trait+'_nom'\n pr(title(trait, m='-', l=20, up=False))\n model = self.models[trait]\n y_train = self.train.loc[:,trait]\n y_test = self.test.loc[:,trait]\n y_train_pred = model.predict(self.X_train)\n y_test_pred = model.predict(self.X_test)\n self.quality['train'][trait+'_acc'] = accuracy_score(y_train, y_train_pred)\n self.quality['test'][trait+'_acc'] = accuracy_score(y_test, y_test_pred)\n pr('\\nAccuracy on train: {:.2%}'.format(accuracy_score(y_train, y_train_pred)))\n if clr: pr(classification_report(y_train, y_train_pred))\n pr('Accuracy on test: {:.2%}'.format(accuracy_score(y_test, y_test_pred)))\n if clr: pr(classification_report(y_test, y_test_pred))\n if confusion:\n labels = y_train.unique()\n sns.set_context(\"notebook\")\n plt.figure(figsize=(4,3))\n sns.heatmap(data=confusion_matrix(y_test, y_test_pred, labels = labels), \n annot=True, fmt=\"d\", cbar=False, \n xticklabels=labels, yticklabels=labels, cmap='viridis')\n plt.xlabel('Predicted')\n plt.ylabel('True')\n plt.title(\"Confusion matrix for \"+y_train.name, \n fontsize=12, fontweight='bold');\n plt.show()\n \n if self.mtype == 'c' and all_traits: \n for trait in self.traits:\n pr(title(trait, m='-', l=20, up=False))\n model = self.models[trait]\n y_train = self.train.loc[:,trait]\n y_test = self.test.loc[:,trait]\n y_train_pred = model.predict(self.X_train)\n y_test_pred = model.predict(self.X_test)\n self.quality['train'][trait+'_R2'] = r2_score(y_train, y_train_pred)\n self.quality['test'][trait+'_R2'] = r2_score(y_test, y_test_pred)\n pr('MAPE on train: {:.2f}%'.format(mape(y_train, y_train_pred)))\n pr('R2 on train: {:.3f}'.format(r2_score(y_train, y_train_pred)))\n pr('\\nMAPE on test: {:.2f}%'.format(mape(y_test, y_test_pred)))\n pr('R2 on test: {:.3f}'.format(r2_score(y_test, y_test_pred)))\n \n pr(title('Mean quality', m='-', l=20, up=False))\n metric = 'R2' if self.mtype == 'c' else 'Accuracy'\n for k,v in self.quality.items():\n pr('Mean {} on {}: {:.3}'.format(metric, k, sum(v.values())/len(v.values())))\n \n if coefs and str(type(list(self.models.values())[0])) == \\\n \"<class 'sklearn.linear_model.logistic.LogisticRegression'>\":\n pr(title('coefs'))\n for trait in self.traits: \n trait = trait + '_nom' if self.mtype == 'n' else trait\n pr(title(trait, m='-', l=20, up=False))\n for i, level in enumerate(self.models[trait].classes_):\n pr('\\n'+level.upper())\n features = self.fitted_features\n coefs_ = self.models[trait].coef_.tolist()[i]\n coefdf = pd.DataFrame({'Feature' : features, \n 'Coefficient' : coefs_})\n coefdf = coefdf.sort_values(['Coefficient', 'Feature'], ascending=[0, 1])\n pr(coefdf.head(coefs))\n \n \n def export(self, path='models/', fname='model'):\n with open(path+fname+'.pickle', 'wb') as f:\n pickle.dump(self, f)\n \n \n def predict(self, text, prob=False, stimulus=True):\n feats = extract_features(text, morph=self.morph)\n featvec = np.array([feats[f] for f in self.feat_names])\n X = np.hstack((self.wv.transform([text]).todense(), \n self.posv.transform([feats['pos_form']]).todense(), \n np.matrix(featvec)))\n if prob:\n predictions = {}\n for trait in self.traits:\n trait = trait + '_nom' if self.mtype == 'n' else trait\n pred = self.models[trait].predict_proba(X)\n predictions[trait] = {clss: pred[0][i] for i, clss \\\n in enumerate(self.models[trait].classes_)}\n else:\n predictions = {k:v.predict(X) for k, v in self.models.items()}\n if stimulus: \n predictions['X'] = X \n return predictions\n\n\ndef plot_traits(model, text, strict=True, mqw = 0.5, corrw = 0.3, textlen=700):\n values = {'low': 1, 'average': 2, 'high': 3}\n sd = 1\n traits = model.traits\n trait_labels = ['Extraversion', 'Agreeableness', 'Conscientiousness', \n 'Emotionality', 'Openness to Experience', 'Honesty-Humility']\n\n pred = model.predict(text, prob=False, stimulus=False)\n predp = model.predict(text, prob=True, stimulus=False)\n yval = [values[pred[trait+'_nom'][0]] - \\\n predp[trait+'_nom']['low']*corrw + \\\n predp[trait+'_nom']['high']*corrw \\\n for trait in traits]\n xval = np.arange(len(traits))\n limits = [sd*(1-predp[trait+'_nom'][pred[trait+'_nom'][0]]+0.25) for trait in traits]\n\n textlen_penalty = 1 if len(text) > textlen else len(text) / textlen\n if strict:\n conf = np.mean(list(model.quality['test'].values())) * \\\n np.mean([max(v.values()) for v in predp.values()]) * textlen_penalty\n else:\n conf = (np.mean(list(model.quality['test'].values()))*(mqw) + \\\n np.mean([max(v.values()) for v in predp.values()])*(1-mqw)) * textlen_penalty\n\n fig = plt.figure(figsize=(7,6))\n plt.xticks(xval, 
trait_labels, rotation=-45)\n plt.yticks(sorted(values.values()), ('Low', 'Average', 'High'))\n colors = ['tab:orange', 'tab:green', 'tab:blue', 'tab:cyan', 'tab:purple', 'tab:olive']\n plt.xlabel('Trait', fontsize=14)\n plt.ylabel('Level', fontsize=14)\n for i, c in enumerate(colors): \n plt.errorbar(xval[i], yval[i], yerr=limits[i], \n uplims=True, lolims=True, \n fmt='o', markersize='32',ecolor=c, \n markerfacecolor=c, elinewidth=2, capsize=4)\n plt.ylim(min(min(yval), min(values.values()))-max(limits)*1.5, \n max(max(values.values()), max(yval))+max(limits)*1.5)\n plt.xlim(min(xval)-0.5, max(xval)+0.5)\n plt.title(('Predicted profile (model confidence: {:.1%})'.format(conf)), \n fontsize=16, fontweight='bold');\n fig.patch.set_facecolor('0.2')\n plt.show();","sub_path":"TraitModel.py","file_name":"TraitModel.py","file_ext":"py","file_size_in_byte":21057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"606950051","text":"\"\"\"Development Settings.\"\"\"\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# Django Debug Toolbar\nINSTALLED_APPS += [\n 'debug_toolbar',\n 'project_name.app_name',\n]\n\n# Additional middleware introduced by debug toolbar\nMIDDLEWARE += [\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\n# Show emails to console in DEBUG mode\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n# Adding INTERNAL_IPS for Debug Toolbar\n# See: https://django-debug-toolbar.readthedocs.io/en/stable/installation.html\nINTERNAL_IPS = [\n '127.0.0.1',\n]\n","sub_path":"project_container/config/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"162385924","text":"import requests_mock\n\nfrom src.modules.discovery.apiserver import ApiServer, ApiServerDiscovery\nfrom src.core.events.types import Event\nfrom src.core.events import handler\n\ndef test_ApiServer():\n\n with requests_mock.Mocker() as m:\n m.get('https://mockOther:443', text='elephant')\n m.get('https://mockKubernetes:443', text='{\"code\":403}')\n\n e = Event()\n e.port = 443\n e.host = 'mockOther'\n\n a = ApiServerDiscovery(e)\n a.execute()\n \n e.host = 'mockKubernetes'\n a.execute()\n\n# We should only generate an ApiServer event for a response that looks like it came from a Kubernetes node\n@handler.subscribe(ApiServer)\nclass testApiServer(object):\n def __init__(self, event):\n assert event.host == 'mockKubernetes'\n","sub_path":"tests/discovery/test_apiserver.py","file_name":"test_apiserver.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"217187007","text":"\"\"\"\nTests for terminal velocity module. Updated by G.W. 
on 10/1/2018.\n\"\"\"\n\nimport fluidization as fd\nfrom pytest import approx\n\n# Parameters to use for terminal velocity tests\n# ----------------------------------------------------------------------------\n\ncd = 11.6867 # drag coefficient [-]\ndp = 0.00016 # particle diameter [m]\nmu = 1.8e-5 # gas viscosity [kg/(m s)]\nphi = 0.67 # particle sphericity [-]\nrhog = 1.2 # gas density [kg/m^3]\nrhos = 2600 # particle density [kg/m^3]\n\n\n# Functions to test\n# ----------------------------------------------------------------------------\n\ndef test_ut():\n # terminal velocity [m/s]\n ut = fd.ut(cd, dp, rhog, rhos)\n assert ut == approx(0.6227, rel=1e-2)\n\n\ndef test_ut_haider():\n # terminal velocity [m/s]\n ut_haider = fd.ut_haider(dp, mu, phi, rhog, rhos)\n assert ut_haider == approx(0.8857, rel=1e-2)\n\n\ndef test_ut_ganser():\n # drag coefficient [-], reynolds number [-], terminal velocity [m/s]\n cd, re, ut_ganser = fd.ut_ganser(dp, mu, phi, rhog, rhos)\n assert (cd, re, ut_ganser) == approx((11.6867, 6.6453, 0.6230), rel=1e-2)\n","sub_path":"tests/test_terminal_velocity.py","file_name":"test_terminal_velocity.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"92634100","text":"import sys\r\nimport datetime\r\nimport shutil\r\n\r\ndef entry():\r\n if len(sys.argv) == 3:\r\n now = datetime.datetime.now()\r\n newFile = now.strftime('%Y-%m-%d-%H-%M-%S.Exe')\r\n shutil.move(sys.argv[1],sys.argv[2] + '\\\\' + newFile)\r\n print(sys.argv[1] + \" rename \" + newFile)\r\nif __name__ == \"__main__\":\r\n entry()\r\n","sub_path":"ReNameFile.py","file_name":"ReNameFile.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"126249069","text":"'''This script goes along the blog post\n\"Building powerful image classification models using very little data\"\nfrom blog.keras.io.\nIt uses data that can be downloaded at:\nhttps://www.kaggle.com/c/dogs-vs-cats/data\nIn our setup, we:\n- created a data/ folder\n- created train/ and validation/ subfolders inside data/\n- created cats/ and dogs/ subfolders inside train/ and validation/\n- put the cat pictures index 0-999 in data/train/cats\n- put the cat pictures index 1000-1400 in data/validation/cats\n- put the dogs pictures index 12500-13499 in data/train/dogs\n- put the dog pictures index 13500-13900 in data/validation/dogs\nSo that we have 1000 training examples for each class, and 400 validation examples for each class.\nIn summary, this is our directory structure:\n```\ndata/\n train/\n dogs/\n dog001.jpg\n dog002.jpg\n ...\n cats/\n cat001.jpg\n cat002.jpg\n ...\n validation/\n dogs/\n dog001.jpg\n dog002.jpg\n ...\n cats/\n cat001.jpg\n cat002.jpg\n ...\n```\n'''\n\nimport argparse\nimport json\n\nfrom keras import applications\nfrom keras import backend as K\nfrom keras.callbacks import TensorBoard\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import SGD\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# Import functions for creating data generators and training model,\n# those are the same and can be reused. 
Great!\nfrom small_convnet_1 import create_data_generators, train_model\n\ndef create_model(params):\n input_shape = (params.img_height, params.img_width, 3)\n\n # build the VGG16 network\n base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)\n\n # build a classifier model to put on top of the convolutional model\n top_model = Sequential()\n top_model.add(Flatten(input_shape=base_model.output_shape[1:]))\n top_model.add(Dense(256, activation='relu'))\n top_model.add(Dropout(params.drop_rate))\n top_model.add(Dense(1, activation='sigmoid'))\n\n # note that it is necessary to start with a fully-trained\n # classifier, including the top classifier,\n # in order to successfully do fine-tuning\n top_model.load_weights(params.top_path)\n\n # add the model on top of the convolutional base\n model = Model(inputs=base_model.input, outputs=top_model(base_model.output))\n\n # set the first 15 layers (up to the last conv block)\n # to non-trainable (weights will not be updated)\n for layer in model.layers[1:15]:\n layer.trainable = False\n\n # compile the model with a SGD/momentum optimizer\n # and a very slow learning rate.\n model.compile(loss='binary_crossentropy',\n optimizer=SGD(lr=params.learning_rate, momentum=0.9),\n metrics=['accuracy'])\n\n return model\n\ndef main(params):\n model = create_model(params)\n\n train_generator, validation_generator = create_data_generators(params)\n\n history = train_model(model, train_generator, validation_generator, params)\n\n json.dump(history.history, open(params.metrics_path, 'w'))\n model.save_weights(params.save_path)\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--train_data_dir', type=str, default='data/train',\n help=\"Path to directory with training data.\")\n parser.add_argument('--nb_train_samples', type=int, default=2000,\n help=\"Number of training samples.\")\n parser.add_argument('--val_data_dir', type=str, default='data/validation',\n help=\"Path to directory with test data.\")\n parser.add_argument('--nb_val_samples', type=int, default=800,\n help=\"Number of test samples.\")\n parser.add_argument('--img_height', type=int, default=150,\n help=\"Images will be resized to this height.\")\n parser.add_argument('--img_width', type=int, default=150,\n help=\"Images will be resized to this width.\")\n parser.add_argument('--epochs', type=int, default=50,\n help=\"Epochs of training.\")\n parser.add_argument('--batch_size', type=int, default=16,\n help=\"Batch size.\")\n parser.add_argument('--workers', type=int, default=4,\n help=\"Maximum number of processes that will execute the generator.\")\n parser.add_argument('--learning_rate', type=float, default=0.0001,\n help=\"Momentum(0.9) learning rate.\")\n parser.add_argument('--drop_rate', type=float, default=0.5,\n help=\"Dense layer dropout rate.\")\n parser.add_argument('--shear_range', type=float, default=0.2,\n help=\"Shear intensity (angle) in counter-clockwise direction in degrees.\")\n parser.add_argument('--zoom_range', type=float, default=0.2,\n help=\"Range for random zoom: [1 - zoom_range, 1 + zoom_range].\")\n parser.add_argument('--log_dir', type=str, default='logs/vgg',\n help=\"Where to save TensorBoard logs.\")\n parser.add_argument('--metrics_path', type=str, default='vgg_metrics.json',\n help=\"Where to save json with metrics after training.\")\n parser.add_argument('--top_path', type=str, default='fc_model.h5',\n help=\"Where to load the top model 
weights from.\")\n parser.add_argument('--save_path', type=str, default='vgg_model.h5',\n help=\"Where to save model weights after training.\")\n params = parser.parse_args()\n\n main(params)\n","sub_path":"jdsz2-materialy-python/DL/3_klasyfikacja/Handson/fine_tune_3.py","file_name":"fine_tune_3.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"82244882","text":"#!/user/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"Base class implemented for tests\"\"\"\n\nfrom contextlib import contextmanager\nimport unittest\nfrom app import createApp\nfrom app.config import TestConfig\n\n@contextmanager\ndef transactionContext(testCase):\n \"\"\"Rollback transactions after tests\"\"\"\n session = testCase.app.db.session\n try:\n session.begin_nested() # open a sub-trasaction\n yield session\n finally:\n session.rollback() # does not persis the changes\n session.close()\n\n\nclass TestCase(unittest.TestCase):\n \"\"\"Basic class for testcase\"\"\"\n def setUp(self):\n \"\"\"Set up application for the tests\"\"\"\n # print(os.environ.get('MYSQL_PASSWORD'))\n self.app = createApp(TestConfig)\n self.appContext = self.app.app_context()\n self.appContext.push()\n self.testApp = self.app.test_client()\n self._savepointContext = transactionContext(self)\n # the context has member of enter or exist\n self._savepointContext.__enter__() # pylint: disable=maybe-no-member\n\n def tearDown(self):\n \"\"\"Close application for the tests\"\"\"\n self._savepointContext.__exit__(None, None, None) # pylint: disable=maybe-no-member\n self.appContext.pop()\n self.appContext = None\n self.testApp = None\n self.app = None\n","sub_path":"backend/tests1/testCase.py","file_name":"testCase.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"600333732","text":"import asyncio\ntry:\n import uvloop\nexcept ImportError:\n uvloop = None\nimport argparse\nimport logging\nimport importlib\nimport time\nfrom email.utils import formatdate\nfrom typing import Callable, Dict, Optional, Union, cast\n\nfrom quic_logger import QuicDirectoryLogger\nimport config\nimport aioquic\nfrom aioquic.tls import SessionTicket\nfrom aioquic.quic.configuration import QuicConfiguration\nfrom aioquic.h0.connection import H0_ALPN, H0Connection\nfrom aioquic.h3.connection import H3_ALPN, H3Connection\nfrom aioquic.h3.events import DataReceived, H3Event, HeadersReceived\nfrom aioquic.h3.exceptions import NoAvailablePushIDError\n\nfrom aioquic.quic.events import (\n DatagramFrameReceived,\n QuicEvent,\n ProtocolNegotiated,\n StreamDataReceived\n)\n\nfrom protocol.socketFactory import QuicFactorySocket\nfrom protocol.server import start_server\n\nAsgiApplication = Callable\nHttpConnection = Union[H0Connection, H3Connection]\n\nSERVER_NAME = \"aioquic/\" + aioquic.__version__\n\nimport logger\n\nlogger = logger.logger('sky_server')\ntotalQuicEvents = 0\n\nclass HttpRequestHandler:\n def __init__(\n self,\n *,\n authority: bytes,\n connection: HttpConnection,\n protocol: QuicFactorySocket,\n scope: Dict,\n stream_ended: bool,\n stream_id: int,\n transmit: Callable[[], None]\n ) -> None:\n self.authority = authority\n self.connection = connection\n self.protocol = protocol\n self.queue: asyncio.Queue[Dict] = asyncio.Queue()\n self.scope = scope\n self.stream_id = stream_id\n self.transmit = transmit\n\n if stream_ended:\n self.queue.put_nowait({\"type\": 
\"http.request\"})\n\n def http_event_received(self, event: H3Event) -> None:\n if isinstance(event, DataReceived):\n self.queue.put_nowait(\n {\n \"type\": \"http.request\",\n \"body\": event.data,\n \"more_body\": not event.stream_ended,\n }\n )\n elif isinstance(event, HeadersReceived) and event.stream_ended:\n self.queue.put_nowait(\n {\"type\": \"http.request\", \"body\": b\"\", \"more_body\": False}\n )\n\n async def run_asgi(self, app: AsgiApplication) -> None:\n await application(self.scope, self.receive, self.send)\n\n async def receive(self) -> Dict:\n return await self.queue.get()\n\n async def send(self, message: Dict) -> None:\n\n logger.debug('got send request: with stream id>{}:for messagetype:{}'.format(self.stream_id, message['type']))\n if message[\"type\"] == \"http.response.start\":\n self.connection.send_headers(\n stream_id=self.stream_id,\n headers=[\n (b\":status\", str(message[\"status\"]).encode()),\n (b\"server\", SERVER_NAME.encode()),\n (b\"date\", formatdate(time.time(), usegmt=True).encode()), \n ]\n + [(k, v) for k, v in message[\"headers\"]],\n )\n elif message[\"type\"] == \"http.response.body\":\n self.connection.send_data(\n stream_id=self.stream_id,\n data=message.get(\"body\", b\"\"),\n end_stream=not message.get(\"more_body\", False),\n )\n elif message[\"type\"] == \"http.response.push\" and isinstance(\n self.connection, H3Connection\n ):\n request_headers = [\n (b\":method\", b\"GET\"),\n (b\":scheme\", b\"https\"),\n (b\":authority\", self.authority),\n (b\":path\", message[\"path\"].encode()),\n ] + [(k, v) for k, v in message[\"headers\"]]\n\n # send push promise\n try:\n push_stream_id = self.connection.send_push_promise(\n stream_id=self.stream_id, headers=request_headers\n )\n except NoAvailablePushIDError:\n return\n\n # fake request\n cast(HttpServerProtocol, self.protocol).http_event_received(\n HeadersReceived(\n headers=request_headers, stream_ended=True, stream_id=push_stream_id\n )\n )\n self.transmit()\n\n\nclass HttpServerProtocol(QuicFactorySocket):\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._handlers : Dict[int, HttpRequestHandler] = {}\n self._http : Optional[HttpConnection] = None\n self.quic_client : bool = False\n\n def http_event_received(self, event: H3Event) -> None:\n if isinstance(event, HeadersReceived) and event.stream_id not in self._handlers:\n authority = None\n headers = []\n http_version = \"0.9\" if isinstance(self._http, H0Connection) else \"3\"\n raw_path = b\"\"\n method = \"\"\n protocol = None\n for header, value in event.headers:\n if header == b\":authority\":\n authority = value\n headers.append((b\"host\", value))\n elif header == b\":method\":\n method = value.decode()\n elif header == b\":path\":\n raw_path = value\n elif header == b\":protocol\":\n protocol = value.decode()\n elif header and not header.startswith(b\":\"):\n headers.append((header, value))\n\n if b\"?\" in raw_path:\n path_bytes, query_string = raw_path.split(b\"?\", maxsplit=1)\n else:\n path_bytes, query_string = raw_path, b\"\"\n path = path_bytes.decode()\n self._quic._logger.info(\"HTTP request %s %s\", method, path)\n logger.debug(\"HTTP request:{}, {}\".format(method, path) )\n \n \n # FIXME: add a public API to retrieve peer address\n client_addr = self._http._quic._network_paths[0].addr\n client = (client_addr[0], client_addr[1])\n\n extensions: Dict[str, Dict] = {}\n if isinstance(self._http, H3Connection):\n 
extensions[\"http.response.push\"] = {}\n scope = {\n \"client\": client,\n \"extensions\": extensions,\n \"headers\": headers,\n \"http_version\": http_version,\n \"method\": method,\n \"path\": path,\n \"query_string\": query_string,\n \"raw_path\": raw_path,\n \"root_path\": \"\",\n \"scheme\": \"https\",\n \"type\": \"http\",\n }\n handler = HttpRequestHandler(\n authority=authority,\n connection=self._http,\n protocol=self,\n scope=scope,\n stream_ended=event.stream_ended,\n stream_id=event.stream_id,\n transmit=self.transmit,\n )\n logger.info('stream id: {}'.format(event.stream_id))\n self._handlers[event.stream_id] = handler\n asyncio.ensure_future(handler.run_asgi(application))\n\n elif (\n isinstance(event, (DataReceived, HeadersReceived))\n and event.stream_id in self._handlers\n ):\n handler = self._handlers[event.stream_id]\n handler.http_event_received(event)\n\n def quic_event_received(self, event: QuicEvent) -> None:\n global totalQuicEvents\n totalQuicEvents += 1\n # logger.info('quic event received: {}'.format(totalQuicEvents))\n if isinstance(event, ProtocolNegotiated):\n if event.alpn_protocol.startswith(\"h3-\"):\n self._http = H3Connection(self._quic)\n elif event.alpn_protocol.startswith(\"hq-\"):\n self._http = H0Connection(self._quic)\n elif event.alpn_protocol.startswith(\"quic\"):\n self.quic_client = True\n\n\n if isinstance(event, DatagramFrameReceived):\n if event.data == b'quic':\n self._quic.send_datagram_frame(b'quic-ack')\n\n if isinstance(event, StreamDataReceived):\n # logger.info('received some stream data')\n if self.quic_client is True:\n print(f\"print event {event.data}\")\n data = b'quic stream-data recv'\n end_stream = False\n self._quic.send_stream_data(event.stream_id, data, end_stream)\n else:\n #TODO: Yet to handle a http_client streamDataReceived event\n pass\n\n # pass event to the HTTP layer\n if self._http is not None:\n for http_event in self._http.handle_event(event):\n logger.debug('http event received')\n self.http_event_received(http_event)\n\n\nclass SessionTicketStore:\n \"\"\"\n Simple in-memory store for session tickets.\n \"\"\"\n\n def __init__(self) -> None:\n self.tickets: Dict[bytes, SessionTicket] = {}\n\n def add(self, ticket: SessionTicket) -> None:\n self.tickets[ticket.ticket] = ticket\n\n def pop(self, label: bytes) -> Optional[SessionTicket]:\n return self.tickets.pop(label, None)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"QUIC server\")\n parser.add_argument(\n \"app\",\n type=str,\n nargs=\"?\",\n default=\"demo:app\",\n help=\"the ASGI application as <module>:<attribute>\",\n )\n parser.add_argument(\n \"--host\",\n type=str,\n default=config.IPaddr,\n # default=\"::\",\n help=\"listen on the specified address (defaults to ::)\",\n )\n parser.add_argument(\n \"--port\",\n type=int,\n default=4433,\n help=\"listen on the specified port (defaults to 4433)\",\n )\n parser.add_argument(\n \"-q\",\n \"--quic-log\",\n type=str,\n help=\"log QUIC events to QLOG files in the specified directory\",\n )\n parser.add_argument(\n \"-l\",\n \"--secrets-log\",\n type=str,\n help=\"log secrets to a file, for use with Wireshark\",\n )\n parser.add_argument(\n \"-c\",\n \"--certificate\",\n type=str,\n required=True,\n help=\"load the TLS certificate from the specified file\",\n )\n parser.add_argument(\n \"-k\",\n \"--private-key\",\n type=str,\n required=True,\n help=\"load the TLS private key from 
the specified file\",\n )\n parser.add_argument(\n \"--retry\", action=\"store_true\", help=\"send a retry for new connections\",\n )\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"increase logging verbosity\"\n )\n args = parser.parse_args()\n\n logging.basicConfig(\n format=\"%(asctime)s %(levelname)s %(name)s %(message)s\",\n level=logging.DEBUG if args.verbose else logging.INFO,\n )\n\n # import ASGI application\n module_str, attr_str = args.app.split(\":\", maxsplit=1)\n module = importlib.import_module(module_str)\n application = getattr(module, attr_str)\n\n # create QUIC logger\n if args.quic_log:\n quic_logger = QuicDirectoryLogger(args.quic_log)\n else:\n quic_logger = None\n\n # open SSL log file\n if args.secrets_log:\n secrets_log_file = open(args.secrets_log, \"a\")\n else:\n secrets_log_file = None\n\n configuration = QuicConfiguration(\n alpn_protocols=H3_ALPN + H0_ALPN + [\"quic\"],\n is_client=False,\n max_datagram_frame_size=65536,\n quic_logger=quic_logger,\n secrets_log_file=secrets_log_file,\n )\n\n configuration.load_cert_chain(args.certificate, args.private_key)\n\n ticket_store = SessionTicketStore()\n if uvloop is not None:\n uvloop.install()\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n start_server(\n args.host,\n args.port,\n configuration=configuration,\n create_protocol=HttpServerProtocol,\n session_ticket_fetcher=ticket_store.pop,\n session_ticket_handler=ticket_store.add,\n retry=args.retry,\n )\n )\n \n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"338982964","text":"# 全国大学生化工设计竞赛\nimport tornado.httpclient\nfrom bs4 import BeautifulSoup\nhttp_header = {'User-Agent':'Chrome'}\nschool='http://iche.zju.edu.cn/redir.php?catalog_id=186&cmd=saiqu&qid=178&cat=450'#东北 往年参赛学校\nreason=[]\nteam={'school':'','team':''}\nhttp_request=tornado.httpclient.HTTPRequest(url=school,method='GET',headers=http_header,connect_timeout=20,request_timeout=600)\nhttp_client = tornado.httpclient.HTTPClient()\nhttp_response = http_client.fetch(http_request)\n# print(http_response.code)\ntext=BeautifulSoup(http_response.body,'lxml')\n\n#following is for team of this time NUM:2\nsch = text.find_all('table', attrs={'class': \"bmingfo\"})[0]\ndetail = sch.find_all('td',attrs={'align':'left'})\n\nprint([item.string for item in detail])\n #THE END","sub_path":"tornado_cawlr/huagong2.py","file_name":"huagong2.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"557084923","text":"#!/usr/bin/python\n#convert a binary number to base 10\n\nx = 1\nprint(\"enter 0 to quit\")\n\nwhile (x != 0):\n num = str(input('Enter Bin: '))\n x = int(num,2)\n print(x)\n\n","sub_path":"Python/bin2int.py","file_name":"bin2int.py","file_ext":"py","file_size_in_byte":166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"460344495","text":"from DateTime import DateTime\nfrom zope.interface import alsoProvides\nfrom redomino.revision.interfaces import IRevision\nfrom redomino.revision.interfaces import IRevisionFile\n\ndef setup_contents(portal):\n # File\n portal.invokeFactory('Folder', 'revision1')\n revision1 = portal['revision1']\n revision1.setTitle('title')\n revision1.setDescription('description')\n 
alsoProvides(revision1, IRevision)\n\n revision1.invokeFactory('File', '1')\n revision11 = revision1['1']\n revision11.setTitle('1')\n revision11.setDescription('1')\n revision11.setSubject(['keyword1'])\n revision11.setCreationDate(DateTime())\n revision11.setEffectiveDate(DateTime())\n alsoProvides(revision11, IRevisionFile)\n\n revision11.content_status_modify('publish')\n\n revision1.invokeFactory('File', '2')\n revision12 = revision1['2']\n revision12.setTitle('2')\n revision12.setDescription('2')\n revision12.setCreationDate(DateTime()+12)\n revision12.setEffectiveDate(DateTime()+15)\n alsoProvides(revision12, IRevisionFile)\n\n revision1.invokeFactory('File', '3')\n revision13 = revision1['3']\n revision13.setTitle('3')\n revision13.setDescription('3')\n revision13.setCreationDate(DateTime()-30)\n revision13.setEffectiveDate(DateTime()-30)\n alsoProvides(revision13, IRevisionFile)\n\n revision13.content_status_modify('submit')\n\n portal.invokeFactory('Document', 'alien')\n alien = portal['alien']\n revision11.setRelatedItems([revision12, alien])\n revision12.setRelatedItems([revision11, alien])\n\n revision11.reindexObject()\n revision12.reindexObject()\n revision13.reindexObject()\n revision1.reindexObject()\n\n # Document\n portal.invokeFactory('Folder', 'revision2')\n revision2 = portal['revision2']\n revision2.setTitle('title')\n revision2.setDescription('description')\n alsoProvides(revision2, IRevision)\n\n revision2.invokeFactory('Document', '1')\n revision21 = revision2['1']\n revision21.setTitle('1')\n revision21.setDescription('1')\n revision21.setCreationDate(DateTime())\n revision21.setEffectiveDate(DateTime())\n alsoProvides(revision21, IRevisionFile)\n\n revision21.content_status_modify('publish_internally')\n\n revision2.invokeFactory('Document', '2')\n revision22 = revision2['2']\n revision22.setTitle('2')\n revision22.setDescription('2')\n revision22.setCreationDate(DateTime()+12)\n revision22.setEffectiveDate(DateTime()+15)\n alsoProvides(revision22, IRevisionFile)\n\n revision2.invokeFactory('Document', '3')\n revision23 = revision2['3']\n revision23.setTitle('3')\n revision23.setDescription('3')\n revision23.setCreationDate(DateTime()-30)\n revision23.setEffectiveDate(DateTime()-30)\n alsoProvides(revision23, IRevisionFile)\n\n revision23.content_status_modify('submit')\n\n alien = portal['alien']\n revision21.setRelatedItems([revision22, alien])\n revision22.setRelatedItems([revision21, alien])\n\n revision21.reindexObject()\n revision22.reindexObject()\n revision23.reindexObject()\n revision2.reindexObject()\n\n # Document2 (without effective date)\n portal.invokeFactory('Folder', 'revision3')\n revision3 = portal['revision3']\n revision3.setTitle('title')\n revision3.setDescription('description')\n alsoProvides(revision3, IRevision)\n\n revision3.invokeFactory('Document', '1')\n revision31 = revision3['1']\n revision31.setTitle('1')\n revision31.setDescription('1')\n revision31.setCreationDate(DateTime())\n alsoProvides(revision31, IRevisionFile)\n\n revision31.reindexObject()\n revision3.reindexObject()\n","sub_path":"redomino/revision/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"136515637","text":"import smtplib as smtp\n\ndef enviar_email():\n de = \"******@gmail.com\" # Usuário do GMail para envio\n senha = '*****' # Senha\n para = [\"evaldowolkers@gmail.com\"] # Destinatário\n mensagem = \"Subject: Teste\\n\\nTeste de 
envio de e-mail com SSL\" # Mensagem a ser enviada\n\n try:\n with smtp.SMTP_SSL('smtp.gmail.com', 465) as s:\n s.login(de, senha) # Efetuando login com o usuário e senha\n s.sendmail(de, para, mensagem) # Enviando e-mail\n s.close() # Fechando a conexão\n print(\"E-mail enviado!\")\n except Exception as erro:\n print(\"Não foi possível enviar o e-mail. Erro:\", erro)\n\nif __name__ == '__main__':\n enviar_email()","sub_path":"Secao37-TrabalhandoComEmails/Aula01_EnviandoEmailsSimples/codigo/send_mail_ssl.py","file_name":"send_mail_ssl.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"101060133","text":"# Devin Johnson\n# LING 571\n# HW7\n\nimport sys\nimport re\nimport numpy\nimport nltk\nimport math\nimport scipy\n\ndef pre_process(corpus):\n words = []\n # Remove punctuation, make lower\n for i in range(0, len(corpus)):\n curr_word = corpus[i].lower()\n\n # Remove punctuation\n curr_word = re.sub(r\"\\W+\", \"\", curr_word)\n \n if len(curr_word) >= 1:\n words.append(curr_word.lower())\n\n return words\n\ndef create_cooc_dict(words, window):\n # Find occurrences of each word in window\n cooc_dict = {}\n for i in range(0, len(words)):\n # Build appropriate window\n l = i - window\n r = i + window\n while l < 0:\n l += 1 \n while r >= len(words):\n r -= 1\n \n curr_word = words[i]\n if curr_word not in cooc_dict:\n cooc_dict[curr_word] = {}\n \n # Go through words within window\n for j in range(l, r+1):\n if j != i:\n if words[j] in cooc_dict[curr_word]:\n cooc_dict[curr_word][words[j]] += 1\n else:\n cooc_dict[curr_word][words[j]] = 1\n\n return cooc_dict\n\ndef apply_PMI(cooc_dict):\n # Get total words\n table_sum = 0\n for key in cooc_dict:\n for key2 in cooc_dict[key]:\n table_sum += cooc_dict[key][key2]\n\n # Get P(w) for all words\n probabilities = {}\n for key in cooc_dict:\n for key2 in cooc_dict[key]:\n if key not in probabilities:\n probabilities[key] = cooc_dict[key][key2]\n else:\n probabilities[key] += cooc_dict[key][key2]\n probabilities[key] = probabilities[key]/table_sum\n \n # Apply PMI\n for key in cooc_dict:\n for key2 in cooc_dict[key]:\n pw = probabilities[key]\n pf = probabilities[key2]\n pwf = cooc_dict[key][key2] / table_sum\n if pwf != 0:\n pmi = max(math.log(pwf / (pw * pf), 2), 0)\n cooc_dict[key][key2] = pmi\n else:\n cooc_dict[key][key2] = 0\n\n return cooc_dict\n\ndef cos_similarity(cooc_dict, words):\n with open(sys.argv[3], \"r\") as judgment, open(sys.argv[4], \"w\") as output:\n words = sorted(list(set(words)))\n my_similarities = []\n given_similiarities = []\n for line in judgment:\n # Get each word\n split = line.split(\",\")\n word_1 = split[0]\n index_word_i = 0\n word_2 = split[1]\n index_word_j = 0\n given_similiarities.append(split[2])\n \n # Get top 10 for word 1, 2\n word_1_top = sorted(cooc_dict[word_1].items(), key=lambda x: -x[1])\n word_2_top = sorted(cooc_dict[word_2].items(), key=lambda x: -x[1])\n\n # Build vector for word 1 and word 2\n word_1_vec = []\n word_2_vec = []\n \n for word in words:\n if word in cooc_dict[word_1]:\n word_1_vec.append(cooc_dict[word_1][word])\n else:\n word_1_vec.append(0)\n \n for word in words:\n if word in cooc_dict[word_2]:\n word_2_vec.append(cooc_dict[word_2][word])\n else:\n word_2_vec.append(0)\n \n similarity = scipy.spatial.distance.cosine(word_1_vec, word_2_vec)\n my_similarities.append(similarity)\n\n # Printout\n output.write(word_1 + \": \")\n for element in word_1_top:\n output.write(element[0] + 
\":\" + str(element[1]) + \" \")\n output.write(\"\\n\")\n output.write(word_2 + \": \")\n for element in word_2_top:\n output.write(element[0] + \":\" + str(element[1]) + \" \")\n output.write(\"\\n\")\n output.write(word_1 + \",\" + word_2 + \":\" + str(similarity) + \"\\n\")\n\n output.write(\"Correlation:\" + str(scipy.stats.spearmanr(my_similarities, given_similiarities)[0]))\n\n\n\n# Create a coocurrence matrix (dictionaries), weighted as necessary\nwindow = int(sys.argv[1])\nweighting = sys.argv[2]\nwords = pre_process(nltk.corpus.brown.words())\ncooc_dict = create_cooc_dict(words,window)\nif weighting == \"PMI\":\n cooc_dict = apply_PMI(cooc_dict)\n\n# Compute cosine similarities\ncos_similarity(cooc_dict, words)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"LING571/hw7/hw7.py","file_name":"hw7.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"262120240","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n\r\n def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:\r\n s = []\r\n dic = {}\r\n idx = 0\r\n for i in range(len(popped)):\r\n if dic.__contains__(popped[i]):\r\n if s[len(s) - 1] == popped[i]:\r\n del s[len(s) - 1]\r\n else:\r\n return False\r\n else:\r\n while idx < len(pushed) and pushed[idx] != popped[i]:\r\n s.append(pushed[idx])\r\n dic[pushed[idx]] = True\r\n idx += 1\r\n idx += 1\r\n return not s","sub_path":"0946. Validate Stack Sequences/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"72212882","text":"# -*- coding: utf-8 -*-\n# =============================================================================\n# module : arbitrary_shape.py\n# author : Matthieu Dartiailh\n# license : MIT license\n# =============================================================================\nfrom atom.api import (Str, Callable)\nfrom traceback import format_exc\nimport numpy as np\n\nfrom ..entry_eval import exec_entry\nfrom .base_shapes import AbstractShape\n\n\nDEFAULT_FORMULA = \\\n'''def c(self, time, unit):\n return 0.5*np.ones(len(time))'''\n\n\nclass ArbitraryShape(AbstractShape):\n \"\"\" Shape defined entirely by the user.\n\n \"\"\"\n #: Formula used to compute the shape of the pulse. 
It is compiled as\n #: a function using exec which must be of the following signature:\n #: c(self, time, unit) and return the pulse amplitude as a numpy array.\n #: 'time' is a numpy array which represents the times at which to compute\n #: the pulse\n #: 'unit' is the unit in which the time is expressed.\n #: During compilation, all the sequence local variables can be accessed\n #: (using the {} notation).\n formula = Str(DEFAULT_FORMULA).tag(pref=True)\n\n def eval_entries(self, sequence_locals, missing, errors, index):\n \"\"\" Evaluate the amplitude of the pulse.\n\n Parameters\n ----------\n sequence_locals : dict\n Known locals variables for the pulse sequence.\n\n missing : set\n Set of variables missing to evaluate some entries in the sequence.\n\n errors : dict\n Errors which occurred when trying to compile the pulse sequence.\n\n index : int\n Index of the pulse to which this shape object belongs.\n\n Returns\n -------\n result : bool\n Flag indicating whether or not the evaluation succeeded.\n\n \"\"\"\n prefix = '{}_'.format(index) + 'shape_'\n\n # Executing the formula :\n res, err = self.build_compute_function(sequence_locals, missing)\n\n if err:\n for k in err:\n errors[prefix+k] = err[k]\n\n return res\n\n def compute(self, time, unit):\n \"\"\" Computes the shape of the pulse at a given time.\n\n Parameters\n ----------\n time : ndarray\n Times at which to compute the modulation.\n\n unit : str\n Unit in which the time is expressed.\n\n Returns\n -------\n shape : ndarray\n Amplitude of the pulse.\n\n \"\"\"\n shape = self._shape_factory(self, time, unit)\n assert np.max(shape) < 1.0\n assert np.min(shape) > -1.0\n return shape\n\n def build_compute_function(self, sequence_locals, missing):\n \"\"\"Build the compute function from the formula.\n\n \"\"\"\n try:\n loc = exec_entry(self.formula, sequence_locals, missing)\n if not loc:\n return False, {}\n self._shape_factory = loc['c']\n except Exception:\n return False, {'exec_error': format_exc(limit=1)}\n\n return True, {}\n\n # --- Private API ---------------------------------------------------------\n\n #: Runtime build shape computer.\n _shape_factory = Callable()\n\n\nSHAPES = [ArbitraryShape]\n","sub_path":"hqc_meas/pulses/shapes/arbitrary_shape.py","file_name":"arbitrary_shape.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"364483395","text":"from typing import TYPE_CHECKING, Iterable, Mapping, Optional, Union\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.models import BaseOperator\nfrom airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook\n\nif TYPE_CHECKING:\n from airflow.hooks.dbapi import DbApiHook\n\n\nclass HelloMsSqlOperator(BaseOperator):\n def __init__(\n self,\n *,\n sql: str,\n mssql_conn_id: str = 'mssql_default',\n autocommit: bool = False,\n database: Optional[str] = None,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.mssql_conn_id = mssql_conn_id\n self.sql = sql\n self.autocommit = autocommit\n self.database = database\n\n def execute(self, context: dict) -> None:\n self._hello()\n hook = MsSqlHook(mssql_conn_id=self.mssql_conn_id, schema=self.database, conn_type='odbc')\n result = hook.get_first(self.sql)\n print(result)\n return result\n\n def _hello(self):\n print(\"This is from 
hello\")","sub_path":"plugins/hello_mssql_operator.py","file_name":"hello_mssql_operator.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"328851821","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport os\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], enable=True)\n\ncorpus = open(os.path.join(\"..\", \"pythonProject\", \"Irish_lyrics.txt\")).read()\ncorpus = corpus.lower().split('\\n')\n\ntokenizer = keras.preprocessing.text.Tokenizer()\ntokenizer.fit_on_texts(corpus)\ntotal_words = len(tokenizer.word_index)+1\nprint(tokenizer.word_index) # {'the': 1, 'and': 2, 'i': 3, 'to': 4, 'a': 5, 'of': 6, 'my': 7, 'in': 8, 'me': 9 ...}\nprint(total_words) # 2690\n\n# create n grams, starts from uni-gram, bi-gram, ..., n-gram\n# [What]\n# [What is]\n# [What is love]\n# [What is love by]\n# [What is love by Twice]\nsequences = []\nfor row in corpus:\n sequence = tokenizer.texts_to_sequences([row])[0]\n for i in range(1, len(sequence)):\n sequences.append(sequence[:i+1])\nsequences = np.array(sequences)\nprint(sequences.shape) # (12038,)\nmax_len = max([len(row) for row in sequences])\npadded = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=max_len)\n\nxs, ys = padded[:, :-1], padded[:, -1]\nys = keras.utils.to_categorical(ys, num_classes=total_words)\nprint(np.array(xs).shape, np.array(ys).shape) # (12038, 15) (12038, 2690)\n\nprint(tokenizer.word_index) # {'the': 1, 'and': 2, 'i': 3, 'to': 4, 'a': 5, 'of': 6, 'my': 7, 'in': 8, 'me': 9 ...}\nprint(tokenizer.word_index['in']) # 8\nprint(tokenizer.word_index['the']) # 1\nprint(tokenizer.word_index['town']) # 71\nprint(tokenizer.word_index['of']) # 6\nprint(tokenizer.word_index['athy']) # 713\nprint(tokenizer.word_index['one']) # 39\nprint(tokenizer.word_index['jeremy']) # 1790\nprint(tokenizer.word_index['lanigan']) # 1791\nprint(xs[5], ys[5]) # [0 0 0 0 0 0 0 0 0 51 12 96 1217 48 2] [0. 0. 0. ... 0. 0. 0.]\nprint(xs[6], ys[6]) # [0 0 0 0 0 0 0 0 0 0 0 0 0 0 2] [0. 0. 0. ... 0. 0. 
0.]\n\nmodel = keras.models.Sequential([\n keras.layers.Embedding(total_words, 64, input_length=max_len-1),\n keras.layers.Bidirectional(keras.layers.LSTM(32)),\n # keras.layers.Conv1D(128, 5, activation='relu'),\n # keras.layers.GlobalAveragePooling1D(),\n keras.layers.Dense(32, activation='relu'),\n keras.layers.Dense(total_words, activation='softmax')\n])\nmodel.compile(optimizer=tf.optimizers.Adam(), loss=tf.losses.categorical_crossentropy, metrics=['accuracy'])\nmodel.fit(xs, ys, epochs=300)\n\nreverse_word_index = dict([(index, word) for (word, index) in tokenizer.word_index.items()])\nseed = \"I've got a bad feeling about this\"\nfor _ in range(200):\n seed_pad = tokenizer.texts_to_sequences([seed])\n seed_pad = keras.preprocessing.sequence.pad_sequences(seed_pad, maxlen=max_len-1)\n predict = model.predict(seed_pad)\n predict_word = reverse_word_index[np.argmax(predict)]\n seed += \" \"+predict_word\nprint(seed)\n\"\"\"\nEpoch 297/300\n377/377 [==============================] - 3s 8ms/step - loss: 0.5648 - accuracy: 0.8477\nEpoch 298/300\n377/377 [==============================] - 4s 9ms/step - loss: 0.5609 - accuracy: 0.8446\nEpoch 299/300\n377/377 [==============================] - 4s 10ms/step - loss: 0.5529 - accuracy: 0.8456\nEpoch 300/300\n377/377 [==============================] - 3s 8ms/step - loss: 0.5510 - accuracy: 0.8468\nI've got a bad feeling about this was died strolling moonlight are dim belfast is \nhuff huff pure toome heartfrom easter polkas heartfrom crowds heartfrom there do see \nto save it sat on and it is tell to find the weirs drown of a tear for by the hand he \nbay die in down a kerry true oak part merry toome today to your provost and light i \nwas the mountain side the moonlight crystal sighed workin by by proud saxon i our mountain \nwild are side by by night proud the from dawn dawn tory reminded tune nest chirping the \ngood bubblin tie parlour the land of my word friends the gay weary ground reel door to \na a jail collar reel back reel back craw craw was adoration roam by to me from them and the \nbold deceiver spancil hill i pain weary mans huff i sighed for tough fair much out from \nfive our father jail row a good belfast victory sinking guard reel foaming nonsense finea \nfinea color jail tie goggles huff finea o death glass leave all to my true armless jewel \nwhen is out and it makes my true native are ill be agin the wearin before spancil heartfrom \nsinking gown\n\"\"\"","sub_path":"course3_week4_lesson2.py","file_name":"course3_week4_lesson2.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"502864714","text":"#!/usr/bin/env python\n\"\"\"\nCommand line tool for testing the pyoidc RP library\n\"\"\"\nimport importlib\nimport json\nimport logging\nimport argparse\n\nfrom future.backports.urllib.parse import urlparse\n\nfrom oic.utils.keyio import build_keyjar\n\nfrom otest.parse_cnf import parse_yaml_conf\nfrom otest.common import setup_logger\nfrom otest.io import ClIO\nfrom otest.result import Result\n\nfrom oidctest.op import func\nfrom oidctest.op import check\nfrom oidctest.op.prof_util import ProfileHandler\nfrom oidctest.op.tool import ClTester\nfrom oidctest.session import SessionHandler\n\nfrom requests.packages import urllib3\n\nurllib3.disable_warnings()\n\n__author__ = 'roland'\n\nlogger = logging.getLogger(\"\")\n\nif __name__ == '__main__':\n from oic.oic.message import factory as oic_message_factory\n\n from oidctest.op import 
profiles\n from oidctest.op import oper\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-y', dest='yaml_flows')\n parser.add_argument('-l', dest=\"log_name\")\n parser.add_argument('-p', dest=\"profile\")\n parser.add_argument('-t', dest=\"testid\")\n parser.add_argument('-g', dest=\"group\")\n parser.add_argument('-i', dest=\"id\")\n parser.add_argument('-x', dest='exit', action='store_true')\n parser.add_argument(dest=\"config\")\n cargs = parser.parse_args()\n\n cls_factories = {'': oper.factory}\n func_factory = func.factory\n FLOWS = parse_yaml_conf(cargs.yaml_flows, cls_factories, func_factory)\n\n CONF = importlib.import_module(cargs.config)\n\n if cargs.log_name:\n setup_logger(logger, cargs.log_name)\n else:\n setup_logger(logger)\n\n # Add own keys for signing/encrypting JWTs\n jwks, keyjar, kidd = build_keyjar(CONF.keys)\n\n # export JWKS\n p = urlparse(CONF.KEY_EXPORT_URL)\n f = open(\".\" + p.path, \"w\")\n f.write(json.dumps(jwks))\n f.close()\n jwks_uri = p.geturl()\n\n kwargs = {\"base_url\": CONF.BASE, \"kidd\": kidd, \"keyjar\": keyjar,\n \"jwks_uri\": jwks_uri, \"flows\": FLOWS['Flows'], \"conf\": CONF,\n \"cinfo\": CONF.INFO, \"order\": FLOWS['Order'],\n \"desc\": FLOWS['Desc'], \"profiles\": profiles, \"operation\": oper,\n \"profile\": cargs.profile, \"msg_factory\": oic_message_factory,\n \"check_factory\": check.factory, \"cache\": {},\n 'profile_handler': ProfileHandler, 'opid': cargs.id}\n\n if cargs.testid:\n io = ClIO(**kwargs)\n sh = SessionHandler(**kwargs)\n sh.init_session(profile=cargs.profile)\n res = Result(sh, ProfileHandler)\n io.session = sh\n tester = ClTester(io, sh, **kwargs)\n tester.run(cargs.testid, **kwargs)\n res.store_test_info()\n res.print_info(cargs.testid)\n elif cargs.group:\n _sh = SessionHandler(**kwargs)\n _sh.init_session(profile=cargs.profile)\n\n for tid in _sh[\"flow_names\"]:\n if not tid.startswith(cargs.group):\n continue\n io = ClIO(**kwargs)\n sh = SessionHandler(**kwargs)\n sh.init_session(profile=cargs.profile)\n io.session = sh\n tester = ClTester(io, sh, **kwargs)\n if not tester.match_profile(tid):\n continue\n elif tester.run(tid, **kwargs):\n print('+ {}'.format(tid))\n else:\n res = Result(sh, ProfileHandler)\n res.result()\n if cargs.exit:\n break\n else:\n _sh = SessionHandler(**kwargs)\n _sh.init_session(profile=cargs.profile)\n\n for tid in _sh[\"flow_names\"]:\n io = ClIO(**kwargs)\n sh = SessionHandler(**kwargs)\n sh.init_session(profile=cargs.profile)\n io.session = sh\n tester = ClTester(io, sh, **kwargs)\n if not tester.match_profile(tid):\n continue\n elif tester.run(tid, **kwargs):\n print('+ {}'.format(tid))\n else:\n res = Result(sh, ProfileHandler)\n res.result()\n if cargs.exit:\n break\n","sub_path":"test_tool/test_rp/rp/cl/clrp.py","file_name":"clrp.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"236879033","text":"import ConfigParser\nimport numpy as np\nfrom ast import literal_eval\n\nclass Params(object):\n\tdef __init__(self,config_file='./config.ini',for_test=False):\n\t\tparser = ConfigParser.ConfigParser()\n\t\tparser.read(config_file)\n\n\t\tself.gpu_nums = 1\n\t\t# model\n\t\tself.network = parser.get(\"model\", \"network\")\n\t\tself.prefix = parser.get(\"model\",\"network\")\n\t\tself.backbone = parser.get(\"model\", \"backbone\")\n\t\tself.frame_num = parser.getint(\"model\",\"frame_num\")\n\t\tself.img_size = parser.getint(\"model\",\"img_size\")\n\n\t\t# 
epoch\n\t\tself.finetune_epoch = parser.getint(\"epoch\", \"finetune_epoch\")\n\n\t\t# iterator\n\t\tself.batch_size = parser.getint(\"iterator\", \"batch_size\")\n\t\tself.gt_path = parser.get(\"iterator\", \"gt_path\")\n\t\tself.img_path = parser.get(\"iterator\", \"img_path\")\n\t\tself.list_path = parser.get(\"iterator\", \"list_path\")\n\t\tself.gt_path_val = parser.get(\"iterator\", \"gt_path_val\")\n\t\tself.img_path_val = parser.get(\"iterator\", \"img_path_val\")\n\t\tself.list_path_val = parser.get(\"iterator\", \"list_path_val\")\n\t\tself.data_aug = parser.getboolean(\"iterator\", \"data_aug\")\n\t\tself.use_global_stats = parser.getboolean(\"iterator\", \"use_global_stats\")\n\n\t\t# optimizer\n\t\tself.optimizer = parser.get(\"optimizer\", \"name\")\n\t\tself.learning_rate = parser.getfloat(\"optimizer\", \"learning_rate\")\n\t\tself.wd = parser.getfloat(\"optimizer\", \"wd\")\n\t\tself.momentum = parser.getfloat(\"optimizer\", \"momentum\")\n\n\t\t# misc\n\t\tself.description = parser.get(\"misc\", \"description\")\n","sub_path":"CRN_DAVIS16_Oneshot/utils/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"546689810","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom job51.items import Job51Item\nimport logging\nimport re\nclass A51jobSpider(scrapy.Spider):\n    name = '51job'\n    allowed_domains = ['51job.com']\n    start_urls = ['https://m.51job.com/search/joblist.php?keyword=Python+开发工程师&keywordtype=2&jobarea=040000&landmark=&issuedate=&saltype=&degree=&funtype=&indtype=&jobterm=&cotype=&workyear=&cosize=&lonlat=&tubename=&tubeline=&radius=&filttertype=']\n\n\n    def parse(self, response):\n        \"\"\"Parse the listing (start) page.\"\"\"\n        # 1. scrape the data on the current listing page\n        item=Job51Item()\n        job_list=response.xpath(\"//div[@id='pageContent']/div[@class='items']/a\")\n        for job_one in job_list:\n            # URL of the job detail page\n            item[\"jobHref\"]=job_one.xpath(\"./@href\").extract_first()\n            # work location\n            item[\"jobErea\"]=job_one.xpath(\"./i/text()\").extract_first()\n            # company name\n            item[\"jobCompany\"]=job_one.xpath(\"./aside/text()\").extract_first()\n            # salary\n            item[\"jobSalary\"]=job_one.xpath(\"./em/text()\").extract_first()\n            # ===============================logging==================================\n            # logging.warning(\"item::::::: %s\" %item)\n            yield scrapy.Request(\n                # job detail URL\n                item[\"jobHref\"],\n                # callback\n                callback=self.parse_detail,\n                meta={\"item\":item}\n            )\n        # 2. scrape the next page\n        next_url=response.xpath(\"//div[@id='pageContent']/form[@id='turnpage']/div[@class='paging']/a[@class='next']/@href\").extract_first()\n        # keep crawling while a next page exists;\n        # the last page links to javascript:void(0);\n        if \"javascript\" not in next_url:\n            yield scrapy.Request(\n                # next page\n                next_url,\n                # callback\n                callback=self.parse\n            )\n        yield item\n\n    def parse_detail(self,response):\n        \"\"\"Parse the job detail page.\"\"\"\n        item=response.meta[\"item\"]\n        # job title, e.g. \"Intermediate Development Engineer\"\n        item[\"jobName\"]=response.xpath(\"//div[@id='pageContent']/div[@class='mod m1']/div[@class='jt']/p/text()\").extract_first()\n        # publication date\n        item[\"date\"]=response.xpath(\"//div[@id='pageContent']/div[@class='mod m1']/div[@class='jt']/span/text()\").extract_first()\n        # job description - it contains extra characters that need cleaning\n        item[\"jobRequest\"]=response.xpath(\"//div[@id='pageContent']/div[@class='mod']/div[@class='ain']/article/p/text()\").extract()\n        # strip special characters\n        item[\"jobRequest\"]=[re.sub(r\"\\xa0|' '|\\xa0|\\s|\\n\",\"\",i) for i in item[\"jobRequest\"]]\n        # drop empty strings\n        item[\"jobRequest\"]=[i for i in item[\"jobRequest\"] if len(i)>0]\n        yield 
item\n","sub_path":"2-爬虫/前程无忧职位爬虫/job51/job51/spiders/a51job.py","file_name":"a51job.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"362736913","text":"import yaml\nimport os\nimport click\nimport zipfile\nimport urllib2\nimport logging\nimport jsonpointer\nimport jq\nimport glob2 as glob\n\nlog = logging.getLogger(__name__)\n\n\ndef leaf_iterator(jsonable):\n '''\n generator function to yield leafs items of a JSON-like structure alongside\n their position in the structure as determined by a JSONPointer.\n \n :return: tuples (jsonpointer, leaf value)\n '''\n allleafs = jq.jq('leaf_paths').transform(jsonable, multiple_output = True)\n leafpointers = [jsonpointer.JsonPointer.from_parts(x) for x in allleafs]\n for x in leafpointers:\n yield x,x.get(jsonable)\n\ndef discover_initfiles(initdata,sourcedir):\n '''inspect sourcedir, first tries exact path match, and then (possbly recursive) glob'''\n log.info('inspecting %s to discover referenced input files',sourcedir)\n\n # filled_initdata = copy.deepcopy(initdata)\n for pointer,value in leaf_iterator(initdata):\n if type(value) not in [str,unicode]: continue\n within_sourcedir = os.path.join(sourcedir,value)\n globresult = glob.glob(os.path.join(sourcedir,value))\n if os.path.exists(within_sourcedir):\n pointer.set(initdata,within_sourcedir)\n elif globresult:\n pointer.set(initdata,globresult)\n return initdata\n\ndef getinit_data(initfiles, parameters):\n '''\n get initial data from both a list of files and a list of 'pname=pvalue'\n strings as they are passed in the command line is assumed to be a\n YAML parsable string.\n '''\n\n initdata = {}\n for initfile in initfiles:\n log.info('loading initialization data from file %s',initfile)\n initdata.update(**yaml.load(open(initfile)))\n\n for x in parameters:\n key, value = x.split('=')\n initdata[key] = yaml.load(value)\n return initdata\n\ndef prepare_workdir_from_archive(workdir, inputarchive):\n if os.path.exists(workdir):\n raise click.exceptions.ClickException(click.style(\n \"workdirectory exists and input archive give. 
Can't have both\", fg='red'))\n initdir = os.path.join(workdir,'init')\n os.makedirs(initdir)\n localzipfile = '{}/inputarchive.zip'.format(workdir)\n f = urllib2.urlopen(inputarchive)\n with open(localzipfile,'w') as lf:\n lf.write(f.read())\n with zipfile.ZipFile(localzipfile) as zf:\n zf.extractall(path=initdir)\n os.remove(localzipfile)\n return initdir\n\ndef setupbackend_fromstring(backend, name = 'backendname', cacheconfig=None):\n import backends.packtivitybackend as pb\n backend = pb.PacktivityBackend(packtivity_backendstring = backend, cacheconfig = cacheconfig)\n return backend\n","sub_path":"yadage/clihelpers.py","file_name":"clihelpers.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"132508571","text":"'''\r\nWHAT: impl binary search tree\r\n BSTs can be used to impl a dictionary\r\n O(logn) insertion, deletion, and search.\r\n O(n) traversal creates sorted list\r\nHOW: Everything is fairly easy, except Delete (omfg!)\r\nWHY: Practice impl an ADT\r\n'''\r\n\r\nfrom sys import stdout\r\n\r\nclass Node():\r\n def __init__(self, value):\r\n self.val = value\r\n self.left = None\r\n self.right = None\r\n self.parent = None\r\n\r\n def printer(self):\r\n ''' Fairly hackish, but useful '''\r\n T = str(self.val)\r\n if self.parent is None:\r\n T += '[*]'\r\n else:\r\n T += '[' + str(self.parent.val) + ']'\r\n print(' ', T)\r\n #print \" / \\\\\"\r\n print(\" / \\\\\")\r\n\r\n L = '* '\r\n R = '* '\r\n if self.left != None:\r\n L = str(self.left.val)\r\n L += '(' + str(self.left.parent.val) + ')'\r\n if self.right != None:\r\n R = str(self.right.val)\r\n R += '(' + str(self.right.parent.val) + ')'\r\n print(L + ' ' + R)\r\n print()\r\n\r\nclass BST():\r\n def __init__(self):\r\n self.root = None\r\n self.size = 0\r\n\r\n def length(self):\r\n return self.size\r\n\r\n\r\n def printTree(self):\r\n ''' \r\n Only works for 1-digit vals\r\n Meant as practice, to try out an idea\r\n '''\r\n self.getDepth()\r\n queue = [self.root]\r\n for i in range(self.depth+1):\r\n print('|',end='')\r\n\r\n # Fairly elegant for one-digit vals!\r\n outer = ' '*(2**(self.depth - i) -1)\r\n inner = ' '*(2**(self.depth - i +1)-1)\r\n\r\n wave = []\r\n stdout.write(outer)\r\n for node in queue:\r\n if node == None:\r\n wave.append(None)\r\n wave.append(None)\r\n output = ' '\r\n else:\r\n wave.append(node.left)\r\n wave.append(node.right)\r\n output = str(node.val)\r\n stdout.write(output + inner)\r\n print()\r\n\r\n queue = wave\r\n\r\n def getDepth(self):\r\n ''' \r\n Only needed for printTree()\r\n Could check after each insert/delete\r\n But, that's a PITA. 
So I just do this one-time operation\r\n printTree() remains O(n)\r\n '''\r\n self.depth = -1\r\n queue = [self.root]\r\n while queue:\r\n # Gather nodes\r\n wave = []\r\n for node in queue:\r\n if node.left is not None:\r\n wave.append(node.left)\r\n if node.right is not None:\r\n wave.append(node.right)\r\n queue = wave\r\n self.depth += 1\r\n return self.depth\r\n\r\n\r\n def insertList(self, vals):\r\n for val in vals:\r\n self.insert(val)\r\n #self.insert_nonRecurse(val)\r\n\r\n def insert(self, val, curr=0):\r\n ''' \r\n Simple heuristic:\r\n smaller goes left\r\n greater goes right.\r\n Note: equal goes right - tho BSTs meant for unique keys\r\n '''\r\n\r\n # Default is root\r\n if curr == 0:\r\n curr = self.root\r\n\r\n if self.root is None:\r\n self.root = Node(val)\r\n self.size += 1\r\n return\r\n\r\n if val >= curr.val:\r\n if curr.right is None:\r\n curr.right = Node(val)\r\n curr.right.parent = curr\r\n self.size += 1\r\n return\r\n else:\r\n self.insert(val, curr.right)\r\n else:\r\n if curr.left is None:\r\n curr.left = Node(val)\r\n curr.left.parent = curr\r\n self.size += 1\r\n return\r\n else:\r\n self.insert(val, curr.left)\r\n\r\n def getNode(self, val, curr=0):\r\n ''' Straightforward binary search '''\r\n\r\n # Default is root\r\n if curr == 0:\r\n curr = self.root\r\n\r\n if curr is None:\r\n return None\r\n elif curr.val == val:\r\n return curr\r\n elif val > curr.val:\r\n return self.getNode(val, curr=curr.right)\r\n else:\r\n return self.getNode(val, curr=curr.left)\r\n\r\n def getNode_nonRecursive(self, val):\r\n ''' A non-recursive version to illustrate similarity '''\r\n curr = self.root\r\n while True:\r\n if curr is None:\r\n return 0\r\n elif curr.val == val:\r\n return curr\r\n elif val > curr.val:\r\n curr = curr.right\r\n else:\r\n curr = curr.left\r\n\r\n def traverse_inOrder(self, arr=[], curr=''):\r\n ''' Shockingly simple! 
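In-order traversal (left subtree, node, right subtree) visits the keys in ascending order. 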
'''\r\n\r\n # Default is root\r\n if curr == '':\r\n curr = self.root\r\n\r\n # End recursion at end of branch\r\n if curr is None:\r\n return\r\n else:\r\n # Key idea: leftmost node is smallest\r\n # So keep grabbing left nodes first, then try right\r\n self.traverse_inOrder(arr, curr.left)\r\n arr.append(curr.val)\r\n self.traverse_inOrder(arr, curr.right)\r\n\r\n # At very end, return arr\r\n if curr == self.root:\r\n return arr\r\n\r\n\r\n def deleteNode(self, val):\r\n ''' Everything else was easy except for this '''\r\n old = self.getNode(val)\r\n\r\n # If not in tree\r\n if old is None:\r\n return\r\n\r\n # If val in tree\r\n self.size -= 1\r\n left = old.left\r\n right = old.right\r\n parent = old.parent\r\n\r\n # Case 1: If leaf (easy!)\r\n if left is None and right is None:\r\n if old is self.root:\r\n self.root = None\r\n elif old is parent.right:\r\n parent.right = None\r\n else:\r\n parent.left = None\r\n return\r\n\r\n # Case 2: Either left or right is empty\r\n elif left is None or right is None:\r\n # Get parent and child\r\n if left is None:\r\n child = right\r\n else:\r\n child = left\r\n\r\n # Adjust child's parents and parent's child\r\n child.parent = parent\r\n if old is self.root:\r\n self.root = child\r\n elif old is parent.right:\r\n parent.right = child\r\n else:\r\n parent.left = child\r\n\r\n # Case 3: Both left and right exist\r\n # BY FAR the hardest\r\n else:\r\n # Can swap left or right, doesn't matter\r\n # Since chose right, by defn, swap *cannot* have left child\r\n swap = self.getNearest_inRightSubtree(old)\r\n\r\n # Case 3A: Swap is adjacent\r\n if swap is right:\r\n # Update old child's parents\r\n left.parent = swap\r\n\r\n swap.left = left\r\n swap.parent = parent\r\n\r\n\r\n # Case 3B: Swap is leaf (and not adj)\r\n elif swap.right is None:\r\n swap.parent.left = None\r\n\r\n # Update old child's parents\r\n left.parent = swap\r\n right.parent = swap\r\n\r\n swap.left = left\r\n swap.right = right\r\n swap.parent = parent\r\n\r\n # Case 3C: Swap non-adj, no swap.left, yes swap.right\r\n else:\r\n # Switch swap with swap.right\r\n swap.parent.left = swap.right\r\n swap.right.parent = swap.parent\r\n\r\n # Update old child's parents\r\n left.parent = swap\r\n right.parent = swap\r\n\r\n swap.left = left\r\n swap.right = right\r\n swap.parent = parent\r\n\r\n # Now do the swap!\r\n if old is self.root:\r\n self.root = swap\r\n elif old is parent.left:\r\n parent.left = swap\r\n else:\r\n parent.right = swap\r\n\r\n def getNearest_inRightSubtree(self, curr):\r\n ''' \r\n Helper fctn for deleteNode()\r\n First go to right child,\r\n then keep going left to get smallest (in this right subtree)\r\n '''\r\n nearest = curr.right\r\n while nearest.left is not None:\r\n nearest = nearest.left\r\n return nearest\r\n\r\ndef main():\r\n t = BST()\r\n\r\n #vals = (4,1,7,9,3,5,0,2,6,8,8,7,3,5,8,0,3)\r\n #vals = (1,2,3,4,5,6)\r\n #vals = (4,1,7,9,3,14,15,5,0)\r\n vals = (4,1,7,9,14,5,0)\r\n #vals = (1,2,6,4)\r\n #vals = (4,7)\r\n t.insertList(vals)\r\n\r\n t.printTree()\r\n t.deleteNode(7)\r\n t.printTree()\r\n\r\n print()\r\n print(\"length:\", t.length())\r\n print(t.traverse_inOrder())\r\n\r\nmain()\r\n\r\n'''\r\n#TODO:\r\nneed to create a 'key' version\r\n to make into a true dict\r\n easy\r\nincorporate 'depth' into tree and all nodes\r\n annoying for deleteNode() cases\r\nself-balancing tree?\r\n red-black tree?\r\n learn about later\r\n\r\n(try impl with an array?\r\n for heaps, works bc fills in symmetrically\r\n but BST can go cray! 
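A tree built from already-sorted input degenerates into one long branch. 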
So might be tons of empty space\r\n\r\n'''\r\n\r\n","sub_path":"algos/_core/binarySearchTree.py","file_name":"binarySearchTree.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"548941268","text":"import spiceypy as spiceypy\nimport numpy as np\nfrom spiops import spiops\nfrom spiops.utils import utils\nfrom spiops.utils import naif\nfrom bokeh.layouts import row\nfrom bokeh.plotting import figure, show, output_notebook\nfrom bokeh.models.glyphs import Ellipse\nfrom bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid\n\n\nclass Body(object):\n def __init__(self, body, time=object(), target=None, mission_config=None):\n\n if isinstance(body, str):\n name = body\n id = spiceypy.bodn2c(body)\n else:\n id = body\n name = spiceypy.bodc2n(body)\n\n if target:\n self.target = target\n\n if mission_config:\n self.mission_config = mission_config\n\n self.name = name\n self.id = id\n self.time = time\n\n #\n # Parameters for the Geometry Computation\n #\n self.previous_tw = []\n self.geometry_flag = False\n\n\n def __getattribute__(self, item, boresight=''):\n\n if item in ['altitude',\n 'distance',\n 'velocity',\n 'zaxis_target_angle',\n 'myaxis_target_angle',\n 'groundtrack',\n 'boresight_groundtrack'\n 'trajectory']:\n self.__Geometry(boresight=boresight)\n return object.__getattribute__(self, item)\n elif item in ['sa_ang_p',\n 'sa_ang_n',\n 'sa_ang',\n 'saa_sa',\n 'saa_sc',\n 'hga_earth',\n 'hga_angles',\n 'mga_earth',\n 'mga_angles',\n 'roll_angles']:\n self.__Structures()\n return object.__getattribute__(self, item)\n else:\n return object.__getattribute__(self, item)\n\n\n def __getattr__(self, item):\n\n if item in ['state_in_window']:\n self.__StateInWindow()\n return object.__getattribute__(self, item)\n\n\n def State(self, target=False, reference_frame=False, current=False):\n\n if self.target and not target and not reference_frame:\n target = self.target.name\n reference_frame = self.target.frame\n\n if not self.target:\n if target is False: target = 'J2000'\n if reference_frame is False: reference_frame = 'J2000'\n\n self.trajectory_reference_frame = reference_frame\n\n if not current:\n current = self.time.current\n\n state, lt = spiceypy.spkezr(target, current, reference_frame,\n self.time.abcorr, self.name)\n\n return state\n\n\n def Orientation(self, frame='', target_frame='', current=False,\n format='msop quaternions'):\n\n if self.target and not target_frame:\n target_frame = self.target.frame\n\n if not self.target and not target_frame:\n target_frame = 'J2000'\n\n if not frame:\n frame = self.frame\n\n if not current:\n current = self.time.current\n else:\n #TODO: need to add a time conversion here\n current = current\n\n rot_mat = spiceypy.pxform(target_frame, frame, current)\n\n if format == 'spice quaternions':\n orientation = spiceypy.m2q(rot_mat)\n\n if format == 'msop quaternions':\n quaternions = spiceypy.m2q(rot_mat)\n orientation = [-quaternions[1],\n -quaternions[2],\n -quaternions[3],\n quaternions[0]]\n\n elif format == 'euler angles':\n orientation = (spiceypy.m2eul(rot_mat, 3, 2, 1))\n\n elif format == 'rotation matrix':\n orientation = rot_mat\n\n return orientation\n\n\n def __ClockDrift(self, enddate=False):\n\n try:\n\n skd_path = None\n sclk_start_coeff_idx = 1\n if self.mission_config is not None:\n skd_path = self.mission_config[\"skd_path\"]\n if \"sclk_start_coeff_idx\" in self.mission_config:\n sclk_start_coeff_idx = 
self.mission_config[\"sclk_start_coeff_idx\"][self.name.lower()]\n\n sclk_path = naif.get_latest_step_sclk(self.name, skd_path=skd_path)\n\n coeffs = naif.read_sclk_coefficiends(sclk_path)\n\n sclk_start = int(coeffs[sclk_start_coeff_idx][0])\n sclk_end = int(coeffs[-1][0])\n\n except Exception as ex:\n print(\"Error: Could not obtain SCLK time bounds. Error: \" + str(ex))\n return\n\n ticks_per_second = spiceypy.gdpool('SCLK01_MODULI_{}'.format(str(-1*self.id)),0,1000)[1]\n step = int(((sclk_end - sclk_start)/10000)) # 10000 points in the plot\n\n if not enddate:\n et_end = self.time.getTime('finish','utc')\n else:\n et_end = spiceypy.utc2et(enddate)\n\n sclk = []\n ephtime = []\n for clk in range(sclk_start, sclk_end, step):\n sclk.append(clk)\n et = spiceypy.sct2e(self.id, clk)\n ephtime.append(et)\n\n dates = []\n drift = []\n for j in range(0, len(ephtime), 1):\n if ephtime[j] >= et_end:\n break\n drift.append((sclk[j] - sclk[0]) / ticks_per_second - ((ephtime[j] - ephtime[0])))\n dates.append(ephtime[j])\n\n self.clock_dates = dates\n self.clock_drift = drift\n\n return\n\n\n def __StateInWindow(self, target=False, reference_frame=False, start=False,\n finish=False):\n\n state_in_window = []\n for et in self.time.window:\n state_in_window.append(self.State(target, reference_frame, et))\n\n self.state_in_window = state_in_window\n\n return\n\n\n def __Structures(self):\n\n if self.structures_flag is True and \\\n self.time.window.all() == self.previous_tw.all():\n return\n\n time = self.time\n\n #\n # We determine the mission\n #\n if self.name == 'TGO':\n plus_array = 'TGO_SA+Z'\n minus_array = 'TGO_SA-Z'\n elif self.name == 'MPO':\n plus_array = 'MPO_SA'\n minus_array = ''\n elif self.name == 'MTM':\n plus_array = 'MTM_SA+X'\n minus_array = 'MTM_SA-X'\n else:\n plus_array = '{}_SA+Y'.format(self.name.upper())\n minus_array = '{}_SA-Y'.format(self.name.upper())\n\n #\n # Solar Arrays\n #\n sa_ang1_p_list = []\n sa_ang1_n_list = []\n sa_ang2_p_list = []\n sa_ang2_n_list = []\n sa_ang3_p_list = []\n sa_ang3_n_list = []\n\n saa_sa_p_list = []\n saa_sa_n_list = []\n\n saa_sc_x_list = []\n saa_sc_y_list = []\n saa_sc_z_list = []\n\n hga_earth = []\n hga_angles_el = []\n hga_angles_az = []\n\n mga_earth = []\n mga_angles_el = []\n mga_angles_az = []\n\n roll_angle_1 = []\n roll_angle_2 = []\n roll_angle_3 = []\n\n #\n # High Gain Antennas\n #\n for et in time.window:\n\n #\n # SA mechanisms\n #\n try:\n # Of course we need to include all possible cases including only one Solar Array\n\n (sa_ang1_p, sa_ang2_p, sa_ang3_p) = spiops.solar_array_angles(plus_array, et)\n if minus_array:\n (sa_ang1_n, sa_ang2_n, sa_ang3_n) = spiops.solar_array_angles(minus_array, et)\n saa = spiops.solar_aspect_angles(self.name, et)\n\n sa_ang1_p_list.append(sa_ang1_p)\n sa_ang2_p_list.append(sa_ang2_p)\n sa_ang3_p_list.append(sa_ang3_p)\n if minus_array:\n sa_ang1_n_list.append(sa_ang1_n)\n sa_ang2_n_list.append(sa_ang2_n)\n sa_ang3_n_list.append(sa_ang3_n)\n\n saa_sa_p_list.append(saa[0][0])\n if minus_array:\n saa_sa_n_list.append(saa[0][1])\n\n saa_sc_x_list.append(saa[1][0])\n saa_sc_y_list.append(saa[1][1])\n saa_sc_z_list.append(saa[1][2])\n\n except:\n sa_ang1_p_list.append(0)\n sa_ang2_p_list.append(0)\n sa_ang3_p_list.append(0)\n if minus_array:\n sa_ang1_n_list.append(0)\n sa_ang2_n_list.append(0)\n sa_ang3_n_list.append(0)\n\n saa_sa_p_list.append(0)\n if minus_array:\n saa_sa_n_list.append(0)\n\n saa_sc_x_list.append(0)\n saa_sc_y_list.append(0)\n saa_sc_z_list.append(0)\n\n #\n # HGA mechanisms\n 
#\n if self.name != 'MTM' and self.name != 'JUICE':\n try:\n hga_angles_ang, hga_earth_ang = spiops.hga_angles(self.name, et)\n except:\n hga_angles_ang = [0, 0]\n hga_earth_ang = 0\n\n hga_angles_el.append(hga_angles_ang[1])\n hga_angles_az.append(hga_angles_ang[0])\n hga_earth.append(hga_earth_ang)\n\n #\n # MGA mechanisms\n #\n if self.name != 'MTM':\n try:\n mga_angles_ang, mga_earth_ang = spiops.mga_angles(self.name, et)\n except:\n mga_angles_ang = [0, 0]\n mga_earth_ang = 0\n\n mga_angles_el.append(mga_angles_ang[1])\n mga_angles_az.append(mga_angles_ang[0])\n mga_earth.append(mga_earth_ang)\n\n #\n # Roll angle\n #\n try:\n roll_angle = spiops.roll(et)\n except:\n roll_angle = [0, 0, 0]\n\n roll_angle_1.append(roll_angle[0])\n roll_angle_2.append(roll_angle[1])\n roll_angle_3.append(roll_angle[2])\n\n self.sa_ang_p = [sa_ang1_p_list, sa_ang2_p_list, sa_ang3_p_list]\n self.sa_ang = [sa_ang1_p_list, sa_ang2_p_list, sa_ang3_p_list]\n if minus_array:\n self.sa_ang_n = [sa_ang1_n_list, sa_ang2_n_list, sa_ang3_n_list]\n self.saa_sa = [saa_sa_p_list, saa_sa_n_list]\n else:\n self.sa_ang_p = [sa_ang1_p_list, sa_ang2_p_list, sa_ang3_p_list]\n self.saa_sa = saa_sa_p_list\n\n self.saa_sc = [saa_sc_x_list, saa_sc_y_list, saa_sc_z_list]\n\n self.hga_earth = hga_earth\n self.hga_angles = [hga_angles_el, hga_angles_az]\n\n self.mga_earth = mga_earth\n self.mga_angles = [mga_angles_el, mga_angles_az]\n\n self.roll_angles = [roll_angle_1, roll_angle_2, roll_angle_3]\n\n self.structures_flag = True\n self.previous_tw = self.time.window\n\n return\n\n\n def __Geometry(self, boresight=''):\n\n #if self.geometry_flag is True and \\\n # self.time.window.all() == self.previous_tw.all():\n # return\n\n distance = []\n altitude = []\n boresight_latitude = []\n boresight_longitude = []\n latitude = []\n longitude = []\n subpoint_xyz = []\n subpoint_pgc = []\n subpoint_pcc = []\n zaxis_target_angle = []\n myaxis_target_angle = []\n yaxis_target_angle = []\n xaxis_target_angle = []\n beta_angle = []\n\n qs, qx, qy, qz = [], [], [] ,[]\n x, y, z = [],[],[]\n\n\n tar = self.target\n time = self.time\n\n for et in time.window:\n\n try:\n #\n # Compute the distance\n #\n ptarg, lt = spiceypy.spkpos(tar.name, et, tar.frame, time.abcorr,\n self.name)\n x.append(ptarg[0])\n y.append(ptarg[1])\n z.append(ptarg[2])\n\n vout, vmag = spiceypy.unorm(ptarg)\n distance.append(vmag)\n\n\n #\n # Compute the geometric sub-observer point.\n #\n if tar.frame == 'MARSIAU':\n tar_frame = 'IAU_MARS'\n else:\n tar_frame = tar.frame\n spoint, trgepc, srfvec = spiceypy.subpnt(tar.method, tar.name, et,\n tar_frame, time.abcorr,\n self.name)\n subpoint_xyz.append(spoint)\n\n #\n # Compute the observer's altitude from SPOINT.\n #\n dist = spiceypy.vnorm(srfvec)\n altitude.append(dist)\n\n\n #\n # Convert the sub-observer point's rectangular coordinates to\n # planetographic longitude, latitude and altitude.\n #\n spglon, spglat, spgalt = spiceypy.recpgr(tar.name, spoint,\n tar.radii_equ, tar.flat)\n\n #\n # Convert radians to degrees.\n #\n spglon *= spiceypy.dpr()\n spglat *= spiceypy.dpr()\n\n subpoint_pgc.append([spglon, spglat, spgalt])\n\n #\n # Convert sub-observer point's rectangular coordinates to\n # planetocentric radius, longitude, and latitude.\n #\n spcrad, spclon, spclat = spiceypy.reclat(spoint)\n\n\n #\n # Convert radians to degrees.\n #\n spclon *= spiceypy.dpr()\n spclat *= spiceypy.dpr()\n\n subpoint_pcc.append([spclon, spclat, spcrad])\n latitude.append(spclat) #TODO: Remove with list extraction\n 
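# planetocentric lat/lon kept as flat lists, used by the 'groundtrack' plot\n                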
longitude.append(spclon) # TODO: Remove with list extraction\n\n                #\n                # Compute the geometric sub-boresight point.\n                #\n                if tar.frame == 'MARSIAU':\n                    tar_frame = 'IAU_MARS'\n                else:\n                    tar_frame = tar.frame\n\n\n                if boresight:\n                    try:\n                        id = spiceypy.bodn2c(boresight)\n                        (shape,framen, bsight, n, bounds) = spiceypy.getfov(id, 80)\n                        mat = spiceypy.pxform(framen,tar_frame,et)\n                    except:\n                        framen = boresight\n                        bsight = 0,0,1\n                else:\n                    bsight = self.name\n\n                try:\n                    if tar.method == 'INTERCEPT/ELLIPSOID':\n                        method = 'ELLIPSOID'\n                    else:\n                        method = tar.method\n                    spoint, trgepc, srfvec = spiceypy.sincpt(method, tar.name, et,\n                                              tar_frame, time.abcorr,\n                                              self.name, framen,\n                                              bsight)\n\n                    #\n                    # Convert the sub-observer point's rectangular coordinates to\n                    # planetographic longitude, latitude and altitude.\n                    #\n                    spglon, spglat, spgalt = spiceypy.recpgr(tar.name, spoint,\n                                                         tar.radii_equ, tar.flat)\n\n                    #\n                    # Convert radians to degrees.\n                    #\n                    spglon *= spiceypy.dpr()\n                    spglat *= spiceypy.dpr()\n\n\n                    #\n                    # Convert sub-observer point's rectangular coordinates to\n                    # planetocentric radius, longitude, and latitude.\n                    #\n                    spcrad, spclon, spclat = spiceypy.reclat(spoint)\n\n\n                    #\n                    # Convert radians to degrees.\n                    #\n                    spclon *= spiceypy.dpr()\n                    spclat *= spiceypy.dpr()\n\n                    boresight_latitude.append(spclat)\n                    boresight_longitude.append(spclon)\n\n                except:\n                    pass\n\n                #\n                # Compute the angle between the observer's S/C axis and the\n                # geometric sub-observer point\n                #\n                obs_tar, ltime = spiceypy.spkpos(tar.name, et,\n                                               'J2000', time.abcorr,\n                                               self.name)\n                obs_zaxis = [0, 0, 1]\n                obs_myaxis = [0, -1, 0]\n                obs_yaxis = [0, 1, 0]\n                obs_xaxis = [1, 0, 0]\n\n                #\n                # We need to account for when there is no CK attitude available.\n                #\n                try:\n                    matrix = spiceypy.pxform(self.frame, 'J2000', et)\n\n                    z_vecout = spiceypy.mxv(matrix, obs_zaxis)\n                    zax_target_angle = spiceypy.vsep(z_vecout, obs_tar)\n                    zax_target_angle *= spiceypy.dpr()\n                    zaxis_target_angle.append(zax_target_angle)\n\n                    my_vecout = spiceypy.mxv(matrix, obs_myaxis)\n                    myax_target_angle = spiceypy.vsep(my_vecout, obs_tar)\n                    myax_target_angle *= spiceypy.dpr()\n                    myaxis_target_angle.append(myax_target_angle)\n\n                    y_vecout = spiceypy.mxv(matrix, obs_yaxis)\n                    yax_target_angle = spiceypy.vsep(y_vecout, obs_tar)\n                    yax_target_angle *= spiceypy.dpr()\n                    yaxis_target_angle.append(yax_target_angle)\n\n                    x_vecout = spiceypy.mxv(matrix, obs_xaxis)\n                    xax_target_angle = spiceypy.vsep(x_vecout, obs_tar)\n                    xax_target_angle *= spiceypy.dpr()\n                    xaxis_target_angle.append(xax_target_angle)\n\n\n                    quat = spiceypy.m2q(spiceypy.invert(matrix))\n                    qs.append(quat[0])\n                    qx.append(-1*quat[1])\n                    qy.append(-1*quat[2])\n                    qz.append(-1*quat[3])\n\n                except:\n                    zaxis_target_angle.append(0.0)\n                    myaxis_target_angle.append(0.0)\n                    yaxis_target_angle.append(0.0)\n                    xaxis_target_angle.append(0.0)\n                    qs.append(0.0)\n                    qx.append(0.0)\n                    qy.append(0.0)\n                    qz.append(0.0)\n\n                beta_angle.append(spiops.beta_angle(self.name, self.target.name,\n                                                    et))\n            except:\n                boresight_latitude = 0\n                boresight_longitude = 0\n                distance = 0\n                altitude = 0\n                latitude = 0\n                longitude = 0\n                subpoint_xyz = [0,0,0]\n                subpoint_pgc = [0,0,0]\n                subpoint_pcc = [0,0,0]\n                zaxis_target_angle = 0\n                myaxis_target_angle = 0\n                yaxis_target_angle = 0\n                xaxis_target_angle = 0\n                beta_angle = 0\n                (qx, qy, qz, qs) = 0, 0, 0, 0\n                (x, y, z) = 0, 0, 0\n\n        self.boresight_latitude = boresight_latitude\n        self.boresight_longitude = boresight_longitude\n        self.distance = distance\n        self.altitude = altitude\n        self.latitude = latitude\n        self.longitude = longitude\n        self.subpoint_xyz = 
subpoint_xyz\n self.subpoint_pgc = subpoint_pgc\n self.subpoint_pcc = subpoint_pcc\n self.zaxis_target_angle = zaxis_target_angle\n self.myaxis_target_angle = myaxis_target_angle\n self.yaxis_target_angle = yaxis_target_angle\n self.xaxis_target_angle = xaxis_target_angle\n self.beta_angle = beta_angle\n self.quaternions = [qx, qy, qz, qs]\n self.trajectory = [x,y,z]\n\n self.geometry_flag = True\n self.previous_tw = self.time.window\n\n return\n\n\n def Plot(self, yaxis = 'distance', date_format='TDB', external_data=[],\n notebook=False, boresight=''):\n\n self.__Geometry(boresight=boresight)\n self.__Structures()\n\n #\n # Not Time in X axis\n #\n if yaxis == 'groundtrack':\n utils.plot(self.__getattribute__('longitude'),\n self.__getattribute__('latitude'),\n notebook=notebook, xaxis_name = 'Longitude',\n yaxis_name='Latitude', mission=self.name,\n target=self.target.name, background_image=True,\n format ='circle_only')\n\n return\n\n if yaxis == 'boresight_groundtrack':\n utils.plot(self.__getattribute__('boresight_longitude', boresight=boresight),\n self.__getattribute__('boresight_latitude', boresight=boresight),\n notebook=notebook, xaxis_name = 'Longitude',\n yaxis_name='Latitude', mission=self.name,\n target=self.target.name, background_image=True,\n format ='circle_only')\n\n return\n\n elif yaxis == 'trajectory':\n\n if notebook:\n output_notebook()\n\n # Make data\n x = float(self.target.radii[0])\n y = float(self.target.radii[1])\n z = float(self.target.radii[2])\n\n\n # create a new plot\n s1 = figure(width=300, plot_height=300, title='X-Y [KM]')\n s1.ellipse(x=0, y=0, width=x, height=y, color=\"orange\")\n\n s1.line(self.__getattribute__('trajectory')[0],\n self.__getattribute__('trajectory')[1],\n color=\"red\")\n\n # create another one\n s2 = figure(width=300, height=300, title='X-Z [KM]')\n s2.ellipse(x=0, y=0, width=x, height=z, color=\"orange\")\n s2.line(self.__getattribute__('trajectory')[0],\n self.__getattribute__('trajectory')[2],\n color=\"green\")\n\n\n # create another one\n s3 = figure(width=300, height=300, title='Y-Z [KM]')\n s3.ellipse(x=0, y=0, width=y, height=z, color=\"orange\")\n s3.line(self.__getattribute__('trajectory')[1],\n self.__getattribute__('trajectory')[2],\n color=\"blue\")\n\n\n # put all the plots in an HBox\n show(row(s1, s2, s3))\n\n return\n\n #\n # Time in X axis\n #\n\n xaxis = self.time.window\n xaxis_name = 'Date'\n\n if (yaxis == 'sa_ang') or (yaxis == 'sa_ang_p'):\n yaxis_name = ['sa_ang1_p','sa_ang2_p', 'sa_ang3_p']\n yaxis_units = 'deg'\n elif yaxis == 'sa_ang_n':\n if self.name != 'MPO':\n yaxis_name = ['sa_ang1_n', 'sa_ang2_n', 'sa_ang3_n']\n yaxis_units = 'deg'\n else:\n yaxis_name = ['sa_ang1_p','sa_ang2_p', 'sa_ang3_p']\n yaxis_units = 'deg'\n elif yaxis == 'saa_sc':\n yaxis_name = ['saa_sc_x', 'saa_sc_y', 'saa_sc_z']\n yaxis_units = 'deg'\n elif yaxis == 'saa_sa':\n if self.name != 'MPO':\n yaxis_name = ['saa_sa_p', 'saa_sa_n']\n yaxis_units = 'deg'\n else:\n yaxis_name = 'saa_sa'\n yaxis_units = 'deg'\n elif yaxis == 'hga_angles':\n yaxis_name = ['hga_el', 'hga_az']\n yaxis_units = 'deg'\n elif yaxis == 'mga_angles':\n yaxis_name = ['mga_el', 'mga_az']\n yaxis_units = 'deg'\n elif yaxis == 'roll_angles':\n yaxis_name = ['roll_1', 'roll_2', 'roll_3']\n yaxis_units = 'deg'\n elif yaxis == 'hga_earth':\n yaxis_name = 'hga_earth'\n yaxis_units = 'deg'\n elif yaxis == 'mga_earth':\n yaxis_name = 'mga_earth'\n yaxis_units = 'deg'\n elif yaxis == 'quaternions':\n yaxis_name = ['qx','qy','qz','qs']\n yaxis_units = ''\n elif yaxis 
== 'clock_drift':\n            self.__ClockDrift()\n            xaxis = self.clock_dates\n            xaxis_name = 'Date'\n            yaxis_name = 'Delta Clock Counts SC-Ground'\n            yaxis_units = 's'\n        elif yaxis == 'zaxis_target_angle':\n            yaxis_units = 'deg'\n            yaxis_name = yaxis\n        elif yaxis == 'yaxis_target_angle':\n            yaxis_units = 'deg'\n            yaxis_name = yaxis\n        elif yaxis == 'xaxis_target_angle':\n            yaxis_units = 'deg'\n            yaxis_name = yaxis\n        else:\n            yaxis_name = yaxis\n            yaxis_units = 'km'\n\n\n        utils.plot(xaxis, self.__getattribute__(yaxis), notebook=notebook,\n                   external_data=external_data, xaxis_name=xaxis_name,\n                   yaxis_name=yaxis_name, mission = self.name, yaxis_units=yaxis_units,\n                   target = self.target.name, date_format=date_format)\n\n        return\n\n\n\n    def Plot3D(self, data='trajectory', reference_frame=False):\n\n        self.__Geometry()\n\n        #TODO: Arrange the reference frame flow\n        if not self.state_in_window:\n            self.__StateInWindow(reference_frame=reference_frame)\n\n\n        data = self.state_in_window\n\n        utils.plot3d(data, self, self.target)\n\n        return\n\n\nclass Target(Body):\n\n    def __init__(self, body, time=object(),\n                 target=False, frame='',\n                 method='INTERCEPT/ELLIPSOID'):\n        \"\"\"\n\n        :param body:\n        :type body:\n        :param time:\n        :type time:\n        :param target: If no target is provided, the default is 'SUN'\n        :type target:\n        :param frame:\n        :type frame:\n        :param method:\n        :type method:\n        \"\"\"\n\n        #\n        # In the target parameter for the following if statement we need to use\n        # the class object(), which is empty, to avoid a RecursionError.\n        #\n        if not target:\n            target = Target('SUN', time=time, target=object())\n\n        super(Target, self).__init__(body, time=time, target=target)\n\n        if not frame:\n            self.frame = 'IAU_{}'.format(self.name)\n        else:\n            self.frame = frame\n\n        self.method = method\n\n\n        self.__getRadii()\n\n\n    def __getRadii(self):\n\n        try:\n            self.radii = spiceypy.bodvar(self.id, 'RADII', 3)\n\n        except Exception:\n            print(\"Ephemeris object has no radii\")\n            return\n\n        self.radii_equ = self.radii[0]\n        self.radii_pol = self.radii[2]\n        self.flat = (self.radii_equ - self.radii_pol) / self.radii_equ\n\n        return\n\n\nclass Observer(Body):\n    def __init__(self, body, time=object(), target=False, frame='', mission_config=None):\n\n        super(Observer, self).__init__(body, time=time, target=target, mission_config=mission_config)\n\n        if not frame:\n            self.frame = '{}_SPACECRAFT'.format(self.name)\n            if spiceypy.namfrm(self.frame) == 0:\n                self.frame = self.name\n                if spiceypy.namfrm(self.frame) == 0:\n                    #TODO: Fix this frame fallback\n                    self.frame = '{}_LANDER'.format(self.name)\n                    print('The frame name has not been able to be built; please introduce it manually')\n        else:\n            self.frame = frame\n\n\n","sub_path":"spiops/classes/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":27618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"26339156","text":"'no imports!'\r\n'no list comprehensions!'\r\n'no lambdas!'\r\n'no inbuild functions!'\r\n'except loops, conditions, return, raise, len, range'\r\n\r\n\r\ndef get_max_common_element(first_list, second_list):\r\n    \"\"\"\r\n    Select common element from both lists and return one with the max value.\r\n\r\n    :raises ValueError: if any of the input lists are empty.\r\n                        Error message: 'Input lists cannot be empty'\r\n    \r\n    :raises ValueError: if there are no common elements.\r\n                        Error message: 'There are no common elements'\r\n    \"\"\"\r\n    if not first_list or not second_list:\r\n        raise ValueError('Input lists cannot be empty')\r\n\r\n    result = []\r\n    
for element in first_list:\r\n        if element in second_list:\r\n            result.append(element)\r\n    \r\n    if not result:\r\n        raise ValueError('There are no common elements')\r\n    \r\n    max_num = result[0]\r\n    for item in result:\r\n        if item > max_num:\r\n            max_num = item\r\n    return max_num\r\n\r\n\r\ndef get_odd_elements(x, start):\r\n    \"\"\"\r\n    Return a list containing first 'x' odd elements starting from 'start'\r\n    \"\"\"\r\n    result = []\r\n    while len(result) != x:\r\n        if start % 2 != 0:\r\n            result.append(start)\r\n        start += 1\r\n    return result\r\n\r\n\r\ndef get_even_numbers(x, stop, z):\r\n    \"\"\"\r\n    Returns a list containing first 'x' even elements lower than 'stop'.\r\n    Those elements must be divisible by 'z'.\r\n    \"\"\"\r\n    result = []\r\n    counter = 0\r\n    while len(result) != x and counter < stop:\r\n        if counter % 2 == 0 and counter % z == 0:\r\n            result.append(counter)\r\n        counter += 1\r\n    return result\r\n\r\n\r\ndef get_sum_of_greatest_elements(my_list, x):\r\n    \"\"\"\r\n    Returns a single integer, which is a sum of 'x' biggest elements from 'my_list'\r\n    i.e. Returns a sum of 3 biggest elements from [2, 18, 5, -11, 7, 6, 9]\r\n    \"\"\"\r\n    result = []\r\n    for i in range(0, x):\r\n        max1 = my_list[0]\r\n        for j in range(len(my_list)):\r\n            if my_list[j] > max1:\r\n                max1 = my_list[j]\r\n        my_list.remove(max1)\r\n        result.append(max1)\r\n\r\n    count = 0\r\n    for x in result:\r\n        count += x\r\n    return count\r\n\r\n\r\ndef get_average_of_elements(first_list, second_list):\r\n    \"\"\"\r\n    Returns a single integer, which is an average from elements that are on \r\n    'first_list' but not on 'second_list'\r\n    \"\"\"\r\n    total = 0\r\n    amount = 0\r\n    for x in first_list:\r\n        if x not in second_list:\r\n            total += x\r\n            amount += 1\r\n    return total // amount\r\n\r\n\r\n'BONUS'\r\ndef return_prime_numbers_less_than_100():\r\n    \"\"\"\r\n    Returns a list containing prime numbers that are less than 100\r\n    \"\"\"\r\n    primes = []\r\n    for num in range(2, 100):\r\n        is_prime = True\r\n        for i in range(2, num):\r\n            if num % i == 0:\r\n                is_prime = False \r\n        if is_prime:\r\n            primes.append(num)\r\n    return primes\r\n\r\n\r\ndef main():\r\n    # TODO first_list, second_list, x, stop, z\r\n\r\n    # get_max_common_element(first_list, second_list)\r\n    # get_odd_elements(x, start)\r\n    # get_even_numbers(x, stop, z)\r\n    # get_sum_of_greatest_elements(my_list, x)\r\n    # get_average_of_elements(first_list, second_list)\r\n    print(return_prime_numbers_less_than_100())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"241432655","text":"import chainer\n\n### BLOCK ###\nclass ConvolutionBlock(chainer.Chain):\n    def __init__(self, in_channels, out_channels):\n        super(ConvolutionBlock, self).__init__(\n            conv = chainer.links.Convolution2D(in_channels, out_channels, 7, 2, 3, initialW = chainer.initializers.HeNormal()),\n            bn_conv = chainer.links.BatchNormalization(out_channels),\n        )\n    \n    def __call__(self, TEST, x):\n        h = self.conv(x)\n        h = self.bn_conv(h, TEST)\n        y = chainer.functions.relu(h)\n        \n        return y\n\nclass ResidualBlock(chainer.Chain):\n    def __init__(self, in_channels, out_channels):\n        super(ResidualBlock, self).__init__(\n            res_branch2a = chainer.links.Convolution2D(in_channels, out_channels, 3, pad = 1, initialW = chainer.initializers.HeNormal()),\n            bn_branch2a = chainer.links.BatchNormalization(out_channels),\n            res_branch2b = chainer.links.Convolution2D(out_channels, out_channels, 3, pad = 
1, initialW = chainer.initializers.HeNormal()),\n bn_branch2b = chainer.links.BatchNormalization(out_channels)\n )\n \n def __call__(self, TEST, x):\n h = self.res_branch2a(x)\n h = self.bn_branch2a(h, TEST)\n h = chainer.functions.relu(h)\n h = self.res_branch2b(h)\n h = self.bn_branch2b(h, TEST)\n h = x + h\n y = chainer.functions.relu(h)\n \n return y\n\nclass ResidualBlockA():\n def __init__(self):\n pass\n \n def __call__(self):\n pass\n\nclass ResidualBlockB(chainer.Chain):\n def __init__(self, in_channels, out_channels):\n super(ResidualBlockB, self).__init__(\n res_branch1 = chainer.links.Convolution2D(in_channels, out_channels, 1, 2, initialW = chainer.initializers.HeNormal()),\n bn_branch1 = chainer.links.BatchNormalization(out_channels),\n res_branch2a = chainer.links.Convolution2D(in_channels, out_channels, 3, 2, 1, initialW = chainer.initializers.HeNormal()),\n bn_branch2a = chainer.links.BatchNormalization(out_channels),\n res_branch2b = chainer.links.Convolution2D(out_channels, out_channels, 3, pad = 1, initialW = chainer.initializers.HeNormal()),\n bn_branch2b = chainer.links.BatchNormalization(out_channels)\n )\n \n def __call__(self, TEST, x):\n temp = self.res_branch1(x)\n temp = self.bn_branch1(temp, TEST)\n h = self.res_branch2a(x)\n h = self.bn_branch2a(h, TEST)\n h = chainer.functions.relu(h)\n h = self.res_branch2b(h)\n h = self.bn_branch2b(h, TEST)\n h = temp + h\n y = chainer.functions.relu(h)\n \n return y\n### BLOCK ###\n\n### MODEL ###\nclass ResNet18(chainer.Chain):\n def __init__(self):\n super(ResNet18, self).__init__(\n conv1_relu = ConvolutionBlock(3, 32),\n res2a_relu = ResidualBlock(32, 32),\n res2b_relu = ResidualBlock(32, 32),\n res3a_relu = ResidualBlockB(32, 64),\n res3b_relu = ResidualBlock(64, 64),\n res4a_relu = ResidualBlockB(64, 128),\n res4b_relu = ResidualBlock(128, 128),\n res5a_relu = ResidualBlockB(128, 256),\n res5b_relu = ResidualBlock(256, 256)\n )\n \n def __call__(self, TEST, x):\n h = self.conv1_relu(TEST, x)\n h = chainer.functions.max_pooling_2d(h, 3, 2, 1)\n h = self.res2a_relu(TEST, h)\n h = self.res2b_relu(TEST, h)\n h = self.res3a_relu(TEST, h)\n h = self.res3b_relu(TEST, h)\n h = self.res4a_relu(TEST, h)\n h = self.res4b_relu(TEST, h)\n h = self.res5a_relu(TEST, h)\n h = self.res5b_relu(TEST, h)\n y = chainer.functions.average_pooling_2d(h, h.data.shape[2:])\n \n return y\n### MODEL ### \n","sub_path":"visual_stream.py","file_name":"visual_stream.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"298755575","text":"import json\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import List, Optional\n\n\nclass JSONCapability:\n def to_json(self):\n return {k: v if not isinstance(v, Vector) else str(v) for k, v in self.__dict__.items() if v is not None}\n\n\n# region primitives\n@dataclass\nclass Vector:\n X: int\n Y: int\n Z: int\n\n @classmethod\n def from_json(cls, data):\n x, y, z = map(int, data.split('/'))\n return cls(x, y, z)\n\n def __str__(self):\n return f\"{self.X}/{self.Y}/{self.Z}\"\n\n\n# endregion\n\n# region battle commands\n\n@dataclass\nclass CommandParameters(JSONCapability):\n pass\n\n\n@dataclass\nclass AttackCommandParameters(CommandParameters):\n Id: int\n Name: str\n Target: Vector\n\n\n@dataclass\nclass MoveCommandParameters(CommandParameters):\n Id: int\n Target: Vector\n\n\n@dataclass\nclass AccelerateCommandParameters(CommandParameters):\n Id: int\n Vector: 
Vector\n\n\n@dataclass\nclass UserCommand(JSONCapability):\n    Command: str\n    Parameters: CommandParameters\n\n\n@dataclass\nclass BattleOutput(JSONCapability):\n    Message: str = None\n    UserCommands: List[UserCommand] = None\n\n\n# endregion\n\n# region draft commands\n@dataclass\nclass DraftChoice(JSONCapability):\n    # TODO Make draft choice\n    pass\n\n\n@dataclass\nclass DraftOptions:\n    # TODO: Parse draft options\n    pass\n\n\n# endregion\n\n# region equipment\n\nclass EquipmentType(Enum):\n    Energy = 0\n    Gun = 1\n    Engine = 2\n    Health = 3\n\n\nclass EffectType(Enum):\n    Blaster = 0\n\n\n@dataclass\nclass EquipmentBlock(JSONCapability):\n    Name: str\n    Type: EquipmentType\n\n    @classmethod\n    def from_json(cls, data):\n        if EquipmentType(data['Type']) == EquipmentType.Energy:\n            return EnergyBlock(**data)\n        elif EquipmentType(data['Type']) == EquipmentType.Gun:\n            return GunBlock(**data)\n        elif EquipmentType(data['Type']) == EquipmentType.Engine:\n            return EngineBlock(**data)\n        elif EquipmentType(data['Type']) == EquipmentType.Health:\n            return HealthBlock(**data)\n\n\n@dataclass\nclass EnergyBlock(EquipmentBlock):\n    IncrementPerTurn: int\n    MaxEnergy: int\n    StartEnergy: int\n    Type = EquipmentType.Energy\n\n\n@dataclass\nclass EngineBlock(EquipmentBlock):\n    MaxAccelerate: int\n    Type = EquipmentType.Engine\n\n\n@dataclass\nclass GunBlock(EquipmentBlock):\n    Damage: int\n    EffectType: EffectType\n    EnergyPrice: int\n    Radius: int\n    Type = EquipmentType.Gun\n\n\n@dataclass\nclass HealthBlock(EquipmentBlock):\n    MaxHealth: int\n    StartHealth: int\n    Type = EquipmentType.Health\n\n\n# endregion\n\n# region battle state\n\n@dataclass\nclass Ship(JSONCapability):\n    Id: int\n    Position: Vector\n    Velocity: Vector\n    Energy: Optional[int] = None\n    Health: Optional[int] = None\n    Equipment: List[EquipmentBlock] = None\n\n    @classmethod\n    def from_json(cls, data):\n        if data.get('Equipment'):\n            data['Equipment'] = list(map(EquipmentBlock.from_json, data.get('Equipment', [])))\n        data['Position'] = Vector.from_json(data['Position'])\n        data['Velocity'] = Vector.from_json(data['Velocity'])\n        return cls(**data)\n\n\n@dataclass\nclass FireInfo(JSONCapability):\n    EffectType: EffectType\n    Source: Vector\n    Target: Vector\n\n    @classmethod\n    def from_json(cls, data):\n        data['Source'] = Vector.from_json(data['Source'])\n        data['Target'] = Vector.from_json(data['Target'])\n        return cls(**data)\n\n\n@dataclass\nclass BattleState(JSONCapability):\n    FireInfos: List[FireInfo]\n    My: List[Ship]\n    Opponent: List[Ship]\n\n    @classmethod\n    def from_json(cls, data):\n        my = list(map(Ship.from_json, data['My']))\n        opponent = list(map(Ship.from_json, data['Opponent']))\n        fire_infos = list(map(FireInfo.from_json, data['FireInfos']))\n        return cls(fire_infos, my, opponent)\n\n\n# endregion\n\n\ndef make_draft(data: dict) -> DraftChoice:\n    # TODO: parse input data\n    # TODO: Make draft\n    return DraftChoice()\n\n\ndef make_turn(data: dict) -> BattleOutput:\n    battle_state = BattleState.from_json(data)\n    battle_output = BattleOutput()\n    battle_output.Message = f\"I have {len(battle_state.My)} ships and move to center of galaxy and shoot\"\n    battle_output.UserCommands = []\n    for ship in battle_state.My:\n        battle_output.UserCommands.append(UserCommand(Command=\"MOVE\",\n                                                      Parameters=MoveCommandParameters(ship.Id, Vector(15, 15, 15))))\n        guns = [x for x in ship.Equipment if isinstance(x, GunBlock)]\n        if guns:\n            battle_output.UserCommands.append(UserCommand(Command=\"ATTACK\",\n                                                          
Parameters=AttackCommandParameters(ship.Id,\n guns[0].Name,\n Vector(15, 15, 15))))\n return battle_output\n\n\ndef play_game():\n while True:\n raw_line = input()\n line = json.loads(raw_line)\n if 'PlayerId' in line:\n print(json.dumps(make_draft(line), default=lambda x: x.to_json(), ensure_ascii=False))\n elif 'My' in line:\n print(json.dumps(make_turn(line), default=lambda x: x.to_json(), ensure_ascii=False))\n\n\nif __name__ == '__main__':\n play_game()\n","sub_path":"oop_example.py","file_name":"oop_example.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"619407898","text":"import socket\n\nvalid_port = False\nport = -1\nwhile not valid_port:\n try:\n port = int(input(\"Input port number: \"))\n if port < 0:\n print(\"Invalid port: must be positive\")\n continue\n elif port < 1024:\n print(\"Invalid port: ports from 0 to 1023 are well-known ports or system ports and should not be used\")\n continue\n elif port > 65535:\n print(\"Invalid port: port numbers are unsigned 16-bit integers so port numbers greater than 65535 are not possible\")\n continue\n else:\n valid_port = True\n except:\n print(\"Invalid port: must be numerical\")\n continue\n\nds = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nds.bind((\"\", port))\nprint(\"Bound new datagram socket to port \" + str(port))\nprint(\"Ready to receive packets\")\n\ntry:\n while True:\n data, addr = ds.recvfrom(1024)\n message = data.decode(\"utf-8\")\n print(\"Received from \" + str(addr) + \": \" + message)\nfinally:\n print(\"\\nClosing socket\")\n ds.close()\n","sub_path":"DatagramReceiver.py","file_name":"DatagramReceiver.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"146227880","text":"#!/usr/bin/env python\n# @Author: MatthewP\n# @Date: 11/28/2018\n# @Email: matthewhakka@gmail.com\n\nfrom urllib import request, parse\n\nurl = 'http://httpbin.org/post'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',\n 'Host': 'httpbin.org'\n}\nd = {'name': 'Germey'}\ndata = bytes(parse.urlencode(d), encoding='utf-8')\nreq = request.Request(url, data, headers, method='POST')\nresponse = request.urlopen(req)\nprint(response.read().decode('utf-8'))","sub_path":"spyde/urllib/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"404145990","text":"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for `Transpose` Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as 
np\n\nimport tensorflow as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\nclass _TransposeBijectorTest(object):\n \"\"\"Tests correctness of the `Transpose` bijector.\"\"\"\n\n def testTransposeFromPerm(self):\n perm_ = [2, 0, 1]\n actual_x_ = np.array([\n [[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]],\n ], dtype=np.float32)\n actual_y_ = np.array([\n [[1, 3],\n [5, 7]],\n [[2, 4],\n [6, 8]],\n ], dtype=np.float32)\n if self.is_static:\n actual_x = tf.constant(actual_x_)\n actual_y = tf.constant(actual_y_)\n perm = tf.constant(perm_)\n else:\n actual_x = tf.compat.v1.placeholder_with_default(actual_x_, shape=None)\n actual_y = tf.compat.v1.placeholder_with_default(actual_y_, shape=None)\n perm = tf.compat.v1.placeholder_with_default(perm_, shape=[3])\n\n bijector = tfb.Transpose(perm=perm, validate_args=True)\n y = bijector.forward(actual_x)\n x = bijector.inverse(actual_y)\n fldj = bijector.forward_log_det_jacobian(x, event_ndims=3)\n ildj = bijector.inverse_log_det_jacobian(y, event_ndims=3)\n\n [y_, x_, ildj_, fldj_] = self.evaluate([y, x, ildj, fldj])\n\n self.assertEqual('transpose', bijector.name)\n self.assertAllEqual(actual_y, y_)\n self.assertAllEqual(actual_x, x_)\n self.assertAllEqual(0., ildj_)\n self.assertAllEqual(0., fldj_)\n\n def testTransposeFromEventNdim(self):\n rightmost_transposed_ndims_ = np.array(2, dtype=np.int32)\n actual_x_ = np.array([\n [[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]],\n ], dtype=np.float32)\n actual_y_ = np.array([\n [[1, 3],\n [2, 4]],\n [[5, 7],\n [6, 8]],\n ], dtype=np.float32)\n if self.is_static:\n actual_x = tf.constant(actual_x_)\n actual_y = tf.constant(actual_y_)\n rightmost_transposed_ndims = tf.constant(rightmost_transposed_ndims_)\n else:\n actual_x = tf.compat.v1.placeholder_with_default(actual_x_, shape=None)\n actual_y = tf.compat.v1.placeholder_with_default(actual_y_, shape=None)\n rightmost_transposed_ndims = tf.constant(rightmost_transposed_ndims_)\n\n bijector = tfb.Transpose(\n rightmost_transposed_ndims=rightmost_transposed_ndims,\n validate_args=True)\n y = bijector.forward(actual_x)\n x = bijector.inverse(actual_y)\n fldj = bijector.forward_log_det_jacobian(x, event_ndims=2)\n ildj = bijector.inverse_log_det_jacobian(y, event_ndims=2)\n\n [y_, x_, ildj_, fldj_] = self.evaluate([y, x, ildj, fldj])\n\n self.assertEqual('transpose', bijector.name)\n self.assertAllEqual(actual_y, y_)\n self.assertAllEqual(actual_x, x_)\n self.assertAllEqual(0., ildj_)\n self.assertAllEqual(0., fldj_)\n\n def testInvalidPermException(self):\n msg = '`perm` must be a valid permutation vector.'\n if self.is_static or tf.executing_eagerly():\n with self.assertRaisesRegexp(ValueError, msg):\n bijector = tfb.Transpose(perm=[1, 2], validate_args=True)\n else:\n with self.assertRaisesOpError(msg):\n bijector = tfb.Transpose(\n perm=tf.compat.v1.placeholder_with_default([1, 2], shape=[2]),\n validate_args=True)\n self.evaluate(bijector.forward([[0, 1]]))\n\n def testInvalidEventNdimsException(self):\n msg = '`rightmost_transposed_ndims` must be non-negative.'\n with self.assertRaisesRegexp(ValueError, msg):\n tfb.Transpose(rightmost_transposed_ndims=-1, validate_args=True)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass TransposeBijectorDynamicTest(_TransposeBijectorTest, tf.test.TestCase):\n is_static = False\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass TransposeBijectorStaticTest(_TransposeBijectorTest, 
tf.test.TestCase):\n is_static = True\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"tensorflow_probability/python/bijectors/transpose_test.py","file_name":"transpose_test.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"541930193","text":"import csv, random\nc = csv.writer(open('prob_test.csv', 'wb'))\nheader = []\nfor i in range(30):\n\theader.append('X' + str(i+1))\nheader.append('Y')\nc.writerow(header)\n\nX ={}\nfor i in range(30):\n\tx = [d for d in range(1000)]\n\trandom.shuffle(x)\n\tX[i] = x\n\nfor i in range(1000):\n\trow = []\n\tfor j in range(30):\n\t\trow.append(X[j][i])\n\tif X[j][i] % 2 ==0:\n\t\trow.append(0)\n\telse:\n\t\trow.append(1)\n\n\tc.writerow(row)","sub_path":"datagenerate_test.py","file_name":"datagenerate_test.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"653894988","text":"import os\nimport csv\nimport numpy as np\n\ndef write_csv(file_path, y_list):\n solution_rows = [('id', 'category')] + [(i, 1 - y) for (i, y) in enumerate(y_list)]\n with open(file_path, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(solution_rows)\n\ndef output_submission_csv(output_file_path, y_test):\n write_csv(output_file_path, y_test)\n","sub_path":"assignment3/assignment3_p2/kaggle_submission.py","file_name":"kaggle_submission.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"372374819","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api , exceptions , _\nfrom openerp.osv import osv\nimport re\nimport base64\n\n\n\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.colors import magenta, red , black , blue, gray, Color, HexColor\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.lib.pagesizes import letter, A4\nfrom reportlab.platypus import SimpleDocTemplate, Table, TableStyle\nfrom reportlab.lib import colors\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.platypus import Paragraph, Table\nfrom reportlab.lib.units import cm,mm\nfrom reportlab.lib.utils import simpleSplit\nfrom cgi import escape\nimport decimal\n\nclass account_move_comprobante(models.Model):\n\t_name='account.move.comprobante'\n\t_auto = False\n\n\tname = fields.Char('Comprobante')\n\taml_id = fields.Integer('aml_id')\n\tpartner_id = fields.Many2one('res.partner','Partner')\n\n\t@api.model_cr\n\tdef init(self):\n\t\tself.env.cr.execute(\"\"\" \n\t\t\tdrop view if exists account_move_comprobante;\n\t\t\tcreate or replace view account_move_comprobante as (\n\n\nselect aml_id as id,* from\n(\nselect \naml.nro_comprobante || ' (' || coalesce(itd.code,'') || ' )' as name, \naat.type,\naml.id as aml_id, \naml.debit,\naml.credit ,\naml.partner_id\n\nfrom account_move_line aml\nleft join einvoice_catalog_01 itd on itd.id = aml.type_document_it\ninner join account_account aa on aa.id = aml.account_id\ninner join account_account_type aat on aat.id = aa.user_type_id\nwhere aat.type in ('payable','receivable')\nand aml.nro_comprobante is not null and aml.nro_comprobante !=''\nand ( (aat.type = 'payable' and aml.credit >0 ) or (aat.type = 'receivable' and aml.debit >0 ) )\norder by aml.nro_comprobante\n\n\n\n\t\t\t\t\t\t) AS T )\n\n\t\t\t\"\"\")\n\n\n\nclass 
account_move_line(models.Model):\n\t_inherit='account.move.line'\n\n\t@api.multi\n\tdef edit_linea_it(self):\n\t\tif self.move_id.state != 'draft':\n\t\t\traise osv.except_osv('Alerta!', \"No se puede modificar una linea de un Asiento Asentado.\")\n\t\tdata = {\n\t\t\t'default_glosa':self.name,\n\t\t\t'default_empresa': self.partner_id.id,\n\t\t\t'default_comprobante_manual': self.nro_comprobante,\n\t\t\t'default_account_id':self.account_id.id,\n\t\t\t'default_date_vencimiento': self.date_maturity,\n\t\t\t'default_debit': self.debit,\n\t\t\t'default_credit':self.credit,\n\t\t\t'default_analytic_id': self.analytic_account_id.id,\n\t\t\t'default_import_divisa': self.amount_currency,\n\t\t\t'default_currency_id': self.currency_id.id,\n\t\t\t'default_impuesto':self.tax_code_id.id,\n\t\t\t'default_importe_impuesto':self.tax_amount,\n\t\t\t'default_type_change':self.tc,\n\t\t\t'default_type_doc_id':self.type_document_it.id,\n\t\t\t'default_rendicion_id': self.rendicion_id.id,\n\t\t\t'active_ids_line': self.id,\n\t\t\t'default_uom_id': self.product_uom_id.id,\n\t\t\t'default_cantidad': self.quantity,\n\t\t\t'default_stock_id': self.location_id.id,\n\t\t\t'default_etiqueta_analitica': self.analytic_tag_ids[0].id if len(self.analytic_tag_ids)>0 else False,\n\t\t}\n\t\treturn {\n 'name': 'Agregar Linea',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'account.move.wizard.add.linea',\n 'target': 'new',\n 'context': data\n\t\t}\n\n\nclass account_move(models.Model):\n\t_inherit= 'account.move'\n\t_name = \"account.move\"\n\n\tdef _get_move_from_lines(self, cr, uid, ids, context=None):\n\t\tline_obj = self.pool.get('account.move.line')\n\t\treturn [line.move_id.id for line in line_obj.browse(cr, uid, ids, context=context)]\n\n\n\t@api.multi\n\tdef button_add_linea(self):\n\t\treturn {\n 'name': 'Agregar Linea',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'account.move.wizard.add.linea',\n 'target': 'new',\n 'context': {'active_ids':[self.id]},\n\t\t}\n\n\n\t@api.multi\n\tdef export_excel(self):\n\n\t\timport io\n\t\tfrom xlsxwriter.workbook import Workbook\n\t\toutput = io.BytesIO()\n\t\t########### PRIMERA HOJA DE LA DATA EN TABLA\n\t\t#workbook = Workbook(output, {'in_memory': True})\n\t\tdireccion = self.env['main.parameter'].search([])[0].dir_create_file\n\t\tif not direccion:\n\t\t\traise osv.except_osv('Alerta!', \"No fue configurado el directorio para los archivos en Configuración.\")\n\t\tworkbook = Workbook( direccion + 'tempo_account_move_line.xlsx')\n\t\tworksheet = workbook.add_worksheet(\"Asiento Contable\")\n\t\tbold = workbook.add_format({'bold': True})\n\t\tnormal = workbook.add_format()\n\t\tboldbord = workbook.add_format({'bold': True})\n\t\tboldbord.set_border(style=2)\n\t\tnumbertres = workbook.add_format({'num_format':'0.000'})\n\t\tnumberdos = workbook.add_format({'num_format':'0.00'})\n\t\tbord = workbook.add_format()\n\t\tbord.set_border(style=1)\n\t\tnumberdos.set_border(style=1)\n\t\tnumbertres.set_border(style=1)\t\t\t\n\t\tx= 6\t\t\t\t\n\t\ttam_col = [12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n\t\ttam_letra = 1.1\n\t\timport sys\n\t\treload(sys)\n\t\tsys.setdefaultencoding('iso-8859-1')\n\n\t\tworksheet.write(0,0, \"Asiento Contable:\", bold)\n\n\t\tworksheet.write(0,1, self.name, normal)\n\n\t\tworksheet.write(1,0, \"Diario:\", bold)\n\n\t\tworksheet.write(1,1, self.journal_id.name, 
normal)\n\t\n\t\n\t\tworksheet.write(3,0, \"Empresa:\",bold)\n\t\t\n\t\tworksheet.write(3,1, self.partner_id.name if self.partner_id.name else '', normal)\n\t\t\n\n\t\tworksheet.write(1,2, \"Referencia:\", bold)\n\t\t\n\t\tworksheet.write(1,3, self.ref if self.ref else \"\", normal)\n\t\t\n\n\t\tworksheet.write(2,2, \"Fecha:\", bold)\n\t\t\n\t\tworksheet.write(2,3, self.date if self.date else \"\", normal)\n\t\t\n\n\n\n\n\n\t\tworksheet.write(5,1, \"Nombre\",boldbord)\n\t\tworksheet.write(5,2, \"Empresa\",boldbord)\n\t\tworksheet.write(5,3, \"Comprobante\",boldbord)\n\t\tworksheet.write(5,4, \"Cuenta\",boldbord)\n\t\tworksheet.write(5,5, \"Fecha V.\",boldbord)\n\t\tworksheet.write(5,6, \"Debe\",boldbord)\n\t\tworksheet.write(5,7, \"Haber\",boldbord)\n\t\tworksheet.write(5,8, u\"Cta. Analítica\",boldbord)\n\t\tworksheet.write(5,9, u\"Importe Divisa\",boldbord)\n\t\tworksheet.write(5,10, \"Divisa\",boldbord)\n\t\tworksheet.write(5,11, \"Cuenta Impuesto\",boldbord)\n\t\tworksheet.write(5,12, u\"Importe Impuestos\",boldbord)\n\t\tworksheet.write(5,13, u\"Tipo Cambio Sunat\",boldbord)\n\t\tworksheet.write(5,14, \"Tipo Documento\",boldbord)\n\t\tworksheet.write(5,15, u\"Conciliar\",boldbord)\n\n\t\tdict_state = {'draft':'Descuadrado','valid':'Cuadrado'}\n\t\tfor line in self.line_ids:\n\t\t\tworksheet.write(x,1,line.name if line.name else '',bord )\n\t\t\tworksheet.write(x,2,line.partner_id.name if line.partner_id.name else '',bord)\n\t\t\tworksheet.write(x,3,line.nro_comprobante if line.nro_comprobante else '',bord)\n\t\t\tworksheet.write(x,4,line.account_id.code if line.account_id.code else '',bord)\n\t\t\tworksheet.write(x,5,line.date_maturity if line.date_maturity else '',bord)\n\t\t\tworksheet.write(x,6,line.debit ,numberdos)\n\t\t\tworksheet.write(x,7,line.credit,numberdos)\n\t\t\tworksheet.write(x,8,line.analytic_account_id.name if line.analytic_account_id.name else '',bord)\n\t\t\tworksheet.write(x,9,line.amount_currency,bord)\n\t\t\tworksheet.write(x,10,line.currency_id.name if line.currency_id.name else '',bord)\n\t\t\tworksheet.write(x,11,line.tax_code_id.name if line.tax_code_id.name else '',bord)\n\t\t\tworksheet.write(x,12,line.tax_amount,numberdos)\n\t\t\tworksheet.write(x,13,line.tc,numbertres)\n\t\t\tworksheet.write(x,14,line.type_document_it.code if line.type_document_it.code else '',bord)\n\t\t\tworksheet.write(x,15,str(line.full_reconcile_id.name) if line.full_reconcile_id.id else '',bord)\n\n\n\t\t\tx = x +1\n\n\t\tworksheet.set_column('A:A', tam_col[0])\n\t\tworksheet.set_column('B:B', tam_col[1])\n\t\tworksheet.set_column('C:C', tam_col[2])\n\t\tworksheet.set_column('D:D', tam_col[3])\n\t\tworksheet.set_column('E:E', tam_col[4])\n\t\tworksheet.set_column('F:F', tam_col[5])\n\t\tworksheet.set_column('G:G', tam_col[6])\n\t\tworksheet.set_column('H:H', tam_col[7])\n\t\tworksheet.set_column('I:I', tam_col[8])\n\t\tworksheet.set_column('J:J', tam_col[9])\n\t\tworksheet.set_column('K:K', tam_col[10])\n\t\tworksheet.set_column('L:L', tam_col[11])\n\t\tworksheet.set_column('M:M', tam_col[12])\n\t\tworksheet.set_column('N:N', tam_col[13])\n\t\tworksheet.set_column('O:O', tam_col[14])\n\t\tworksheet.set_column('P:P', tam_col[15])\n\t\tworksheet.set_column('Q:Q', tam_col[16])\n\t\tworksheet.set_column('R:R', tam_col[17])\n\t\tworksheet.set_column('S:S', tam_col[18])\n\t\tworksheet.set_column('T:T', tam_col[19])\n\n\n\t\tworkbook.close()\n\t\t\n\t\tf = open( direccion + 'tempo_account_move_line.xlsx', 'rb')\n\t\t\n\t\tvals = {\n\t\t\t'output_name': 
'AsientoContable.xlsx',\n\t\t\t'output_file': base64.encodestring(''.join(f.readlines())),\t\t\n\t\t}\n\n\t\tmod_obj = self.env['ir.model.data']\n\t\tact_obj = self.env['ir.actions.act_window']\n\t\tsfs_id = self.env['export.file.save'].create(vals)\n\n\t\treturn {\n\t\t \"type\": \"ir.actions.act_window\",\n\t\t \"res_model\": \"export.file.save\",\n\t\t \"views\": [[False, \"form\"]],\n\t\t \"res_id\": sfs_id.id,\n\t\t \"target\": \"new\",\n\t\t}\n\n\n\nclass account_move_wizard_add_linea(models.TransientModel):\n\t_name = 'account.move.wizard.add.linea'\n\n\tcurrency_id = fields.Many2one('res.currency','Moneda')\n\taccount_id = fields.Many2one('account.account','Cuenta')\n\ttype_change = fields.Float('Tipo Cambio',digits=(12,3))\n\timport_divisa = fields.Float('Importe Divisa', digits=(12,2))\n\tglosa = fields.Char('Glosa',size=200)\n\tdebit = fields.Float('Debe', digits=(12,2))\n\tcredit = fields.Float('Haber', digits=(12,2))\n\tanalytic_id = fields.Many2one('account.analytic.account','Cta. Analítica')\n\n\n\n\tis_pago = fields.Boolean('Se esta registrando un pago')\n\tcomprobante_auto = fields.Many2one('account.move.comprobante','Número Comprobante')\n\tcomprobante_manual = fields.Char('Número Comprobante',size=200)\n\tempresa = fields.Many2one('res.partner','Empresa')\n\n\ttype_doc_id = fields.Many2one('einvoice.catalog.01','Tipo Documento')\n\timpuesto = fields.Many2one('account.tax.code','Impuesto')\n\timporte_impuesto = fields.Float('Importe Impuesto/Base', digits=(12,2))\n\tdate_vencimiento = fields.Date('Fecha Vencimiento')\n\n\trendicion_id = fields.Many2one('account.rendicion.it','Rendicion')\n\n\n\n\tproduct_id = fields.Many2one('product.product', 'Producto')\n\tcantidad = fields.Float('Cantidad')\n\tuom_id = fields.Many2one('product.uom', 'Unidad')\n\tprecio_unitario = fields.Float(string='P.Unit')\n\tetiqueta_analitica = fields.Many2one('account.analytic.tag', string='Etiqueta Analitica')\n\tstock_id = fields.Many2one('stock.location','Almacen')\n\t#comprobante_manual = fields.Many2one('einvoice.catalog.01', 'Numero de Comprobante')\n\n\n\t@api.onchange('comprobante_manual','type_doc_id')\n\tdef onchange_suplier_invoice_number_it(self):\n\t\tif self.comprobante_manual:\n\t\t\tself.comprobante_manual = str(self.comprobante_manual).replace(' ','')\n\t\t\t\n\t\t\tif self.comprobante_manual and self.type_doc_id.id:\n\t\t\t\tself.comprobante_manual = str(self.comprobante_manual).replace(' ','')\n\t\t\t\tt = self.comprobante_manual.split('-')\n\t\t\t\tn_serie = 0\n\t\t\t\tn_documento = 0\n\t\t\t\tself.env.cr.execute(\"select coalesce(n_serie,0), coalesce(n_documento,0) from einvoice_catalog_01 where id = \"+ str(self.type_doc_id.id))\n\t\t\t\t\n\t\t\t\tforelemn = self.env.cr.fetchall()\n\t\t\t\tfor ielem in forelemn:\n\t\t\t\t\tn_serie = ielem[0]\n\t\t\t\t\tn_documento = ielem[1]\n\t\t\t\tif len(t) == 2:\n\t\t\t\t\tparte1= t[0]\n\t\t\t\t\tif len(t[0]) < n_serie:\n\t\t\t\t\t\tfor i in range(0,n_serie-len(t[0])):\n\t\t\t\t\t\t\tparte1 = '0'+parte1\n\t\t\t\t\tparte2= t[1]\n\t\t\t\t\tif len(t[1]) < n_documento:\n\t\t\t\t\t\tfor i in range(0,n_documento-len(t[1])):\n\t\t\t\t\t\t\tparte2 = '0'+parte2\n\t\t\t\t\tself.comprobante_manual = parte1 + '-' + parte2\n\t\t\t\telif len(t) == 1:\n\t\t\t\t\tparte2= t[0]\n\t\t\t\t\tif len(t[0]) < n_documento:\n\t\t\t\t\t\tfor i in range(0,n_documento-len(t[0])):\n\t\t\t\t\t\t\tparte2 = '0'+parte2\n\t\t\t\t\tself.comprobante_manual = parte2\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t@api.onchange('debit')\n\tdef onchange_debit(self):\n\t\tif 
self.debit==0:\n\t\t\treturn\n\t\tself.credit=0\n\n\t@api.onchange('credit')\n\tdef onchange_credit(self):\n\t\tif self.credit==0:\n\t\t\treturn\n\t\tself.debit=0\n\n\n\t@api.onchange('type_change','currency_id','import_divisa')\n\tdef onchange_type_change_currency(self):\n\t\tif self.currency_id.id:\n\t\t\tif self.import_divisa > 0:\n\t\t\t\tself.debit = float(\"%0.2f\"% ( (self.import_divisa) * float(self.type_change) ))\n\t\t\t\tself.credit = 0\n\t\t\telif self.import_divisa < 0 :\n\t\t\t\tself.credit = float(\"%0.2f\"% ( (-self.import_divisa) * float(self.type_change) ))\n\t\t\t\tself.debit = 0\n\t\t\telse:\n\t\t\t\tself.credit = 0\n\t\t\t\tself.debit = 0\n\n\t@api.multi\n\tdef do_rebuild(self):\n\t\tif 'active_ids_line' in self._context:\n\t\t\ttt = self._context['active_ids_line']\n\n\t\t\tcomprobante = self.comprobante_manual if self.is_pago == False else self.comprobante_auto.name\n\t\t\tdata = {\n\t\t\t\t'name':self.glosa,\n\t\t\t\t'partner_id': self.empresa.id,\n\t\t\t\t'nro_comprobante': comprobante if comprobante else '',\n\t\t\t\t'account_id':self.account_id.id,\n\t\t\t\t'date_maturity': self.date_vencimiento,\n\t\t\t\t'debit': self.debit,\n\t\t\t\t'credit':self.credit,\n\t\t\t\t'analytic_account_id': self.analytic_id.id,\n\t\t\t\t'amount_currency': self.import_divisa,\n\t\t\t\t'currency_id': self.currency_id.id,\n\t\t\t\t'tax_code_id':self.impuesto.id,\n\t\t\t\t'tax_amount':self.importe_impuesto,\n\t\t\t\t'tc':self.type_change,\n\t\t\t\t'type_document_it':self.type_doc_id.id,\n\t\t\t\t'rendicion_id':self.rendicion_id.id,\n\t\t\t\t'product_id': self.product_id.id,\n\t\t\t\t'product_uom_id':self.uom_id.id,\n\t\t\t\t'quantity':self.cantidad,\n\t\t\t\t'location_id': self.stock_id.id,\n\t\t\t\t'analytic_tag_ids': [(6,0,[self.etiqueta_analitica.id])] if self.etiqueta_analitica.id else [(6,0,[])],\n\t\t\t}\n\t\t\tobj_linea = self.env['account.move.line'].search([('id','=',tt)])[0].write(data)\n\t\t\treturn True\n\n\t\tt = self._context['active_ids']\n\t\tif len(t)>1:\n\t\t\traise osv.except_osv('Alerta!', \"Solo debe seleccionar 1 Asiento Contable.\")\n\t\tm = self.env['account.move'].search([('id','=',t[0])])[0]\n\t\tif m.state != 'draft':\n\t\t\traise osv.except_osv('Alerta!', \"Solo se puede agregar si el Asiento Contable esta en borrador.\")\n\n\n\t\tcomprobante = self.comprobante_manual if self.is_pago == False else self.comprobante_auto.name\n\t\tdata = {\n\t\t\t'name':self.glosa,\n\t\t\t'partner_id': self.empresa.id,\n\t\t\t'nro_comprobante': comprobante if comprobante else '',\n\t\t\t'account_id':self.account_id.id,\n\t\t\t'date_maturity': self.date_vencimiento,\n\t\t\t'debit': self.debit,\n\t\t\t'credit':self.credit,\n\t\t\t'analytic_account_id': self.analytic_id.id,\n\t\t\t'amount_currency': self.import_divisa,\n\t\t\t'currency_id': self.currency_id.id,\n\t\t\t'tax_code_id':self.impuesto.id,\n\t\t\t'tax_amount':self.importe_impuesto,\n\t\t\t'tc':self.type_change,\n\t\t\t'type_document_it':self.type_doc_id.id,\n\t\t\t'rendicion_id':self.rendicion_id.id,\n\t\t\t'move_id':m.id,\n\t\t\t'product_id': self.product_id.id,\n\t\t\t'product_uom_id':self.uom_id.id,\n\t\t\t'quantity':self.cantidad,\n\t\t\t'location_id': self.stock_id.id,\t\t\t\n\t\t}\n\t\tj = self.env['account.move.line'].create(data)\n\t\tif self.etiqueta_analitica.id:\n\t\t\tj.write({'analytic_tag_ids': [(6,0,[self.etiqueta_analitica.id])]})\n\t\telse:\n\t\t\tj.write({'analytic_tag_ids': [(6,0,[])]})\n\n\t\tm.write({'line_ids': [(4,j.id)]})\n\n\t\treturn True\n\n\n\n\n\n\n\n\n\n\n\nclass 
account_move_pdf(osv.TransientModel):\n\t_name = 'account.move.pdf'\n\n\n\t@api.multi\n\tdef do_rebuild(self):\n\t\t\n\t\tif 'active_ids' in self.env.context:\n\t\t\tobj_move = self.env['account.move'].search([('id','=',self.env.context['active_ids'][0])])[0]\n\n\t\t\tself.reporteador(obj_move)\n\t\t\t\n\t\t\timport sys\n\t\t\treload(sys)\n\t\t\tsys.setdefaultencoding('iso-8859-1')\n\t\t\tmod_obj = self.env['ir.model.data']\n\t\t\tact_obj = self.env['ir.actions.act_window']\n\t\t\timport os\n\n\t\t\tdireccion = self.env['main.parameter'].search([])[0].dir_create_file\n\t\t\tvals = {\n\t\t\t\t'output_name': 'AsientoContable.pdf',\n\t\t\t\t'output_file': open(direccion + \"AsientoContable.pdf\", \"rb\").read().encode(\"base64\"),\t\n\t\t\t}\n\t\t\tsfs_id = self.env['export.file.save'].create(vals)\n\t\t\treturn {\n\t\t\t\t\"type\": \"ir.actions.act_window\",\n\t\t\t\t\"res_model\": \"export.file.save\",\n\t\t\t\t\"views\": [[False, \"form\"]],\n\t\t\t\t\"res_id\": sfs_id.id,\n\t\t\t\t\"target\": \"new\",\n\t\t\t}\n\n\n\t@api.multi\n\tdef cabezera(self,c,wReal,hReal,obj_move,titulo):\n\n\t\tc.setFont(\"Calibri-Bold\", 10)\n\t\tc.setFillColor(black)\n\t\tc.drawCentredString((wReal/2)+20,hReal, self.env[\"res.company\"].search([])[0].name.upper())\n\t\tc.drawCentredString((wReal/2)+20,hReal-12, \"ASIENTO CONTABLE: \"+ obj_move.name + \" - \" + obj_move.journal_id.name )\n\n\t\tc.setFont(\"Calibri-Bold\", 8)\n\n\t\tc.drawString( 10,hReal-48, 'Empresa:')\n\t\tc.drawString( 400,hReal-36, 'Referencia:')\n\t\tc.drawString( 400,hReal-48, 'Fecha:')\n\n\n\t\tc.setFont(\"Calibri\", 8)\n\t\tc.drawString( 10+90,hReal-48, obj_move.partner_id.name if obj_move.partner_id.name else '')\n\t\tc.drawString( 400+60,hReal-36, obj_move.ref if obj_move.ref else '')\n\t\tc.drawString( 400+60,hReal-48, obj_move.date if obj_move.date else '')\n\n\n\t\tstyle = getSampleStyleSheet()[\"Normal\"]\n\t\tstyle.leading = 8\n\t\tstyle.alignment= 1\n\t\tparagraph1 = Paragraph(\n\t\t \"Nombre\",\n\t\t style\n\t\t)\n\t\tparagraph2 = Paragraph(\n\t\t \"Empresa\",\n\t\t style\n\t\t)\n\t\tparagraph3 = Paragraph(\n\t\t \"Comprobante\",\n\t\t style\n\t\t)\n\t\tparagraph4 = Paragraph(\n\t\t \"Cuenta\",\n\t\t style\n\t\t)\n\t\tparagraph5 = Paragraph(\n\t\t \"Fecha V.\",\n\t\t style\n\t\t)\n\t\tparagraph6 = Paragraph(\n\t\t \"Debe\",\n\t\t style\n\t\t)\n\t\tparagraph7 = Paragraph(\n\t\t \"Haber\",\n\t\t style\n\t\t)\n\t\tparagraph8 = Paragraph(\n\t\t \"Cta. 
Analítica\",\n\t\t style\n\t\t)\n\t\tparagraph9 = Paragraph(\n\t\t \"Importe Divisa\",\n\t\t style\n\t\t)\n\t\tparagraph10 = Paragraph(\n\t\t \"Divisa\",\n\t\t style\n\t\t)\n\t\tparagraph11 = Paragraph(\n\t\t \"TC SUNAT\",\n\t\t style\n\t\t)\n\t\tparagraph12 = Paragraph(\n\t\t \"TD\",\n\t\t style\n\t\t)\n\n\t\tparagraph13 = Paragraph(\n\t\t \"Cuenta\",\n\t\t style\n\t\t)\n\t\tparagraph14 = Paragraph(\n\t\t \"Descripción\",\n\t\t style\n\t\t)\n\t\tparagraph15 = Paragraph(\n\t\t \"Debe\",\n\t\t style\n\t\t)\n\t\tparagraph16 = Paragraph(\n\t\t \"Haber\",\n\t\t style\n\t\t)\n\n\t\tif titulo == 1:\n\t\t\tdata= [[ paragraph1 , paragraph2 , paragraph3 , paragraph4, paragraph5 , paragraph6 , paragraph7 ,paragraph12]]\n\t\t\tt=Table(data,colWidths=( 80,120, 80, 90, 50,60,60,25), rowHeights=(9))\n\t\t\tt.setStyle(TableStyle([\n\t\t\t\t('GRID',(0,0),(-1,-1), 1, colors.black),\n\t\t\t\t('ALIGN',(0,0),(-1,-1),'LEFT'),\n\t\t\t\t('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n\t\t\t\t('TEXTFONT', (0, 0), (-1, -1), 'Calibri-Bold'),\n\t\t\t\t('FONTSIZE',(0,0),(-1,-1),4),\n\t\t\t\t('BACKGROUND', (0, 0), (-1, -1), colors.gray)\n\t\t\t]))\n\t\telif titulo == 2:\n\t\t\tdata= [[ paragraph8 ,paragraph9,paragraph10,paragraph11]]\n\t\t\tt=Table(data,colWidths=(100,100,50,60), rowHeights=(9))\n\t\t\tt.setStyle(TableStyle([\n\t\t\t\t('GRID',(0,0),(-1,-1), 1, colors.black),\n\t\t\t\t('ALIGN',(0,0),(-1,-1),'LEFT'),\n\t\t\t\t('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n\t\t\t\t('TEXTFONT', (0, 0), (-1, -1), 'Calibri-Bold'),\n\t\t\t\t('FONTSIZE',(0,0),(-1,-1),4),\n\t\t\t\t('BACKGROUND', (0, 0), (-1, -1), colors.gray)\n\t\t\t]))\n\t\telse:\n\t\t\tdata= [[ paragraph13 ,paragraph14,paragraph15,paragraph16]]\n\t\t\tt=Table(data,colWidths=(70,130,60,60), rowHeights=(9))\n\t\t\tt.setStyle(TableStyle([\n\t\t\t\t('GRID',(0,0),(-1,-1), 1, colors.black),\n\t\t\t\t('ALIGN',(0,0),(-1,-1),'LEFT'),\n\t\t\t\t('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n\t\t\t\t('TEXTFONT', (0, 0), (-1, -1), 'Calibri-Bold'),\n\t\t\t\t('FONTSIZE',(0,0),(-1,-1),4),\n\t\t\t\t('BACKGROUND', (0, 0), (-1, -1), colors.gray)\n\t\t\t]))\n\n\n\t\tt.wrapOn(c,20,500)\n\t\tt.drawOn(c,20,hReal-85)\n\n\t@api.multi\n\tdef x_aument(self,a):\n\t\ta[0] = a[0]+1\n\n\t@api.multi\n\tdef reporteador(self,obj_move):\n\n\t\timport sys\n\t\treload(sys)\n\t\tsys.setdefaultencoding('iso-8859-1')\n\n\n\t\tpdfmetrics.registerFont(TTFont('Calibri', 'Calibri.ttf'))\n\t\tpdfmetrics.registerFont(TTFont('Calibri-Bold', 'CalibriBold.ttf'))\n\n\t\twidth ,height = A4 # 595 , 842\n\t\twReal = width- 30\n\t\thReal = height - 40\n\n\t\tdireccion = self.env['main.parameter'].search([])[0].dir_create_file\n\t\tc = canvas.Canvas( direccion + \"AsientoContable.pdf\", pagesize= A4 )\n\t\tinicio = 0\n\t\tpos_inicial = hReal-83\n\t\tlibro = None\n\t\tvoucher = None\n\t\ttotal = 0\n\t\tdebeTotal = 0\n\t\thaberTotal = 0\n\t\tpagina = 1\n\t\ttextPos = 0\n\t\t\n\t\tself.cabezera(c,wReal,hReal,obj_move,1)\n\n\n\t\tposicion_indice = 1\n\n\t\tfor i in obj_move.line_ids:\n\t\t\tc.setFont(\"Calibri\", 8)\n\t\t\tpagina, pos_inicial = self.verify_linea(c,wReal,hReal,pos_inicial,12,pagina,1,obj_move)\n\t\t\t\n\t\t\tc.drawString( 10 ,pos_inicial, str(posicion_indice) )\n\t\t\tc.drawString( 22 ,pos_inicial, self.particionar_text( i.name,70) )\n\t\t\tc.drawString( 102 ,pos_inicial, self.particionar_text( i.partner_id.name if i.partner_id.id else '',100) )\n\t\t\tc.drawString( 222 ,pos_inicial,self.particionar_text( i.nro_comprobante if i.nro_comprobante else '',70) )\n\t\t\tc.drawString( 302 ,pos_inicial,self.particionar_text( 
(i.account_id.code + ' - ' + i.account_id.name) if i.account_id.id else '',75) )\n\t\t\tc.drawString( 392 ,pos_inicial,self.particionar_text( i.date_maturity if i.date_maturity else '',40) )\n\t\t\tc.drawRightString( 498 ,pos_inicial, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.debit)))\n\t\t\tc.drawRightString( 558 ,pos_inicial, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.credit)))\n\t\t\tc.drawString( 562 ,pos_inicial,self.particionar_text( i.type_document_it.code if i.type_document_it.id else '',20) )\n\n\t\t\tc.line( 20, pos_inicial-2, 585 ,pos_inicial-2)\n\n\t\t\ttamanios_x = [80,120, 80, 90, 50,60,60,25]\n\n\t\t\tacum_tx = 20\n\t\t\tfor i in tamanios_x:\n\t\t\t\tc.line( acum_tx, pos_inicial-2, acum_tx ,pos_inicial+12)\n\t\t\t\tacum_tx += i\n\t\t\tc.line( acum_tx, pos_inicial-2, acum_tx ,pos_inicial+12)\n\n\t\t\tposicion_indice += 1\n\n\n\n\t\tposicion_indice= 1\n\n\n\n\t\tpagina, pos_inicial = self.verify_linea(c,wReal,hReal,pos_inicial,36,pagina,2,obj_move)\n\n\n\n\t\tstyle = getSampleStyleSheet()[\"Normal\"]\n\t\tstyle.leading = 8\n\t\tstyle.alignment= 1\n\t\tparagraph1 = Paragraph(\n\t\t \"Nombre\",\n\t\t style\n\t\t)\n\t\tparagraph2 = Paragraph(\n\t\t \"Empresa\",\n\t\t style\n\t\t)\n\t\tparagraph3 = Paragraph(\n\t\t \"Comprobante\",\n\t\t style\n\t\t)\n\t\tparagraph4 = Paragraph(\n\t\t \"Cuenta\",\n\t\t style\n\t\t)\n\t\tparagraph5 = Paragraph(\n\t\t \"Fecha V.\",\n\t\t style\n\t\t)\n\t\tparagraph6 = Paragraph(\n\t\t \"Debe\",\n\t\t style\n\t\t)\n\t\tparagraph7 = Paragraph(\n\t\t \"Haber\",\n\t\t style\n\t\t)\n\t\tparagraph8 = Paragraph(\n\t\t \"Cta. Analítica\",\n\t\t style\n\t\t)\n\t\tparagraph9 = Paragraph(\n\t\t \"Importe Divisa\",\n\t\t style\n\t\t)\n\t\tparagraph10 = Paragraph(\n\t\t \"Divisa\",\n\t\t style\n\t\t)\n\t\tparagraph11 = Paragraph(\n\t\t \"TC SUNAT\",\n\t\t style\n\t\t)\n\t\tparagraph12 = Paragraph(\n\t\t \"TD\",\n\t\t style\n\t\t)\n\n\t\tdata= [[ paragraph8 ,paragraph9,paragraph10,paragraph11]]\n\t\tt=Table(data,colWidths=(100,100,50,60), rowHeights=(9))\n\t\tt.setStyle(TableStyle([\n\t\t\t('GRID',(0,0),(-1,-1), 1, colors.black),\n\t\t\t('ALIGN',(0,0),(-1,-1),'LEFT'),\n\t\t\t('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n\t\t\t('TEXTFONT', (0, 0), (-1, -1), 'Calibri-Bold'),\n\t\t\t('FONTSIZE',(0,0),(-1,-1),4),\n\t\t\t('BACKGROUND', (0, 0), (-1, -1), colors.gray)\n\t\t]))\n\n\t\tt.wrapOn(c,20,pos_inicial)\n\t\tt.drawOn(c,20,pos_inicial)\n\n\n\t\tfor i in obj_move.line_ids:\n\t\t\tc.setFont(\"Calibri\", 8)\n\t\t\tpagina, pos_inicial = self.verify_linea(c,wReal,hReal,pos_inicial,12,pagina,2,obj_move)\n\t\t\t\n\t\t\tc.drawString( 10 ,pos_inicial, str(posicion_indice) )\n\t\t\tc.drawString( 22 ,pos_inicial,self.particionar_text( i.analytic_account_id.name if i.analytic_account_id.id else '', 43) )\n\t\t\tc.drawRightString( 218 ,pos_inicial, '{:,.2f}'.format(decimal.Decimal (\"%0.2f\" % i.amount_currency)))\n\t\t\tc.drawString( 222 ,pos_inicial,self.particionar_text( i.currency_id.name if i.currency_id.id else '',24) )\n\t\t\tc.drawRightString( 328 ,pos_inicial, \"%0.3f\" % i.tc)\n\t\t\t\n\t\t\tc.line( 20, pos_inicial-2, 330 ,pos_inicial-2)\n\n\n\t\t\ttamanios_x = [100,100,50,60]\n\n\t\t\tacum_tx = 20\n\t\t\tfor i in tamanios_x:\n\t\t\t\tc.line( acum_tx, pos_inicial-2, acum_tx ,pos_inicial+12)\n\t\t\t\tacum_tx += i\n\t\t\tc.line( acum_tx, pos_inicial-2, acum_tx ,pos_inicial+12)\n\n\t\t\tposicion_indice += 1\n\t\t\n\t\tpagina, pos_inicial = self.verify_linea(c,wReal,hReal,pos_inicial,24,pagina,2,obj_move)\n\n\n\n\n\n\n\t\tc.drawString( 10 ,pos_inicial, 'HECHO POR:' 
)\n\t\tc.drawString( 60 ,pos_inicial, obj_move.create_uid.name if obj_move.create_uid.id else self.env.user.name )\n\n\n\t\tpagina, pos_inicial = self.verify_linea(c,wReal,hReal,pos_inicial,50,pagina,3,obj_move)\n\n\t\tc.line( 125-47, pos_inicial+10, 125+47 ,pos_inicial+10)\n\t\tc.line( 290-47, pos_inicial+10, 290+47 ,pos_inicial+10)\n\t\tc.line( 165+290-47, pos_inicial+10, 165+290+47 ,pos_inicial+10)\n\t\tc.drawCentredString( 125 ,pos_inicial, 'HECHO POR:' )\n\t\tc.drawCentredString( 165+290 ,pos_inicial, 'REVISADO:' )\n\t\tc.drawCentredString( 290 ,pos_inicial, 'APROBADO:' )\n\n\t\tc.save()\n\n\n\t@api.multi\n\tdef particionar_text(self,c,tam):\n\t\ttet = \"\"\n\t\tfor i in range(len(c)):\n\t\t\ttet += c[i]\n\t\t\tlines = simpleSplit(tet,'Calibri',8,tam)\n\t\t\tif len(lines)>1:\n\t\t\t\treturn tet[:-1]\n\t\treturn tet\n\n\t@api.multi\n\tdef verify_linea(self,c,wReal,hReal,posactual,valor,pagina,titulo,obj_move):\n\t\tif posactual <40:\n\t\t\tc.showPage()\n\t\t\tself.cabezera(c,wReal,hReal,obj_move,titulo)\n\n\t\t\tc.setFont(\"Calibri-Bold\", 8)\n\t\t\t#c.drawCentredString(300,25,'Pág. ' + str(pagina+1))\n\t\t\treturn pagina+1,hReal-83\n\t\telse:\n\t\t\treturn pagina,posactual-valor","sub_path":"account_move_advanceadd_it/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":25333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"111670491","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 30 23:32:51 2021\n\n@author: HMA\n\"\"\"\n#import libraries\nimport numpy as np\nfrom flask import Flask, render_template,request\nimport pickle\nimport pandas as pd\nfrom scipy.special import inv_boxcox\n\n\ndef computeSubVisFatModel1(age,sex,waist_cm,hip_cm,bmi,weight_kg,ancestry):\n\n    age = int(age) \n    sex = str(sex)\n    waist_cm = float(waist_cm)\n    hip_cm = float(hip_cm)\n    bmi = float(bmi)\n    weight_kg = float(weight_kg)\n    ancestry = str(ancestry)\n\n    list_ancestry = ['AFR','AMR','CSA','EAS','EUR','MID']\n    list_ancestry.remove(ancestry)\n    \n    feature_full_list = ['age_when_attended_assessment_centre','Sex','Waist_circumference','Hip_circumference',\n                         'Body_mass_index_BMI','weight','Continental_genetic_ancestry']\n\n    test_features=[[age,sex,waist_cm,hip_cm,bmi,weight_kg,ancestry]]\n    features = pd.DataFrame(test_features, columns = feature_full_list)\n    \n    if sex == 'male':\n        modelFilename = 'model_1_sub_test.sav'\n\n    else:\n        modelFilename = 'model_1_sub_test_female.sav'\n    \n\n    data = []\n    for i in range(len(list_ancestry)):\n        values = [np.nan,sex,np.nan,np.nan,np.nan,np.nan,list_ancestry[i]]\n        zipped = zip(feature_full_list, values)\n        a_dictionary = dict(zipped)\n        print(a_dictionary)\n        data.append(a_dictionary)\n    \n    features = features.append(data, True)\n    features_vis = features.copy()\n    \n    if sex == 'male':\n        features.Sex = features.Sex.map({\"female\":0, \"male\":1})\n    else:\n        features= features.drop('Sex', axis = 1)\n    \n    features = pd.get_dummies(features)\n    features = features.dropna()\n    features = np.array(features)\n    \n    model = pickle.load(open(modelFilename, 'rb'))\n    sub_fat_value = model.predict(features)\n    \n    features_vis.Sex = features_vis.Sex.map({\"female\":0, \"male\":1})\n    features_vis = pd.get_dummies(features_vis)\n    features_vis = features_vis.dropna()\n    features_vis = np.array(features_vis)\n    \n    best_lam = 0.42846265290966234\n    modelFilename_vis = 'model_1_vis_test.sav'\n    model_vis = pickle.load(open(modelFilename_vis, 'rb'))\n    vis_fat_value = 
model_vis.predict(features_vis)\n vis_fat_value0 = inv_boxcox(vis_fat_value,best_lam)\n\n \n return sub_fat_value, vis_fat_value0\n\n\napp = Flask(__name__)\n# model = pickle.load(open('model_1_sub_test.sav', 'rb'))\n\n#default page of our web-app\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n#To use the predict button in our web-app\n@app.route('/predict',methods=['POST'])\ndef predict():\n \n \n age = request.values.get('age')\n sex = request.values.getlist('sex')[0]\n waist_cm = request.values.get('waist_cm')\n hip_cm = request.values.get('hip_cm')\n bmi = request.values.get('bmi')\n weight_kg = request.values.get('weight_kg')\n ancestry = request.values.getlist('ancestry')[0]\n \n #For rendering results on HTML GUI\n # string_features = [str(x) for x in request.form.values()]\n # age = int(string_features[0])\n # sex = string_features[1]\n # waist_cm = float(string_features[2])\n # hip_cm = float(string_features[3])\n # bmi = float(string_features[4])\n # weight_kg = float(string_features[5])\n # ancestry = string_features[6]\n \n # ,sex,waist_cm,hip_cm,weight_kg,ancestry\n \n # return render_template(age)\n\n # int_features = [float(x) for x in request.form.values()]\n prediction, prediction_vis = computeSubVisFatModel1(age,sex,waist_cm,hip_cm,bmi,weight_kg,ancestry)\n \n output = prediction[0]\n output_s = np.round(output/1000,4)\n\n output_v = prediction_vis[0]\n output_v0 = np.round(output_v/1000,4)\n\n return render_template('index.html', \n sub_prediction_text=\"Subcutaneous fat: {} \\u00D7 10\\u00b3 ml\".format(output_s),\n vis_prediction_text=\"Visceral fat: {} \\u00D7 10\\u00b3 ml\".format(output_v0))\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n # app.run(debug = False)\n \n ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"419242333","text":"# -*- coding: utf-8 -*-\n# @Author: zhaoa\n# @Date: 2018-10-15 06:38:38\n# @Last Modified by: zhaoa\n# @Last Modified time: 2018-10-15 07:03:26\nimport urllib.request\nimport gevent\nfrom gevent import monkey\nimport time\n\nmonkey.patch_all()\n\n\ndef downloader(img_name, img_url):\n req = urllib.request.urlopen(img_url)\n img_b = req.read()\n print(img_b)\n\n with open(\"./img/{}\".format(img_name), \"wb\") as wf:\n wf.write(img_b)\n\n\ndef main():\n gevent.joinall([\n gevent.spawn(downloader, \"1.jpg\", \"https://rpic.douyucdn.cn/asrpic/181015/5577094_5627377_42819_2_0647.jpg\", ),\n gevent.spawn(downloader, \"2.jpg\",\n \"https://rpic.douyucdn.cn/live-cover/appCovers/2018/08/19/5537839_20180819105720_small.jpg\", ),\n ])\n # with open(\"./address\", \"r\") as rfile:\n # while True:\n # img_url = rfile.readline().split()\n # if img_url != []:\n # img_url = img_url.pop()\n # time.sleep(1)\n # print(img_url)\n # # for i in\n # # gev = gevent.spawn(downloader, img_url)\n # # gev.join()\n # else:\n # break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"10月-Python和Linux高级编程/02多任务/03协程/图片下载器/main-v1.py","file_name":"main-v1.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"173520539","text":"import unittest2 as unittest\nfrom zope import event\nfrom collective.conference.testing import INTEGRATION_TESTING\nfrom collective.conference.testing import FUNCTIONAL_TESTING\n\nfrom Products.CMFCore.utils import getToolByName\n\nfrom zope.component import 
getUtility\n\nfrom collective.conference.events import FollowedEvent\nfrom collective.conference.events import UnfollowedEvent\n\nfrom collective.conference.events import RegisteredConfEvent,UnRegisteredConfEvent,\\\nRegisteredSessionEvent\n\n\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import setRoles\n\nclass TestEvent(unittest.TestCase):\n \n layer = INTEGRATION_TESTING\n \n def test_followed_event(self):\n from collective.conference.conference import IConference\n from collective.conference.interfaces import IEvaluate\n\n portal = self.layer['portal']\n setRoles(portal, TEST_USER_ID, ('Manager',))\n portal.invokeFactory('collective.conference.conference', 'conference1',\n title=u'conference1',\n participants=['member1'])\n portal['conference1'].invokeFactory('collective.conference.session', 'session1',\n description=u\"description\",\n additional=u\"additional\"\n\n )\n \n file=portal['conference1']['session1']\n file=portal['conference1'] \n event.notify(FollowedEvent(file))\n \n mp = getToolByName(portal,'portal_membership')\n userobject = mp.getAuthenticatedMember()\n username = userobject.getUserName()\n# import pdb\n# pdb.set_trace()\n questionlist = userobject.getProperty('myquestions')\n evlute = IEvaluate(file)\n \n self.assertTrue(file.id in questionlist)\n \n self.assertTrue(evlute.favavailable(username))\n self.assertEqual(1, evlute.followerNum)\n \n event.notify(UnfollowedEvent(file))\n \n mp = getToolByName(portal,'portal_membership')\n userobject = mp.getAuthenticatedMember()\n username = userobject.getUserName()\n questionlist = userobject.getProperty('myquestions')\n evlute = IEvaluate(file)\n \n self.assertFalse(file.id in questionlist)\n self.assertFalse(evlute.available(username))\n self.assertEqual(0, evlute.followerNum)\n# fire register conf event \n event.notify(RegisteredConfEvent(file))\n clists = userobject.getProperty('conferences')\n plists = file.participants\n self.assertTrue(file.id in clists)\n# username = '12@qq.com'\n self.assertTrue(username in plists)\n \n recorders = userobject.getProperty('bonusrecorder')\n useremail = userobject.getUserName()\n \n self.assertTrue(useremail in recorders[-1])\n self.assertTrue(file.title in recorders[-1]) \n# fire unregister conf event \n event.notify(UnRegisteredConfEvent(file))\n clists = userobject.getProperty('conferences')\n plists = file.participants\n self.assertFalse(file.id in clists)\n# username = '12@qq.com'\n self.assertFalse(username in plists)\n \n recorders = userobject.getProperty('bonusrecorder')\n useremail = userobject.getUserName()\n \n self.assertTrue(useremail in recorders[-1])\n self.assertTrue(file.title in recorders[-1])\n \n \n# fire register session event \n event.notify(RegisteredSessionEvent(file))\n slists = userobject.getProperty('speeches')\n plists = file.participants\n speaklists = file.speakers \n \n self.assertTrue(file.id in slists)\n self.assertTrue(username in plists)\n self.assertTrue(username in speaklists) \n\n \nclass TestRendering(unittest.TestCase):\n \n layer = FUNCTIONAL_TESTING","sub_path":"collective/conference/tests/test_event.py","file_name":"test_event.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"510783446","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import View\n\nfrom ..models import Tag\nfrom ..utils import CustomListViewMixin\n\n\nclass TagDetail(CustomListViewMixin, View):\n \"\"\"\n View 
posts with specified tag\n \"\"\"\n template_name = 'blog/tag_detail.html'\n model = Tag\n paginate_by = 2\n context_list_name = 'post_list'\n context_object_name = 'tag'\n\n def get_queryset(self, **kwargs):\n user = kwargs.get('request').user\n detail_object = self.get_detail_object()\n\n if user.is_superuser is True:\n queryset = detail_object.blog_posts.all()\n else:\n queryset = detail_object.blog_posts.published()\n\n return queryset\n\n def get(self, request, slug):\n self.set_detail_object(get_object_or_404(self.model, slug__iexact=slug))\n context = self.get_context(**{'request': request})\n\n return render(request, self.template_name, context)\n","sub_path":"blog/views/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"226307715","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url('^metadata.js$', views.metadata, name='shortcodes_metadata'),\n url('^dialog/(?P\\w+)', views.dialog, name='shortcodes_dialog'),\n url('^insert/(?P\\w+)/(?P\\w+)', views.insert_shortcode,\n name='insert_shortcode'),\n]\n","sub_path":"shortcodes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"329899577","text":"#!/usr/bin/env python\n\nimport sys\nimport json\nimport argparse\nfrom pprint import pformat\n\nimport jmespath\nfrom jmespath import exceptions\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('expression')\n parser.add_argument('-f', '--filename',\n help=('The filename containing the input data. '\n 'If a filename is not given then data is '\n 'read from stdin.'))\n parser.add_argument('--ast', action='store_true',\n help=('Pretty print the AST, do not search the data.'))\n args = parser.parse_args()\n expression = args.expression\n if args.ast:\n # Only print the AST\n expression = jmespath.compile(args.expression)\n sys.stdout.write(pformat(expression.parsed))\n sys.stdout.write('\\n')\n return 0\n if args.filename:\n with open(args.filename, 'r') as f:\n data = json.load(f)\n else:\n data = sys.stdin.read()\n data = json.loads(data)\n try:\n sys.stdout.write(json.dumps(\n jmespath.search(expression, data), indent=4, ensure_ascii=False))\n sys.stdout.write('\\n')\n except exceptions.ArityError as e:\n sys.stderr.write(\"invalid-arity: %s\\n\" % e)\n return 1\n except exceptions.JMESPathTypeError as e:\n sys.stderr.write(\"invalid-type: %s\\n\" % e)\n return 1\n except exceptions.UnknownFunctionError as e:\n sys.stderr.write(\"unknown-function: %s\\n\" % e)\n return 1\n except exceptions.ParseError as e:\n sys.stderr.write(\"syntax-error: %s\\n\" % e)\n return 1\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"bin/jp.py","file_name":"jp.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"410417098","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = 'rrmerugu'\n\nimport os, re, sys, shutil\nfrom distutils.core import setup\nfrom setuptools import find_packages\n\n# readme_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.md')\n# print readme_file\n# readme = open(readme_file).read()\n\n\n\n\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = 
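The jp.py script above is a thin CLI wrapper around the jmespath library. The same search and precompile calls can be tried in-process, assuming the jmespath package is installed; the data dict here is made up for illustration.

# Minimal in-process use of jmespath (assumes `pip install jmespath`).
import jmespath

data = {"machines": [{"name": "a", "state": "running"},
                     {"name": "b", "state": "stopped"}]}

# One-off search, as the script's normal path does:
print(jmespath.search("machines[?state=='running'].name", data))  # ['a']

# Precompiled expression, the object whose .parsed the --ast flag prints:
expr = jmespath.compile("machines[0].name")
print(expr.search(data))  # 'a'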
open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\n\nversion = get_version('rsquarelabs_core')\n\nif sys.argv[-1] == 'publish':\n try:\n import pypandoc\n except ImportError:\n print(\"pypandoc not installed.\\nUse `pip install pypandoc`.\\nExiting.\")\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %s -m 'version %s'\" % (version, version))\n print(\" git push --tags\")\n shutil.rmtree('dist')\n shutil.rmtree('build')\n shutil.rmtree('djangorestframework.egg-info')\n sys.exit()\n\nreadme = \"This is the library of automation pipeline modules developed at RSQUARE LABS.\"\n\ngithub_url = 'http://github.com/rsquarelabs/rsquarelabs-core'\nversion = \"0.0.7dev\"\n\nsetup(name='rsquarelabs-core',\nversion= version,\ndescription='This is the library of automation pipeline modules developed at RSQUARE LABS.',\nlong_description= readme,\nauthor='Ravi RT Merugu',\nauthor_email='rrmerugu@gmail.com',\nurl = github_url,\n# packages = find_packages(),\n# package_data={'rsquarelabs_core' : ['*']},\npackages=get_packages('rsquarelabs_core'),\npackage_data=get_package_data('rsquarelabs_core'),\ninstall_requires=['bottle','termcolor','requests'],\ndownload_url='%s/tarball/%s' %(github_url,version ),\nkeywords = ['Computational Biology', 'Molecular Modelling', 'Bioinformatics', 'Automation'])","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"152335356","text":"#05.09, 05.12\r\n\r\ndef drawSquare(x,y,n):\r\n import turtle\r\n \r\n turtle.penup()\r\n turtle.setposition(x, y)\r\n turtle.pendown()\r\n \r\n for x in range(5):\r\n turtle.forward(n)\r\n turtle.right(90)\r\n turtle.setheading(0)\r\n \r\n \r\ndef drawCircle(x, y, r):\r\n import turtle\r\n \r\n turtle.penup()\r\n turtle.setposition(x, y)\r\n turtle.pendown()\r\n \r\n turtle.circle(r)\r\n \r\n\r\ndef drawTriangle(x, y, m):\r\n import turtle\r\n \r\n turtle.penup()\r\n turtle.setposition(x, y)\r\n turtle.pendown()\r\n \r\n for x in range(3):\r\n turtle.forward(m)\r\n turtle.right(120)\r\n \r\n \r\ndef drawStar():\r\n import turtle\r\n \r\n for x in range(5):\r\n turtle.forward(100)\r\n turtle.right(144)\r\n turtle.forward(100)\r\n turtle.left(72)\r\n \r\n \r\ndef drawBlackSquare(x, y, m):\r\n import turtle\r\n \r\n turtle.penup()\r\n turtle.setposition(x, y)\r\n 
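The get_version() helper in the setup.py above hinges on one regex. Here it is in isolation on an inline string instead of a real __init__.py, so the extraction step can be checked without touching the filesystem.

# The version-extraction regex from get_version(), run standalone.
import re

init_py = "__version__ = '0.0.7dev'\n"
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
assert version == "0.0.7dev"
print(version)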
turtle.pendown()\r\n \r\n turtle.fillcolor('black')\r\n turtle.begin_fill()\r\n for x in range(4):\r\n turtle.forward(m)\r\n turtle.right(90)\r\n turtle.end_fill()\r\n ","sub_path":"05-ModularProgramming/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"542360909","text":"# coding:utf8\n'''\n@Time : 2020/7/8 14:42\n@Author : MaKaiQiang\n@File : test03_infrastructure_modify_case.py\n'''\nimport unittest\nimport ddt\nfrom busniess.InfrastructrueBusniess.infrastructrue_busniess import InfrastructureBusniess\nfrom tools.get_excel_case import GetExcelCase\nfrom tools.log import Logger\nfrom tools.read_conf import ReadConf\nfrom tools.replace_data import ReplaceData\nimport HTMLTestRunner_Chart\nimport time\n\nuuid_file = ReadConf().get_conf('INFRASTRUCTUREUUID').get('infrastructure_uuid')\ninfrastructure_case_file = ReadConf().get_conf('INFRASTRUCTURE').get('infrastructure')\nmodify_excel_data = GetExcelCase(infrastructure_case_file, sheetName='楼栋房屋修改').get_dict_data\n\n\n@ddt.ddt\nclass InfrastructureModifyCase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) -> None:\n cls.log = Logger()\n cls.uuid_data = GetExcelCase(uuid_file, '楼栋房屋').get_dict_data\n cls.infrastructrue_b = InfrastructureBusniess()\n cls.replace_infrastructure_modify = ReplaceData(modify_excel_data, infrastructure_case_file, '楼栋房屋修改')\n cls.replace_infrastructure_modify.replace_data_sheet(cls.uuid_data)\n cls.new_infrastructure_modify_data = GetExcelCase(infrastructure_case_file, '楼栋房屋修改').get_dict_data\n\n def setUp(self) -> None:\n time.sleep(1)\n\n @ddt.data(*modify_excel_data)\n @ddt.unpack\n # @unittest.skip\n def test_infrastructure_modify(self, **kwargs):\n try:\n modify_data = self.new_infrastructure_modify_data[kwargs.get('row') - 1]\n actual_result = self.infrastructrue_b.infrastructure_modify_busniess(**modify_data)\n if actual_result:\n self.assertEqual(kwargs.get('expected_result'), actual_result[0].get('msg'), msg=f'失败用例:{kwargs.get(\"case\")}\\n服务器返回内容:'\n f'{actual_result[0]}\\n响应时间:{actual_result[1]}\\n状态码{actual_result[2]}')\n else:\n raise\n except Exception as e:\n self.log.logger.info(e)\n raise e\n else:\n self.log.logger.info(f'\"{kwargs.get(\"case\")}\"用例执行通过,响应时间:{actual_result[1]},状态码{actual_result[2]}')\n\n\n\nif __name__ == '__main__':\n suite = unittest.makeSuite(InfrastructureCase2, 'test_2_infrastructure_query')\n with open(r'E:\\Auto-interface\\report\\test_report.html', 'wb') as fp:\n runner = HTMLTestRunner_Chart.HTMLTestRunner(\n stream=fp,\n title='My unit test',\n verbosity=2,\n description='This demonstrates the report output by HTMLTestRunner.',\n )\n runner.run(suite)\n fp.close()\n","sub_path":"TestCase/Infrastructure/test03_infrastructure_modify_case.py","file_name":"test03_infrastructure_modify_case.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"557975181","text":"#Uses python3\nimport sys\nimport math\nclass Node:\n def __init__(self, data):\n self.data = data\n self.parent = None\n self.rank = 0\n\n def __str__(self):\n return str(self.data) + \" and parent is \" + str(self.parent.data if self.parent else None)\n\n def __repr__(self):\n return self.__str__()\n\n \nclass DisjointSet:\n def __init__(self):\n self.map = {}\n \n def make_set(self, data):\n node = Node(data)\n node.parent = node\n self.map[data] = node\n\n def find_set(self, 
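The infrastructure test above parametrizes one test method over Excel rows with ddt's @data/@unpack decorators. A stdlib alternative achieving the same per-row reporting is unittest's subTest, sketched here with made-up case data in place of the Excel sheet.

# Stdlib alternative to the ddt-style parametrization: one test method
# looping over cases with subTest, so each row fails independently.
import unittest

CASES = [
    {"case": "rename building", "expected_result": "success"},
    {"case": "move building",   "expected_result": "success"},
]

class InfrastructureModifySubTests(unittest.TestCase):
    def test_modify(self):
        for row in CASES:
            with self.subTest(case=row["case"]):
                actual = "success"  # stand-in for the real API call
                self.assertEqual(row["expected_result"], actual)

if __name__ == "__main__":
    unittest.main()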
data):\n return self._find_set(self.map[data])\n \n def _find_set(self, node):\n parent = node.parent\n if parent == node:\n return parent\n node.parent = self._find_set(node.parent)\n return node.parent\n\n def union(self, data1, data2):\n node1 = self.map[data1]\n node2 = self.map[data2]\n\n parent1 = self._find_set(node1)\n parent2 = self._find_set(node2)\n\n if parent1.data == parent2.data:\n return\n\n if parent1.rank >= parent2.rank:\n if parent1.rank == parent2.rank:\n parent1.rank += parent2.rank\n parent2.parent = parent1\n else:\n parent1.parent = parent2\n\n def __str__(self):\n result = \"\"\n for key, value in self.map.items():\n result += 'value is ' + str(value) + '\\n'\n return result\n\n\nclass Vertex:\n def __init__(self, data):\n self.data = data\n self.connections = {}\n\n def add_neighbours(self, nb, wt):\n self.connections[nb] = wt\n\n def get_connections(self):\n return self.connections\n\n def get_all_connections(self):\n return [(self.data, key.data, value) for key, value in self.connections.items()]\n\n def __str__(self):\n return str(self.data) + ' connected to: ' + str([{key.data: value} for key, value in self.connections.items()])\n\nclass DirectedGraph:\n def __init__(self):\n self.vertexes = {}\n self.size = 0\n\n def add_vertex(self, data):\n self.size += 1\n v = Vertex(data)\n self.vertexes[data] = v\n\n def add_edge(self, fr, to, wt):\n if fr not in self.vertexes:\n self.add_vertex(fr)\n self.size += 1\n \n if to not in self.vertexes:\n self.add_vertex(to)\n self.size += 1\n \n self.vertexes[fr].add_neighbours(self.vertexes[to], wt)\n\n def get_vertex(self, v):\n if v in self.vertexes:\n return self.vertexes[v].get_connections()\n return None\n\n def get_all_vertex(self, v):\n if v in self.vertexes:\n return self.vertexes[v].get_all_connections()\n\n def all_edges(self):\n result = [value.get_all_connections() for key, value in self.vertexes.items()]\n return [item for sublist in result for item in sublist]\n \n def __str__(self):\n result = \"\"\n for key, value in self.vertexes.items():\n result += str(value) + '\\n'\n return result\n\ndef minimum_distance(x, y):\n result = 0.\n #write your code here\n return result\n\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n# def prim(G):\n# cost = {}\n# parent = {}\n# u = None\n# P = PriorityQueue()\n# for v in G.vertexes:\n# if u is None:\n# u = v\n# cost[v] = float('inf')\n# P.add(float('inf'), v)\n# parent[v] = None\n# cost[u] = 0\n# P.change_priority(u, 0)\n# W = 0\n# while not P.isEmpty():\n# v_ele = P.get_min()\n# print(v_ele)\n# vertex = v_ele.data\n# for u, v, w in G.get_all_vertex(vertex):\n# print('fk', u, v, w)\n# if P.check_ele(v) and cost[v] > cost[u] + w:\n# W += w\n# cost[v] = cost[u] + w\n# parent[v] = u\n# P.change_priority(v, cost[v])\n# print(cost)\n# print(parent)\n# print(W)\n\ndef kruskal(G):\n S = DisjointSet()\n for v in G.vertexes:\n S.make_set(v)\n X = set()\n result = []\n E = G.all_edges()\n E = sorted(E, key = lambda item: item[2])\n\n for u, v, w in E:\n if S.find_set(u) != S.find_set(v):\n X.add(u)\n X.add(v)\n result.append((u, v))\n S.union(u, v)\n # print(X)\n # print(result)\n _sum = 0\n for point1, point2 in result:\n _sum += distance(point1[0], point2[0], point1[1], point2[1])\n # print(_sum)\n return _sum\n\ndef distance(x1, x2, y1, y2):\n # x1 = point1.x\n # x2 = point2.x\n # y1 = point1.y\n # y2 = point2.y\n d = math.sqrt(math.pow((x1 - x2), 2) + math.pow((y1 - y2), 2))\n return round(d, 12)\n\n\nif __name__ == '__main__':\n input = 
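The DisjointSet class above combines path compression (in _find_set) with union by rank. The same behaviour is easy to verify with a compact re-implementation over plain ints, shown here as a quick sanity check.

# Compact union-find with path compression and union by rank.
parent = {}
rank = {}

def make_set(x):
    parent[x] = x
    rank[x] = 0

def find(x):
    if parent[x] != x:
        parent[x] = find(parent[x])  # path compression
    return parent[x]

def union(a, b):
    ra, rb = find(a), find(b)
    if ra == rb:
        return
    if rank[ra] < rank[rb]:
        ra, rb = rb, ra          # attach the shorter tree under the taller
    parent[rb] = ra
    if rank[ra] == rank[rb]:
        rank[ra] += 1

for x in range(4):
    make_set(x)
union(0, 1)
union(2, 3)
assert find(0) == find(1) and find(2) == find(3)
assert find(0) != find(2)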
sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n x = data[1::2]\n y = data[2::2]\n G = DirectedGraph()\n for i in range(n):\n for j in range(i+1, n):\n # if i == j:\n # continue\n w = distance(x[i], x[j], y[i], y[j])\n G.add_edge((x[i], y[i]), (x[j], y[j]), w)\n\n print(\"{0:.9f}\".format(kruskal(G)))\n","sub_path":"Sequence4/Graph/Week5/connecting_points-1.py","file_name":"connecting_points-1.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"416632980","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Test Info\nmodel_version = '15'\nimg_name = '2018-03-14_10-31-53-311527_leftImg8bit'\n\nimport keras\nfrom keras_retinanet import models\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\nfrom keras_retinanet.utils.visualization import draw_box, draw_caption\nfrom keras_retinanet.utils.colors import label_color\n\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nimport numpy as np\nimport time\nimport tensorflow as tf\n\ndef get_session():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)\n\n# use this environment flag to change which GPU to use\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nkeras.backend.tensorflow_backend.set_session(get_session())\n\n\n# ## Load RetinaNet model\nmodel_path = os.path.join('.', 'model', 'model_{}.h5'.format(model_version))\nmodel = models.load_model(model_path, backbone_name='resnet50')\n\n\n# load label to names mapping for visualization purposes\nlabels_to_names = {0: 'vehicle fallback',\n 1: 'bus',\n 2: 'car',\n 3: 'truck',\n 4: 'motorcycle',\n 5: 'autorickshaw',\n 6: 'rider',\n 7: 'person',\n 8: 'traffic light',\n 9: 'traffic sign',\n 10: 'animal',\n 11: 'bicycle',\n 12: 'caravan'}\n\n\n# ## Run detection on example\n\nimage = read_image_bgr('./data/images/{}.jpg'.format(img_name))\n\n# copy to draw on\ndraw = image.copy()\ndraw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\n\nimage = preprocess_image(image)\nimage, scale = resize_image(image)\n\n# process image\nstart = time.time()\nboxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))\nprint(\"processing time: \", time.time() - start)\n\n# correct for image scale\nboxes /= scale\n\n# visualize detections\nfor box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < 0.5:\n break\n \n color = label_color(label)\n \n b = box.astype(int)\n draw_box(draw, b, color=color)\n \n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\n draw_caption(draw, b, caption)\n print(caption)\n \nplt.figure(figsize=(50, 50)) \nplt.axis('off')\nplt.imshow(draw)\nplt.savefig('./data/Tests/{}_{}.png'.format(img_name, model_version), bbox_inches='tight')\nplt.show()\n\n","sub_path":"tools/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"72534847","text":"import os\n\nimport discord\nfrom dotenv import load_dotenv\n\n\nfrom table import *\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\n\nbot = commands.Bot(command_prefix='.')\n\ntable = Table()\n\n@bot.event\nasync def on_ready():\n print(f'{bot.user} has connected to Discord!\\n')\n print(bot.guilds)\n\n@bot.event\nasync def on_message(message):\n if message.author == bot.user:\n return\n else:\n await 
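The connecting-points script that just ended computes an MST weight via Kruskal's algorithm. As a cross-check, Prim's algorithm with a heap should give the same total on the same points; on the four corners of a unit square the answer is 3.0.

# Prim's algorithm on a tiny complete graph of 2-D points.
import heapq
import math

points = [(0, 0), (0, 1), (1, 0), (1, 1)]

def dist(p, q):
    return math.hypot(p[0] - q[0], p[1] - q[1])

def prim_weight(pts):
    n = len(pts)
    seen = [False] * n
    heap = [(0.0, 0)]       # (edge weight into the tree, vertex)
    total = 0.0
    while heap:
        w, u = heapq.heappop(heap)
        if seen[u]:
            continue
        seen[u] = True
        total += w
        for v in range(n):
            if not seen[v]:
                heapq.heappush(heap, (dist(pts[u], pts[v]), v))
    return total

print("%.9f" % prim_weight(points))  # 3.000000000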
bot.process_commands(message)\n\n@bot.command()\nasync def commands(ctx, *args):\n message = ''\n for command in bot.commands:\n message += f'{command}\\n'\n await ctx.send(message)\n\n@bot.command()\nasync def join(ctx, *args):\n global table\n author = ctx.message.author\n\n try:\n table.add_player(author)\n except ValueError:\n await ctx.send(f'{author.name} is already at the table.')\n return\n\n await ctx.send(f'{author.name} joined the table!')\n\n@bot.command()\nasync def leave(ctx, *args):\n global table\n author = ctx.message.author\n\n try:\n table.remove_player(author)\n except ValueError:\n await ctx.send(f'{author.name} is already away from the table.')\n return\n\n await ctx.send(f'{author.name} left the table.')\n\n@bot.command()\nasync def reset_table(ctx, *args):\n global table\n table = Table()\n await ctx.send('Table has reset!')\n\n@bot.command()\nasync def players(ctx, *args):\n global table\n player_names = []\n for player in table.players:\n player_names.append(player.name)\n if len(player_names) == 0:\n await ctx.send('Players: [none]')\n return\n await ctx.send('Players: { ' + ', '.join(player_names) + ' }')\n\n# @bot.command(name='test')\n# async def test(ctx, *args):\n# await ctx.send('Your message was: ' + ' '.join(args))\n\nbot.run(TOKEN)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"631808049","text":"# -*- coding:utf-8 -*-\n# @Desc : \n# @Author : Administrator\n# @Date : 2019-09-16 14:42\n\n# 迁木网: http://www.qianmu.org/ranking/1528.htm\n\nimport requests\nfrom lxml import etree\n\nurl = 'http://www.qianmu.org/ranking/1528.htm'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n}\n# 发送请求\nres = requests.get(url, headers=headers)\n\n# 使用lxml作为解析对象\nhtmlElement = etree.HTML(res.content.decode('utf-8'))\n\n# 使用xpath提取数据信息\ntr_list = htmlElement.xpath(\"//div[@class='rankItem'][2]//tr\")\n# tr_list = htmlElement.xpath(\"//div[@class='rankItem'][2]//tr[@class='xh-highlight']\")\n# print(len(tr_list), type(tr_list))\n# print(tr_list)\n\n# for tr in tr_list[1:10]: # 测试一部分数据信息\nfor tr in tr_list[1:]:\n ranking = tr.xpath(\"./td[1]/text()\")[0]\n if not tr.xpath(\"./td[2]/a\"):\n school_name_zh = tr.xpath(\"./td[2]/text()\")[0]\n else:\n school_name_zh = tr.xpath(\"./td[2]/a/text()\")[0]\n school_detail = tr.xpath(\"./td[2]/a/@href\")[0]\n school_name_en = tr.xpath(\"./td[3]/text()\")[0].strip()\n if not tr.xpath(\"./td[4]/text()\"):\n country = ''\n else:\n country = tr.xpath(\"./td[4]/text()\")[0]\n # print(type(ranking),type(school_name_zh),type(school_detail),type(school_name_en),type(country))\n print('排名: ' + ranking + ', 学校名字(中文): ' + school_name_zh + ', 学校详情: ' + school_detail + ', 学校名字(英文): ' + school_name_en + ', 国家地区: '+ country)\n\n\n\n\n\n","sub_path":"[14]Python-网络爬虫部分/25示例-02requests+lxml.py","file_name":"25示例-02requests+lxml.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"155993740","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom web_test_base import WebTestBase\n\nclass TestContentPage(WebTestBase):\n\n def test_navigate_to_page(self):\n driver = self.driver\n driver.get(self.WEBSITE_URL)\n \n # Attempts to navigate to content page\n navbar_nav = 
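The ranking scraper above pairs requests with lxml's XPath API. The XPath half can be exercised offline by parsing an inline HTML string with etree.HTML, which is handy when tuning the td/a expressions; the table below is a made-up miniature of the real page.

# Offline lxml XPath extraction on an inline HTML fragment.
from lxml import etree

html = """<table>
  <tr><td>1</td><td><a href="/mit">MIT</a></td></tr>
  <tr><td>2</td><td><a href="/stanford">Stanford</a></td></tr>
</table>"""

root = etree.HTML(html)
for tr in root.xpath("//tr"):
    rank = tr.xpath("./td[1]/text()")[0]
    name = tr.xpath("./td[2]/a/text()")[0]
    href = tr.xpath("./td[2]/a/@href")[0]
    print(rank, name, href)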
driver.find_element(By.CLASS_NAME, \"navbar-nav\")\n content_button = navbar_nav.find_element(By.XPATH, \"//a[@href='content.html']\")\n content_button.click()\n\n # Asserts if the wrapper of content page is found\n self.assertIn(\"contentWrapper\", driver.page_source)\n \n def test_image(self):\n driver = self.driver\n driver.get(self.get_url_to(\"content\"))\n\n # Gets undraw image in content page\n image = driver.find_element(By.ID, \"undrawCodeThinking\").get_attribute(\"src\")\n self.assertIn(\"undraw_code_thinking.svg\", image)\n\n def test_quiz_correct_answer(self):\n driver = self.driver\n driver.get(self.get_url_to(\"content\"))\n \n # Gets the correct answer for the quiz\n quiz = driver.find_element(By.ID, \"quiz\")\n correct_answer = quiz.get_attribute(\"data-answer\")\n \n # Finds and clicks on the correct answer\n quiz_answers = quiz.find_element(By.ID, \"quizAnswers\")\n correct_quiz_selection = quiz_answers.find_element(By.XPATH, \"//input[@value=\" + correct_answer + \"]\")\n correct_quiz_selection.click()\n\n # Finds and clicks on the submit button\n submit_button = quiz.find_element(By.XPATH, \"//input[@type='button']\")\n submit_button.click()\n\n # Switches to alert, asserts if specific string is found in it, and then closes the alert\n alert = driver.switch_to.alert\n alert_text = alert.text\n self.assertIn(\"rätt\", alert_text.lower())\n alert.accept()\n\n def test_quiz_wrong_answer(self):\n driver = self.driver\n driver.get(self.get_url_to(\"content\"))\n\n # Gets the correct answer for the quiz\n quiz = driver.find_element(By.ID, \"quiz\")\n correct_answer = quiz.get_attribute(\"data-answer\")\n\n # Sets a variable containing a different answer than the correct one\n quiz_answers = quiz.find_element(By.ID, \"quizAnswers\")\n quiz_answer_amount = len(quiz_answers.find_elements(By.XPATH, \"//input[@name='quizOption']\"))\n wrong_answer = str((int(correct_answer) + 1) % quiz_answer_amount)\n\n # Gets alert text of the correct answer\n correct_answer_text = quiz_answers.find_element(By.XPATH, \"//input[@value=\" + correct_answer + \"]\").find_element(By.XPATH, \"..\").text\n\n # Finds and clicks on the incorrect answer\n incorrect_quiz_selection = quiz_answers.find_element(By.XPATH, \"//input[@value=\" + wrong_answer + \"]\")\n incorrect_quiz_selection.click()\n\n # Finds and clicks on the submit button\n submit_button = quiz.find_element(By.XPATH, \"//input[@type='button']\")\n submit_button.click()\n\n # Switches to alert, asserts if specific string is found in it, and then closes the alert\n alert = driver.switch_to.alert\n alert_text = alert.text\n self.assertIn(correct_answer_text, alert_text)\n alert.accept()\n","sub_path":"tests/webtests/test_content_page.py","file_name":"test_content_page.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"457688394","text":"\"\"\"\nThis is a simple application for sentence embeddings: semantic search\n\nWe have a corpus with various sentences. 
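The Selenium tests above call find_element and switch_to.alert immediately, which can flake on slow page loads. A safer pattern is an explicit wait; this is a sketch only, assuming a live driver and the same #quiz element and submit button as in the tests.

# Explicit-wait variant of the quiz submission flow (sketch).
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def submit_quiz(driver, timeout=10):
    wait = WebDriverWait(driver, timeout)
    # Block until the quiz exists instead of failing instantly.
    quiz = wait.until(EC.presence_of_element_located((By.ID, "quiz")))
    quiz.find_element(By.XPATH, "//input[@type='button']").click()
    # Block until the result alert actually appears.
    wait.until(EC.alert_is_present())
    alert = driver.switch_to.alert
    text = alert.text
    alert.accept()
    return text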
Then, for a given query sentence,\nwe want to find the most similar sentence in this corpus.\n\nThis script outputs for various queries the top 5 most similar sentences in the corpus.\n\"\"\"\nfrom sentence_transformers import SentenceTransformer\nimport scipy.spatial\nimport json\nimport time\nimport torch\nembedder = SentenceTransformer('bert-base-nli-mean-tokens')\n\nkpcorpus = []\n#file = open('data/keyphrase/json/kp20k/kp20k_train.json','r')\nfiles_path = ['data/keyphrase/json/kp20k/kp20k_train.json',\n 'data/keyphrase/json/kp20k/kp20k_valid.json',\n 'data/keyphrase/json/kp20k/kp20k_test.json']\nfor file_path in files_path:\n file = open(file_path, 'r')\n for line in file.readlines():\n dic = json.loads(line)\n #print(dic)\n kpcorpus.append(dic['title'] + ' ' + dic['abstract'])\n #print(kpcorpus)\n\n# Corpus with example sentences\n# corpus = ['A man is eating a food.',\n# 'A man is eating a piece of bread.',\n# 'The girl is carrying a baby.',\n# 'A man is riding a horse.',\n# 'A woman is playing violin.',\n# 'Two men pushed carts through the woods.',\n# 'A man is riding a white horse on an enclosed ground.',\n# 'A monkey is playing drums.',\n# 'A cheetah is running behind its prey.'\n# ]\nnum_of_corpusexample = len(kpcorpus)\nprint(num_of_corpusexample)\ntime_a = time.time()\ncorpus_embeddings = embedder.encode(kpcorpus[:num_of_corpusexample])\nprint(\"corpus embeddings cost time: \", time.time() - time_a)\n\n# Query sentences:\n#queries = ['A man is eating pasta.', 'Someone in a gorilla costume is playing a set of drums.', 'A cheetah chases prey on across a field.']\n#query_embeddings = embedder.encode(queries)\nqueries = kpcorpus[:num_of_corpusexample]\nquery_embeddings = corpus_embeddings\ntime_a = time.time()\n# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity\nclosest_n = 6\nsimilar_docs_matrix = None\ni = -1\nfor query, query_embedding in zip(queries, query_embeddings):\n i += 1\n distances = scipy.spatial.distance.cdist([query_embedding], corpus_embeddings, \"cosine\")[0]\n\n results = zip(range(len(distances)), distances)\n results = sorted(results, key=lambda x: x[1])\n\n print(\"\\n\\n======================\\n\\n\")\n print(\"Query:\", query)\n print(\"\\nTop 5 most similar sentences in corpus:\")\n\n #for idx, distance in results[0:closest_n]:\n #print(kpcorpus[idx].strip(), \"(Score: %.4f)\" % (1-distance))\n idxs = []\n for idx, distance in results[0:closest_n]:\n idxs.append(idx)\n idxs = torch.LongTensor(idxs)\n one_similar = torch.zeros(1, num_of_corpusexample)\n one_similar[0,idxs] = 1\n print(\"one_hot\", one_similar)\n if i == 0:\n similar_docs_matrix = one_similar\n else:\n similar_docs_matrix = torch.cat([similar_docs_matrix, one_similar])\nprint(\"semantic search cost time: \", time.time() - time_a)\ntorch.save(similar_docs_matrix, './data/similar_docs_matrix')","sub_path":"simi_test.py","file_name":"simi_test.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"548064196","text":"import numpy as np\nimport tensorflow as tf\nimport gym\nimport logz\nimport scipy.signal\nimport scipy as sp\n\n\ndef normc_initializer(std=1.0):\n \"\"\"\n Initialize array with normalized columns\n \"\"\"\n\n def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n return tf.constant(out)\n\n return 
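The nearest-neighbour step of the semantic-search script above is independent of the BERT encoder: cosine distances via scipy's cdist, then a top-k by sorting. Isolated on random toy vectors it looks like this.

# Top-k cosine search on toy embeddings, mirroring the script's loop.
import numpy as np
import scipy.spatial

corpus_embeddings = np.random.rand(100, 8)
query = corpus_embeddings[0]

distances = scipy.spatial.distance.cdist([query], corpus_embeddings, "cosine")[0]
top_k = np.argsort(distances)[:6]
print(top_k)                 # index 0 comes first (distance 0 to itself)
print(1 - distances[top_k])  # cosine similarities, as the script prints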
_initializer\n\n\ndef dense(x, size, name, weight_init=None,regularizer=None, scope = None):\n \"\"\"\n Dense (fully connected) layer\n \"\"\"\n with tf.variable_scope(scope):\n w = tf.get_variable(name + \"/w\", [x.get_shape()[1], size], initializer=weight_init, regularizer=regularizer)\n b = tf.get_variable(name + \"/b\", [size], initializer=tf.zeros_initializer())\n\n return tf.matmul(x, w) + b\n\n\ndef fancy_slice_2d(X, inds0, inds1):\n \"\"\"\n Like numpy's X[inds0, inds1]\n \"\"\"\n inds0 = tf.cast(inds0, tf.int64)\n inds1 = tf.cast(inds1, tf.int64)\n shape = tf.cast(tf.shape(X), tf.int64)\n ncols = shape[1]\n Xflat = tf.reshape(X, [-1])\n return tf.gather(Xflat, inds0 * ncols + inds1)\n\n\ndef gaussian_log_prob(mean, logstdev, ac_taken):\n dist = tf.contrib.distributions.MultivariateNormalDiag(loc=mean, scale_diag=tf.exp(logstdev))\n logprob = dist.log_prob(ac_taken)\n logprob = tf.Print(logprob, [logprob], message=\"This is LogProb: \")\n\n return logprob\n\n\ndef discount(x, gamma):\n \"\"\"\n Compute discounted sum of future values\n out[i] = in[i] + gamma * in[i+1] + gamma^2 * in[i+2] + ...\n \"\"\"\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n\ndef explained_variance_1d(ypred, y):\n \"\"\"\n Var[ypred - y] / var[y].\n https://www.quora.com/What-is-the-meaning-proportion-of-variance-explained-in-linear-regression\n \"\"\"\n assert y.ndim == 1 and ypred.ndim == 1\n vary = np.var(y)\n return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary\n\n\ndef categorical_sample_logits(logits):\n \"\"\"\n Samples (symbolically) from categorical distribution, where logits is a NxK\n matrix specifying N categorical distributions with K categories\n\n specifically, exp(logits) / sum( exp(logits), axis=1 ) is the\n probabilities of the different classes\n\n Cleverly uses gumbell trick, based on\n https://github.com/tensorflow/tensorflow/issues/456\n \"\"\"\n U = tf.random_uniform(tf.shape(logits))\n return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1)\n\n\ndef pathlength(path):\n return len(path[\"reward\"])\n\n\nclass LinearValueFunction(object):\n coef = None\n\n def fit(self, X, y):\n Xp = self.preproc(X)\n A = Xp.T.dot(Xp)\n nfeats = Xp.shape[1]\n A[np.arange(nfeats), np.arange(nfeats)] += 1e-3 # a little ridge regression\n b = Xp.T.dot(y)\n self.coef = np.linalg.solve(A, b)\n\n def predict(self, X):\n if self.coef is None:\n return np.zeros(X.shape[0])\n else:\n return self.preproc(X).dot(self.coef)\n\n def preproc(self, X):\n return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X) / 2.0], axis=1)\n\n\nclass CriticNetwork(object):\n #\t'''\n def __init__(self, ob_dim=10, ac_dim=10, params=[128, 128], scope = \"Default\"):\n self.scope = scope\n self.ob_dim = ob_dim\n self.ac_dim = ac_dim\n self.minibatch_size = 64\n with tf.variable_scope(scope):\n self.alpha = 1e-2\n\n #self.regloss = tf.zeros([1])\n #self.sy_ob_no_p = tf.placeholder(shape=[None, ob_dim], name=\"ob\", dtype=tf.float32)\n #self.sy_ac_n_p = tf.placeholder(shape=[None, ac_dim], name=\"ac\", dtype=tf.float32)\n with tf.variable_scope(scope + \"special\"):\n self.sy_ob_no = tf.Variable(np.ones((self.minibatch_size, ob_dim)), name=\"ob\", dtype=tf.float32)\n self.sy_ac_n = tf.Variable(np.ones((self.minibatch_size, ac_dim)), name=\"ac\", dtype=tf.float32)\n #self.sy_ac_v = tf.variable(np.ones((1, ac_dim)), name=\"ac\", dtype=tf.float32)\n\n # self.sy_ob_ac_n = tf.concat([self.sy_ob_no, self.sy_ac_n], axis = 1)\n with tf.variable_scope(scope + \"trainable\"):\n scope = scope + 
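The normc_initializer defined above guarantees that every column of the returned weight matrix has L2 norm equal to std. That property is easy to confirm in plain numpy, separate from TensorFlow.

# What normc_initializer produces, checked column by column.
import numpy as np

std = 1.0
out = np.random.randn(4, 3).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
print(np.linalg.norm(out, axis=0))  # ~[1. 1. 1.]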
\"trainable\"\n\n self.sy_y = tf.placeholder(shape=[None, 1], name=\"qtrue\", dtype=tf.float32)\n sy_h1 = tf.nn.elu(dense(self.sy_ob_no, params[0], \"hs\", weight_init=tf.contrib.layers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = self.alpha),scope = scope))\n self.sy_h1_ac_n = tf.concat([sy_h1, self.sy_ac_n], axis=1)\n sy_h = tf.nn.elu(dense(self.sy_h1_ac_n, params[0], \"h1\", weight_init=tf.contrib.layers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = self.alpha),scope = scope))\n\n # sy_h = tf.nn.elu(dense(self.sy_ob_ac_n, params[0], \"h1\", weight_init=tf.contrib.layers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = self.alpha),scope = scope))\n # sy_h = self.sy_ob_ac_n\n for i, l in enumerate(params[1:]):\n sy_h = tf.nn.elu(dense(sy_h, l, \"h\" + str(i + 2), weight_init=tf.contrib.layers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = self.alpha), scope = scope)) # hidden layer\n\n # self.sy_value = dense(tf.get_variable(\"sy_h\" + len(params)),1, \"op\", weight_init=normc_initializer(1.0))\n self.sy_qvalue = dense(sy_h, 1, \"qvalue\", weight_init=normc_initializer(), regularizer=tf.contrib.layers.l2_regularizer(scale = self.alpha), scope = scope)\n self.loss = tf.reduce_mean(tf.square(self.sy_qvalue - self.sy_y)) + tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES,scope))\n self.sy_stepsize = tf.placeholder(shape=[],\n dtype=tf.float32) # Symbolic, in case you want to change the stepsize during optimization. (We're not doing that currently)\n # self.optimizer = tf.train.AdamOptimizer(self.sy_stepsize).minimize(self.loss)\n optimizer = tf.train.AdamOptimizer(self.sy_stepsize)\n # optimizer = tf.train.AdamOptimizer(learning_rate=self.sy_stepsize)\n self.gvs = optimizer.compute_gradients(self.loss)\n capped_gvs = [(tf.clip_by_value(grad, -100., 100.), var) for grad, var in self.gvs if grad != None]\n self.train_op = optimizer.apply_gradients(capped_gvs)\n self.tau = 0.7\n\n def AssignOp(self, obs, action):\n return [tf.assign(self.sy_ob_no, obs), tf.assign(self.sy_ac_n, action)]\n\n\n\n def fit(self, X, y, nepoch=2, init_step=1e-3, minibatch_size=64):\n # with self.graph.as_default():\n y = np.reshape(y, (len(y), 1))\n index = np.arange(X.shape[0])\n #ploss = 10000.0\n step = init_step\n tau = nepoch / 10.0\n # print (\"start\")\n for e in range(nepoch):\n curr_index = np.random.choice(index, minibatch_size, replace=False)\n x_batch = X[curr_index, :]\n y_batch = y[curr_index, :]\n # loss, gvs = self.sess.run([self.loss, self.gvs], feed_dict={self.sy_ob_no: x_batch[:,:self.ob_dim],self.sy_ac_n: x_batch[:,(self.ob_dim):], self.sy_y: y})\n #loss = self.sess.run(self.loss, feed_dict={self.sy_ob_no: x_batch[:,:self.ob_dim],self.sy_ac_n: x_batch[:,(self.ob_dim):], self.sy_y: y_batch, self.sy_stepsize: step})\n # print (loss)\n step = init_step * (tau / max(e, tau))\n # print loss, step, np.sum([np.linalg.norm(grad) for grad, var in gvs])\n # if((loss[0] - ploss) > 0.1 and (e > 500)):\n #\tstep = step/(1.1)\n # ploss = loss\n assign_op = self.AssignOp(x_batch[:,:self.ob_dim].reshape(-1, self.ob_dim),x_batch[:,(self.ob_dim):].reshape(-1, self.ac_dim))\n self.sess.run(assign_op)\n _, loss = self.sess.run([self.train_op,self.loss], feed_dict = {self.sy_stepsize: step, self.sy_y: y_batch.reshape(-1,1)})\n #print (loss)\n\n# for i in range(minibatch_size):\n# assign_op = self.AssignOp(x_batch[i,:self.ob_dim].reshape(-1, self.ob_dim),x_batch[i,(self.ob_dim):].reshape(-1, 
self.ac_dim))\n# self.sess.run(assign_op)\n# _, loss = self.sess.run([self.train_op,self.loss], feed_dict = {self.sy_stepsize: step, self.sy_y: y_batch[i].reshape(1,1)})\n\n #print (loss)\n #self.sess.run([self.train_op], feed_dict={self.sy_ob_no: x_batch[:,:self.ob_dim],self.sy_ac_n: x_batch[:,(self.ob_dim):], self.sy_y: y_batch, self.sy_stepsize: step})\n\n def predict(self, X):\n value = []\n\n #value = self.sess.run(self.sy_qvalue, feed_dict={self.sy_ob_no: X[:,:self.ob_dim],self.sy_ac_n: X[:,(self.ob_dim):]})\n\n #for i in range(X.shape[0] % self.minibatch_size):\n assign_op = self.AssignOp(X[:,:self.ob_dim].reshape(-1, self.ob_dim),X[:,(self.ob_dim):].reshape(-1, self.ac_dim))\n\n# assign_op = self.AssignOp(X[i*self.minibatch_size: 2*i*self.minibatch_size,:self.ob_dim].reshape(-1, self.ob_dim),X[i*self.minibatch_size: 2*i*self.minibatch_size,(self.ob_dim):].reshape(-1, self.ac_dim))\n self.sess.run(assign_op)\n y = self.sess.run(self.sy_qvalue)\n value.append(y.reshape(-1))\n\n return np.asarray(value)\n\n def registerSession(self, sess):\n self.sess = sess\n\n def BN(self, layer, size):\n batch_mean2, batch_var2 = tf.nn.moments(layer, [0])\n scale2 = tf.Variable(tf.ones([size]))\n beta2 = tf.Variable(tf.zeros([size]))\n return tf.nn.batch_normalization(layer, batch_mean2, batch_var2, beta2, scale2, 1e-8)\n\n\n def UpdateParams(self, values, tau):\n return [tf.assign(var, tau*values[i] + (1 - tau)*self.sess.run(var)) for i, var in enumerate(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope))]\n\n def GetParams(self):\n values = []\n for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope):\n values.append(self.sess.run(var))\n return values\n\n\n def GetGradientA(self):\n return tf.gradients(-self.sy_qvalue, self.sy_ac_n)\n '''\n def dense(self, x, size, name, weight_init=None):\n \"\"\"\n Dense (fully connected) layer\n\n \"\"\"\n # with self.graph.as_default():\n w = tf.get_variable(name + \"/w\", [x.get_shape()[1], size], initializer=weight_init)\n b = tf.get_variable(name + \"/b\", [size], initializer=tf.zeros_initializer())\n self.regloss += tf.norm(w)\n return tf.matmul(x, w) + b\n\n def lrelu(self, x, leak=0.2):\n f1 = 0.5 * (1 + leak)\n f2 = 0.5 * (1 - leak)\n return f1 * x + f2 * abs(x)\n\n\t'''\n\n # sy_h2 = lrelu(dense(sy_h1, 30, \"h2\", weight_init=normc_initializer(1.0)))\n # sy_h3 = lrelu(dense(sy_h2, 20, \"h3\", weight_init=normc_initializer(1.0)))\n # sy_mean = dense(sy_ob_no, ac_dim, \"mean\", weight_init=normc_initializer(1.0))\n #\t\tfor i,l in enumerate(layers):\n #\t\t\tsy_h1 = lrelu(dense()\n # YOUR CODE HERE\n\ndef BN(layer, size):\n batch_mean2, batch_var2 = tf.nn.moments(layer, [0])\n scale2 = tf.Variable(tf.ones([size]))\n beta2 = tf.Variable(tf.zeros([size]))\n return tf.nn.batch_normalization(layer, batch_mean2, batch_var2, beta2, scale2, 1e-8)\n\n\n\ndef lrelu(x, leak=0.2):\n f1 = 0.5 * (1 + leak)\n f2 = 0.5 * (1 - leak)\n return f1 * x + f2 * abs(x)\n\n\ndef gaussian_sample_action(mean, logstdev, a_lb, a_ub):\n #\tmean = tf.Print(mean,[mean],\"mean is:\")\n\n U = mean + tf.exp(logstdev) * tf.random_normal(tf.shape(mean), mean=0.0, stddev=1.0)\n # U = tf.clip_by_value(U,a_lb,a_ub)\n #\tU = tf.Print(U,[U],\"U is:\")\n # lb_index = U < a_lb\n # ub_index = U > a_ub\n #\tprint lb_index.shape, ub_index.shape\n\n # ub = lb = tf.ones(tf.shape(U))\n # lb = a_lb*lb\n # ub = a_ub*ub\n # lb = tf.reshape(np.ones()a_lb,tf.shape(U))\n # ub = tf.reshape(a_ub,tf.shape(U))\n # lb = a_lb\n # ub = a_ub\n # U = tf.where(lb_index,U,lb)\n # U = 
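The minibatch selection inside CriticNetwork.fit() above reduces to sampling row indices and slicing the observation and action columns apart. The same step in isolation, on random data with toy dimensions:

# Minibatch index sampling and column slicing, as in fit().
import numpy as np

ob_dim, ac_dim, minibatch_size = 3, 1, 4
X = np.random.rand(10, ob_dim + ac_dim)   # [observations | actions]
y = np.random.rand(10, 1)

idx = np.random.choice(np.arange(X.shape[0]), minibatch_size, replace=False)
obs = X[idx, :ob_dim]
acts = X[idx, ob_dim:]
targets = y[idx]
print(obs.shape, acts.shape, targets.shape)  # (4, 3) (4, 1) (4, 1)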
tf.where(ub_index,U,ub)\n # U = tf.where(lb_index, tf.cast(lb,tf.float32), U)\n # U = tf.where(ub_index, tf.cast(ub,tf.float32), U)\n return U\n\n\ndef getGaussianKL(mean1, mean2, logstd1, logstd2, n):\n # d = shape[0]\n\n d = 1\n # std = tf.exp(logstd)\n std1 = tf.exp(logstd1)\n std2 = tf.exp(logstd2)\n\n #\ttr = (std1**2)/(std2**2)\n\n mean1 = tf.Print(mean1, [mean1, std1], message=\"This is Mean1: \")\n mean2 = tf.Print(mean2, [mean2, std2], message=\"This is Mean2: \")\n\n delMean = tf.cast(mean2 - mean1, tf.float32)\n delMean = tf.Print(delMean, [delMean], message=\"This is delMean: \")\n p = tf.log(std2 / (std1 + 1e-8)) + ((std1 ** 2) + (delMean) ** 2) / (2 * (std2 ** 2) + 1e-8)\n # p = tf.Print(p, [p, (std1**2 + (delMean)**2)/(2*(std2**2) + 1e-8), tf.log(std2/(std1 + 1e-8)) ], message = \"This is p\")\n return tf.reduce_mean(p - 0.5)\n\n\n#\treturn 0.5*tf.reduce_mean(tf.log((std2**2)/(std1**2)) + tr + tf.multiply(delMean,(delMean/(std2**2))) - d)\n\n\ndef getGaussianDiffEntropy(mean, logstd):\n std = tf.reduce_prod(tf.exp(logstd))\n diffEnt = 0.5 * tf.log(2 * np.pi * np.exp(1) * (std) ** 2)\n return diffEnt\n\ndef SampleFromReplayBuffer(replay_buffer, N=100, maxLen=1e+6):\n\n if (len(replay_buffer) > maxLen):\n del replay_buffer[0]\n length = len(replay_buffer)\n index = np.random.choice(length, length, replace = 'True')\n if (length >= N):\n index = np.random.choice(length, N, replace = 'True')\n\n ob_no = np.array([replay_buffer[i][0] for i in index])\n ac_n = np.array([replay_buffer[i][1] for i in index])\n r_n = np.array([replay_buffer[i][2] for i in index])\n ob_next = np.array([replay_buffer[i][3] for i in index])\n t_batch = np.array([replay_buffer[i][4] for i in index])\n\n return [ob_no, ac_n, r_n, ob_next, t_batch]\n #return np.array([np.concatenate([replay_buffer[i][0], replay_buffer[i][1], replay_buffer[i][2], replay_buffer[i][3]]) for i in index])\n\n\n\ndef main_pendulum(logdir, seed, n_iter, gamma, min_timesteps_per_batch, initial_stepsize, desired_kl, vf_type,\n vf_params, tau = 0.001, animate=False):\n tf.set_random_seed(seed)\n np.random.seed(seed)\n env = gym.make(\"Pendulum-v0\")\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.shape[0]\n logz.configure_output_dir(logdir)\n critic = CriticNetwork(ob_dim, ac_dim, scope = \"CriticNetwork\")\n critic_target = CriticNetwork(ob_dim, ac_dim, scope = \"ACriticNetworkTarget\")\n\n replay_buffer = []\n\n\n print (\"bounds calculation\")\n a_lb = env.action_space.low\n a_ub = env.action_space.high\n a_bnds = a_ub - a_lb\n\n print (\"Symbolic init\")\n sy_ob_no = tf.placeholder(shape=[None, ob_dim], name=\"ob\", dtype=tf.float32) # batch of observations\n sy_ob_next = tf.placeholder(shape=[None, ob_dim], name=\"ob_next\", dtype=tf.float32) # batch of observations\n\n sy_ac_n = tf.placeholder(shape=[None, ac_dim], name=\"ac\",\n dtype=tf.float32) # batch of actions taken by the policy, used for policy gradient computation\n sy_adv_n = tf.placeholder(shape=[None], name=\"adv\", dtype=tf.float32) # advantage function estimate\n sy_r_n = tf.placeholder(shape = [None, 1], name = \"rew\", dtype = tf.float32)\n\n with tf.variable_scope(\"ActorNetwork\"):\n actor_alpha = 1e-2\n sy_h1 = lrelu(BN(dense(sy_ob_no, 128, \"h1\", weight_init=tf.contrib.layers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = actor_alpha), scope = \"ActorNetwork\"), 128)) # hidden layer\n sy_h2 = lrelu(BN(dense(sy_h1, 128, \"h2\", 
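The closed form that getGaussianKL above computes per dimension is the textbook KL divergence between two 1-D Gaussians, KL(p||q) = log(s2/s1) + (s1^2 + (m1-m2)^2) / (2*s2^2) - 1/2. Evaluated once in numpy:

# One-dimensional Gaussian KL, matching getGaussianKL's per-dim term.
import numpy as np

m1, s1 = 0.0, 1.0
m2, s2 = 1.0, 2.0
kl = np.log(s2 / s1) + (s1**2 + (m1 - m2)**2) / (2 * s2**2) - 0.5
print(kl)  # 0.4431471805599453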
weight_init=tf.contrib.layers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = actor_alpha), scope = \"ActorNetwork\"),128))\n sy_h3 = lrelu(BN(dense(sy_h2, 128, \"h3\", weight_init=tf.contrib.layers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = actor_alpha),scope = \"ActorNetwork\"), 128))\n\n# sy_mean = tf.tanh(dense(sy_h3, ac_dim, \"mean\", weight_init=tf.contrib.layers.xavier_initializer(), scope = \"ActorNetwork\"))*2\n sy_mean = dense(sy_h3, ac_dim, \"mean\", weight_init=normc_initializer(),regularizer=tf.contrib.layers.l2_regularizer(scale = actor_alpha), scope = \"ActorNetwork\")\n\n\n with tf.variable_scope(\"NActorNetworkTarget\"):\n sy_h1_t = lrelu(BN(dense(sy_ob_no, 128, \"h1\", weight_init=tf.contrib.layers.xavier_initializer(),scope = \"NActorNetworkTarget\"),128)) # hidden layer\n sy_h2_t = lrelu(BN(dense(sy_h1_t, 128, \"h2\", weight_init=tf.contrib.layers.xavier_initializer(),scope =\"NActorNetworkTarget\"),128))\n sy_h3_t = lrelu(BN(dense(sy_h2_t, 128, \"h3\", weight_init=tf.contrib.layers.xavier_initializer(),scope =\"NActorNetworkTarget\"), 128))\n# sy_mean_t = tf.tanh(dense(sy_h3_t, ac_dim, \"mean\", weight_init=tf.contrib.layers.xavier_initializer(), scope = \"NActorNetworkTarget\"))*2\n sy_mean_t = dense(sy_h3_t, ac_dim, \"mean\", weight_init=normc_initializer(), scope = \"NActorNetworkTarget\")\n\n\n #sy_logstd = tf.get_variable(\"logstdev\", sy_ob_ac_p[ac_dim], initializer=tf.zeros_initializer()) # Variance\n #sy_logstd = tf.constant(np.ones(ac_dim)*np.exp(2.0), dtype = tf.float32)\n sy_logstd = tf.placeholder(dtype = tf.float32)\n sigma = 2.0\n sigma_decay = 0.97\n sy_sampled_ac = gaussian_sample_action(sy_mean, sy_logstd, a_lb, a_ub)\n\n #sy_logprob_n = gaussian_log_prob(sy_mean, logstd, sy_ac_n)\n sy_n = tf.shape(sy_ob_no)[0]\n #sy_N = tf.constant(100, dtype = tf.int32)\n #sy_surr = -tf.reduce_mean(tf.multiply(sy_adv_n,\n # sy_logprob_n))\n print (tf.slice(sy_mean, [0,0], [0, 1]))\n vars_actor = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope = \"ActorNetwork\")\n vars_actor_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope = \"NActorNetworkTarget\")\n\n vars_critic = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope = \"CriticNetwork\")\n vars_critic_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope = \"NCriticNetworkTarget\")\n\n\n #print sy_critic_g, tf.gradients(sy_mean, vars)\n #sy_actor_g = [tf.gradients(tf.slice(sy_mean, [0,0], [1, (i + 1)]), vars_actor) for i in range(ac_dim)]\n #sy_ob = tf.placeholder(shape=[None, ob_dim], dtype = tf.float32)\n #assign_op = critic.AssignOp(sy_ob_no, sy_mean)\n sy_critic_g = critic.GetGradientA()\n sy_actor_g = tf.gradients(sy_mean, vars_actor, sy_critic_g)\n #critic_g = tf.Placeholder(dtype = tf.float32)\n #sy_actor_loss = tf.reduce_mean(tf.matmul(sy_mean,critic_g) + tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES,scope = \"ActorNetwork\"))\n #sy_actor_loss = -tf.reduce_mean(critic.sy_qvalue) + tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES,scope = \"ActorNetwork\"))\n\n\n\n #sy_grad = [tf.add_n([tf.squeeze(tf.slice(sy_critic_g[i], [0, 0], [1 , i+1]))*sy_actor_g[i][j] for i in range(ac_dim)]) for j in range(len(sy_actor_g[0]))]\n\n\n\n sy_g = [tf.placeholder(dtype = tf.float32) for _ in range(len(vars_actor))]\n grad_var = zip(sy_g, vars_actor)\n\n optimizer = tf.train.AdamOptimizer(1e-3)\n #update_op = tf.train.AdamOptimizer(5e-6).minimize(sy_actor_loss, var_list = vars_actor)\n\n 
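The deterministic policy gradient above is assembled by feeding the critic's action gradient into tf.gradients(sy_mean, vars_actor, sy_critic_g), i.e. the chain rule dQ/dtheta = (dmu/dtheta)^T dQ/da. A toy scalar check with a linear policy mu(s) = theta*s and a made-up critic Q(s, a) = -a^2:

# Chain-rule check behind sy_actor_g, on scalars.
theta = 0.5
s = 2.0
a = theta * s            # mu(s) = 1.0
dQ_da = -2.0 * a         # critic gradient w.r.t. the action: -2.0
dmu_dtheta = s           # policy gradient w.r.t. its parameter
dQ_dtheta = dQ_da * dmu_dtheta
print(dQ_dtheta)         # -4.0, matching d/dtheta of -(theta*s)**2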
#update_op = optimizer.\n update_op = optimizer.apply_gradients(grad_var)\n\n #tau = 0.8\n sess = tf.Session()\n sy_tau = tf.placeholder(dtype = tf.float32)\n actor_update = [tf.assign(var2, sy_tau * var1 + (1 - sy_tau) * var2) for var1, var2 in zip(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \"ActorNetwork\"), tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \"NActorNetworkTarget\"))]\n\n \n print (vars_critic)\n\n\n\n '''\n sy_oldmean = tf.placeholder(shape=[None, ac_dim], name='oldmean', dtype=tf.float32)\n sy_oldlogstd = tf.placeholder(shape=[None, ac_dim], name='oldlogstdev', dtype=tf.float32)\n sy_kl = getGaussianKL(sy_oldmean, sy_mean, sy_oldlogstd, sy_logstd, sy_n)\n sy_ent = getGaussianDiffEntropy(sy_mean, sy_logstd)\n '''\n\n\n sess.__enter__() # equivalent to `with sess:`\n tf.global_variables_initializer().run() # pylint: disable=E1101\n critic.registerSession(sess)\n critic_target.registerSession(sess)\n\n\n\n total_timesteps = 0\n stepsize = initial_stepsize\n params = critic.GetParams()\n sess.run(critic_target.UpdateParams(params, 1.0))\n sess.run(actor_update, feed_dict={sy_tau:1.0})\n\n n_update = 5\n import time\n N = 64\n for i in range(n_iter):\n print(\"********** Iteration %i ************\" % i)\n '''\n print \"critic_params\", critic.GetParams()\n print \"critic_target\", critic_target.GetParams()\n print \"actor_params\", sess.run(vars_actor)\n print \"actor_target_params\", sess.run(vars_actor_target)\n '''\n timesteps_this_batch = 0\n paths = []\n\n while True:\n sigma = sigma * sigma_decay\n if (sigma < 1e-3):\n sigma = 1e-3\n ob = env.reset()\n print (\"reseted\", sigma)\n terminated = False\n obs, acs, rewards = [], [], []\n animate_this_episode = (len(paths) == 0 and (i % 10 == 0) and animate)\n while True:\n \n if animate_this_episode:\n env.render()\n ob = ob.reshape((1, ob_dim))\n\n obs.append(ob)\n # ob = ob.reshape((1,ob_dim))\n ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no: np.reshape(ob, (1, ob_dim)), sy_logstd: np.log(sigma)})\n ac = np.clip(ac, -2.0, 2.0)\n #print (ac)\n acs.append(ac)\n\n ob_next, rew, done, _ = env.step(ac)\n rewards.append(rew)\n if(done):\n terminated = True\n replay_buffer.append([ob, ac, rew, ob_next, terminated])\n ob = ob_next\n\n if (len(replay_buffer) < N):\n if (done):\n break\n\n continue\n\n ob_no, ac_n, r_n, ob_next_no, t_batch = SampleFromReplayBuffer(replay_buffer, N)\n\n ob_no = np.reshape(ob_no, (-1, ob_dim))\n ac_n = np.reshape(ac_n, (-1, ac_dim))\n r_n = np.reshape(r_n, (-1, 1))\n ob_next_no = np.reshape(ob_next_no, (-1, ob_dim))\n t_batch = np.reshape(t_batch, (-1, 1))\n ##All Fitings\n ob_ac_n = np.concatenate([ob_no, ac_n], axis=1)\n ac_next = sess.run(sy_mean_t, feed_dict={sy_ob_no: ob_next_no})\n ob_ac_next = np.concatenate([ob_next_no, ac_next], axis=1)\n q_next = critic_target.predict(ob_ac_next)\n y = r_n + gamma * q_next.reshape((-1, 1))\n critic.fit(ob_ac_n, y)\n\n print (\"reward_mean\", np.mean(r_n), \"q_next_mean\", np.mean(q_next))\n\n ##All grad calculations\n actor_mean = sess.run(sy_mean, feed_dict={sy_ob_no: ob_no})\n sess.run(critic.AssignOp(ob_no, actor_mean))\n actor_g = sess.run(sy_actor_g, feed_dict={sy_ob_no: ob_no})\n feed_dict = {i: d for i, d in zip(sy_g, actor_g)}\n for _ in range(1):\n # actor_mean = sess.run(sy_mean, feed_dict={sy_ob_no: ob_no})\n # sess.run(critic.AssignOp(ob_no, actor_mean))\n # print (np.mean(sess.run(-critic.sy_qvalue)), (np.mean(actor_mean)))\n sess.run(update_op, feed_dict=feed_dict)\n\n params = critic.GetParams()\n 
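The target-network updates above, tf.assign(var2, sy_tau*var1 + (1 - sy_tau)*var2), are Polyak averaging: with a small tau the target weights trail the online weights. Stripped of TensorFlow:

# Polyak (soft) target update in numpy.
import numpy as np

tau = 0.001
online = np.array([1.0, 2.0, 3.0])
target = np.zeros(3)

for _ in range(5):
    target = tau * online + (1 - tau) * target

print(target)  # creeps slowly toward `online`, ~0.5% of the way after 5 steps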
sess.run(critic_target.UpdateParams(params, tau))\n sess.run(actor_update, feed_dict={sy_tau: tau})\n\n #sess.run(update_op, feed_dict=fd)\n \n \n if(done):\n break\n # print \"path\", len(obs), np.array(obs)\n path = {\"observation\": np.squeeze(np.array(obs)), \"terminated\": terminated,\n \"reward\": np.squeeze(np.array(rewards)), \"action\": np.array(acs)}\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n total_timesteps += timesteps_this_batch\n\n break\n # print timesteps_this_batch\n if timesteps_this_batch > min_timesteps_per_batch:\n break\n # Estimate advantage function\n #'''\n\n\n # Build arrays for policy update\n # print len(advs), advs[0]\n\n '''\n index =\n ob_no = #np.concatenate([path[\"observation\"] for path in paths])\n\n ac_n = np.concatenate([path[\"action\"] for path in paths])\n adv_n = np.concatenate(advs)\n r_n -\n # print adv_n.shape\n\n standardized_adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)\n vtarg_n = np.concatenate(vtargs)\n vpred_n = np.concatenate(vpreds)\n # print \"Here2\"\n # print ob_no.shape\n ob_no = np.squeeze(ob_no)\n ac_n = np.reshape(ac_n, (ac_n.shape[0], ac_dim))\n vf.fit(ob_no, vtarg_n)\n # sh=sy_mean.get_shape().as_list()\n # print sh\n # oldoldlogstd = 1.01\n\n\n # sy_surr = tf.Print(sy_surr,[sy_surr],\"Loss =\")\n print \"Loss = \", sess.run(sy_surr, feed_dict={sy_ob_no: ob_no, sy_ac_n: ac_n, sy_adv_n: standardized_adv_n,\n sy_stepsize: stepsize})\n # Policy update\n oldmean, oldlogstd, oldlogprob = sess.run([sy_mean, sy_logstd, sy_logprob_n],\n feed_dict={sy_ob_no: ob_no, sy_ac_n: ac_n,\n sy_adv_n: standardized_adv_n, sy_stepsize: stepsize})\n sess.run([update_op],\n feed_dict={sy_ob_no: ob_no, sy_ac_n: ac_n, sy_adv_n: standardized_adv_n, sy_stepsize: stepsize})\n\n # print \"ac = \", ac_n, \"oldmean = \", oldmean,\"oldlogstd = \", oldlogstd\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n std1 = np.exp(oldoldlogstd)\n std2 = np.exp(oldlogstd)\n tr = (std1**2)/(std2**2)\n delMean = oldoldmean - oldmean\n print 0.5*np.mean(np.log((std2**2)/(std1**2)) + tr + np.multiply(delMean,delMean/(std2**2)) - 1)\n oldoldmean = oldmean\n oldoldlogstd = oldlogstd\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n kl, ent = sess.run([sy_kl, sy_ent], feed_dict={sy_ob_no: ob_no, sy_oldmean: oldmean,\n sy_oldlogstd: np.reshape(oldlogstd, [1, ac_dim])})\n # oldoldmean = oldmean\n # if (n_iter > 150):\n # \tdesired_kl = 1e-5\n\n print \"Done\"\n if kl > desired_kl * 2:\n stepsize /= 1.5\n print('stepsize -> %s' % stepsize)\n elif kl < desired_kl / 2:\n stepsize *= 1.5\n print('stepsize -> %s' % stepsize)\n else:\n print('stepsize OK')\n\n print \"Diag\"\n \n '''\n\n\n logz.log_tabular(\"EpRewMean\", np.mean([path[\"reward\"].sum() for path in paths]))\n logz.log_tabular(\"EpLenMean\", np.mean([pathlength(path) for path in paths]))\n # logz.log_tabular(\"KLOldNew\", kl)\n # logz.log_tabular(\"Entropy\", ent)\n # logz.log_tabular(\"EVBefore\", explained_variance_1d(vpred_n, vtarg_n))\n # logz.log_tabular(\"EVAfter\", explained_variance_1d(vf.predict(ob_no), vtarg_n))\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n # If you're overfitting, EVAfter will be way larger than EVBefore.\n # Note that we fit value function AFTER using it to compute the advantage function to avoid introducing bias\n logz.dump_tabular()\n\n\ndef main_pendulum1(d):\n return main_pendulum(**d)\n\n\nif __name__ == \"__main__\":\n if 0:\n main_cartpole(logdir=None) # when you want to start collecting results, set the logdir\n\n if 1:\n general_params = 
dict(gamma=0.97, animate=False, min_timesteps_per_batch=2500, n_iter=1000,\n initial_stepsize=1e-7)\n params = [\n\n dict(logdir=None, seed=0, desired_kl=2e-6, vf_type='linear', vf_params={}, **general_params),\n\n # dict(logdir= '/tmp/ref11/linearvf-kl2e-3-seed0' , seed=0, desired_kl=2e-3, vf_type='linear', vf_params={}, **general_params),\n # dict(logdir= '/tmp/ref0/nnvf-kl2e-3-seed0' , seed=0, desired_kl=2e-3, vf_type='nn', vf_params=dict(n_epochs=10, stepsize=1e-3), **general_params),\n # dict(logdir= '/tmp/ref0/linearvf-kl2e-3-seed1' , seed=1, desired_kl=2e-3, vf_type='linear', vf_params={}, **general_params),\n # dict(logdir= '/tmp/ref0/nnvf-kl2e-3-seed1' , seed=1, desired_kl=2e-3, vf_type='nn', vf_params=dict(n_epochs=10, stepsize=1e-3), **general_params),\n # dict(logdir= '/tmp/ref0/linearvf-kl2e-3-seed2' , seed=2, desired_kl=2e-3, vf_type='linear', vf_params={}, **general_params),\n # dict(logdir= '/tmp/ref0/nnvf-kl2e-3-seed2' , seed=2, desired_kl=2e-3, vf_type='nn', vf_params=dict(n_epochs=10, stepsize=1e-3), **general_params),\n ]\n p = dict(logdir=None, seed=0, desired_kl=2e-6, vf_type='linear', vf_params={}, **general_params),\n\n main_pendulum(logdir=None, seed=0, desired_kl=2e-3, vf_type='nn', vf_params={}, gamma=0.99, animate=False,\n min_timesteps_per_batch=1000, n_iter=1000, initial_stepsize=1e-6)\n# import multiprocessing\n\n# p = multiprocessing.Pool()\n# p.map(main_pendulum1, params)\n","sub_path":"hw4/DDPGv2.py","file_name":"DDPGv2.py","file_ext":"py","file_size_in_byte":29732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"26916816","text":"import webapp2\nimport jinja2\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\nimport os\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True\n)\n\nclass AssignValue(webapp2.RequestHandler):\n def post(self):\n taskboardName = self.request.get('taskBoarddata')\n email = self.request.get('email')\n unique = taskboardName+\"\"+email\n taskTitleName = self.request.get('Assign_Email')\n select_task_owner = self.request.get('select_task_owner')\n taskdata = ndb.Key('taskdata', unique).get()\n for i in range(0,len(taskdata.Title)):\n if taskdata.Title[i] == taskTitleName:\n taskdata.Task_assigned[i] = select_task_owner\n taskdata.put()\n self.redirect('/invite?taskBoarddata='+taskboardName+'&email='+email)\n\napp = webapp2.WSGIApplication([\n ('/AssignValue',AssignValue)\n], debug=True)\n","sub_path":"AssignValue.py","file_name":"AssignValue.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"547376464","text":"from ESock import ESock\nfrom Sync import SyncClient\nfrom Utils import is_socket_related_error\nfrom Utils import capture_trace\nfrom Utils import is_wallaby\nimport Routing\nimport Logging\nimport Config\n\nimport socket\nimport time\nimport os\nimport sys\nimport platform\nimport subprocess\nimport _thread\n\nCHANNEL = \"w\"\nIS_WALLABY = is_wallaby()\nPATH = \"/home/root/Documents/KISS/bin/\" if IS_WALLABY else (sys.argv[1] if len(sys.argv) > 1 else None)\n\nif not PATH:\n\tLogging.error(\"No path specified. 
(Necessary on simulated Wallaby controllers.)\")\n\texit(1)\n\nif not IS_WALLABY:\n\tLogging.warning(\"Binaries that were created for Wallaby Controllers will not run on a simulated Wallaby.\")\n\nclass WallabyControl(Routing.ClientRoute):\n\tdef __init__(self, output_unbuffer):\n\t\tself.output_unbuffer = output_unbuffer\n\t\tself.actions_with_params = {\"run\" : self.run_program}\n\t\tself.actions_without_params = {\"disconnect\" : self.disconnect,\n\t\t\"reboot\" : self.reboot, \"shutdown\" : self.shutdown, \"stop\" : self.stop}\n\t\tself.currently_running_program = None\n\n\tdef run(self, data, handler):\n\t\tif type(data) is str:\n\t\t\tif data in self.actions_without_params.keys():\n\t\t\t\tself.actions_without_params[data](handler)\n\t\telif type(data) is dict:\n\t\t\tfor action in data:\n\t\t\t\tif action in self.actions_with_params.keys():\n\t\t\t\t\t_thread.start_new_thread(self.actions_with_params[action], (handler, data[action]))\n\n\n\tdef run_program(self, handler, program):\n\t\tcommand = [self.output_unbuffer, \"-i0\", \"-o0\", \"-e0\"]\n\t\tcommand.append(\"%s%s/botball_user_program\" % (handler.sync.folder, program))\n\t\tself.currently_running_program = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n\t\t# Poll process for new output until finished\n\t\tfor line in iter(self.currently_running_program.stdout.readline, b\"\"):\n\t\t\thandler.sock.send(line.decode(), \"std_stream\")\n\n\t\tself.currently_running_program.wait()\n\t\thandler.sock.send({\"return_code\" : self.currently_running_program.returncode}, \"std_stream\")\n\t\tself.currently_running_program = None\n\n\n\tdef stop(self, handler):\n\t\tif self.currently_running_program != None:\n\t\t\tLogging.info(\"Killing currently running programm.\")\n\t\t\tself.currently_running_program.kill()\n\t\telse:\n\t\t\tLogging.info(\"No program started by fl0w.\")\n\n\n\tdef reboot(self, handler):\n\t\tself.disconnect(handler)\n\t\tos.system(\"reboot\")\n\t\texit(0)\n\n\tdef shutdown(self, handler):\n\t\tself.disconnect(handler)\n\t\tos.system(\"shutdown -h 0\")\n\n\tdef disconnect(self, handler):\n\t\tself.stop(handler)\n\t\thandler.sock.close()\n\n\ndef get_wallaby_hostname():\n\treturn open(\"/etc/hostname\", \"r\").read()\n\nclass GetInfo(Routing.ClientRoute):\n\tdef run(self, data, handler):\n\t\tif data == \"\":\n\t\t\thandler.sock.send({\"type\" : CHANNEL,\n\t\t\t\t\"name\" : platform.node() if not IS_WALLABY else get_wallaby_hostname()}, \"get_info\")\n\t\telif \"name\" in data:\n\t\t\tif IS_WALLABY:\n\t\t\t\topen(\"/etc/hostname\", \"w\").write(str(data[\"name\"]))\n\t\t\telse:\n\t\t\t\tLogging.info(\"Hostname change: '%s'\" % str(data[\"name\"]))\n\n\nclass WallabyClient:\n\tdef __init__(self, host_port_pair, routes, debug=False):\n\t\tself.sock = ESock(socket.create_connection(host_port_pair), debug=debug)\n\t\tself.connected = True\n\t\tself.debug = debug\n\t\tself.sync = SyncClient(self.sock, PATH, \"w_sync\", debug=True)\n\t\troutes.update({\"w_sync\" : self.sync})\n\t\tself.routes = routes\n\n\n\tdef start(self):\n\t\tself.sync.start()\n\t\twhile 1 and self.connected:\n\t\t\tdata = self.sock.recv()\n\t\t\ttry:\n\t\t\t\tif data[1] in self.routes:\n\t\t\t\t\tself.routes[data[1]].run(data[0], self)\n\t\t\texcept Exception as e:\n\t\t\t\tif not is_socket_related_error(e):\n\t\t\t\t\tcapture_trace()\n\t\t\t\tbreak\n\n\n\tdef stop(self):\n\t\tself.sock.close()\n\n\n\nCONFIG_PATH = \"wallaby.cfg\"\n\nconfig = Config.Config()\nconfig.add(Config.Option(\"server_address\", (\"127.0.0.1\", 
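WallabyControl.run_program() above streams a child process's stdout line by line with iter(readline, b""). The same pattern can be demonstrated with a harmless child process in place of the stdbuf-wrapped Botball binary:

# Line-by-line streaming from a subprocess, as in run_program().
import subprocess
import sys

child = subprocess.Popen(
    [sys.executable, "-c", "print('line 1'); print('line 2')"],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

for line in iter(child.stdout.readline, b""):
    print("streamed:", line.decode().rstrip())

child.wait()
print("return_code:", child.returncode)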
3077)))\nconfig.add(Config.Option(\"debug\", True, validator=lambda x: True if True or False else False))\nconfig.add(Config.Option(\"output_unbuffer\", \"stdbuf\"))\nconfig.add(Config.Option(\"compression_level\", 0, validator=lambda x: x >= 0 and x <= 9))\n\ntry:\n\tconfig = config.read_from_file(CONFIG_PATH)\nexcept FileNotFoundError:\n\tconfig.write_to_file(CONFIG_PATH)\n\tLogging.info(\"Config file created. Please modify to reflect your setup.\")\n\texit(1)\n\tconfig = config.read_from_file(CONFIG_PATH)\n\nwallaby_client = WallabyClient(config.server_address,\n\t{\"wallaby_control\" : WallabyControl(config.output_unbuffer), \"get_info\" : GetInfo()},\n\tdebug=config.debug)\ntry:\n\twallaby_client.start()\nexcept KeyboardInterrupt:\n\twallaby_client.stop()\n","sub_path":"Wallaby/Wallaby.py","file_name":"Wallaby.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"505620969","text":"import urllib\nfrom json import dumps, loads\nfrom django.http import HttpResponse\nfrom pymongo import MongoClient\nfrom django.views.decorators.csrf import csrf_exempt\nfrom promo_codes_app.utils import verify_number\n\n\n@csrf_exempt\ndef create(request):\n # try to get parametres\n try:\n request_data = loads(request.body.decode('utf-8'))\n data = {\n \"tel\": str(request_data[\"tel\"]),\n \"cost\": int(request_data[\"cost\"])\n }\n if not verify_number(data[\"tel\"]):\n return HttpResponse(dumps({'code': \"error\", \"response\": \"tel number is not correct\"}))\n except Exception as e:\n return HttpResponse(dumps({'code': \"error\", \"response\": \"failed loads: \" + str(e)}))\n data[\"code\"] = request_data.get(\"code\")\n\n client = MongoClient()\n db = client.promo\n # verify tel and cost\n try:\n cursor_order = db.orders.find_one({\"tel\": data[\"tel\"], \"cost\": data[\"cost\"]})\n if cursor_order is not None:\n print(\"tel or cost isnt correct\")\n return HttpResponse(\n dumps({'code': \"error\", \"response\": \"tel or cost isnt correct \"}))\n except Exception as e:\n print(\"error find order : \" + str(e))\n return HttpResponse(dumps({'code': \"error\", \"response\": \"error find order\"}))\n\n if data[\"code\"] is None:\n try:\n order = db.orders.insert_one(\n {\n \"tel\": data[\"tel\"],\n \"is_cancelled\": False,\n \"cost\": data[\"cost\"]\n }\n )\n except Exception as e:\n print(\"error insert order : \" + str(e))\n return HttpResponse(dumps({'code': \"error\", \"response\": \"error insert order: \" + str(e)}))\n return HttpResponse(dumps({'code': \"OK\", \"response\": \"all right\"}))\n\n # insert temp order to get id\n try:\n order = db.orders.insert_one(\n {\n \"tel\": data[\"tel\"],\n \"is_cancelled\": False,\n \"cost\": data[\"cost\"],\n \"promo_code\": \"not confirmed\"\n }\n )\n except Exception as e:\n print(\"error insert order : \" + str(e))\n return HttpResponse(dumps({'code': \"error\", \"response\": \"error insert order: \" + str(e)}))\n\n # try to update promo code\n try:\n cursor_code = db.codes.update_one(\n {\"current_amount\": {\"$gt\": 0}, \"code\": data[\"code\"]},\n {\"$push\": {\"orders\": {\"id_of_order\": order.inserted_id, \"tel\": data[\"tel\"],\n \"cost\": data[\"cost\"]}}, \"$inc\": {\"current_amount\": -1}}\n )\n if cursor_code.modified_count == 0 or cursor_code.matched_count == 0:\n print(\"code is not correct or is not available\")\n response = delete_temp_order(db, data[\"tel\"], data[\"cost\"])\n return HttpResponse(\n dumps({'code': \"error\", \"response\": \"code 
is not correct or is not available\" + response}))\n except Exception as e:\n print(\"error update code : \" + str(e))\n response = delete_temp_order(db, data[\"tel\"], data[\"cost\"])\n return HttpResponse(dumps({'code': \"error\", \"response\": \"error update\" + response}))\n\n # update order to property state\n try:\n cursor_order = db.orders.update_one(\n {\"tel\": data[\"tel\"], \"cost\": data[\"cost\"]},\n {\"$set\": {\"promo_code\": data[\"code\"]}}\n )\n except Exception as e:\n print(str(e) + \"WARNING: order with number\" + data[\"tel\"] + \" and cost \" + str(\n data[\"cost\"]) + \"is not correct now\")\n return HttpResponse(dumps({'code': \"error\", \"response\": \"error update order: \" + str(e)}))\n if cursor_order.modified_count == 0:\n print(\"WARNING: order with number\" + data[\"tel\"] + \" and cost \" + str(\n data[\"cost\"]) + \"is not correct now\")\n return HttpResponse(dumps({'code': \"error\", \"response\": \"dont found temp order \"}))\n return HttpResponse(dumps({'code': \"OK\", \"response\": \"all right\"}))\n\n\ndef delete_temp_order(db, tel, cost):\n warning = \"\"\n try:\n cursor = db.orders.delete_one({\"tel\": tel, \"cost\": cost})\n except Exception as warning:\n print(warning)\n if cursor.deleted_count == 0:\n warning = \"order not found\"\n return str(warning)\n","sub_path":"promo_codes_project/promo_codes_app/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"65640669","text":"# coding=utf-8\nfrom sqlalchemy import Column\nfrom sqlalchemy.types import CHAR, Integer, VARCHAR, BIGINT\nfrom sqlalchemy.orm import relationship, backref\nfrom ..extensions.mysql.mysql_base import Base\nfrom ..extensions.mysql.session import sessionCM\n\n\nclass IncomeSupport(Base):\n __tablename__ = 'income_support'\n id = Column(Integer, primary_key=True, doc='唯一id')\n uid = Column(Integer, doc='用户id')\n collection_time = Column(Integer, doc='领取补助时间')\n income_support_times = Column(Integer, doc='领取补助次数')\n\n def __init__(self, data):\n self.uid = data['uid']\n self.collection_time = data['collection_time']\n self.income_support_times = data['income_support_times']\n\n @classmethod\n def get_income_support_by_uid(cls, uid):\n with sessionCM() as session:\n res = session.query(cls).filter(cls.uid == uid).first()\n if res:\n return cls.to_dict(res)\n else:\n return {}\n\n @classmethod\n def add(cls, data):\n with sessionCM() as session:\n income_support = cls(data)\n session.add(income_support)\n session.flush()\n id = income_support.id\n session.commit()\n if id > 0:\n return id\n else:\n return 0\n\n @classmethod\n def update_income_support_by_id(cls, id, data):\n with sessionCM() as session:\n try:\n session.query(cls).filter(cls.id == id).update(\n {cls.collection_time: data['collection_time'],\n cls.income_support_times: data['income_support_times']}\n )\n session.commit()\n return True\n except Exception:\n return False\n\n @classmethod\n def to_dict(cls, income_support):\n data = {'id': income_support.id, 'uid': income_support.uid, 'collection_time': income_support.collection_time,\n 'income_support_times': income_support.income_support_times}\n return data\n","sub_path":"echecs_hall/app/models/income_support.py","file_name":"income_support.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"63362678","text":"import fractions, decimal\r\n\r\n# 
Classic expected value branching problem. Several insights: things only depend on how many 0s are left to be covered.\r\n# Things fit nicely into a recurrence: E(n 0s) = 1+(1/2^n)*((n choose 0)E(n 0s)+(n choose 1)E(n-1 0s) + (n choose 2)E(n-2 0s)+ etc.)\r\n# rearranges to (-1+2^n)/(2^n) E(n 0s) = 1+...\r\n\r\nmemory = {}\r\n\r\ndef binomial(n,k): #accepts two integers n,k\r\n    #returns binom(n,k)\r\n    valueN = 1\r\n    valueD = 1\r\n    if n < 0 or k < 0 or n < k:\r\n        return 0\r\n    else: #now 0 <= k <= n\r\n        k = min(k,n-k)\r\n        for i in range(k):\r\n            valueN *= (n-i)\r\n            valueD *= (1+i)\r\n        return valueN//valueD\r\n\r\ndef expected(n):\r\n    global memory\r\n    if n == 0:\r\n        return 0\r\n    elif n in memory:\r\n        return memory[n]\r\n    leadingcoeff = fractions.Fraction(-1+2**n,2**n)\r\n    otherentries = [binomial(n,i)*expected(n-i) for i in range(1,n+1)]\r\n    value = (1+fractions.Fraction(sum(otherentries),2**n)) / leadingcoeff\r\n    memory[n] = value\r\n    return value\r\n\r\ndecimal.getcontext().prec = 11\r\nx = expected(32)\r\nprint(decimal.Decimal(x.numerator)/decimal.Decimal(x.denominator))\r\n","sub_path":"323.py","file_name":"323.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"113457354","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nADDRESS = 'www.pw.edu.pl'\n\n\nclass Spider(CrawlSpider):\n    name = ADDRESS\n    allowed_domains = [ADDRESS]\n    start_urls = ['http://' + ADDRESS]\n    rules = [\n        Rule(LinkExtractor(deny=('engpw', 'Uczelnia/Galeria', 'content/download')), callback='parse_item', follow=True)\n    ]\n\n    def __init__(self, *a, **kw):\n        super().__init__(*a, **kw)\n        self._driver = webdriver.Firefox()\n\n    def parse_item(self, response):\n        if 'html' in response.text:\n            self._driver.get(response.url)\n            soup = BeautifulSoup(self._driver.page_source)\n            article = soup.find('div', attrs={'class': 'content-view-full'})\n            if article:\n                yield {\n                    'url': response.url,\n                    'text': article.get_text(separator='')\n                }","sub_path":"asr_scraper/spiders/text_spider.py","file_name":"text_spider.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"153512152","text":"n = int(input())\na = [int(x) for x in input().split()]\npows = [2**i for i in range(32)]\nb = set(a)\npossible = False\nnum = 1\nans = (a[0],)\nfor i in range(n):\n    for j in pows:\n        if num == 1 and a[i]+j in b:\n            num = 2\n            ans = (a[i],a[i]+j)\n        if num < 3 and a[i]+j in b and a[i]+j*2 in b:\n            num = 3\n            ans = (a[i],a[i]+j,a[i]+j*2)\n            possible = True\n        if num == 3: break\n\nprint(num)\nfor q in ans: print(q,end=\" \")","sub_path":"CodeForces/486_3_D_powerofTwo.py","file_name":"486_3_D_powerofTwo.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"428662178","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2018 Kyoto University (Hirofumi Inaguma)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Beam search (prefix search) decoder in numpy implementation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom neural_sp.models.torch_utils import tensor2np\n\nLOG_0 = -float(\"inf\")\nLOG_1 = 0\n\n\nclass BeamSearchDecoder(object):\n    \"\"\"Beam search decoder.\n\n    Args:\n        blank_index (int): the index of the blank label\n\n    \"\"\"\n\n    def __init__(self, blank_index):\n        self.blank = blank_index\n\n    def __call__(self, log_probs, x_lens, beam_width=1,\n                 rnnlm=None, rnnlm_weight=0., length_penalty=0., space_index=5):\n        \"\"\"Performs inference for the given output probabilities.\n\n        Args:\n            log_probs (torch.autograd.Variable): The output log-scale probabilities\n                (e.g. post-softmax) for each time step. `[B, T, vocab]`\n            x_lens (list): A list of length `[B]`\n            beam_width (int): the size of beam\n            rnnlm: recurrent language model used for shallow fusion\n            rnnlm_weight (float): language model weight\n            length_penalty (float): insertion bonus\n            space_index (int, optional): the index of the space label. This is used for character-level CTC.\n        Returns:\n            best_hyps (list): Best path hypothesis. `[B, labels_max_seq_len]`\n            best_hyps_lens (list): Lengths of best path hypothesis. `[B]`\n\n        \"\"\"\n        assert isinstance(log_probs, torch.autograd.Variable)\n        batch_size, _, vocab = log_probs.size()\n        best_hyps = []\n\n        for b in range(batch_size):\n            # Elements in the beam are (prefix, (p_blank, p_no_blank))\n            # Initialize the beam with the empty sequence, a probability of\n            # 1 for ending in blank and zero for ending in non-blank\n            # (in log space).\n            beam = [{'hyp': [],\n                     'p_blank': LOG_1,\n                     'p_nonblank': LOG_1,\n                     'rnnlm_score': LOG_1,\n                     'rnnlm_state': None}]\n\n            for t in range(x_lens[b]):\n                new_beam = []\n\n                # Pick up the top-k scores\n                log_probs_topk, indices_topk = torch.topk(\n                    log_probs[:, t, :], k=beam_width, dim=-1, largest=True, sorted=True)\n\n                for c in tensor2np(indices_topk)[b]:\n                    p_t = log_probs[b, t, c].item()\n\n                    # The variables p_blank and p_nonblank are respectively the\n                    # probabilities for the prefix given that it ends in a\n                    # blank and does not end in a blank at this time step.\n                    for i_beam in range(len(beam)):\n                        prefix = beam[i_beam]['hyp']\n                        p_blank = beam[i_beam]['p_blank']\n                        p_nonblank = beam[i_beam]['p_nonblank']\n                        rnnlm_score = beam[i_beam]['rnnlm_score']\n                        rnnlm_state = beam[i_beam]['rnnlm_state']\n\n                        # If we propose a blank the prefix doesn't change.\n                        # Only the probability of ending in blank gets updated.\n                        if c == self.blank:\n                            new_p_blank = np.logaddexp(\n                                p_blank + p_t, p_nonblank + p_t)\n                            new_beam.append({'hyp': beam[i_beam]['hyp'],\n                                             'p_blank': new_p_blank,\n                                             'p_nonblank': LOG_0,\n                                             'rnnlm_score': rnnlm_score,\n                                             'rnnlm_state': rnnlm_state})\n                            continue\n\n                        # Extend the prefix by the new character c and add it to the\n                        # beam. Only the probability of not ending in blank gets\n                        # updated.\n                        prefix_end = prefix[-1] if len(prefix) > 0 else None\n                        new_p_blank = LOG_0\n                        new_p_nonblank = LOG_0\n                        if c != prefix_end:\n                            new_p_nonblank = np.logaddexp(\n                                p_blank + p_t, p_nonblank + p_t)\n                        else:\n                            # We don't include the previous probability of not ending\n                            # in blank (p_nonblank) if c is repeated at the end. 
The CTC\n # algorithm merges characters not separated by a\n # blank.\n new_p_nonblank = p_blank + p_t\n\n # Update RNNLM states\n if rnnlm_weight > 0 and rnnlm is not None:\n y_rnnlm = Variable(log_probs.new(1, 1).fill_(c).long(), volatile=True)\n y_rnnlm = rnnlm.embed(y_rnnlm)\n logits_step_rnnlm, rnnlm_out, rnnlm_state = rnnlm.predict(\n y_rnnlm, h=rnnlm_state)\n\n # # Add RNNLM score\n if rnnlm_weight > 0 and rnnlm is not None:\n rnnlm_log_probs = F.log_softmax(\n logits_step_rnnlm.squeeze(1), dim=1)\n assert log_probs[:, t, :].size(\n ) == rnnlm_log_probs.size()\n rnnlm_score = rnnlm_log_probs.data[0, c]\n\n new_beam.append({'hyp': beam[i_beam]['hyp'] + [c],\n 'p_blank': new_p_blank,\n 'p_nonblank': new_p_nonblank,\n 'rnnlm_score': rnnlm_score,\n 'rnnlm_state': rnnlm_state})\n\n # If c is repeated at the end we also update the unchanged\n # prefix. This is the merging case.\n if c == prefix_end:\n new_p_nonblank = p_nonblank + p_t\n new_beam.append({'hyp': beam[i_beam]['hyp'],\n 'p_blank': new_p_blank,\n 'p_nonblank': new_p_nonblank,\n 'rnnlm_score': rnnlm_score,\n 'rnnlm_state': rnnlm_state})\n\n # Sort and trim the beam before moving on to the\n # next time-step.\n beam = sorted(new_beam,\n key=lambda x: np.logaddexp(\n x['p_blank'], x['p_nonblank']) + x['rnnlm_score'] * rnnlm_weight,\n reverse=True)\n beam = beam[:beam_width]\n\n best_hyp = beam[0]['hyp']\n best_hyps.append(np.array(best_hyp))\n\n return np.array(best_hyps)\n","sub_path":"neural_sp/models/seq2seq/decoders/ctc_beam_search_decoder.py","file_name":"ctc_beam_search_decoder.py","file_ext":"py","file_size_in_byte":7103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"642056355","text":"# coding: utf-8\nimport sys\nimport random\nimport datetime\nimport time\nfrom cityClass import City\nfrom parentClass import Parent\nfrom geneIndexClass import GeneIndex\nfrom printPretty import printPretty as pp\n#import matplotlib.pyplot as plt\nimport copy\n\n\n################################################# HELPER FUNCTION #################################################\n### add all fitness values of current population\n### input: list of Parent()\n### output: float\ndef sumFitness(parents):\n res = 0\n for i in parents:\n \n fit = i.getFitness()\n res += fit\n return res\n\n### get fitness values of all individual chromosomes in increasing order\n### input: list of Parent\n### output: list of float\ndef getFitnessOfAll(parents):\n res = []\n for parent in parents:\n res.append(parent.getFitness())\n return sorted(res)\n\n\n### Parent() -> list of city indices\n### input: Parent()\n### output: list of int\ndef parentToInt(solution):\n return solution.getList()\n\n### get cumulative probability of given Parent()s as a list\n### input: list of Parent()\n### output: list of float\ndef getCumulProb(parents):\n res = []\n i = 0\n for parent in parents:\n prob = parent.getProbability()\n if i == 0:\n res.append(prob)\n \n else:\n res.append(res[-1]+prob)\n \n i += 1\n res[-1] = 1.0\n return res\n\n### Check if picked with a probability of r\n### input: num(not used), probability(0~1.0)\n### output: boolean\ndef isPicked(num, r):\n sampleSpace = []\n firstdecimal_r = round(r, 1)\n win = int(firstdecimal_r * 10)\n lose = 10 - win\n for i in range(win):\n sampleSpace.append(1)\n for j in range(lose):\n sampleSpace.append(0)\n lottery = random.choice(sampleSpace)\n return True if lottery == 1 else False\n\n### get index of particular value\n### input: list of int, int\n### output: 
int\ndef getIndexList(li, n):\n index = 0\n for i in li:\n if i == n:\n return index\n index += 1\n\n### breed a child b/w dad Parent() and mom Parent() with crossover rate r\n### input: Parent(), Parent(), float(0~1)\n### output: Parent()\ndef breed(dad, mom, r, cities):\n crossover_cities = []\n index_mom_list = []\n dadRoute = dad.getList().copy()\n momRoute = mom.getList().copy()\n \n index_dad = 0\n index_mom = 0\n #type check necessary\n #i: int\n for i in dadRoute:\n if isPicked(i, r):\n a = GeneIndex(i, index_dad) # 1, 2, 4\n crossover_cities.append(a)\n index_mom = getIndexList(momRoute, i) # 3, 5, 6\n index_mom_list.append(index_mom)\n index_dad += 1\n # sort indices in increasing order\n index_mom_list.sort() \n for j in range(len(crossover_cities)):\n momRoute[index_mom_list[j]] = crossover_cities[j].getNum()\n child_fitness = computeFitness(momRoute, cities)\n child = Parent(momRoute, child_fitness)\n return child\n\n### randomly create pairs of Parent()s\n### input: list of Parent()\n### output: list of pair of Parent()\ndef makePair(parents):\n pairs = []\n length = len(parents)\n parentsCopied = parents.copy()\n \n if length % 2 == 0:\n while(len(parentsCopied) != 0):\n dad = random.choice(parentsCopied)\n parentsCopied.remove(dad)\n mom = random.choice(parentsCopied)\n parentsCopied.remove(mom)\n dadAndMom = list()\n dadAndMom.append(dad)\n dadAndMom.append(mom)\n pairs.append(dadAndMom)\n \n else:\n while(len(parentsCopied) != 1):\n dad = random.choice(parentsCopied)\n parentsCopied.remove(dad)\n mom = random.choice(parentsCopied)\n parentsCopied.remove(mom)\n dadAndMom = list()\n dadAndMom.append(dad)\n dadAndMom.append(mom)\n pairs.append(dadAndMom)\n alone = list()\n alone.append(parentsCopied[0])\n pairs.append(alone)\n return pairs\n\n\n \n### swap two num within a list under some probability\n### input: list of int, probability r(0~1.0)\n### output: list of int\ndef mutateIndividual(li, r):\n for n1 in range(len(li)):\n if random.random() < r:\n n2 = int(random.random() * len(li))\n swap1 = li[n1]\n swap2 = li[n2]\n li[n1] = swap2\n li[n2] = swap1\n return li\n\n\n\n\n### align given list of Parent() with respect to their fitness value in increasing order\n### input: list of Parent()\n### output: list of Parent()\ndef alignFitness(parents):\n return sorted(parents, key = lambda parent : parent.getFitness()) \n\n\n################################################# MAJOR FUNCTION #################################################\n### get initial population using random shuffle. 
The size of the initial population is configurable.\n### input: number of cities, num of initial population\n### output: list of Parent()\ndef initialPopulation(num, pop, cities):\n    pp(\"Generating initial population...\")\n    population = [0 for x in range(pop)]\n    alignedList = list(range(1, num+1))\n    # generate random permuted lists\n    count = 0\n    while count < pop:\n        l = copy.copy(alignedList)\n        random.shuffle(l) # not aligned anymore\n        route = l\n        fitness = computeFitness(route, cities)\n        population[count] = Parent(route, fitness) \n        count += 1\n    pp(\"Generated initial population\")\n    return population\n\n\n### calculate fitness: total distance between cities\n### input: list of int, list of City()\n### output: int\ndef computeFitness(route, cities):\n    #pp(\"Computing Fitness...\")\n    distance = 0\n    actualRoute = [cities[i-1] for i in route] # list of City()\n    length = len(actualRoute)\n    # starts with i=0\n    for i in range(length):\n        if i == (length-1):\n            distance += actualRoute[i].getDistance(actualRoute[0])\n        else:\n            distance += actualRoute[i].getDistance(actualRoute[i+1])\n    #pp(\"Computed FPS\")\n    return distance\n    \n\n### compute each probability of being selected proportional to each fitness value\n### input: list of Parent(), total sum of fitness, list of float\n### output: list of Parent()\ndef computeFPS(parents, sumFitness, everyFitness):\n    print(\"Fitness of all: \", everyFitness)\n    minimum = everyFitness[0]\n    num = len(parents)\n    print(\"minimum: \", minimum)\n\n    for parent in parents:\n        index = getIndexList(everyFitness, parent.getFitness()) \n        prob = float( (everyFitness[-(index+1)]-minimum) / (sumFitness-minimum*num) ) \n        parent.setProbability(prob)\n    return parents\n\n\n\n\n### spin a roulette wheel with N equally spaced arms to pick N parents\n### input: list of Parent(), number of next generation\n### output: list of Parent()\ndef sampleSUS(parents, N):\n    \n    pp(\"Sampling using SUS...\")\n    selected = [0 for x in range(N)] #[0, 0, ..., 0]\n    cumul_prob = getCumulProb(parents)\n    current_member = 0\n    i = 0\n    r = random.uniform(0, 1/N)\n    while(current_member <= N-1):\n        while(r <= cumul_prob[i]):\n            selected[current_member] = parents[i]\n            r += 1/N\n            current_member += 1\n        i += 1\n    pp(\"Sampled using SUS\")\n    return selected\n\n### Make pairs of Parent()s, randomly and uniformly pick some part and switch the same number sequence retaining its orders\n### input: list of Parent(), crossover rate(0~1.0), total population int\n### output: list of Parent()\ndef orderedCrossover(selected, r, pop, cities):\n    pp(\"Crossovering...\")\n\n    childs = []\n    numChild = 0\n    numPop = int(0.5*pop if pop%2==0 else 0.5*pop+1)\n    while numChild < numPop:\n        pairs = makePair(selected)\n        \n        for pair in pairs:\n            numChild += 1\n            pp(\"Creating Child #{0}...\".format(numChild))\n            if len(pair) == 2:\n                child = breed(pair[0], pair[1], r, cities)\n            else:\n                child = pair[0]\n            childs.append(child)\n    pp(\"Crossovered\")\n    return childs\n\n    \n\n### with some probability r, swap two cities\n### input: list of Parent(), probability r(0~1.0)\n### output: list of Parent()\ndef mutate(crossovered, r):\n    pp(\"Mutating...\")\n    for parent in crossovered:\n        li = parent.getList()\n        \n        mutated_li = mutateIndividual(li, r)\n        \n        parent.setList(mutated_li)\n    pp(\"Mutated\")\n    return crossovered\n\n\n\n### update fitness for each Parent()\n### input: list of Parent()\n### output: list of Parent()\ndef updateFitness(parents, cities):\n    pp(\"Updating fitness of the mutated...\")\n    for parent in parents:\n        \n        fit = computeFitness(parent.getList(), cities)\n        parent.setFitness(fit)\n        \n        \n    pp(\"Updated 
fitness of the mutated\")\n return parents\n\n\n### maintain M best from Parents, get rid of M worst from Child\n### input: list of Parent(), list of Parent(), percentage of elite(0~1.0)\n### output: list of Parent()\ndef chooseBestGeneration(parent, child, m):\n pp(\"Choosing the best generation...\")\n best = []\n l = len(parent)\n numElite = int(l*m)\n #numElite = 2\n \n aligned_parent = alignFitness(parent)\n best_parent = aligned_parent[:numElite]\n \n aligned_child = alignFitness(child)\n best_child = aligned_child[:-numElite]\n \n best = best_parent + best_child\n \n pp(\"Chose the best generation\")\n return best\n\n\n\n\n### choose the best fitness Parent() among Parent()s\n### input: list of Parent()\n### output: Parent()\ndef chooseBestOne(pop):\n pp(\"Choosing the best one...\")\n sorted_pop = alignFitness(pop)\n pp(\"Chose the best one\")\n return sorted_pop[0]\n\n'''\n### draw plot end of every loop\n### input: list of int, list of float, int, float\n### output: -\ndef drawPlot(x, y, count, theBestFitness):\n \n x.append(count)\n y.append(theBestFitness)\n plt.plot(count, theBestFitness, \"ro-\")\n plt.show()\n plt.pause(0.0001)\n''' \n \n\n\n### given list of int with city indices, create csv file\n### input: list of int\n### output: .csv with single column city indices\ndef createCSV(arg):\n pp(\"Creating CSV file...\")\n length = len(arg)\n csv = open('solution_{0}.csv'.format(str(datetime.datetime.now().time())[:-7]), 'w')\n for i in range(length):\n data = str(arg[i])\n csv.write(data+\"\\n\")\n pp(\"Created CSV file\")\n return csv\n\n\n\n################################################# MAIN FUNCTION #################################################\n### main function\n### input: -\n### output: .csv\ndef main():\n prob = sys.argv[1] # file name\n prob_open = open(prob, 'r')\n pop = int(sys.argv[2]) # number of members in population\n loop = int(sys.argv[3]) # number of loop, stop criterion\n elite = float(sys.argv[4]) # elitism percentage to hold til the next generation\n crossoverRate = float(sys.argv[5]) # percentage of being crossovered\n mutationRate = float(sys.argv[6]) # percentage of being mutated\n \n ### for plotting\n '''\n plt.ion()\n fig=plt.figure()\n plt.axis([0,loop,0,100000000])\n plt.ylim(bottom = 80000000)\n plt.yscale('log')\n plt.ylabel('Distance')\n plt.xlabel('Generation')\n xlist=list()\n ylist=list()\n '''\n lines = prob_open.readlines()\n len = lines[3]\n num = int(len[12:])\n lines = lines[6:]\n lines = [i.split(\" \") for i in lines]\n \n #cities: list of City()\n cities = []\n for i in range(0, num):\n x = int(float(lines[i][1]))\n y = int(float(lines[i][2]))\n cities.append(City(i+1, x, y))\n initialPop = initialPopulation(num, pop, cities)\n # main loop\n count = 0\n population = initialPop # list of Parent()\n \n while count < loop:\n \n count += 1\n selected = []\n sumfit = sumFitness(population)\n everyFitness = getFitnessOfAll(population) #list of fitness values for each parent\n #print(\"FITNESS OF ALL: \", everyFitness)\n population = computeFPS(population, sumfit, everyFitness)\n #print(\"fps computed: \", population)\n selected_parent = sampleSUS(population, int(0.5*pop))\n #print(\"sus computed: \", selected_parent)\n crossovered_child = orderedCrossover(selected_parent, crossoverRate, pop, cities)\n #print(\"crossovered: \", crossovered_child)\n mutated_parent = mutate(selected_parent, mutationRate)\n #print(\"mutated parent: \", mutated_parent)\n mutated_child = mutate(crossovered_child, mutationRate)\n #print(\"mutated child: 
\", mutated_child)\n mutated_parent = updateFitness(mutated_parent, cities)\n #print(\"updated mutated parent: \", mutated_parent)\n mutated_child = updateFitness(mutated_child, cities)\n #print(\"updated mutated child: \", mutated_child)\n ### 여기까지 오케이 ###\n population = chooseBestGeneration(mutated_parent, mutated_child, elite) # list of Parent()\n #print(\"population: \", population)\n theBestOne = chooseBestOne(population) # Parent()\n theBestFitness = float(theBestOne.getFitness())\n\n ### Plotting\n #drawPlot(xlist, ylist, count, theBestFitness)\n\n print(\"THE BEST of GENERATION #{0}: {1}\".format(count, theBestFitness))\n #plt.pause(5)\n #while True:\n # plt.pause(0.05)\n solution = parentToInt(theBestOne)\n createCSV(solution)\n\n prob_open.close()\n return 1\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"pypy_tsp_solver.py","file_name":"pypy_tsp_solver.py","file_ext":"py","file_size_in_byte":13384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"200510358","text":"#########################2018-11-29更新###########################\r\n# !/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport sys\r\nimport re\r\nimport requests\r\nimport json\r\nimport execjs\r\nimport http.client\r\nimport hashlib\r\nimport json\r\nimport urllib\r\nimport time\r\nimport random\r\n#################百度翻译#######################baidu_translate(content, fromLang = 'ru', toLang = 'zh')\r\ndef baidu_translate(content, fromLang = 'ru', toLang = 'en'):\r\n appid = '20151113000005349'\r\n secretKey = 'osubCEzlGjzvw8qdQc41'\r\n httpClient = None\r\n myurl = '/api/trans/vip/translate'\r\n q = content\r\n # fromLang = 'ru' # 源语言\r\n # toLang = 'zh' # 翻译后的语言\r\n salt = random.randint(32768, 65536)\r\n sign = appid + q + str(salt) + secretKey\r\n sign = hashlib.md5(sign.encode()).hexdigest()\r\n myurl = myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(\r\n q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(\r\n salt) + '&sign=' + sign\r\n\r\n try:\r\n httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')\r\n httpClient.request('GET', myurl)\r\n # response是HTTPResponse对象\r\n response = httpClient.getresponse()\r\n jsonResponse = response.read().decode(\"utf-8\") # 获得返回的结果,结果为json格式\r\n js = json.loads(jsonResponse) # 将json格式的结果转换字典结构\r\n dst = str(js[\"trans_result\"][0][\"dst\"]) # 取得翻译后的文本结果\r\n return dst\r\n except Exception as e:\r\n return e\r\n finally:\r\n if httpClient:\r\n httpClient.close()\r\n time.sleep(2)\r\n###########################################\r\n\r\nclass Py4Js():\r\n\r\n def __init__(self):\r\n self.ctx = execjs.compile(\"\"\" \r\n\t\tfunction TL(a) { \r\n\t\tvar k = \"\"; \r\n\t\tvar b = 406644; \r\n\t\tvar b1 = 3293161072; \r\n\r\n\t\tvar jd = \".\"; \r\n\t\tvar $b = \"+-a^+6\"; \r\n\t\tvar Zb = \"+-3^+b+-f\"; \r\n\r\n\t\tfor (var e = [], f = 0, g = 0; g < a.length; g++) { \r\n\t\t\tvar m = a.charCodeAt(g); \r\n\t\t\t128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? 
(m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023), \r\n\t\t\te[f++] = m >> 18 | 240, \r\n\t\t\te[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224, \r\n\t\t\te[f++] = m >> 6 & 63 | 128), \r\n\t\t\te[f++] = m & 63 | 128) \r\n\t\t} \r\n\t\ta = b; \r\n\t\tfor (f = 0; f < e.length; f++) a += e[f], \r\n\t\ta = RL(a, $b); \r\n\t\ta = RL(a, Zb); \r\n\t\ta ^= b1 || 0; \r\n\t\t0 > a && (a = (a & 2147483647) + 2147483648); \r\n\t\ta %= 1E6; \r\n\t\treturn a.toString() + jd + (a ^ b) \r\n\t}; \r\n\r\n\tfunction RL(a, b) { \r\n\t\tvar t = \"a\"; \r\n\t\tvar Yb = \"+\"; \r\n\t\tfor (var c = 0; c < b.length - 2; c += 3) { \r\n\t\t\tvar d = b.charAt(c + 2), \r\n\t\t\td = d >= t ? d.charCodeAt(0) - 87 : Number(d), \r\n\t\t\td = b.charAt(c + 1) == Yb ? a >>> d: a << d; \r\n\t\t\ta = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d \r\n\t\t} \r\n\t\treturn a \r\n\t} \r\n\t\"\"\")\r\n\r\n def getTk(self, text):\r\n return self.ctx.call(\"TL\", text)\r\n\r\n\r\ndef google_trans(word, sl=\"zh-CN\", tl=\"en\"):\r\n # 中:zh-CN,英:en,俄:ru\r\n headers = {\r\n 'cookie': '_ga=GA1.3.1163951248.1511946285; NID=131=XX0_dJsOrF47GXs2WNtO1MXyKVCK39bW4HXS0XZZ3ZYHTvMGOz8CVJe1G2XVwAJNF9MYOb1ngCqa_NegB6db2kgJ5A9hT3SScy0ag_L41wvtXHiPpNZweONFGHFNtWR_; 1P_JAR=2018-6-7-15',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',\r\n }\r\n\r\n url = 'https://translate.google.cn/translate_a/single?client=t&sl=' + sl + '&tl=' + tl + '&hl='+ tl +'&dt=at&dt=bd&dt=ex&dt=ld&' \\\r\n 'dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&otf=1&ssel=0&tsel=0&kc=2&tk={tk}&q=' + word\r\n\r\n js = Py4Js()\r\n tk = js.getTk(word)\r\n url = url.format(tk=tk)\r\n # while\r\n try:\r\n s = requests.get(url, headers=headers)\r\n sj = s.json()\r\n trans = ''\r\n try:\r\n len(sj[1])\r\n trans += '[' + sj[1][0][0] + ']' + '\\n'\r\n for i in sj[1][0][1]:\r\n trans += str(i) + ','\r\n trans = trans.rstrip(',')\r\n except:\r\n if len(sj[0]) > 1:\r\n for i in sj[0][:-1]:\r\n trans += i[0] + '\\n'\r\n elif len(sj[0]) == 1:\r\n trans = sj[0][0][0]+ '\\n'\r\n except:\r\n trans = word\r\n trans = trans.replace(\"\\n\",\" \")\r\n return trans\r\n\r\n#SZn改写,效率较快\r\ndef google_szn_trans(word, sl=\"zh-CN\", tl=\"en\"):\r\n # 中:zh-CN,英:en,俄:ru\r\n headers = {\r\n 'cookie': '_ga=GA1.3.1163951248.1511946285; NID=131=XX0_dJsOrF47GXs2WNtO1MXyKVCK39bW4HXS0XZZ3ZYHTvMGOz8CVJe1G2XVwAJNF9MYOb1ngCqa_NegB6db2kgJ5A9hT3SScy0ag_L41wvtXHiPpNZweONFGHFNtWR_; 1P_JAR=2018-6-7-15',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',\r\n }\r\n\r\n url = 'https://translate.google.cn/translate_a/single?client=t&sl=' + sl + '&tl=' + tl + '&hl='+ tl +'&dt=at&dt=bd&dt=ex&dt=ld&' \\\r\n 'dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&otf=1&ssel=0&tsel=0&kc=2&tk={tk}&q=' + word\r\n\r\n js = Py4Js()\r\n tk = js.getTk(word)\r\n url = url.format(tk=tk)\r\n # while\r\n result = {\"pos\": \"\", \"mean\": []}\r\n try:\r\n s = requests.get(url, headers=headers)\r\n sj_lst = s.json()\r\n if sj_lst[0][0][0] != None:\r\n result[\"mean\"].append((sj_lst[0][0][0], 1.0))\r\n if sj_lst[1] != None:\r\n result[\"pos\"] = sj_lst[1][0][0]\r\n for item in sj_lst[1][0][2]:\r\n if item[-1] > 0.001:\r\n result[\"mean\"].append((item[0], item[-1]))\r\n else:\r\n break\r\n except:\r\n pass\r\n return result\r\n\r\n#SZn改写,翻译句子\r\ndef google_szn_trans_sentence(sentence, sl=\"zh-CN\", tl=\"en\"):\r\n # 中:zh-CN,英:en,俄:ru\r\n headers = {\r\n 'cookie': 
'_ga=GA1.3.1163951248.1511946285; NID=131=XX0_dJsOrF47GXs2WNtO1MXyKVCK39bW4HXS0XZZ3ZYHTvMGOz8CVJe1G2XVwAJNF9MYOb1ngCqa_NegB6db2kgJ5A9hT3SScy0ag_L41wvtXHiPpNZweONFGHFNtWR_; 1P_JAR=2018-6-7-15',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',\r\n }\r\n\r\n url = 'https://translate.google.cn/translate_a/single?client=t&sl=' + sl + '&tl=' + tl + '&hl='+ tl \\\r\n +'&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&otf=1&ssel=0&tsel=0&kc=2&tk={tk}&q=' + sentence\r\n\r\n js = Py4Js()\r\n tk = js.getTk(sentence)\r\n url = url.format(tk=tk)\r\n # while\r\n result = \"\"\r\n try:\r\n s = requests.get(url, headers=headers)\r\n sj_lst = s.json()\r\n if len(sj_lst[0])==1:\r\n result = sj_lst[0][0][0]\r\n else:\r\n for x in sj_lst[0][:-1]:\r\n result += x[0]\r\n except:\r\n pass\r\n return result\r\n\r\n# 利用第三方端口:http://www.liuyanlin.cn/get_translate翻译\r\ndef google_liu_trans(word, sl=\"zh-CN\", tl=\"en\"):\r\n headers = {\r\n 'cookie': '_ga=GA1.3.1163951248.1511946285; NID=131=XX0_dJsOrF47GXs2WNtO1MXyKVCK39bW4HXS0XZZ3ZYHTvMGOz8CVJe1G2XVwAJNF9MYOb1ngCqa_NegB6db2kgJ5A9hT3SScy0ag_L41wvtXHiPpNZweONFGHFNtWR_; 1P_JAR=2018-6-7-15',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',\r\n }\r\n url = \"http://www.liuyanlin.cn/get_translate?sl=\"+sl+\"&tl=\"+tl+\"&wd=\"+word\r\n s = requests.get(url, headers=headers)\r\n sj_lst = s.json()[\"data\"]\r\n result = {\"pos\":\"\",\"mean\":[]}\r\n try:\r\n if sj_lst[0][0][0] != None:\r\n result[\"mean\"].append((sj_lst[0][0][0], 1.0))\r\n if sj_lst[1] != None:\r\n result[\"pos\"] = sj_lst[1][0][0]\r\n for item in sj_lst[1][0][2]:\r\n if item[-1] > 0.001:\r\n result[\"mean\"].append((item[0], item[-1]))\r\n else:\r\n break\r\n except:\r\n pass\r\n return result\r\n\r\n#实用爬虫,从俄文网站翻译,尚未完成此功能\r\ndef ru_trans(word):\r\n headers = {\r\n 'cookie': '_ga=GA1.3.1163951248.1511946285; NID=131=XX0_dJsOrF47GXs2WNtO1MXyKVCK39bW4HXS0XZZ3ZYHTvMGOz8CVJe1G2XVwAJNF9MYOb1ngCqa_NegB6db2kgJ5A9hT3SScy0ag_L41wvtXHiPpNZweONFGHFNtWR_; 1P_JAR=2018-6-7-15',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',\r\n }\r\n url = \"https://bkrs.info/slovo.php?ch=\" + word\r\n result = None\r\n try:\r\n s = requests.get(url, headers=headers)\r\n result = s.text\r\n except:\r\n pass\r\n return result\r\n\r\nfrom urllib import parse\r\n#SZn改写,获取完整的翻译列表信息\r\ndef google_szn_trans_lst(word, sl=\"zh-CN\", tl=\"en\"):\r\n # 中:zh-CN,英:en,俄:ru\r\n headers = {\r\n 'cookie': '_ga=GA1.3.1163951248.1511946285; NID=131=XX0_dJsOrF47GXs2WNtO1MXyKVCK39bW4HXS0XZZ3ZYHTvMGOz8CVJe1G2XVwAJNF9MYOb1ngCqa_NegB6db2kgJ5A9hT3SScy0ag_L41wvtXHiPpNZweONFGHFNtWR_; 1P_JAR=2018-6-7-15',\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',\r\n }\r\n # word = {'q': word}\r\n # word = parse.urlencode(word)\r\n # word = word.replace(\"+\",\"%20\")\r\n url = 'https://translate.google.cn/translate_a/single?client=t&sl=' + sl + '&tl=' + tl + '&hl='+ tl +'&dt=at&dt=bd&dt=ex&dt=ld&' \\\r\n 'dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&otf=1&ssel=0&tsel=0&kc=2&tk={tk}&q=' + word\r\n\r\n js = Py4Js()\r\n tk = js.getTk(word)\r\n url = url.format(tk=tk)\r\n # while\r\n result = None\r\n try:\r\n s = requests.get(url, headers=headers)\r\n result = s.json()\r\n except:\r\n pass\r\n return 
result\r\n\r\n#http://www.liuyanlin.cn/get_translate?sl=zh-CN&tl=en&wd=北京\r\n\r\nimport pickle as pkl\r\nif __name__ == '__main__':\r\n    # x = google_trans(\"идиоты\",tl=\"en\")#,tl=\"en\"\r\n    x = google_szn_trans_sentence(\"这本书很烂\",tl=\"en\")#,tl=\"en\"\r\n    print(x)\r\n    # x = google_szn_trans(\"идиоты\", sl=\"ru\", tl=\"zh-CN\")\r\n    # print(x)\r\n    # y = google_szn_trans(\"идиоты\", sl=\"ru\", tl=\"en\")\r\n    # print(y)\r\n\r\n    # s = \"Один из самых молодых проектов МГЕР Санкт Петербурга продолжает активно развиваться\"\r\n    # z = google_szn_trans(s, sl=\"ru\", tl=\"en\")\r\n    # print(z)\r\n    # import pickle as pkl\r\n    # from senticnet.senticnet import SenticNet\r\n    # sn_en = SenticNet()\r\n    # sn_ru = SenticNet(\"ru\")\r\n    #\r\n    # id2word = pkl.load(open('process_data_fb/id2word_en_senti', 'rb'))\r\n    # for item in id2word.items():\r\n    #     if isinstance(id2word[item[0]],tuple) and len(id2word[item[0]])==3:\r\n    #         print(item[0], id2word[item[0]])\r\n    #         continue\r\n    #     if item[0]<=14:\r\n    #         id2word[item[0]] = (item[1],item[1],0)\r\n    #     else:\r\n    #         en_trans = None\r\n    #         try:\r\n    #             en_trans = google_szn_trans(item[1], sl=\"ru\", tl=\"en\")[\"mean\"][0][0]\r\n    #             score = float(sn_ru.polarity_value(item[1]))\r\n    #         except:\r\n    #             try:\r\n    #                 score = float(sn_en.polarity_value(en_trans))\r\n    #             except:\r\n    #                 score = 0\r\n    #         id2word[item[0]] = (item[1], en_trans, score)\r\n    #         print(item[0], id2word[item[0]])\r\n    #         if item[0]%100==0:\r\n    #             pkl.dump(id2word, open('process_data_fb/id2word_en_senti2', 'wb'))\r\n    #\r\n    s = \"ахуенными\"\r\n    ru_trans(s)\r\n    # z = google_szn_trans(s, sl=\"ru\", tl=\"en\")\r\n    # print(z)\r\n    # re_data_list, word_set = pkl.load(open('process_data_fb/r&e_data_list2', 'rb'))\r\n    # word_lst = list(word_set)\r\n    # word_lst.sort()\r\n    # for i,wrd in enumerate(word_lst):\r\n    #     print(i,wrd)\r\n","sub_path":"trans_google.py","file_name":"trans_google.py","file_ext":"py","file_size_in_byte":12096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"562585261","text":"# -*- coding: utf-8 -*-\n\"\"\" Created on Sun Aug  5 20:12:24 2018, @author: young \"\"\"\nimport numpy as np\nimport matplotlib.pyplot as pl\nimport random\nfrom numba import jit\n\n@jit\ndef NaiveLoop(data):\n    l = len(data)\n    result = np.zeros(shape=l)\n    for i in range(len(data)):\n        ind = np.random.randint(0,l)\n        result[i] = data[i]*data[ind]\n    return np.sum(result)\n\n\ndef naivesum(data):\n    s = 0\n    for i in range(len(data)):\n        s += data[i]\n    return s\n\nN = 10000\nmylist = [random.random() for i in range(N)]\nmyarray = np.array(mylist)","sub_path":"any_files/testjit.py","file_name":"testjit.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"379409655","text":"import pytest\nfrom Utils.db_tool.db_sq import DB_mysql\nfrom Utils.common import Common\nfrom Config.Config import login_headers\nimport allure\nimport json\nfrom Utils.random_tools import random_merchantName\n\n\n\nclass Test_merchant():\n    def setup_class(self):\n        self.database = \"sunxing_ccs\"\n        self.db = DB_mysql()\n\n    @allure.epic(\"隼骑士小程序\")\n    @allure.feature(\"verify basic merchant functionality - create > query > delete\")\n    @pytest.mark.test\n    # verify the merchant's basic flow - create > query > delete\n    def test_process(self, Conftest):\n        merchantName_name = str(random_merchantName())\n        # create\n        self.test_update_mer_addorupdate(\n            Conftest=Conftest, merchantName_name=merchantName_name)\n        # look up merchantName_id\n        merchantName_id, is_delete = self.test_select_db(\n            
merchantName_name=merchantName_name)\n        add = \"merchantName_id=\" + \\\n            str(merchantName_id) + \" is_delete=\" + str(is_delete)\n        allure.attach(\n            json.dumps(\n                merchantName_name,\n                ensure_ascii=False,\n                indent=4),\n            'name returned by the merchant query endpoint - {}'.format(merchantName_name),\n            allure.attachment_type.TEXT)\n        allure.attach(\n            json.dumps(\n                add,\n                ensure_ascii=False,\n                indent=4),\n            'id and delete flag returned by the merchant query endpoint - {}'.format(add),\n            allure.attachment_type.TEXT)\n        self.test_select_db(merchantName_id=merchantName_id)\n        # delete\n        self.test_del_met_addorupdate(merchantName_id)\n        # verify the database\n        merchantName_id, is_delete = self.test_select_db(\n            merchantName_id=merchantName_id)\n        add1 = \"merchantName_id=\" + \\\n            str(merchantName_id) + \" is_delete=\" + str(is_delete)\n        allure.attach(\n            json.dumps(\n                add1,\n                ensure_ascii=False,\n                indent=4),\n            'merchantName_id and is_delete obtained by querying the merchant id again',\n            allure.attachment_type.TEXT)\n\n    # query helper: looks up by name when given one, otherwise by id; returns the first row when data exists\n\n    def test_select_db(self, merchantName_name=None, merchantName_id=None):\n        if merchantName_name:\n            sqlstring = '''\n                select `id`,\n                `is_delete`\n                from {}.`ccs_merchant`\n                where `merchant_name`= \"{}\"\n            '''.format(self.database, merchantName_name)\n        else:\n            sqlstring = '''\n                select `id`,\n                `is_delete`\n                from {}.`ccs_merchant`\n                where `id`= \"{}\"\n            '''.format(self.database, merchantName_id)\n        a = self.db.select_db(self.database, sqlstring)\n        if a:\n            return a[0]\n        else:\n            return None\n    # merchant creation endpoint: add a merchant by name\n\n    def test_update_mer_addorupdate(self, Conftest, merchantName_name):\n        uri = \"/api/merchant/addOrUpdate\"\n        data1 = {\n            \"merchantName\": merchantName_name,\n            \"address\": \"刘程旭\",\n            \"amountReceived\": \"string\",\n            \"brandName\": \"string\",\n            \"commissionRatio\": \"string\",\n            \"contact\": \"test2\",\n            \"contactAmount\": \"string\",\n            \"contactDeadline\": \"string\",\n            \"contactLink\": \"string\",\n            \"contactPhone\": \"13671618736\",\n            \"contactUuid\": \"string\",\n            \"cooperationStatus\": 0,\n            \"cityAddress\": [{\n                \"areaCode\": \"310000\",\n                \"areaName\": \"上海市\",\n                \"level\": 1\n            }, {\n                \"areaCode\": \"310100\",\n                \"areaName\": \"上海市\",\n                \"level\": 2\n            }, {\n                \"areaCode\": \"310110\",\n                \"areaName\": \"虹口区\",\n                \"level\": 3}],\n            \"isElmOnline\": 0,  # whether live on Ele.me (0 = not live, 1 = live)\n            \"elmMonthlySales\": \"\",  # Ele.me monthly sales\n            \"isMtOnline\": 0,  # whether live on Meituan (0 = not live, 1 = live)\n            \"mtMonthlySales\": \"\",\n            \"intentionality\": 2  # intention level: 0 dissatisfied, 1 average, 2 satisfied, 3 very satisfied\n        }\n        headers1 = login_headers\n        common = Common()\n        status = common.post(uri, data1, headers1)\n        allure.attach(\n            json.dumps(\n                status.json(),\n                ensure_ascii=False,\n                indent=4),\n            'name {} created via the merchant creation endpoint, returned data'.format(merchantName_name),\n            allure.attachment_type.TEXT)\n        if status:\n            return merchantName_name\n        else:\n            return None\n    # merchant query endpoint: look up by id\n\n    def test_get_mer_addorupdate(self, merchantName_id):\n        uri = \"/api/merchant/getMerchant/{}\".format(merchantName_id)\n        data = {\"id\": merchantName_id}\n        headers1 = login_headers\n        comm = Common()\n        status = comm.get(uri, data, headers1)\n        if status:\n            merchantName_id, is_delete = self.test_select_db(\n                merchantName_id=merchantName_id)\n            print(\"merchant id is displayed correctly, id is {}\".format(merchantName_id))\n        else:\n            print(\"merchant id could not be displayed\")\n\n    # merchant endpoint: delete\n    def test_del_met_addorupdate(self, merchantName_id):\n        if merchantName_id:\n            headers1 = login_headers\n            uri = \"/api/merchant/delete\"\n            req_Body = {\"id\": merchantName_id}\n            status = Common().post(uri, req_Body, headers1)\n            allure.attach(\n                json.dumps(\n                    status.json(),\n                    ensure_ascii=False,\n                    indent=4),\n                'data returned by the delete endpoint call',\n                allure.attachment_type.TEXT)\n            if status:\n                target_Merchant_Id, 
is_Deleted = self.test_select_db(\n merchantName_id=merchantName_id)\n allure.attach(\n json.dumps(\n is_Deleted,\n ensure_ascii=False,\n indent=4),\n '根据merchantName_id 去查询数据库得到is_Deleted 1就是删除成功 0 就是没删除',\n allure.attachment_type.TEXT)\n if int(is_Deleted) == 1:\n print('实现逻辑删除')\n else:\n print('删除失败,商家还是能被查到')\n else:\n print(\"接口删除这个接口调用失败\")\n","sub_path":"TestSuite/process/test_merchant_process.py","file_name":"test_merchant_process.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"201350907","text":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom inspect import cleandoc\nimport os\nimport sys\nimport unittest\nimport warnings\n\nfrom swat import CAS, SWATError\n\nimport sasoptpy as so\n\nfrom tests.swat_config import create_cas_connection\n\n\nclass TestOPTMODEL(unittest.TestCase):\n \"\"\"\n Unit tests for the CAS interface\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n so.reset()\n cls.conn = None\n try:\n cls.conn = create_cas_connection()\n except SWATError:\n warnings.warn('CAS connection is not available', RuntimeWarning)\n except TypeError:\n warnings.warn('CAS variables are not available', RuntimeWarning)\n except ValueError:\n print(\"Protocol error, version info:\", sys.version_info)\n raise unittest.SkipTest('Protocol is not available for testing')\n\n @classmethod\n def tearDownClass(cls):\n if cls.conn is not None:\n cls.conn.close()\n\n def test_variable_group_assignments(self):\n\n from sasoptpy.actions import read_data\n\n if TestOPTMODEL.conn is None:\n self.skipTest('CAS Session is not available')\n\n import pandas as pd\n df = pd.DataFrame([\n ['a', 'b', 1],\n ['c', 'd,e', 2],\n ['f,g', 'g,h,i', 3]\n ], columns=['k1', 'k2', 'v'])\n\n\n m = so.Model(name='m', session=TestOPTMODEL.conn)\n setK1 = df['k1'].tolist()\n setK2 = df['k2'].tolist()\n\n x = m.add_variables(setK1, setK2, name='x')\n m.add_constraints((x[i, j] >= 1 for i in setK1 for j in setK2), name='c')\n m.set_objective(so.expr_sum(x[i, j] for i in setK1 for j in setK2), name='obj', sense=so.minimize)\n m.solve(verbose=True)\n self.assertEqual(str(m.get_solution()), cleandoc('''\n Selected Rows from Table SOLUTION\n\n i var value lb ub rc\n 0 1.0 x[a,b] 1.0 -1.797693e+308 1.797693e+308 0.0\n 1 2.0 x[a,'d,e'] 1.0 -1.797693e+308 1.797693e+308 0.0\n 2 3.0 x[a,'g,h,i'] 1.0 -1.797693e+308 1.797693e+308 0.0\n 3 4.0 x[c,b] 1.0 -1.797693e+308 1.797693e+308 0.0\n 4 5.0 x[c,'d,e'] 1.0 -1.797693e+308 1.797693e+308 0.0\n 5 6.0 x[c,'g,h,i'] 1.0 -1.797693e+308 1.797693e+308 0.0\n 6 7.0 x['f,g',b] 1.0 -1.797693e+308 1.797693e+308 0.0\n 7 8.0 x['f,g','d,e'] 1.0 -1.797693e+308 1.797693e+308 0.0\n 8 9.0 x['f,g','g,h,i'] 1.0 -1.797693e+308 1.797693e+308 0.0'''))\n\n so.config['generic_naming'] = True\n self.assertEqual(m.to_optmodel(), cleandoc('''\n proc optmodel;\n var x {{'a','c','f,g'}, {'b','d,e','g,h,i'}};\n x['a', 'b'] = 1.0;\n x['a', 
'd,e'] = 1.0;\n x['a', 'g,h,i'] = 1.0;\n x['c', 'b'] = 1.0;\n x['c', 'd,e'] = 1.0;\n x['c', 'g,h,i'] = 1.0;\n x['f,g', 'b'] = 1.0;\n x['f,g', 'd,e'] = 1.0;\n x['f,g', 'g,h,i'] = 1.0;\n con c_0 : x['a', 'b'] >= 1;\n con c_1 : x['a', 'd,e'] >= 1;\n con c_2 : x['a', 'g,h,i'] >= 1;\n con c_3 : x['c', 'b'] >= 1;\n con c_4 : x['c', 'd,e'] >= 1;\n con c_5 : x['c', 'g,h,i'] >= 1;\n con c_6 : x['f,g', 'b'] >= 1;\n con c_7 : x['f,g', 'd,e'] >= 1;\n con c_8 : x['f,g', 'g,h,i'] >= 1;\n min obj = x['a', 'b'] + x['a', 'd,e'] + x['a', 'g,h,i'] + x['c', 'b'] + x['c', 'd,e'] + x['c', 'g,h,i'] + x['f,g', 'b'] + x['f,g', 'd,e'] + x['f,g', 'g,h,i'];\n solve;\n quit;'''))\n\n","sub_path":"tests/interface/test_optmodel_format.py","file_name":"test_optmodel_format.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"25084067","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2006 CCLRC, Graeme Winter\n#\n# This code is distributed under the BSD license, a copy of which is\n# included in the root directory of this package.\n#\n# 24th May 2006\n#\n# An implementation of the Driver class which writes scripts, which are run\n# and the results piped to an output file. The content of this output file\n# is then returned through the output() method.\n#\n# Applicability: Windows/OS X/UNIX\n#\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport subprocess\n\nfrom xia2.Driver.DefaultDriver import DefaultDriver\nfrom xia2.Driver.DriverHelper import script_writer\n\n# Workings on Windows:\n#\n# Write input to a .input file\n#\n# Workings on UNIX:\n#\n# Write a script to run with here document\n#\n# in both cases these can be initiated by os.system() calls e.g.\n# os.system('bash myscript') on UNIX or os.system('myscript.bat') on Windows\n#\n# Would also be useful to be able to get the script-level status out - so\n# when the script is being written ensure that the return codes from the\n# commands are stored and the first to fail is returned.\n#\n# On UNIX these will be /bin/bash scripts - I will just assume that this works\n# since I am supporting only recent linux and OS X installations.\n#\n# Specifications:\n#\n# This will make an input .xin and output .xout file.\n\n\nclass ScriptDriver(DefaultDriver):\n def __init__(self):\n super(ScriptDriver, self).__init__()\n\n self._script_command_line = []\n self._script_standard_input = []\n\n self._script_name = self._name\n\n self._script_status = 0\n\n # this is opened by the close() method and read by output\n # from self._script_name.xout\n\n self._output_file = None\n\n def reset(self):\n DefaultDriver.reset(self)\n\n self._script_command_line = []\n self._script_standard_input = []\n\n self._script_status = 0\n\n # this is opened by the close() method and read by output\n # from self._script_name.xout\n\n self._output_file = None\n\n def set_name(self, name):\n \"\"\"Set the name to something sensible.\"\"\"\n self._script_name = name\n\n def start(self):\n \"\"\"This is pretty meaningless in terms of running things through\n scripts...\"\"\"\n\n for c in self._command_line:\n self._script_command_line.append(c)\n\n def check(self):\n \"\"\"NULL overloading of the default check method.\"\"\"\n return True\n\n def _input(self, record):\n self._script_standard_input.append(record)\n\n def _output(self):\n return self._output_file.readline()\n\n def _status(self):\n return self._script_status\n\n def close(self):\n \"\"\"This is where most of 
the work will be done - in here is\n where the script itself gets written and run, and the output\n file channel opened when the process has finished...\"\"\"\n\n script_writer(\n self._working_directory,\n self._script_name,\n self._executable,\n self._script_command_line,\n self._working_environment,\n self._script_standard_input,\n )\n\n if os.name == \"posix\":\n pipe = subprocess.Popen(\n [\"bash\", \"%s.sh\" % self._script_name], cwd=self._working_directory\n )\n\n else:\n pipe = subprocess.Popen(\n [\"%s.bat\" % self._script_name], cwd=self._working_directory, shell=True\n )\n\n self._script_status = pipe.wait()\n\n # at this stage I should read the .xstatus file to determine if the\n # process has indeed finished - though it should have done...\n\n try:\n xstatus_file = os.path.join(\n self._working_directory, \"%s.xstatus\" % self._script_name\n )\n self._script_status = int(open(xstatus_file, \"r\").read())\n except Exception:\n # this could happen on windows if the program in question\n # is a batch file...\n self._script_status = 0\n\n self._output_file = open(\n os.path.join(self._working_directory, \"%s.xout\" % self._script_name), \"r\"\n )\n\n def kill(self):\n \"\"\"This is meaningless...\"\"\"\n\n pass\n","sub_path":"modules/xia2/Driver/ScriptDriver.py","file_name":"ScriptDriver.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"343872486","text":"row_col = list(map(int, input().split()))\nrow = row_col[0]\ncol = row_col[1]\n\nstage = []\nfor i in range(row):\n stage.append(list(map(int, input().split())))\n\n\n\nl = []\nd = []\n\nfor i in range(row):\n tr = []\n for j in range(col):\n if j == 0:\n tr.append(stage[i][j])\n else:\n tr.append(tr[j-1] + stage[i][j])\n l.append(tr)\n\nfor i in range(row):\n tr = []\n for j in range(col):\n if i == 0:\n tr.append(stage[i][j])\n else:\n tr.append(d[i-1][j] + stage[i][j])\n d.append(tr)\n\nans = 0\nfor (i, row) in enumerate(stage):\n for (j, p) in enumerate(row):\n if p == 1:\n continue\n else:\n # left\n if j > 0 and l[i][j-1] > 0:\n ans = ans + 1\n #right\n if j < len(row) and l[i][len(row)-1] - l[i][j] > 0:\n ans = ans + 1\n #up\n if i > 0 and d[i-1][j] > 0:\n ans = ans + 1\n #down\n if i < len(stage) and d[len(stage)-1][j] - d[i][j] > 0:\n ans = ans + 1\n\nprint(ans)\n\n\n\n\n","sub_path":"CodeForce/181201/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"590331896","text":"from urllib import request\nfrom bs4 import BeautifulSoup\nimport re,html5lib\n\ndef getLinks(inurl):\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n req = request.Request(url=inurl, headers=headers)\n html = request.urlopen(req)\n baseObj=BeautifulSoup(html,'html5lib')\n \n return baseObj\npages=list()\nfor pageNum in range(3,7):\n indexurl=\"http://www.smzdm.com/mall/amazon_cn/youhui/p\"+str(pageNum)+\"/\"\n SMZDMlist=getLinks(indexurl).find_all('a',{'target':'_blank','class':'picBox'})\n for link in SMZDMlist:\n if 'href' in link.attrs:\n newPage=link.attrs['href']\n #print(newPage)\n pages.append(newPage)\n \nurltime=dict()\nfor url in pages:\n amazonlist=getLinks(url)\n url=amazonlist.find('a',{'itemprop':\"url\",'class':\"pic-Box\",'target':\"_blank\"})\n amazonurl=url.attrs['href']\n timet=re.compile(r'时间:[0-9-: ]+')\n timeattr=amazonlist.find('span',text=timet)\n 
time=timeattr.get_text()[3:]\n urltime[amazonurl]=time\n\n \nfor asinurl in urltime.keys():\n asinhtml=getLinks(asinurl).script.get_text()\n s=re.compile(r'\\|B[0-9a-zA-Z]{9}\\|')\n asin=re.search(s,asinhtml)\n if asin is not None:\n print('%-12s %s' %(urltime[asinurl],asin.group()[1:11]))\n \n \n \n \n \n \n","sub_path":"smzdm.py","file_name":"smzdm.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"407516717","text":"# Copyright 2014 Google Inc. All Rights Reserved.\n\"\"\"Command for setting scheduling for virtual machine instances.\"\"\"\nfrom googlecloudapis.compute.v1 import compute_v1_messages\nfrom googlecloudsdk.compute.lib import base_classes\nfrom googlecloudsdk.compute.lib import utils\n\nMIGRATION_OPTIONS = sorted(\n compute_v1_messages.Scheduling.OnHostMaintenanceValueValuesEnum\n .to_dict().keys())\n\n\nclass SetSchedulingInstances(base_classes.NoOutputAsyncMutator):\n \"\"\"Set scheduling options for Google Compute Engine virtual machine instances.\n \"\"\"\n\n @staticmethod\n def Args(parser):\n restart_group = parser.add_mutually_exclusive_group()\n\n restart_on_failure = restart_group.add_argument(\n '--restart-on-failure',\n action='store_true',\n help='If true, the instance will be restarted on failure.')\n restart_on_failure.detailed_help = \"\"\"\\\n If provided, the instances will be restarted automatically if they\n are terminated by the system. This flag is mutually exclusive with\n ``--no-restart-on-failure''. This does not affect terminations\n performed by the user.\n \"\"\"\n\n no_restart_on_failure = restart_group.add_argument(\n '--no-restart-on-failure',\n action='store_true',\n help='If true, the instance will be restarted on failure.')\n no_restart_on_failure.detailed_help = \"\"\"\\\n If provided, the instances will not be restarted automatically if they\n are terminated by the system. Mutually exclusive with\n --restart-on-failure. This does not affect terminations performed by the\n user.\n \"\"\"\n\n maintenance_policy = parser.add_argument(\n '--maintenance-policy',\n choices=MIGRATION_OPTIONS,\n type=lambda x: x.upper(),\n help=('Specifies the behavior of the instances when their host '\n 'machines undergo maintenance.'))\n maintenance_policy.detailed_help = \"\"\"\\\n Specifies the behavior of the instances when their host machines undergo\n maintenance. TERMINATE indicates that the instances should be\n terminated. MIGRATE indicates that the instances should be migrated to a\n new host. 
Choosing MIGRATE will temporarily impact the performance of\n instances during a migration event.\n \"\"\"\n\n parser.add_argument(\n 'name',\n metavar='INSTANCE',\n help='The name of the instance for which to change scheduling options.')\n\n utils.AddZoneFlag(\n parser,\n resource_type='instance',\n operation_type='set scheduling settings for')\n\n @property\n def service(self):\n return self.compute.instances\n\n @property\n def method(self):\n return 'SetScheduling'\n\n @property\n def resource_type(self):\n return 'instances'\n\n def CreateRequests(self, args):\n \"\"\"Returns a list of request necessary for setting scheduling options.\"\"\"\n instance_ref = self.CreateZonalReference(args.name, args.zone)\n\n scheduling_options = self.messages.Scheduling()\n\n if args.restart_on_failure:\n scheduling_options.automaticRestart = True\n elif args.no_restart_on_failure:\n scheduling_options.automaticRestart = False\n\n if args.maintenance_policy:\n scheduling_options.onHostMaintenance = (\n self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(\n args.maintenance_policy))\n\n request = self.messages.ComputeInstancesSetSchedulingRequest(\n instance=instance_ref.Name(),\n project=self.project,\n scheduling=scheduling_options,\n zone=instance_ref.zone)\n\n return [request]\n\n\nSetSchedulingInstances.detailed_help = {\n 'brief': ('Set scheduling options for Google Compute Engine virtual '\n 'machines'),\n 'DESCRIPTION': \"\"\"\\\n *${command}* is used to configure scheduling options for Google Compute\n Engine virtual machines.\n \"\"\",\n}\n","sub_path":"y/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/instances/set_scheduling.py","file_name":"set_scheduling.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"42290829","text":"import calendar\nimport datetime\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom wit import Wit\n\nimport config\nfrom api import chat_responses\nfrom api.chat_responses.builder import TextMessage, ResponseBuilder\nfrom app.models import Message, User\nfrom app.views.utils import add_collected_data\n\nRESPONSE_BUILDERS = {\n 'greetings': chat_responses.ResponseGreeting,\n 'thanks': chat_responses.ResponseThanks,\n 'bye': chat_responses.ResponseBye,\n 'recommend_movies': chat_responses.ResponseRecommendMovie,\n 'popular_movies': chat_responses.ResponsePopularMovie,\n 'help': chat_responses.ResponseHelp,\n 'no_intent': chat_responses.ResponseNoIntent\n}\n\n\ndef _increment_date(date, grain):\n \"\"\"\n Creates a range of dates where the starting date is the given date and the\n ending date is the given date incremented for 1 unit of the given grain\n (year, month or day).\n\n :param date: the starting date in string format 'YYYY-MM-DD'\n :param grain: the grain of increment 'year', 'month' or 'day'\n :return: a dictionary with starting and ending date\n \"\"\"\n\n result = {'from': date}\n date_from = datetime.datetime.strptime(date, '%Y-%m-%d')\n\n if grain == 'year':\n date_to = datetime.date(date_from.year + 1, date_from.month, date_from.day)\n elif grain == 'month':\n days_in_month = calendar.monthrange(date_from.year, date_from.month)[1]\n date_to = date_from + datetime.timedelta(days=days_in_month)\n else:\n date_to = date_from + datetime.timedelta(days=1)\n\n result['to'] = str(date_to)[:10] # format 'YYYY-MM-DD'\n return result\n\n\nclass ChatReply(APIView):\n \"\"\"\n View that creates bot response based 
on user request.\n \"\"\"\n\n @staticmethod\n def _parse_response(response):\n \"\"\"\n Parses data got from Wit as response to user message.\n\n :param response: Wit response\n :return: Dictionary of parsed data from Wit response\n \"\"\"\n\n entities = response['entities']\n\n if 'bye' in entities:\n return {'type': 'bye'}\n elif 'greetings' in entities:\n return {'type': 'greetings'}\n elif 'thanks' in entities:\n return {'type': 'thanks'}\n elif 'intent' in entities:\n summary = {}\n\n # parse intent\n if entities['intent'][0]['confidence'] > 0.5:\n summary['type'] = entities['intent'][0]['value']\n else:\n return None\n\n summary['genres'] = []\n summary['dates'] = {}\n\n # parse genres\n if 'genre' in entities:\n for genre in entities['genre']:\n if genre['confidence'] > 0.5:\n summary['genres'].append(genre['value'])\n\n # parse time\n if 'datetime' in entities:\n date = entities['datetime'][0]\n if date['confidence'] > 0.5:\n if date['type'] == 'value': # exact date\n summary['dates'] = _increment_date(date['value'][:10], date['grain'])\n elif date['type'] == 'interval':\n if 'from' in date:\n summary['dates']['from'] = date['from']['value'][:10]\n if 'to' in date:\n summary['dates']['to'] = date['to']['value'][:10]\n return summary\n else:\n return None\n\n def post(self, request):\n #get and save user message\n message = request.POST.get('message', '')\n user = None\n\n if request.user.is_authenticated:\n user = User.get_user(request.user)\n text_message = TextMessage(message)\n builder = ResponseBuilder()\n builder.add(text_message)\n\n message_to_save = Message(\n user=user,\n sender_type=Message.SENDER_USER,\n content=builder.get_response()\n )\n\n message_to_save.save()\n\n # get chat_responses from Wit\n client = Wit(access_token=config.WIT_ACCESS_TOKEN)\n wit_response = client.message(message)\n wit_response = self._parse_response(wit_response)\n\n try:\n response_builder = RESPONSE_BUILDERS[wit_response['type']]\n except (KeyError, TypeError):\n response_builder = RESPONSE_BUILDERS['no_intent']\n\n # get and save bot message\n bot_response = response_builder().get(wit_response)\n\n if request.user.is_authenticated:\n for bot_message in bot_response['messages']:\n if bot_message['type'] == 'movies':\n add_collected_data(bot_message['content'], user)\n\n message_to_save = Message(\n user=user,\n content=bot_response\n )\n\n message_to_save.save()\n\n return Response(bot_response)\n\n\nclass ChatLoad(APIView):\n \"\"\"\n View that returns messages for current user when presenting chat.\n \"\"\"\n\n def post(self, request):\n if request.user.is_authenticated:\n user = User.get_user(request.user)\n messages_for_user = list(Message.get_messages(user=user))\n\n for message_for_user in messages_for_user:\n if message_for_user['sender_type'] == Message.SENDER_BOT:\n for bot_message in message_for_user['content']['messages']:\n if bot_message['type'] == 'movies':\n add_collected_data(bot_message['content'], user)\n\n return Response(messages_for_user)\n else:\n return Response()\n","sub_path":"api/views/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"159638919","text":"import tensorflow as tf\nimport os\nimport shutil\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n\nsess = tf.InteractiveSession()\n\nreader = tf.TFRecordReader()\nfilename_queue = 
tf.train.string_input_producer(['/home/vishal/Desktop/sentence_cmle_pipeline-00000-of-00001.tfrecord'])\n\n_, serialized_example = reader.read(filename_queue)\n\ncontext_features = {\n\t'decoder_input_data': tf.FixedLenFeature([], dtype=tf.string),\n 'len': tf.FixedLenFeature([], dtype=tf.int64),\n 'height': tf.FixedLenFeature([], dtype=tf.int64),\n 'width': tf.FixedLenFeature([], dtype=tf.int64),\n 'depth': tf.FixedLenFeature([], dtype=tf.int64)\n}\nsequence_features = {\n\t'label': tf.FixedLenSequenceFeature([], dtype=tf.string),\n 'frames': tf.FixedLenSequenceFeature([],dtype=tf.string),\n}\n\ncontext_data, sequence_data = tf.parse_single_sequence_example(\n serialized=serialized_example,\n context_features=context_features,\n sequence_features=sequence_features)\n\n#cont = tf.decode_raw(context_data['label'], tf.float32)\n\nseqd = tf.decode_raw(sequence_data['label'], tf.float32)\n#seqd = tf.reshape(seqd, [context_data['len'], 60,60, 4])\n\n\ntf.train.start_queue_runners(sess)\n\nprint(seqd.eval().shape)\n# cv2.imwrite('image0.jpg',seqd.eval()[0][:,:,0]*255)\n# cv2.imwrite('image1.jpg',seqd.eval()[1][:,:,1]*255)\n# cv2.imwrite('image2.jpg',seqd.eval()[2][:,:,2]*255)\n# cv2.imwrite('image3.jpg',seqd.eval()[3][:,:,3]*255)","sub_path":"decode_tfrecord.py","file_name":"decode_tfrecord.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"394792198","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os.path\nimport sys\n\nimport json\n\ntry:\n import apiai\nexcept ImportError:\n sys.path.append(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)\n)\nimport apiai\n\n\n\n\n\nCLIENT_ACCESS_TOKEN = '1342cd9f9ef94af59fb45000900dc660'\n\n\n\nwhile(1):\n\n ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)\n\n request = ai.text_request()\n\n request.lang = 'en' # optional, default value equal 'en'\n\n # request.session_id = \"\"\n print(\"\\n\\nYou: \",end=\" \")\n request.query = input()\n\n print(\"\\n\\nEco Bot :\",end=\" \")\n response = request.getresponse()\n print(response)\n responsestr = response.read().decode('utf-8')\n print(responsestr)\n response_obj = json.loads(responsestr)\n\n print(response_obj[\"result\"][\"fulfillment\"][\"speech\"])\n\n","sub_path":"code samples/EcoKitchen_backend/eco backup/eco bot/ecobot.py","file_name":"ecobot.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"20741634","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for driblet.models.custom.input_functions.\"\"\"\n\nimport os\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport parameterized\nimport tensorflow.compat.v1 as tf\nfrom tensorflow_transform.saved import saved_transform_io\n\nfrom driblet.contrib.models.custom import input_functions\n\nTEST_FEATURE_ID = 'id'\nTEST_TARGET_FEATURE = 'test_target'\nTEST_FEATURE = 
'test_feature'\nTEST_FEATURE_ID_VALUE = [1]\nTEST_TARGET_FEATURE_VALUE = [1]\nTEST_FEATURE_VALUE = [1.0]\nTEST_DATA_FILE = 'test_data.tfrecord'\nTEST_BUCKET_SIZE = 2\nTEST_METADATA_SCHEMA = \"\"\"\n{\n \"feature\": [{\n \"name\": \"%s\",\n \"fixedShape\": {\n \"axis\": []\n },\n \"type\": \"INT\",\n \"domain\": {\n \"ints\": {}\n },\n \"parsingOptions\": {\n \"tfOptions\": {\n \"varLenFeature\": {}\n }\n }\n },{\n \"name\": \"%s\",\n \"fixedShape\": {\n \"axis\": []\n },\n \"type\": \"INT\",\n \"domain\": {\n \"ints\": {}\n },\n \"parsingOptions\": {\n \"tfOptions\": {\n \"fixedLenFeature\": {}\n }\n }\n },\n {\n \"name\": \"%s\",\n \"fixedShape\": {\n \"axis\": []\n },\n \"type\": \"FLOAT\",\n \"domain\": {\n \"floats\": {}\n },\n \"parsingOptions\": {\n \"tfOptions\": {\n \"fixedLenFeature\": {}\n }\n }\n }]\n}\n\"\"\" % (TEST_FEATURE_ID, TEST_TARGET_FEATURE, TEST_FEATURE)\n\n\ndef _create_test_data():\n \"\"\"Creates serialized test data in tf.Example format.\n\n Returns:\n Serialized tf.Example proto.\n \"\"\"\n feature = {\n TEST_FEATURE_ID:\n tf.train.Feature(\n int64_list=tf.train.Int64List(value=TEST_FEATURE_ID_VALUE)),\n TEST_TARGET_FEATURE:\n tf.train.Feature(\n int64_list=tf.train.Int64List(value=TEST_TARGET_FEATURE_VALUE)),\n TEST_FEATURE:\n tf.train.Feature(\n float_list=tf.train.FloatList(value=TEST_FEATURE_VALUE))\n }\n features = tf.train.Features(feature=feature)\n example = tf.train.Example(features=features)\n return example.SerializeToString()\n\n\ndef _write_test_data_to_disk(testfile):\n \"\"\"Writes test data in tf.Example format to a temporary directory.\n\n Args:\n testfile: Path to test file.\n \"\"\"\n test_data = _create_test_data()\n with tf.python_io.TFRecordWriter(testfile) as writer:\n writer.write(test_data)\n\n\ndef _write_schema_to_disk(tempdir):\n \"\"\"Writes test data schema to temporary a directory.\n\n Args:\n tempdir: Path to temporary directory.\n \"\"\"\n test_transform_dir = os.path.join(tempdir, 'transformed_metadata', 'v1-json')\n test_schema = os.path.join(test_transform_dir, 'schema.json')\n tf.gfile.MakeDirs(test_transform_dir)\n with open(test_schema, 'w') as schema_file:\n schema_file.write(TEST_METADATA_SCHEMA)\n\n\ndef _create_and_write_test_saved_model(tempdir):\n \"\"\"Creates test saved model and writes it to disk.\n\n This test model is used by `example_serving_receiver_fn` to apply\n transformation to test data.\n\n Args:\n tempdir: Path to temporary directory.\n \"\"\"\n export_path = os.path.join(tempdir, 'transform_fn')\n with tf.Graph().as_default():\n with tf.Session().as_default() as session:\n input_placeholder = tf.placeholder(tf.float32, shape=[1])\n output_value = (input_placeholder - 1.0) / 6.0\n input_dict = {\n TEST_FEATURE_ID: tf.placeholder(tf.int64, shape=[1]),\n TEST_FEATURE: input_placeholder\n }\n output_dict = {\n TEST_FEATURE_ID:\n tf.SparseTensor(indices=[[1]], values=[1], dense_shape=[1]),\n 'test_scaled_feature':\n output_value\n }\n saved_transform_io.write_saved_transform_from_session(\n session, input_dict, output_dict, export_path)\n\n\nclass InputFunctionsTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(InputFunctionsTest, cls).setUpClass()\n cls.tempdir = tempfile.mkdtemp()\n\n @classmethod\n def tearDownClass(cls):\n super(InputFunctionsTest, cls).tearDownClass()\n shutil.rmtree(cls.tempdir)\n\n def test_convert_sparse_to_dense_provides_dense_tensor(self):\n \"\"\"Tests that SparseTensor is correctly converted to dense Tensor.\"\"\"\n input_tensor = tf.SparseTensor(indices=[[0]], 
values=[1], dense_shape=[1])\n\n actual = input_functions.convert_sparse_to_dense(input_tensor)\n\n self.assertShapeEqual(np.array([1]), actual)\n self.assertEqual(actual.dtype, tf.int32)\n self.assertIsInstance(actual, tf.Tensor)\n\n @parameterized.parameterized.expand([\n ('CombinedRegressor'),\n ('CombinedClassifier'),\n ])\n def test_create_feature_columns_creates_transformer_keys(\n self, estimator_type):\n test_categorical_features = ['c1']\n test_numeric_features = ['n1']\n expected_keys = ['tr_c1', 'tr_n1']\n\n actual_keys = []\n actual_linear_features, actual_dense_features = (\n input_functions.create_feature_columns(test_categorical_features,\n test_numeric_features,\n TEST_BUCKET_SIZE,\n estimator_type))\n\n for linear_feature, dense_feature in zip(actual_linear_features,\n actual_dense_features):\n actual_keys.append(linear_feature.key)\n actual_keys.append(dense_feature.key)\n # TODO(zmtbnv): Currently, feature_column module is not visible to\n # this package (http://go/jizas). Implement assertIsInstance to verify\n # if features are instances of feature_column._IdentityCategoricalColumn\n # and feature_column._NumericColumn.\n self.assertListEqual(actual_keys, expected_keys)\n\n @parameterized.parameterized.expand([\n ('Regressor'),\n ('Classifier'),\n ])\n def test_create_feature_columns_provides_embedding_dimension(\n self, estimator_type):\n test_categorical_features = ['c1']\n test_numeric_features = ['n1']\n\n expected_dimension = int(6 * TEST_BUCKET_SIZE**0.25)\n actual_linear_features, _ = (\n input_functions.create_feature_columns(test_categorical_features,\n test_numeric_features,\n TEST_BUCKET_SIZE,\n estimator_type))\n\n for linear_feature in actual_linear_features:\n self.assertEqual(linear_feature.dimension, expected_dimension)\n\n @parameterized.parameterized.expand([\n ('Regressor', 5, 0),\n ('Classifier', 5, 0),\n ('CombinedRegressor', 3, 2),\n ('CombinedClassifier', 3, 2),\n ])\n def test_create_feature_columns_provides_correct_feature_lists(\n self, estimator_type, expected_dense_feature_length,\n expected_linear_feature_length):\n test_categorical_features = ['c1', 'c2']\n test_numeric_features = ['n1', 'n2', 'n3']\n\n actual_linear_features, actual_dense_features = (\n input_functions.create_feature_columns(test_categorical_features,\n test_numeric_features,\n TEST_BUCKET_SIZE,\n estimator_type))\n self.assertEqual(len(actual_dense_features), expected_dense_feature_length)\n self.assertEqual(\n len(actual_linear_features), expected_linear_feature_length)\n\n def test_get_input_fn_povides_correct_features_target_values(self):\n testfile = os.path.join(self.tempdir, TEST_DATA_FILE)\n _write_test_data_to_disk(testfile)\n\n input_fn = input_functions.get_input_fn(\n filename_patterns=[testfile],\n tf_transform_dir=self.tempdir,\n target_feature=TEST_TARGET_FEATURE,\n forward_features=[TEST_FEATURE_ID],\n num_epochs=1,\n batch_size=1)\n features, target = input_fn()\n\n with self.session() as session:\n features, target = session.run((features, target))\n self.assertEqual(target, TEST_TARGET_FEATURE_VALUE)\n self.assertEqual(features[TEST_FEATURE], TEST_FEATURE_VALUE)\n self.assertDTypeEqual(features[TEST_FEATURE], np.float32)\n self.assertDTypeEqual(target, np.int64)\n\n def test_example_serving_receiver_fn(self):\n _write_schema_to_disk(self.tempdir)\n _create_and_write_test_saved_model(self.tempdir)\n raw_feature_spec = {TEST_FEATURE: tf.FixedLenFeature([], tf.float32)}\n expected_feature_keys = [TEST_FEATURE_ID, 'test_scaled_feature']\n\n actual = 
input_functions.example_serving_receiver_fn(\n self.tempdir, raw_feature_spec, TEST_TARGET_FEATURE, [TEST_FEATURE_ID])\n actual_keys = sorted(actual.features.keys())\n\n self.assertIsInstance(actual, tf.estimator.export.ServingInputReceiver)\n self.assertListEqual(actual_keys, expected_feature_keys)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"tests/contrib/models/custom/input_functions_test.py","file_name":"input_functions_test.py","file_ext":"py","file_size_in_byte":9210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"122012281","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n# @author: peidehao\nimport datetime\nimport pandas as pd\n\ndata = pd.DataFrame(pd.read_csv('rb主力连续.csv',header=0,encoding='utf-8'))\nstart = '2020-01-02 10:01:00'\nstart = datetime.datetime.strptime(start,\"%Y-%m-%d %H:%M:%S\")\nfinal = data['时间'].iloc[-1]\nfinal = datetime.datetime.strptime(final,\"%Y-%m-%d %H:%M:%S\")\nend = start+datetime.timedelta(minutes=15)\ncols = []\nperiod = (final-start).days\nfor d in range(0,period+1):\n for i in range(len(data)):\n if start<=datetime.datetime.strptime(data['时间'].iloc[i],\"%Y-%m-%d %H:%M:%S\")> 1, -1):\n for k in range((i >> 1) + 1, min(i, n) + 1):\n #print('k=', k, prime_numbers, factorization_dict)\n is_k_a_prime_num = True\n for prime_num in prime_numbers:\n quotient, remainder = divmod(k, prime_num)\n # k가 소수가 아닌 경우\n if remainder == 0:\n is_k_a_prime_num = False\n prime_num_as_prime_factor_for_k = ()\n prime_num_index = -1\n\n is_prime_num_a_divisor = False\n # k의 약수인 prime_num가 quotient의 약수인지 check\n for l, (prime_factor_in_quotient, how_many) in enumerate(factorization_dict[quotient][0]):\n\n # k의 약수인 prime_num가 quotient의 약수인 경우\n if prime_num == prime_factor_in_quotient:\n prime_num_index = l\n prime_num_as_prime_factor_for_k = (prime_num, how_many + 1)\n is_prime_num_a_divisor = True\n break\n\n # k의 약수인 prime_num가 quotient의 약수인 경우\n if is_prime_num_a_divisor:\n tmp_list = factorization_dict[quotient][0].copy()\n tmp_list[prime_num_index] = prime_num_as_prime_factor_for_k\n factorization_dict[k] = (tmp_list, factorization_dict[quotient][1])\n\n # k의 약수인 prime_num가 quotient의 약수는 아닌 경우, prime_num을 새로운 약수로 추가해야 함\n else:\n # 가독성을 위해서 한번 더 복사하는데 괜찮은건가?\n divisors = factorization_dict[quotient][1]\n insertion_idx = bisect(divisors, prime_num)\n prime_factorized_dict = factorization_dict[quotient][0][:insertion_idx] + [(prime_num, 1)] + factorization_dict[quotient][0][insertion_idx:]\n divisors = divisors[:insertion_idx] + [prime_num] + divisors[insertion_idx:]\n factorization_dict[k] = (prime_factorized_dict, divisors)\n\n break\n\n # k가 소수인 경우\n if is_k_a_prime_num:\n factorization_dict[k] = ([(k, 1)], [k])\n prime_numbers.append(k)\n \n return prime_numbers, factorization_dict\n \n\ndef integer_factorization(n):\n if n > 1:\n prime_numbers = [2]\n factorization_dict = {2:([(2, 1)], [2])}\n\n # 2 ^ (j-1) < n <= 2 ^ j\n # i = 2 ^ j\n j = 1\n i = 2\n powers_of_2 = [1, 2]\n while True:\n if i >= n:\n break\n i = i << 1\n j += 1\n powers_of_2.append(i)\n \n #print('i, j=', i, j)\n for k in range(2, j + 1):\n prime_numbers, factorization_dict = integer_factorization_during_2_j(n, powers_of_2[k], \\\n prime_numbers, \\\n factorization_dict)\n else:\n prime_numbers = []\n factorization_dict = {}\n\n return prime_numbers, factorization_dict\n\nif __name__ == '__main__':\n n = 100\n prime_numbers, factorization_dict= integer_factorization(n)\n print('Prime Numbers until ' + str(n) + ':', 
prime_numbers)\n print('Integer Factorization from 2 to ' + str(n) + ' are below.')\n for number, factorized in factorization_dict.items():\n print(number, factorized)\n\n \n","sub_path":"math/number_theory/prime_number_and_integer_factorization.py","file_name":"prime_number_and_integer_factorization.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"66461245","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -\n\nfrom flask import Flask\nfrom flask import render_template, request, redirect, url_for\nfrom datetime import datetime\n\n# Importe la base de données et les fonctions\nfrom utils import *\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n### Forum \n\n@app.route('/add', methods=['GET'])\ndef faire_addition():\n return \"%s\" % addition(1, 2)\n\n@app.route('/forum', methods=['GET'])\ndef forum():\n sujets = select_database()\n \n nombre_sujet = count_database() # ???\n \n dernier_sujet_date = dernier_sujet_database()\n \n auteur_actif = auteur_actif_database()\n\n return render_template('forum.html', sujets=sujets, nombre_sujet=nombre_sujet, dernier_sujet_date=dernier_sujet_date, auteur_actif=auteur_actif)\n\n@app.route('/formulaire', methods=['GET'])\ndef formulaire():\n return render_template('formulaire.html')\n\n@app.route('/traitement', methods=['POST'])\ndef traitement():\n auteur = request.form['auteur']\n sujet = request.form['sujet']\n date_sujet = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n formulaire_auteur_database(\n sujet=sujet, \n auteur=auteur, \n date_sujet=date_sujet)\n\n return redirect(url_for('forum'))\n\n@app.route('/suppression', methods=['POST'])\ndef suppression():\n id_suppression = request.form['id']\n suppression_database(id_suppression)\n\n return redirect(url_for('forum'))\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"4_database/webapp/webapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"359105420","text":"import sys\n\nfrom PyQt5 import QtWidgets, QtCore\n\nfrom Enums.DialogEnums import DialogEnums\nfrom gui.Ui_DialogAlert import Ui_Dialog\n\n\nclass DialogAlertView(QtWidgets.QMainWindow, Ui_Dialog):\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.assignButtons()\n\n def assignButtons(self):\n self.dlgBtnYesNo.accepted.connect(self.accept)\n self.dlgBtnYesNo.rejected.connect(self.reject)\n\n def show_dialog(self, s: bool, popup_enum: int):\n if popup_enum == DialogEnums.save_student.value:\n self.lblDialogAlert.setText(\"Student saved successfully!\")\n elif popup_enum == DialogEnums.edit_student.value:\n self.lblDialogAlert.setText(\"Student updated successfully!\")\n elif popup_enum == DialogEnums.delete_student_success.value:\n self.lblDialogAlert.setText(\"Student deleted successfully!\")\n elif popup_enum == DialogEnums.delete_student_alert.value:\n self.lblDialogAlert.setText(\"Are you sure you wish to delete the student?\")\n self.show()\n\n def reject(self, s=None):\n self.hide()\n\n def accept(self, s=None):\n self.hide()\n","sub_path":"Views/DialogAlertView.py","file_name":"DialogAlertView.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"473612534","text":"from __future__ import 
print_function\nimport random\nimport vlc\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(8, GPIO.OUT) #regular\nGPIO.setup(10, GPIO.OUT) #turbo\n\ndef reg_off():\n GPIO.output(8, GPIO.LOW)\n\ndef reg_on():\n GPIO.output(8, GPIO.HIGH)\n\n\ndef turbo_off():\n GPIO.output(10, GPIO.LOW)\n\ndef turbo_on():\n GPIO.output(10, GPIO.HIGH)\n\n\n\ndef rand(range):\n rand = (random.randint(0, range)) #generate random number for rands\n return rand\n\ndef timer(choice):\n if choice == 0:\n t_end = time.time() + 60 * 5 #set t_end for 5 minutes from now; time since unixepoch\n elif choice == 1:\n t_end = time.time() + 60 * 10\n elif choice == 2:\n t_end = time.time() + 60 * 15\n elif choice == 3:\n t_end = time.time() + 60 * 20\n return t_end\n\nreg_off() #turbo GPIO off\nturbo_off() #turbo GPIO off\n\nrand_on_off = rand(1)\nif rand_on_off == 0:\n on_off = 'OFF'\nelse:\n on_off = 'ON'\nprint(\"RANDOM_ON_OFF=:\", on_off)\nwhile(True):\n if on_off == 'ON':\n on_off = 'OFF'\n reg_on() #regular GPIO on\n choice = rand(3)\n timer_end = timer(choice)\n turbo_check = 'OFF'\n p = vlc.MediaPlayer(\"/home/pi/Red.mp3\")\n p.play()\n\n\n while(time.time() < timer_end): #loop while current time is less than timer_end\n\n if turbo_check == 'OFF':\n turbo = rand(10)\n if turbo == 0:\n turbo_check = 'ON'\n reg_off() #regular GPIO off\n turbo_on() #turbo GPIO on\n turbotime = time.time() + 60\n timer_end = timer_end - 60\n p = vlc.MediaPlayer(\"/home/pi/nocigar.mp3\")\n p.play()\n while(time.time() < turbotime):\n print(\"Turbo ON\")\n print(\"Turbotime=\", turbotime)\n print(\"Current= \", time.time())\n time.sleep( 1 )\n turbo_off() #turbo GPIO off\n reg_on() #regular GPIO on\n print(\"Turbocheck: \", turbo_check)\n print(\"Choice: \", choice)\n print(\"Zeit: \", time.time())\n print(\"Zeitende: \", timer_end)\n print(\"\\n\")\n time.sleep( 1 )\n else:\n on_off = 'ON'\n reg_off() #regular GPIO off\n choice = rand( 3 )\n timer_end = timer(choice)\n while(time.time() < timer_end):\n print(\"Choice: \", choice)\n print(\"Zeit: \", time.time())\n print(\"Zeitende: \", timer_end)\n print(\"\\n\")\n time.sleep( 1 )\n\n","sub_path":"lanoween_lampe_pi.py","file_name":"lanoween_lampe_pi.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"510442337","text":"with open('../data/hightemp.txt', 'r') as data_in:\n with open('../data/col1.txt', 'w') as data_out1:\n with open('../data/col2.txt', 'w') as data_out2:\n for line in data_in:\n line = line.strip().replace('\\t', ' ')\n data_out1.write(line.split()[0] + '\\n')\n data_out2.write(line.split()[1] + '\\n')\nprint('exit')\n\n# cat ../data/hightemp.txt | cut -f1 # # col1.txt #\n# cat ../data/hightemp.txt | cut -f2 # # col2.txt #\n","sub_path":"Shi-ma/chapter02/knock12.py","file_name":"knock12.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"322508583","text":"from sklearn import datasets\n# import data\niris = datasets.load_iris()\nX = iris.data[:, :]\ny = iris.target\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 456)\n\n# Fitting SVM to the Training set\nfrom sklearn.svm import SVC\nclassifier_linear = SVC(kernel = 'linear', random_state=456)\nclassifier_linear.fit(X_train, y_train)\n\n# Predicting 
the Test set results\ny_pred_linear = classifier_linear.predict(X_test)\n\n#Accuracy of linear model\nfrom sklearn.metrics import accuracy_score\nprint(\"Accuracy of Linear Kernel\")\nprint(accuracy_score(y_test, y_pred_linear))\n\n# Fitting SVM to the Training set\nclassifier_RBF = SVC(kernel = 'rbf', random_state = 456, C=15)\nclassifier_RBF.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred_rbf = classifier_RBF.predict(X_test)\n\n#Accuracy of RBF\nprint(\"Accuracy of rbf Kernel\")\nprint(accuracy_score(y_test, y_pred_rbf))\n","sub_path":"LABASSIGN-3/Source code/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"185742772","text":"\n# TO-DO: Complete the selection_sort() function below\n\n\"\"\"\nStart with current index = 0\n\nFor all indices EXCEPT the last index:\n\na. Loop through elements on right-hand-side of current index and find the smallest element\n\nb. Swap the element at current index with the smallest element found in above loop\n\"\"\"\n\nunsorted = [14, 4, 11, 8, 7, 1, 9, 19, 13,\n 17, 5, 3, 16, 18, 2, 20, 10, 15, 6, 12]\n\n\n# O(n - 1 * O(.5n)) -> O(n^2)\ndef selection_sort(arr):\n # loop through n-1 elements\n for i in range(len(arr) - 1): # O(n - 1)\n cur_index = i # O(1)\n smallest_index = cur_index # O(1)\n # TO-DO: find next smallest element\n # a. Loop through elements on right-hand-side of current index and find the smallest element\n for j in range(cur_index, len(arr)): # O(n - i) -> average would be O(.5n) -> O(n)\n if arr[j] < arr[smallest_index]: # O(1)\n smallest_index = j # O(1)\n # TO-DO: swap\n # b. Swap the element at current index with the smallest element found in above loop\n arr[i], arr[smallest_index] = arr[smallest_index], arr[i] # O(1)\n print(arr)\n return arr\n\n\nselection_list = unsorted.copy()\nselection_sort(selection_list)\n\n\"\"\"\nLoop through your array\nCompare each element to its neighbor\nIf elements in wrong position (relative to each other, swap them)\nIf no swaps performed, stop. Else, go back to the element at index 0 and repeat step 1.\n\"\"\"\n\n# O(n) - best case with sorted array\n# Average and worst - O(n^2)\n\n\ndef bubble_sort(arr):\n swapped = True\n while swapped:\n # If no swaps performed, stop. 
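q2.py above imports `train_test_split` from `sklearn.cross_validation`, a module that was deprecated in scikit-learn 0.18 and removed in 0.20 in favour of `sklearn.model_selection`. A condensed re-run of the same iris experiment against the current import path (I leave `C` at its default instead of the original's `C=15` for the RBF kernel):

```python
from sklearn import datasets
from sklearn.model_selection import train_test_split  # modern home of train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

# Same split and seeds as q2.py above.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=456)

for kernel in ('linear', 'rbf'):
    clf = SVC(kernel=kernel, random_state=456)
    clf.fit(X_train, y_train)
    print(kernel, accuracy_score(y_test, clf.predict(X_test)))
```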
Else, go back to the element at index 0 and repeat step 1.\n swapped = False\n # Loop through your array\n # go to length - 1 because we're comparing everything to the element in front\n # if we don't we'll get an IndexError\n for j in range(len(arr) - 1):\n # Compare each element to its neighbor\n if arr[j] > arr[j + 1]:\n # If elements in wrong position (relative to each other, swap them)\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n swapped = True\n print(arr)\n return arr\n\n\nbubble_list = unsorted.copy()\nbubble_sort(bubble_list)\n\n\n# O(n + k) - we must know the maximum\ndef count_sort(arr, maximum=-1):\n count_arr = [0] * maximum\n print(count_arr)\n for i in arr:\n # increment the count of the number\n count_arr[i - 1] += 1\n print(count_arr)\n # incrementer\n j = 0\n for i in range(len(count_arr)):\n while count_arr[i] > 0:\n # set the current index of j to be the number\n arr[j] = i + 1\n # increment j\n j += 1\n # decrement the count\n count_arr[i] -= 1\n print(arr)\n return arr\n\n\ncount_list = unsorted.copy()\ncount_sort(count_list, 20)\n","sub_path":"sg3/algorithms/sorting/day-2/iterative_sorts.py","file_name":"iterative_sorts.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"338608132","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nimport unittest,time\nfrom time import sleep\nfrom web_test.data.The_login_data import the_Login\nfrom web_test.public.Denglu import Deng_lu\nfrom web_test.data.search_data import Search\nimport re\nfrom web_test.public.Screen_shot import Screenshot\nfrom selenium.webdriver import ActionChains\n\nclass Xearch_e(unittest.TestCase):\n u\"\"\"舆情监测\"\"\"\n driver = webdriver.Firefox()\n def setUp(self):\n dr = self.driver\n dr.get(\"http://t.yqboom.com\")\n #\n # @unittest.skip('test_search_1')\n def test_5search(self):\n u\"\"\"小搜索,使用关键字搜索后查看:时间范围“近60天”是否显示视频数据\"\"\"\n deng_lu = Deng_lu(self.driver)\n deng_lu.denglu()\n sleep(10)\n self.driver.implicitly_wait(30)\n # 鼠标悬浮操作,鼠标悬浮到“舆情监测”\n ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[3]/span[2]/span/span')).perform()\n time.sleep(2)\n # 鼠标悬浮操作,鼠标悬浮到“自建方案”\n ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[3]/span[2]/ul/li[2]')).perform()\n # 点击元素”哈哈“\n self.driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[3]/span[2]/ul/li[2]/ul/div[1]/li/div/span[3]').click()\n # 获取到“抖音视频数”并截取数字\n self.driver.implicitly_wait(30)\n self.driver.find_element_by_xpath(Search['x近60天'][0]).click()\n self.driver.implicitly_wait(30)\n doy = self.driver.find_element_by_xpath(Search['x抖音视频数'][0]).text\n print(re.findall(r\"\\d+\\.?\\d*\", doy))\n sleep(5)\n self.driver.implicitly_wait(30)\n weib = self.driver.find_element_by_xpath(Search['x微博视频数'][0]).text\n print(re.findall(r\"\\d+\\.?\\d*\", weib))\n sleep(5)\n self.driver.implicitly_wait(30)\n tt = self.driver.find_element_by_xpath(Search['x头条视频数'][0]).text\n print(re.findall(r\"\\d+\\.?\\d*\", tt))\n sleep(5)\n self.driver.implicitly_wait(30)\n kuais = self.driver.find_element_by_xpath(Search['x快手视频数'][0]).text\n print(re.findall(r\"\\d+\\.?\\d*\", kuais))\n sleep(5)\n self.driver.implicitly_wait(30)\n try:\n self.assertTrue(re.findall(r\"\\d+\\.?\\d*\", doy)>['1'],msg='抖音视频数小于1,没有数据')\n self.assertTrue(re.findall(r\"\\d+\\.?\\d*\", weib) > ['1'], msg='微博视频数小于1,没有数据')\n 
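The `count_sort` above only works for values from 1 to a caller-supplied `maximum` (it indexes `count_arr[i - 1]` and writes back `i + 1`), so zeros and negative keys wrap around via Python's negative indexing and silently corrupt the counts. A variant that derives the key range itself — a sketch, not part of the original exercise:

```python
def counting_sort(arr):
    """Counting sort for any bounded integers, offsetting keys by min(arr)."""
    if not arr:
        return arr
    lo, hi = min(arr), max(arr)
    counts = [0] * (hi - lo + 1)
    for value in arr:                        # O(n) counting pass
        counts[value - lo] += 1
    out = []
    for offset, count in enumerate(counts):  # O(n + k) write-out pass
        out.extend([lo + offset] * count)
    return out

print(counting_sort([3, -1, 0, 3, 2]))  # [-1, 0, 2, 3, 3]
```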
self.assertTrue(re.findall(r\"\\d+\\.?\\d*\", tt) > ['1'], msg='头条视频数小于1,没有数据')\n self.assertTrue(re.findall(r\"\\d+\\.?\\d*\", kuais) > ['1'], msg='快手视频数小于1,没有数据')\n except AssertionError as e:\n # 截图保存\n # 新创建路径“.”表示当前整个.py文件的路径所在的位置,“\\\\”路径分割符,其中的一个是“\\”表示转义字符\n current_time = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime(time.time()))\n pic_path = (\"D:\\\\kuangjia\\\\screenshots\\\\\" + current_time + \".png\")\n print(pic_path)\n self.driver.save_screenshot(pic_path)\n print(u\"全部视频数小于1,没有数据\")\n def tearDown(self):\n self.driver.quit()\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"web_test/Automation/sixty.py","file_name":"sixty.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"465653396","text":"# coding=utf-8\nfrom datetime import datetime, timedelta\nimport time\nimport os,sys\n\nif __name__ == \"__main__\": \n app_dir = os.path.dirname(__file__) + \"./\"\n sys.path.append (app_dir )\n\n\nimport webapi_conf as Config\n\n\nSECONDS_PER_MINUTE = 60\nSECONDS_PER_HOUR = 60 * SECONDS_PER_MINUTE\nSECONDS_PER_DAY = 24*SECONDS_PER_HOUR\nMINUTES_PER_DAY = 24*60\nHOURS_PER_DAY = 24\n\n\ndef timestamp_2_datetime(ts):\n return datetime.fromtimestamp(ts)\n\ndef datetime_2_timestamp(dt):\n return time.mktime(dt.timetuple())\n\ndef timestamp_2_datetimestring(ts, fmt=None):\n try:\n dt = ''\n if fmt == None:\n fmt = Config.DateFormat_SaveOrder_Input\n\n dt = datetime.strftime( timestamp_2_datetime (ts), fmt)\n\n except Exception as e:\n sys.stdout.write(\"%s: timestamp_2_datetimestring Exception %s \\n\" % (datetime.strftime(datetime.now(), Config.LOG_DATETIME_FORMAT), str(e)))\n\n return dt\n\n\ndef datetimestring_2_timestamp(dts, fmt=None):\n try:\n dt=0\n\n if len(dts) > 0:\n dt = datetimestring_2_tuple(dts, fmt=fmt)\n\n if dt:\n return datetime_2_timestamp(dt)\n\n except Exception as e:\n sys.stdout.write(\"%s: datetimestring_2_timestamp Exception %s \\n\" % (datetime.strftime(datetime.now(), Config.LOG_DATETIME_FORMAT), str(e)))\n\n return dt\n\n\n\ndef datetimestring_2_tuple(dts, fmt=None):\n try:\n if fmt == None:\n fmt = Config.DateFormat_SaveOrder_Input\n\n dt = datetime.strptime( dts, fmt)\n\n except Exception as e:\n sys.stdout.write(\"%s: datetimestring_2_tuple Exception %s \\n\" % (datetime.strftime(datetime.now(), Config.LOG_DATETIME_FORMAT), str(e)))\n\n return dt\n\ndef convert_2_datetimestring(timestamp=0, hours=0, minutes=0, date_fmt= Config.DateFormat_SaveOrder_Input):\n try:\n t_end =''\n time_end = timestamp_2_datetime (timestamp) + timedelta(hours=hours, minutes=minutes)\n t_end = datetime.strftime( time_end, date_fmt) \n\n except Exception as e:\n sys.stdout.write(\"%s: get_datetime_string Exception %s \\n\" % (datetime.strftime(datetime.now(), Config.LOG_DATETIME_FORMAT), str(e)))\n\n return t_end\n\nif __name__ == \"__main__\": \n tm = 1486494296\n fmt = '%Y-%m-%d %H:%M:%S'\n \n print ('ts = ' , tm)\n\n x = timestamp_2_datetimestring(tm, fmt=fmt)\n print ( 'x =', x)\n\n y = datetimestring_2_tuple(x, fmt=fmt)\n print ( 'y=', y )\n \n w = datetimestring_2_timestamp(x, fmt=fmt)\n print ( 'w=', w )\n\n x = \"2017-02-08 12:00:00\"\n w = datetimestring_2_timestamp(x, fmt=fmt)\n print ( 'w=', w )\n","sub_path":"webapi/date_utils.py","file_name":"date_utils.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"380486517","text":"import json\nimport re\nimport 
time\nimport subprocess\n\ndef missingnointhearray():\n # input = [4,3,5,1,2,7]\n # print (\"input the array with missing no\")\n input1 = input(\"input the array with missing no\")\n #input =input1\n sum_of_elements = sum(input1)\n print (sum_of_elements)\n n=len(input1)+1\n actual_sum = (n*(n+1))/2\n print(\"hi\")\n print (actual_sum)\n missing_no = actual_sum-sum_of_elements\n print(missing_no)\n return missing_no\n\nif __name__ == \"__main__\":\n missingnointhearray()\n","sub_path":"missingnointhearray.py","file_name":"missingnointhearray.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"109195253","text":"#!/usr/bin/python3\n# 3/28/2019: Skeetzo\nimport re\nimport random\nimport os\nimport shutil\nimport json\nimport sys\nimport pathlib\nimport chromedriver_binary\nimport time\nfrom datetime import datetime, timedelta\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.action_chains import ActionChains\n##\nfrom .colorize import colorize\nfrom .settings import Settings\nfrom .element import Element\nfrom .profile import Profile\n\n###################\n##### Globals #####\n###################\nBROWSER = None\nLOGGED_IN = False\n\n# Urls\nONLYFANS_HOME_URL = 'https://onlyfans.com'\nONLYFANS_MESSAGES_URL = \"/my/chats/\"\nONLYFANS_NEW_MESSAGE_URL = \"/my/chats/send\"\nONLYFANS_CHAT_URL = \"/my/chats/chat\"\nONLYFANS_SETTINGS_URL = \"/my/settings\"\nONLYFANS_USERS_ACTIVE_URL = \"/my/subscribers/active\"\nONLYFANS_USERS_FOLLOWING_URL = \"/my/subscriptions/active\"\n#\nLOGIN_FORM = \"b-loginreg__form\"\nSEND_BUTTON_XPATH = \"//button[@type='submit' and @class='g-btn m-rounded']\"\nSEND_BUTTON_CLASS = \"g-btn.m-rounded\"\nSEND_BUTTON_CLASS2 = \"button.g-btn.m-rounded\"\n# Login References\nLIVE_BUTTON_CLASS = \"b-make-post__streaming-link\"\nTWITTER_LOGIN0 = \"//a[@class='g-btn m-rounded m-flex m-lg']\"\nTWITTER_LOGIN1 = \"//a[@class='g-btn m-rounded m-flex m-lg btn-twitter']\"\nTWITTER_LOGIN2 = \"//a[@class='btn btn-default btn-block btn-lg btn-twitter']\"\nTWITTER_LOGIN3 = \"//a[@class='g-btn m-rounded m-flex m-lg m-with-icon']\"\nUSERNAME_XPATH = \"//input[@id='username_or_email']\"\nPASSWORD_XPATH = \"//input[@id='password']\"\n# IDs and xpaths not yet required fancy element sorting\nONLYFANS_POST_TEXT_ID = \"new_post_text_input\"\nONLYFANS_MESSAGES = \"b-chat__message__text\"\nMESSAGE_CONFIRM = \"g-btn.m-rounded.b-chat__btn-submit\"\nDISCOUNT_INPUT = \"form-control.b-fans__trial__select\"\nONLYFANS_TWEET = \"//label[@for='new_post_tweet_send']\"\nONLYFANS_PRICE2 = \"button.b-chat__btn-set-price\"\nPOLL_INPUT_XPATH = \"//input[@class='form-control']\"\nREMEMBERME_CHECKBOX_XPATH = \"//input[@id='remember']\"\nDISCOUNT_USER_BUTTONS = \"g-btn.m-rounded.m-border.m-sm\"\n\ndef print_same_line(text):\n sys.stdout.write('\\r')\n sys.stdout.flush()\n sys.stdout.write(text)\n sys.stdout.flush()\n\nclass Driver:\n\n def __init__():\n pass\n # BROWSER = None\n\n @staticmethod\n def auth():\n spawned = Driver.check_spawn()\n if not spawned: 
return False\n logged_in = False\n global LOGGED_IN\n if not LOGGED_IN or LOGGED_IN == None:\n logged_in = Driver.login()\n else: logged_in = True\n if logged_in == False: print(\"Error: Failure to Login\")\n LOGGED_IN = logged_in\n return logged_in\n\n @staticmethod\n def check_spawn():\n global BROWSER\n spawned = False\n if not BROWSER or BROWSER == None:\n spawned = Driver.spawn_browser()\n else: spawned = True\n if spawned == False: print(\"Error: Failure to Spawn Browser\")\n return spawned\n\n ####################\n ##### Discount #####\n ####################\n\n @staticmethod\n def discount_user(discount=None):\n if not discount:\n print(\"Error: Missing Discount\")\n return False\n auth_ = Driver.auth()\n if not auth_: return False\n discount.get()\n months = discount.months\n amount = discount.amount\n user = discount.username\n if int(months) > int(Settings.get_discount_max_months()):\n print(\"Warning: Months Too High, Max -> {} days\".format(Settings.get_discount_max_months()))\n months = Settings.get_discount_max_months()\n elif int(months) < int(Settings.get_discount_min_months()):\n print(\"Warning: Months Too Low, Min -> {} days\".format(Settings.get_discount_min_months()))\n months = Settings.get_discount_min_months()\n if int(amount) > int(Settings.get_discount_max_amount()):\n print(\"Warning: Amount Too High, Max -> {} days\".format(Settings.get_discount_max_months()))\n amount = Settings.get_discount_max_amount()\n elif int(amount) < int(Settings.get_discount_min_amount()):\n print(\"Warning: Amount Too Low, Min -> {} days\".format(Settings.get_discount_min_months()))\n amount = Settings.get_discount_min_amount()\n try:\n print(\"Discounting User: {}\".format(user))\n Driver.go_to_page(ONLYFANS_USERS_ACTIVE_URL)\n end_ = True\n count = 0\n while end_:\n elements = BROWSER.find_elements_by_class_name(\"m-fans\")\n for ele in elements:\n username = ele.find_element_by_class_name(\"g-user-username\").get_attribute(\"innerHTML\").strip()\n if str(user) == str(username): \n BROWSER.execute_script(\"arguments[0].scrollIntoView();\", ele)\n end_ = False\n if not end_: continue\n if len(elements) == int(count): break\n print_same_line(\"({}/{}) scrolling...\".format(count, len(elements)))\n count = len(elements)\n BROWSER.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n print()\n users = Driver.find_elements_by_name(\"discountUsers\")\n if int(len(users)) == 0:\n print(\"Error: Missing Users\")\n return False\n # get all the users\n Settings.dev_print(\"finding user\")\n user__ = None\n for user_ in users:\n text = user_.get_attribute(\"innerHTML\")\n # Settings.dev_print(\"user text: {}\".format(text))\n if str(user) in text:\n user__ = user_\n Settings.dev_print(\"found user: {} - {}\".format(user__, user_))\n break\n if user__ == None:\n print(\"Warning: Unable to Find User\")\n return False\n ActionChains(BROWSER).move_to_element(user__).perform()\n Settings.dev_print(\"moved to user\")\n Settings.dev_print(\"finding discount btn\")\n buttons = user__.find_elements_by_class_name(DISCOUNT_USER_BUTTONS)\n for button in buttons:\n if \"Discount\" in button.get_attribute(\"innerHTML\") and button.is_enabled() and button.is_displayed():\n try:\n Settings.dev_print(\"clicking discount btn\")\n button.click()\n Settings.dev_print(\"clicked discount btn\")\n break\n except Exception as e:\n Driver.error_checker(e)\n print(\"Warning: Unable To Find User\")\n return False\n time.sleep(1)\n Settings.dev_print(\"finding months and discount amount 
btns\")\n (months_, discount_) = BROWSER.find_elements_by_class_name(DISCOUNT_INPUT)\n Settings.dev_print(\"found months and discount amount\")\n # removed in 2.10, inputs changed to above\n # months_ = BROWSER.find_element_by_class_name(MONTHS_INPUT)\n # if discount_.get_attribute(\"value\") != \"\":\n # print(\"Warning: Existing Discount\")\n # discount_.clear()\n Settings.dev_print(\"entering discount amount\")\n for n in range(11):\n discount_.send_keys(str(Keys.UP))\n for n in range(round(int(amount)/5)-1):\n discount_.send_keys(Keys.DOWN)\n Settings.dev_print(\"entered discount amount\")\n Settings.dev_print(\"entering discount months\")\n for n in range(11):\n months_.send_keys(str(Keys.UP))\n for n in range(int(months)-1):\n months_.send_keys(Keys.DOWN)\n Settings.dev_print(\"entered discount months\")\n Settings.debug_delay_check()\n Settings.dev_print(\"applying discount\")\n buttons_ = Driver.find_elements_by_name(\"discountUserButton\")\n for button in buttons_:\n if not button.is_enabled() and not button.is_displayed(): continue\n if \"Cancel\" in button.get_attribute(\"innerHTML\") and Settings.is_debug():\n button.click()\n print(\"Skipping: Save Discount (Debug)\")\n Settings.dev_print(\"### Discount Successfully Canceled ###\")\n return True\n elif \"Apply\" in button.get_attribute(\"innerHTML\"):\n button.click()\n print(\"Discounted User: {}\".format(user))\n Settings.dev_print(\"### Discount Successful ###\")\n return True\n Settings.dev_print(\"### Discount Failure ###\")\n except Exception as e:\n print(e)\n Driver.error_checker(e)\n buttons_ = Driver.find_elements_by_name(\"discountUserButtons\")\n for button in buttons_:\n if \"Cancel\" in button.get_attribute(\"innerHTML\"):\n button.click()\n Settings.dev_print(\"### Discount Successful Failure ###\")\n return False\n Settings.dev_print(\"### Discount Failure ###\")\n return False\n\n @staticmethod\n def enter_text(text):\n try:\n Settings.dev_print(\"finding text\")\n sendText = BROWSER.find_element_by_id(ONLYFANS_POST_TEXT_ID)\n Settings.dev_print(\"found text\")\n sendText.clear()\n Settings.dev_print(\"sending text\")\n sendText.send_keys(str(text))\n return True\n except Exception as e:\n print(e)\n Settings.dev_print(e)\n return False\n\n @staticmethod\n def error_checker(e):\n if \"Unable to locate element\" in str(e):\n print(\"Warning: OnlySnarf may require an update\")\n if \"Message: \" in str(e): return\n Settings.dev_print(e)\n Settings.dev_print(e)\n\n @staticmethod\n def error_window_upload():\n try:\n element = Element.get_element_by_name(\"errorUpload\")\n error_buttons = BROWSER.find_elements_by_class_name(element.getClass())\n Settings.dev_print(\"errors btns: {}\".format(len(error_buttons)))\n for butt in error_buttons:\n if butt.get_attribute(\"innerHTML\").strip() == \"Close\" and butt.is_enabled():\n Settings.maybe_print(\"Warning: Upload Error Message, Closing\")\n butt.click()\n Settings.maybe_print(\"Success: Upload Error Message Closed\")\n return True\n return False\n except Exception as e:\n Driver.error_checker(e)\n return False\n\n ######################\n ##### Expiration #####\n ######################\n\n @staticmethod\n def expires(expiration=None):\n if not expiration:\n print(\"Error: Missing Expiration\")\n return False\n auth_ = Driver.auth()\n if not auth_: return False\n Settings.dev_print(\"expires\")\n try:\n # go_to_home() # this should be run only from upload anyways\n print(\"Expiration:\")\n print(\"- Period: {}\".format(expiration))\n Driver.open_more_options()\n # 
open expires window\n Settings.dev_print(\"adding expires\")\n Driver.get_element_to_click(\"expiresAdd\").click()\n # select duration\n Settings.dev_print(\"selecting expires\")\n nums = Driver.find_elements_by_name(\"expiresPeriods\")\n for num in nums:\n ##\n # 1 day\n # 3 days\n # 7 days\n # 30 days\n # No limit\n ##\n inner = num.get_attribute(\"innerHTML\")\n if \">1<\" in str(inner) and int(expiration) == 1: num.click()\n if \">3<\" in str(inner) and int(expiration) == 3: num.click()\n if \">7<\" in str(inner) and int(expiration) == 7: num.click()\n if \">30<\" in str(inner) and int(expiration) == 30: num.click()\n if \">o limit<\" in str(inner) and int(expiration) == 99: num.click()\n Settings.dev_print(\"selected expires\")\n Settings.debug_delay_check()\n # save\n if Settings.is_debug():\n print(\"Skipping: Expiration (debug)\")\n Settings.dev_print(\"skipping expires\")\n Driver.get_element_to_click(\"expiresCancel\").click()\n Settings.dev_print(\"canceled expires\")\n Settings.dev_print(\"### Expiration Successfully Canceled ###\")\n else:\n Settings.dev_print(\"saving expires\")\n Driver.get_element_to_click(\"expiresSave\").click()\n Settings.dev_print(\"saved expires\")\n print(\"Expiration Entered\")\n Settings.dev_print(\"### Expiration Successful ###\")\n return True\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failed to Enter Expiration\")\n try:\n Settings.dev_print(\"canceling expires\")\n Driver.get_element_to_click(\"expiresCancel\").click()\n Settings.dev_print(\"canceled expires\")\n Settings.dev_print(\"### Expiration Successful Failure ###\")\n except: \n Settings.dev_print(\"### Expiration Failure Failure\")\n return False\n\n ######################################################################\n\n # should already be logged in\n @staticmethod\n def find_element_by_name(name):\n if BROWSER == None: return False\n element = Element.get_element_by_name(name)\n if not element:\n print(\"Error: Unable to find Element Reference\")\n return False\n # prioritize id over class name\n eleID = None\n try: eleID = BROWSER.find_element_by_id(element.getId())\n except: eleID = None\n if eleID: return eleID\n for className in element.getClasses():\n ele = None\n eleCSS = None\n try: ele = BROWSER.find_element_by_class_name(className)\n except: ele = None\n try: eleCSS = BROWSER.find_element_by_css_selector(className)\n except: eleCSS = None\n Settings.dev_print(\"class: {} - {}:css\".format(ele, eleCSS))\n if ele: return ele\n if eleCSS: return eleCSS\n raise Exception(\"Error: Unable to Locate Element\")\n\n @staticmethod\n def find_elements_by_name(name):\n if BROWSER == None: return False\n element = Element.get_element_by_name(name)\n if not element:\n print(\"Error: Unable to find Element Reference\")\n return False\n eles = []\n for className in element.getClasses():\n eles_ = []\n elesCSS_ = []\n try: eles_ = BROWSER.find_elements_by_class_name(className)\n except: eles_ = []\n try: elesCSS_ = BROWSER.find_elements_by_css_selector(className)\n except: elesCSS_ = []\n Settings.dev_print(\"class: {} - {}:css\".format(len(eles_), len(elesCSS_)))\n eles.extend(eles_)\n eles.extend(elesCSS_)\n eles_ = []\n for i in range(len(eles)):\n # Settings.dev_print(\"ele: {} -> {}\".format(eles[i].get_attribute(\"innerHTML\").strip(), element.getText()))\n if eles[i].is_displayed():\n Settings.dev_print(\"found displayed ele: {}\".format(eles[i].get_attribute(\"innerHTML\").strip()))\n eles_.append(eles[i])\n if len(eles_) == 0:\n raise Exception(\"Error: 
Unable to Locate Elements\")\n return eles_\n\n @staticmethod\n def get_element_to_click(name):\n Settings.dev_print(\"finding click: {}\".format(name))\n element = Element.get_element_by_name(name)\n if not element:\n print(\"Error: Unable to find Element Reference\")\n return False\n for className in element.getClasses():\n eles = []\n elesCSS = []\n try: eles = BROWSER.find_elements_by_class_name(className)\n except: eles = []\n try: elesCSS = BROWSER.find_elements_by_css_selector(className)\n except: elesCSS = []\n Settings.dev_print(\"class: {} - {}:css\".format(len(eles), len(elesCSS)))\n eles.extend(elesCSS)\n for i in range(len(eles)):\n # Settings.dev_print(\"ele: {} -> {}\".format(eles[i].get_attribute(\"innerHTML\").strip(), element.getText()))\n if (eles[i].is_displayed() and element.getText() and str(element.getText().lower()) == eles[i].get_attribute(\"innerHTML\").strip().lower()) and eles[i].is_enabled():\n Settings.dev_print(\"found matching ele\")\n # Settings.dev_print(\"found matching ele: {}\".format(eles[i].get_attribute(\"innerHTML\").strip()))\n return eles[i]\n elif (eles[i].is_displayed() and element.getText() and str(element.getText().lower()) == eles[i].get_attribute(\"innerHTML\").strip().lower()):\n Settings.dev_print(\"found text ele\")\n # Settings.dev_print(\"found text ele: {}\".format(eles[i].get_attribute(\"innerHTML\").strip()))\n return eles[i]\n elif eles[i].is_displayed() and not element.getText() and eles[i].is_enabled():\n Settings.dev_print(\"found enabled ele\")\n # Settings.dev_print(\"found enabled ele: {}\".format(eles[i].get_attribute(\"innerHTML\").strip()))\n return eles[i]\n if len(eles) > 0: return eles[0]\n Settings.dev_print(\"unable to find element - {}\".format(className))\n raise Exception(\"Error Locating Element\")\n\n ######################################################################\n\n ##############\n ### Go Tos ###\n ##############\n\n\n # waits for page load\n def get_page_load():\n time.sleep(5)\n # try: WebDriverWait(BROWSER, 120, poll_frequency=10).until(EC.visibility_of_element_located((By.CLASS_NAME, \"main-wrapper\")))\n # except Exception as e: pass\n\n def handle_alert():\n try:\n alert_obj = BROWSER.switch_to.alert or None\n if alert_obj:\n alert_obj.accept()\n except: pass\n # alert = WebDriverWait(s.mydriver, 3).until(EC.alert_is_present(),\"Enter Party Name\")\n # alert.send_keys() – used to enter a value in the Alert text box.\n # alert.accept()\n # Settings.dev_print(\"alert accepted\")\n\n @staticmethod\n def go_to_page(page):\n auth_ = Driver.auth()\n if not auth_: return False\n if str(BROWSER.current_url) == str(page) or str(page) in str(BROWSER.current_url):\n Settings.maybe_print(\"at -> {}\".format(page))\n BROWSER.execute_script(\"window.scrollTo(0, 0);\")\n else:\n Settings.maybe_print(\"goto -> {}\".format(page))\n BROWSER.get(\"{}/{}\".format(ONLYFANS_HOME_URL, page))\n Driver.handle_alert()\n Driver.get_page_load()\n\n @staticmethod\n def go_to_home():\n auth_ = Driver.auth()\n if not auth_: return False\n if str(BROWSER.current_url) == str(ONLYFANS_HOME_URL):\n Settings.maybe_print(\"at -> onlyfans.com\")\n BROWSER.execute_script(\"window.scrollTo(0, 0);\")\n else:\n Settings.maybe_print(\"goto -> onlyfans.com\")\n BROWSER.get(ONLYFANS_HOME_URL)\n Driver.get_page_load()\n\n # onlyfans.com/my/settings\n @staticmethod\n def go_to_settings(settingsTab):\n auth_ = Driver.auth()\n if not auth_: return False\n if str(BROWSER.current_url) == str(ONLYFANS_SETTINGS_URL) and str(settingsTab) == 
\"profile\":\n Settings.maybe_print(\"at -> onlyfans.com/settings/{}\".format(settingsTab))\n BROWSER.execute_script(\"window.scrollTo(0, 0);\")\n else:\n if str(settingsTab) == \"profile\": settingsTab = \"\"\n Settings.maybe_print(\"goto -> onlyfans.com/settings/{}\".format(settingsTab))\n BROWSER.get(\"{}/{}\".format(ONLYFANS_SETTINGS_URL, settingsTab))\n # fix above with correct element to locate\n Driver.get_page_load()\n\n ##################\n ###### Login #####\n ##################\n\n @staticmethod\n def login():\n print('Logging into OnlyFans')\n username = str(Settings.get_username())\n password = str(Settings.get_password())\n if not username or username == \"\":\n username = Settings.prompt_username()\n if not password or password == \"\":\n password = Settings.prompt_password()\n if str(username) == \"\" or str(password) == \"\":\n print(\"Error: Missing Login Info\")\n return False\n try:\n BROWSER.get(ONLYFANS_HOME_URL)\n Settings.dev_print(\"logging in\")\n # twitter = BROWSER.find_element_by_xpath(TWITTER_LOGIN3).click()\n # Settings.dev_print(\"twitter login clicked\")\n # rememberMe checkbox doesn't actually cause login to be remembered\n # rememberMe = BROWSER.find_element_by_xpath(REMEMBERME_CHECKBOX_XPATH)\n # if not rememberMe.is_selected():\n # rememberMe.click()\n # if str(Settings.MANUAL) == \"True\":\n # print(\"Please Login\")\n elements = BROWSER.find_elements_by_tag_name(\"a\")\n [elem for elem in elements if '/twitter/auth' in str(elem.get_attribute('href'))][0].click()\n # twitter = BROWSER.find_element_by_xpath(\"//a[@class='g-btn m-rounded m-flex m-lg m-with-icon']\").click() \n BROWSER.find_element_by_xpath(\"//input[@id='username_or_email']\").send_keys(username)\n Settings.dev_print(\"username entered\")\n # fill in password and hit the login button \n password_ = BROWSER.find_element_by_xpath(\"//input[@id='password']\")\n password_.send_keys(password)\n Settings.dev_print(\"password entered\")\n password_.send_keys(Keys.ENTER)\n try:\n Settings.dev_print(\"waiting for loginCheck\")\n WebDriverWait(BROWSER, 120, poll_frequency=6).until(EC.visibility_of_element_located((By.CLASS_NAME, Element.get_element_by_name(\"loginCheck\").getClass())))\n print(\"OnlyFans Login Successful\")\n return True\n except TimeoutException as te:\n Settings.dev_print(str(te))\n print(\"Login Failure: Timed Out! 
Please check your Twitter credentials.\")\n print(\": If the problem persists, OnlySnarf may require an update.\")\n except Exception as e:\n Driver.error_checker(e)\n print(\"Login Failure: OnlySnarf may require an update\")\n return False\n except Exception as e:\n Settings.dev_print(\"login failure\")\n Driver.error_checker(e)\n print(\"OnlyFans Login Failed\")\n return False\n\n ####################\n ##### Messages #####\n ####################\n\n @staticmethod\n def message(username=None, user_id=None):\n if not username and not user_id:\n print(\"Error: Missing User to Message\")\n return False\n auth_ = Driver.auth()\n if not auth_: return False\n try:\n type__ = None # default\n if str(username).lower() == \"all\": type__ = \"messageAll\"\n elif str(username).lower() == \"recent\": type__ = \"messageRecent\"\n elif str(username).lower() == \"favorite\": type__ = \"messageFavorite\"\n successful = False\n if type__ != None:\n Driver.go_to_page(ONLYFANS_NEW_MESSAGE_URL)\n Settings.dev_print(\"clicking message type: {}\".format(username))\n Driver.get_element_to_click(type__).click()\n successful = True\n else:\n successful = Driver.message_user(username=username, user_id=user_id)\n Settings.dev_print(\"successfully started message: {}\".format(username))\n return successful\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failure to Message - {}\".format(username))\n return False\n \n @staticmethod\n def message_confirm():\n try:\n WAIT = WebDriverWait(BROWSER, 120, poll_frequency=30)\n i = 0\n Settings.dev_print(\"waiting for message confirm to be clickable\")\n while True:\n try: \n WAIT.until(EC.element_to_be_clickable((By.CLASS_NAME, MESSAGE_CONFIRM)))\n Settings.dev_print(\"message confirm is clickable\")\n break\n except Exception as e:\n print('uploading...')\n Driver.error_checker(e)\n i += 1\n if i == int(Settings.get_upload_max_duration()):\n print('Error: Max Upload Time Reached')\n return False\n Settings.dev_print(\"getting confirm to click\")\n confirm = Driver.get_element_to_click(\"new_post\")\n if Settings.is_debug():\n print('OnlyFans Message: Skipped (debug)')\n Settings.dev_print(\"### Message Successful (debug) ###\")\n Settings.debug_delay_check()\n return True\n Settings.dev_print(\"clicking confirm\")\n confirm.click()\n print('OnlyFans Message: Sent')\n Settings.dev_print(\"### Message Successful ###\")\n return True\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failure to Confirm Message\")\n Settings.dev_print(\"### Message Failure ###\")\n return False\n\n @staticmethod\n def message_files(files=[]):\n if len(files) == 0: return True\n try:\n print(\"Uploading file(s): {}\".format(len(files)))\n Settings.dev_print(\"uploading files\")\n Driver.upload_files(files=files)\n Settings.maybe_print(\"file(s) Entered\")\n Settings.debug_delay_check()\n return True\n except Exception as e:\n print(e)\n Driver.error_checker(e)\n print(\"Error: Failure to Upload File(s)\")\n return False\n\n @staticmethod\n def message_price(price):\n try:\n if not price or price == None or str(price) == \"None\":\n print(\"Error: Missing Price\")\n return False\n print(\"Enter price: {}\".format(price))\n Settings.dev_print(\"waiting for price area to enter price\")\n priceElement = WebDriverWait(BROWSER, 600, poll_frequency=10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, ONLYFANS_PRICE2)))\n Settings.dev_print(\"entering price\")\n priceElement.click()\n actions = ActionChains(BROWSER)\n actions.send_keys(str(price)) \n 
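# ActionChains only queues these keystrokes; nothing reaches the price field until perform() flushes the queued actions\n            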
actions.perform()\n Settings.dev_print(\"entered price\")\n # Settings.debug_delay_check()\n Settings.dev_print(\"saving price\")\n Driver.get_element_to_click(\"priceClick\").click() \n Settings.dev_print(\"saved price\")\n return True\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failure to Enter Price\")\n return False\n\n @staticmethod\n def message_text(text):\n try:\n # auth_ = Driver.auth()\n # if not auth_: return False\n # Driver.go_to_page(ONLYFANS_HOME_URL)\n if not text or text == None or str(text) == \"None\":\n print(\"Error: Missing Text\")\n return False\n print(\"Enter text: {}\".format(text))\n Settings.dev_print(\"finding text area\")\n message = Driver.find_element_by_name(\"messageText\") \n # message = BROWSER.find_element_by_name(\"message\") \n Settings.dev_print(\"entering text\")\n message.send_keys(str(text))\n Settings.dev_print(\"entered text\")\n return True\n except Exception as e:\n print(e)\n Driver.error_checker(e)\n print(\"Error: Failure to Enter Message\")\n return False\n\n @staticmethod\n def message_user_by_id(user_id=None):\n user_id = str(user_id).replace(\"@u\",\"\").replace(\"@\",\"\")\n if not user_id or user_id == None or str(user_id) == \"None\":\n print(\"Warning: Missing User ID\")\n return False\n try:\n auth_ = Driver.auth()\n if not auth_: return False\n Driver.go_to_page(\"{}/{}\".format(ONLYFANS_CHAT_URL, user_id))\n return True\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failure to Goto User - {}\".format(user_id))\n return False\n\n @staticmethod\n def message_user(username=None, user_id=None):\n auth_ = Driver.auth()\n if not auth_: return None\n if user_id: return Driver.message_user_by_id(user_id=user_id)\n if not username:\n print(\"Error: Missing Username to Message\")\n return False\n try:\n Driver.go_to_page(username)\n elements = BROWSER.find_elements_by_tag_name(\"a\")\n ele = [ele for ele in elements\n if \"/my/chats/chat/\" in str(ele.get_attribute(\"href\"))]\n if len(ele) == 0: \n print(\"Warning: User Cannot Be Messaged\")\n return False\n ele = ele[0]\n Settings.dev_print(\"clicking send message\")\n ele.click()\n Settings.dev_print(\"messaging username: {}\".format(username))\n except Exception as e:\n print(e)\n Driver.error_checker(e)\n print(\"Error: Failed to Message User\")\n return False\n return True\n\n ####################################################################################################\n ####################################################################################################\n ####################################################################################################\n\n # tries both and throws error for not found element internally\n @staticmethod\n def open_more_options():\n def option_one():\n # click on '...' 
element\n Settings.dev_print(\"opening options (1)\")\n moreOptions = Driver.get_element_to_click(\"moreOptions\")\n if not moreOptions: return False \n moreOptions.click()\n return True\n def option_two():\n # click in empty space\n Settings.dev_print(\"opening options (2)\")\n moreOptions = BROWSER.find_element_by_id(ONLYFANS_POST_TEXT_ID)\n if not moreOptions: return False \n moreOptions.click()\n return True\n try:\n successful = option_one()\n if not successful: return option_two()\n except Exception as e:\n try:\n return option_two()\n except Exception as e: \n Driver.error_checker(e)\n raise Exception(\"Error: Unable to Locate 'More Options' Element\")\n\n ################\n ##### Poll #####\n ################\n\n @staticmethod\n def poll(poll=None):\n if not poll:\n print(\"Error: Missing Poll\")\n return False\n auth_ = Driver.auth()\n if not auth_: return False\n Settings.dev_print(\"poll\")\n poll.get()\n duration = poll.duration\n questions = poll.questions\n try:\n print(\"Poll:\")\n print(\"- Duration: {}\".format(duration))\n print(\"- Questions:\\n> {}\".format(\"\\n> \".join(questions)))\n # make sure the extra options are shown\n Driver.open_more_options()\n # add a poll\n Settings.dev_print(\"adding poll\")\n Driver.get_element_to_click(\"poll\").click()\n # open the poll duration\n Settings.dev_print(\"adding duration\")\n Driver.get_element_to_click(\"pollDuration\").click()\n # click on the correct duration number\n Settings.dev_print(\"setting duration\")\n # nums = BROWSER.find_elements_by_class_name(Element.get_element_by_name(\"pollDurations\").getClass())\n nums = Driver.find_elements_by_name(\"pollDurations\")\n for num in nums:\n ##\n # 1 day\n # 3 days\n # 7 days\n # 30 days\n # No limit\n ##\n inner = num.get_attribute(\"innerHTML\")\n if \">1<\" in str(inner) and int(duration) == 1: num.click()\n if \">3<\" in str(inner) and int(duration) == 3: num.click()\n if \">7<\" in str(inner) and int(duration) == 7: num.click()\n if \">30<\" in str(inner) and int(duration) == 30: num.click()\n if \">o limit<\" in str(inner) and int(duration) == 99: num.click()\n # save the duration\n Settings.dev_print(\"saving duration\")\n Driver.get_element_to_click(\"pollSave\").click()\n Settings.dev_print(\"saved duration\")\n # add extra question space\n if len(questions) > 2:\n for question in questions[2:]:\n Settings.dev_print(\"adding question\")\n question_ = Driver.get_element_to_click(\"pollQuestionAdd\").click()\n Settings.dev_print(\"added question\")\n # find the question inputs\n Settings.dev_print(\"locating question paths\")\n questions_ = BROWSER.find_elements_by_xpath(POLL_INPUT_XPATH)\n Settings.dev_print(\"question paths: {}\".format(len(questions_)))\n # enter the questions\n i = 0\n # print(\"questions: {}\".format(questions))\n for question in list(questions):\n Settings.dev_print(\"entering question: {}\".format(question))\n questions_[i].send_keys(str(question))\n Settings.dev_print(\"entered question\")\n time.sleep(1)\n i+=1\n Settings.debug_delay_check()\n if Settings.is_debug():\n print(\"Skipping: Poll (debug)\")\n cancel = Driver.get_element_to_click(\"pollCancel\")\n cancel.click()\n Settings.dev_print(\"canceled poll\")\n else:\n print(\"Poll Entered\")\n Settings.dev_print(\"### Poll Successful ###\")\n time.sleep(3)\n return True\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failed to Enter Poll\")\n return False\n\n ################\n ##### Post #####\n ################\n\n @staticmethod\n def post(message=None):\n if not 
message:\n print(\"Error: Missing Message\")\n return False\n auth_ = Driver.auth()\n if not auth_: return False\n Settings.dev_print(\"posting\")\n try:\n Driver.go_to_home()\n message.get_post()\n files = message.files\n # files = \n text = message.format_text()\n keywords = message.keywords\n performers = message.performers\n tags = message.tags\n expires = message.expiration\n schedule = message.schedule\n poll = message.poll\n if str(text) == \"None\": text = \"\"\n print(\"Posting:\")\n print(\"- Files: {}\".format(len(files)))\n print(\"- Keywords: {}\".format(keywords))\n print(\"- Performers: {}\".format(performers))\n print(\"- Tags: {}\".format(tags))\n print(\"- Text: {}\".format(text))\n print(\"- Tweeting: {}\".format(Settings.is_tweeting()))\n ## Expires, Schedule, Poll\n if expires: Driver.expires(expires)\n if schedule: Driver.schedule(schedule)\n if poll: Driver.poll(poll)\n WAIT = WebDriverWait(BROWSER, 600, poll_frequency=10)\n ## Tweeting\n if Settings.is_tweeting():\n Settings.dev_print(\"tweeting\")\n WAIT.until(EC.element_to_be_clickable((By.XPATH, ONLYFANS_TWEET))).click()\n else:\n Settings.dev_print(\"not tweeting\")\n ## Files\n successful_upload = False\n try:\n Settings.dev_print(\"uploading files\")\n successful_upload = Driver.upload_files(files) or False\n except Exception as e:\n print(e)\n ## Text\n successful_text = Driver.enter_text(text)\n if not successful_text:\n print(\"Error: Unable to Enter Text\")\n return False\n ## Confirm\n i = 0\n while successful_upload:\n try:\n WebDriverWait(BROWSER, 600, poll_frequency=10).until(EC.element_to_be_clickable((By.CLASS_NAME, SEND_BUTTON_CLASS)))\n Settings.dev_print(\"upload complete\")\n break\n except Exception as e:\n # try: \n # # check for existence of \"thumbnail is fucked up\" modal and hit ok button\n # # haven't seen in long enough time to properly add\n # BROWSER.switchTo().frame(\"iframe\");\n # BROWSER.find_element_by_class(\"g-btn m-rounded m-border\").send_keys(Keys.ENTER)\n # print(\"Error: Thumbnail Missing\")\n # break\n # except Exception as ef:\n # Settings.maybe_print(ef)\n print('uploading...')\n Driver.error_checker(e)\n i+=1\n if i == int(Settings.get_upload_max_duration()):\n print('Error: Max Upload Time Reached')\n return False\n try:\n send = Driver.get_element_to_click(\"new_post\")\n if send:\n Settings.debug_delay_check()\n if Settings.is_debug():\n print('Skipped: OnlyFans Post (debug)')\n Settings.dev_print(\"### Post Maybe Successful ###\")\n return True\n Settings.dev_print(\"confirming upload\")\n send.click()\n else:\n Settings.maybe_print(\"Error: Unable to locate 'Send Post' button\")\n return False\n except Exception as e:\n print(\"Error: Unable to Send Post\")\n Settings.dev_print(e)\n return False\n # send[1].click() # the 0th one is disabled\n Settings.dev_print(\"### Post Successful ###\")\n print('OnlyFans Post Complete')\n return True\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: OnlyFans Post Failure\")\n return False\n\n ######################\n ##### Promotions #####\n ######################\n\n # or email\n @staticmethod\n def promotional_trial_link(promotion=None):\n if not promotion:\n print(\"Error: Missing Promotion\")\n return False\n auth_ = Driver.auth()\n if not auth_: return False\n # go to onlyfans.com/my/subscribers/active\n try:\n promotion.get()\n limit = promotion.limit\n expiration = promotion.expiration\n months = promotion.months\n user = promotion.user\n Settings.maybe_print(\"goto -> /my/promotions\")\n 
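# the promotions UI is driven blind below: ~11 UP presses rewinds each dropdown to its first option, then value-1 DOWN presses step to the target entry\n            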
BROWSER.get(('https://onlyfans.com/my/promotions'))\n            Settings.dev_print(\"creating promotional trial\")\n            Driver.get_element_to_click(\"promotionalTrial\").click()\n            # limit dropdown\n            Settings.dev_print(\"setting trial count\")\n            limitDropdown = Driver.find_element_by_name(\"promotionalTrialCount\")\n            for n in range(11): # 11 max subscription limits\n                limitDropdown.send_keys(str(Keys.UP))\n                Settings.debug_delay_check()\n            if int(limit) == 99: limit = 1\n            for n in range(int(limit)-1):\n                limitDropdown.send_keys(Keys.DOWN)\n                Settings.debug_delay_check()\n            # expiration dropdown\n            Settings.dev_print(\"setting trial expiration\")\n            expirationDropdown = Driver.find_element_by_name(\"promotionalTrialExpiration\")\n            for n in range(11): # 31 max days\n                expirationDropdown.send_keys(str(Keys.UP))\n                Settings.debug_delay_check()\n            if int(expiration) == 99: expiration = 1\n            for n in range(int(expiration)-1):\n                expirationDropdown.send_keys(Keys.DOWN)\n                Settings.debug_delay_check()\n            # months dropdown\n            Settings.dev_print(\"setting trial months\")\n            durationDropdown = Driver.find_element_by_name(\"promotionalTrialDuration\")\n            for n in range(11): # 32 max months\n                durationDropdown.send_keys(str(Keys.UP))\n                Settings.debug_delay_check()\n            if int(months) == 99: months = 1\n            for n in range(int(months)-1):\n                durationDropdown.send_keys(Keys.DOWN)\n                Settings.debug_delay_check()\n            # find and click promotionalTrialConfirm\n            if Settings.is_debug():\n                Settings.dev_print(\"finding trial cancel\")\n                Driver.get_element_to_click(\"promotionalTrialCancel\").click()\n                print(\"Skipping: Promotion (debug)\")\n                Settings.dev_print(\"Successful trial cancellation\")\n                return True\n            Settings.dev_print(\"finding trial save\")\n            save_ = Driver.get_element_to_click(\"promotionalTrialConfirm\")\n            Settings.dev_print(\"saving promotion\")\n            save_.click()\n            Settings.dev_print(\"promotion saved\")\n            Settings.dev_print(\"copying trial link\")\n            Driver.find_element_by_name(\"promotionalTrialLink\").click()\n            Settings.dev_print(\"copied trial link\")\n\n            # go to /home\n            # enter copied paste into new post\n            # get text in new post\n            # email link to user\n            \n            # Actions actions = new Actions(Driver.driver);\n            # actions.sendKeys(Keys.chord(Keys.LEFT_CONTROL, \"v\")).build().perform();\n            # sendemail(from_addr = 'python@RC.net', \n            #       to_addr_list = ['RC@gmail.com'],\n            #       cc_addr_list = ['RC@xx.co.uk'], \n            #       subject = 'Howdy', \n            #       message = 'Howdy from a python function', \n            #       login = 'pythonuser', \n            #       password = 'XXXXX')\n\n            Settings.dev_print(\"Successful Promotion\")\n            return True\n        except Exception as e:\n            Driver.error_checker(e)\n            print(\"Error: Failed to Apply Promotion\")\n            return None\n\n    @staticmethod\n    def promotion_user_directly(promotion=None):\n        if not promotion:\n            print(\"Error: Missing Promotion\")\n            return False\n        auth_ = Driver.auth()\n        if not auth_: return False\n        # go to onlyfans.com/my/subscribers/active\n        promotion.get()\n        expiration = promotion.expiration\n        months = promotion.months\n        user = promotion.user\n        message = promotion.message\n        if int(expiration) > int(Settings.get_discount_max_amount()):\n            print(\"Warning: Discount Too High, Max -> {}%\".format(Settings.get_discount_max_amount()))\n            expiration = Settings.get_discount_max_amount()\n        elif int(expiration) < int(Settings.get_discount_min_amount()):\n            print(\"Warning: Discount Too Low, Min -> {}%\".format(Settings.get_discount_min_amount()))\n            expiration = Settings.get_discount_min_amount()\n        if int(months) > int(Settings.get_discount_max_months()):\n            
print(\"Warning: Duration Too High, Max -> {} days\".format(Settings.get_discount_max_months()))\n months = Settings.get_discount_max_months()\n elif int(months) < int(Settings.get_discount_min_months()):\n print(\"Warning: Duration Too Low, Min -> {} days\".format(Settings.get_discount_min_months()))\n months = Settings.get_discount_min_months()\n try:\n Settings.maybe_print(\"goto -> /{}\".format(user.username))\n Driver.go_to_page.get(user.username)\n # click discount button\n Driver.get_element_to_click(\"discountUser\").click()\n # enter expiration\n expirations = Driver.find_element_by_name(\"promotionalTrialExpirationUser\")\n # enter duration\n durations = Driver.find_element_by_name(\"promotionalTrialDurationUser\")\n # enter message\n message = Driver.find_element_by_name(\"promotionalTrialMessageUser\")\n # save\n Settings.dev_print(\"entering expiration\")\n for n in range(11):\n expirations.send_keys(str(Keys.UP))\n for n in range(round(int(expiration)/5)-1):\n expirations.send_keys(Keys.DOWN)\n Settings.dev_print(\"entered expiration\")\n Settings.dev_print(\"entering duration\")\n for n in range(11):\n durations.send_keys(str(Keys.UP))\n for n in range(int(months)-1):\n durations.send_keys(Keys.DOWN)\n Settings.dev_print(\"entered duration\")\n Settings.debug_delay_check()\n Settings.dev_print(\"entering message\")\n message.clear()\n message.send_keys(message)\n Settings.dev_print(\"entered message\")\n Settings.dev_print(\"applying discount\")\n save = Driver.find_element_by_name(\"promotionalTrialApply\")\n if Settings.is_debug():\n Driver.find_element_by_name(\"promotionalTrialCancel\").click()\n print(\"Skipping: Save Discount (Debug)\")\n Settings.dev_print(\"### Discount Successfully Canceled ###\")\n cancel.click()\n return True\n save.click()\n print(\"Discounted User: {}\".format(user.username))\n Settings.dev_print(\"### User Discount Successful ###\")\n return True\n except Exception as e:\n Driver.error_checker(e)\n try:\n Driver.find_element_by_name(\"promotionalTrialCancel\").click()\n Settings.dev_print(\"### Discount Successful Failure ###\")\n return False\n except Exception as e:\n Driver.error_checker(e)\n Settings.dev_print(\"### Discount Failure ###\")\n return False\n\n ######################################################################\n\n @staticmethod\n def read_user_messages(user):\n auth_ = Driver.auth()\n if not auth_: return False\n try:\n # go to onlyfans.com/my/subscribers/active\n Driver.message_user(user)\n messages_from_ = Driver.find_elements_by_name(\"messagesFrom\")\n # print(\"first message: {}\".format(messages_to_[0].get_attribute(\"innerHTML\")))\n # messages_to_.pop(0) # drop self user at top of page\n messages_all_ = Driver.find_elements_by_name(\"messagesAll\")\n messages_all = []\n messages_to = []\n messages_from = []\n # timestamps_ = BROWSER.find_elements_by_class_name(\"b-chat__message__time\")\n # timestamps = []\n # for timestamp in timestamps_:\n # Settings.maybe_print(\"timestamp1: {}\".format(timestamp))\n # timestamp = timestamp[\"data-timestamp\"]\n # timestamp = timestamp.get_attribute(\"innerHTML\")\n # Settings.maybe_print(\"timestamp: {}\".format(timestamp))\n # timestamps.append(timestamp)\n for message in messages_all_:\n Settings.maybe_print(\"all: {}\".format(message.get_attribute(\"innerHTML\")))\n messages_all.append(message.get_attribute(\"innerHTML\"))\n messages_and_timestamps = []\n # messages_and_timestamps = [j for i in zip(timestamps,messages_all) for j in i]\n # Settings.maybe_print(\"Chat 
Log:\")\n # for f in messages_and_timestamps:\n # Settings.maybe_print(\": {}\".format(f))\n for message in messages_from_:\n # Settings.maybe_print(\"from1: {}\".format(message.get_attribute(\"innerHTML\")))\n message = message.find_element_by_class_name(ONLYFANS_MESSAGES)\n Settings.maybe_print(\"from: {}\".format(message.get_attribute(\"innerHTML\")))\n messages_from.append(message.get_attribute(\"innerHTML\"))\n i = 0\n for message in messages_all:\n from_ = False\n to_ = False\n for mess in messages_from:\n if str(message) == str(mess):\n from_ = True\n for mess in messages_to:\n if str(message) == str(mess):\n to_ = True\n if not from_:\n # Settings.maybe_print(\"to_: {}\".format(message))\n # messages_to[i] = [timestamps[i], message]\n # messages_to[i] = message\n messages_to.append(message)\n # Settings.maybe_print(\"to_: {}\".format(messages_to[i]))\n # elif from_:\n # Settings.maybe_print(\"from_: {}\".format(message))\n # messages_from[i] = [timestamps[i], message]\n # messages_from[i] = message\n # Settings.maybe_print(\"from_: {}\".format(messages_from[i]))\n i += 1\n Settings.maybe_print(\"to: {}\".format(messages_to))\n Settings.maybe_print(\"from: {}\".format(messages_from))\n Settings.maybe_print(\"Messages From: {}\".format(len(messages_from)))\n Settings.maybe_print(\"Messages To: {}\".format(len(messages_to)))\n Settings.maybe_print(\"Messages All: {}\".format(len(messages_all)))\n return [messages_all, messages_and_timestamps, messages_to, messages_from]\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failure to Read Chat - {}\".format(user.username))\n return [[],[],[]]\n\n #################\n ##### Reset #####\n #################\n\n # Reset to home\n @staticmethod\n def reset():\n if not BROWSER or BROWSER == None:\n print('OnlyFans Not Open, Skipping Reset')\n return True\n try:\n BROWSER.get(ONLYFANS_HOME_URL)\n print('OnlyFans Reset')\n return True\n except Exception as e:\n Driver.error_checker(e)\n print('Error: Failure Resetting OnlyFans')\n return False\n\n ####################\n ##### Schedule #####\n ####################\n\n @staticmethod\n def schedule(theSchedule=None):\n if not theSchedule:\n print(\"Error: Missing Schedule\")\n return False\n auth_ = Driver.auth()\n if not auth_: return False\n try:\n theSchedule.get()\n month_ = theSchedule.month\n day_ = theSchedule.day\n year_ = theSchedule.year\n hour_ = theSchedule.hour\n minute_ = theSchedule.minute\n today = datetime.now()\n Settings.dev_print(\"today: {} {}\".format(today.strftime(\"%B\"), today.strftime(\"%Y\")))\n date__ = datetime.strptime(str(theSchedule.date), \"%Y-%m-%d %H:%M:%S\")\n if date__ < today:\n print(\"Error: Unable to Schedule Earlier Date\")\n return False\n print(\"Schedule:\")\n print(\"- Date: {}\".format(theSchedule.date))\n print(\"- Time: {}\".format(theSchedule.time))\n Driver.open_more_options()\n # click schedule\n Settings.dev_print(\"adding schedule\")\n Driver.get_element_to_click(\"scheduleAdd\").click()\n # find and click month w/ correct date\n while True:\n Settings.dev_print(\"getting date\")\n existingDate = Driver.find_element_by_name(\"scheduleDate\").get_attribute(\"innerHTML\")\n Settings.dev_print(\"date: {} - {} {}\".format(existingDate, month_, year_))\n if str(month_) in str(existingDate) and str(year_) in str(existingDate): break\n else: Driver.get_element_to_click(\"scheduleNextMonth\").click()\n # set day in month\n Settings.dev_print(\"setting days\")\n days = Driver.find_elements_by_name(\"scheduleDays\")\n for day in days:\n 
inner = day.get_attribute(\"innerHTML\").replace(\"\",\"\").replace(\"\",\"\")\n if str(day_) == str(inner):\n day.click()\n Settings.dev_print(\"clicked day\")\n Settings.debug_delay_check()\n # save schedule date\n saves = Driver.get_element_to_click(\"scheduleSave\")\n Settings.dev_print(\"found save button, clicking\")\n saves.click()\n Settings.dev_print(\"clicked save button\")\n # set hours\n Settings.dev_print(\"setting hours\")\n hours = Driver.find_elements_by_name(\"scheduleHours\")\n for hour in hours:\n inner = hour.get_attribute(\"innerHTML\")\n if str(hour_) in str(inner) and hour.is_enabled():\n hour.click()\n Settings.dev_print(\"hours set\")\n # set minutes\n Settings.dev_print(\"setting minutes\")\n minutes = Driver.find_elements_by_name(\"scheduleMinutes\")\n for minute in minutes:\n inner = minute.get_attribute(\"innerHTML\")\n if str(minute_) in str(inner) and minute.is_enabled():\n minute.click()\n Settings.dev_print(\"minutes set\")\n # save time\n Settings.dev_print(\"saving schedule\")\n Settings.debug_delay_check()\n if Settings.is_debug():\n print(\"Skipping: Schedule (debug)\")\n Driver.get_element_to_click(\"scheduleCancel\").click()\n Settings.dev_print(\"canceled schedule\")\n else:\n Driver.get_element_to_click(\"scheduleSave\").click()\n Settings.dev_print(\"saved schedule\")\n print(\"Schedule Entered\")\n Settings.dev_print(\"### Schedule Successful ###\")\n return True\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failed to Enter Schedule\")\n return False\n\n ####################\n ##### Settings #####\n ####################\n\n # gets all settings from whichever page its on\n # or get a specific setting\n # probably just way easier and resourceful to do it all at once\n # though it would be ideal to also be able to update individual settings without risking other settings\n\n # goes through the settings and get all the values\n @staticmethod\n def settings_get_all():\n auth_ = Driver.auth()\n if not auth_: return False\n print(\"Getting All Settings\")\n try:\n pages = Profile.get_pages()\n for page in pages:\n variables = Profile.get_variables_for_page(page)\n Settings.dev_print(\"going to settings page: {}\".format(page))\n Driver.go_to_settings(page)\n Settings.dev_print(\"reached settings: {}\".format(page))\n data = Profile({})\n for var in variables:\n name = var[0]\n page_ = var[1]\n type_ = var[2]\n status = None\n Settings.dev_print(\"searching: {} - {}\".format(name, type_))\n try:\n element = Driver.find_element_by_name(name)\n Settings.dev_print(\"Successful ele: {}\".format(name))\n except Exception as e:\n Driver.error_checker(e)\n continue\n if str(type_) == \"text\":\n # get attr text\n status = element.get_attribute(\"innerHTML\").strip() or None\n status2 = element.get_attribute(\"value\").strip() or None\n print(\"{} - {}\".format(status, status2))\n if not status and status2: status = status2\n elif str(type_) == \"toggle\":\n # get state true|false\n status = element.is_selected()\n elif str(type_) == \"dropdown\":\n ele = Driver.find_element_by_name(name)\n Select(driver.find_element_by_id(ele.getId()))\n status = element.first_selected_option\n elif str(type_) == \"list\":\n status = element.get_attribute(\"innerHTML\")\n elif str(type_) == \"file\":\n # can get file from image above\n # can set once found\n # status = element.get_attribute(\"innerHTML\")\n pass\n elif str(type_) == \"checkbox\":\n status = element.is_selected()\n if status is not None: Settings.dev_print(\"Successful value: 
{}\".format(status))\n Settings.maybe_print(\"{} : {}\".format(name, status))\n data.set(name, status)\n Settings.dev_print(\"Successfully got settings\")\n print(\"Settings Retrieved\")\n return data\n except Exception as e:\n Driver.error_checker(e)\n\n # goes through each page and sets all the values\n @staticmethod\n def settings_set_all(data):\n auth_ = Driver.auth()\n if not auth_: return False\n print(\"Updating All Settings\")\n try:\n # Driver.go_to_home()\n pages = Profile.get_pages()\n for page in pages:\n variables = Profile.get_variables_for_page(page)\n Settings.dev_print(\"going to settings page: {}\".format(page))\n Driver.go_to_settings(page)\n Settings.dev_print(\"reached settings: {}\".format(page))\n for var in variables:\n name = var[0]\n page_ = var[1]\n type_ = var[2]\n status = None\n Settings.dev_print(\"searching: {} - {}\".format(name, type_))\n try:\n element = Driver.find_element_by_name(name)\n Settings.dev_print(\"Successful ele: {}\".format(name))\n except Exception as e:\n Driver.error_checker(e)\n continue\n if str(type_) == \"text\":\n element.send_keys(data.get(name))\n elif str(type_) == \"toggle\":\n # somehow set the other toggle state\n pass\n elif str(type_) == \"dropdown\":\n ele = Driver.find_element_by_name(name)\n Select(driver.find_element_by_id(ele.getId()))\n # go to top\n # then go to matching value\n pass\n elif str(type_) == \"list\":\n element.send_keys(data.get(name))\n elif str(type_) == \"file\":\n element.send_keys(data.get(name))\n elif str(type_) == \"checkbox\":\n element.click()\n # Settings.dev_print(\"Successful value: {}\".format(status))\n Driver.settings_save(page=page)\n Settings.dev_print(\"Successfully set settings\")\n print(\"Settings Updated\")\n except Exception as e:\n Driver.error_checker(e)\n\n # saves the settings page if it is a page that needs to be saved\n # has save:\n # profile\n # account\n # security\n ##\n # doesn't have save:\n # story\n # notifications\n # other\n @staticmethod\n def settings_save(page=None):\n if str(page) not in [\"profile\", \"account\", \"security\"]:\n Settings.dev_print(\"not saving: {}\".format(page))\n return\n try:\n Settings.dev_print(\"saving: {}\".format(page))\n element = Driver.find_element_by_name(\"profileSave\")\n Settings.dev_print(\"derp\")\n element = Driver.get_element_to_click(\"profileSave\")\n Settings.dev_print(\"found page save\")\n if Settings.is_debug():\n print(\"Skipping: Save (debug)\")\n else:\n Settings.dev_print(\"saving page\")\n element.click()\n Settings.dev_print(\"page saved\")\n except Exception as e:\n Driver.error_checker(e)\n\n @staticmethod\n def spawn_browser(): \n global BROWSER\n if BROWSER: return True\n print(\"Spawning Browser\")\n CHROMEDRIVER_PATH = chromedriver_binary.chromedriver_filename\n options = webdriver.ChromeOptions()\n # options.setExperimentalOption('useAutomationExtension', false);\n # options.binary_location = chromedriver_binary.chromedriver_filename\n if not Settings.is_show_window():\n options.add_argument('--headless')\n #\n options.add_argument('--disable-smooth-scrolling')\n options.add_argument('--disable-software-rasterizer')\n options.add_argument(\"disable-infobars\") # disabling infobars\n options.add_argument(\"--disable-extensions\") # disabling extensions\n options.add_argument(\"--disable-gpu\") # applicable to windows os only\n #\n options.add_argument('--disable-login-animations')\n options.add_argument('--disable-modal-animations')\n options.add_argument('--disable-sync')\n # 
options.add_argument('--incognito')\n options.add_argument('--user-agent=MozillaYerMomFox')\n #\n options.add_argument(\"--disable-dev-shm-usage\") # overcome limited resource problems\n options.add_argument(\"--no-sandbox\") # Bypass OS security model\n # options.add_experimental_option(\"prefs\", {\n # \"download.default_directory\": str(DOWNLOAD_PATH),\n # \"download.prompt_for_download\": False,\n # \"download.directory_upgrade\": True,\n # \"safebrowsing.enabled\": True\n # })\n driver = None\n try:\n driver = webdriver.Chrome(chrome_options=options)\n except Exception as e:\n # print(e)\n print(\"Warning: Missing Chromedriver\")\n return False\n driver.implicitly_wait(30) # seconds\n driver.set_page_load_timeout(1200)\n print(\"Browser Spawned\")\n BROWSER = driver\n return True\n\n # update chat logs for all users\n @staticmethod\n def update_chat_logs():\n global USER_CACHE_LOCKED\n USER_CACHE_LOCKED = True\n print(\"Updating User Chats\")\n users = Driver.users_get()\n for user in users:\n Driver.update_chat_log(user)\n USER_CACHE_LOCKED = False\n\n @staticmethod\n def update_chat_log(user):\n print(\"Updating Chat: {}\".format(user.username))\n if not user:\n return print(\"Error: Missing User\")\n user.readChat()\n\n ##################\n ##### Upload #####\n ##################\n\n # uploads image into post or message\n @staticmethod\n def upload_files(files=[]):\n if Settings.is_skip_download(): \n print(\"Skipping Upload (download)\")\n return True\n elif Settings.is_skip_upload(): \n print(\"Skipping Upload (upload)\")\n return True\n if len(files) == 0: return False\n if Settings.is_skip_upload():\n print(\"Skipping Upload: Disabled\")\n return False\n files = files[:int(Settings.get_upload_max_messages())]\n Settings.dev_print(\"uploading image files: {}\".format(len(files)))\n i = 1\n for file in files:\n print('Uploading: {} - {}/{}'.format(file.get_title(), i, len(files)))\n i += 1\n uploadable = file.prepare() # downloads if Google_File\n if not uploadable:\n print(\"Error: Unable to Upload - {}\".format(file.get_title()))\n continue\n enter_file = BROWSER.find_element_by_id(\"fileupload_photo\")\n enter_file.send_keys(str(file.get_path()))\n time.sleep(1)\n Driver.error_window_upload()\n ###\n def fix_filename(file):\n # move file to change its name\n filename = os.path.basename(file.get_path())\n filename = os.path.splitext(filename)[0]\n if \"_fixed\" in str(filename): return\n Settings.dev_print(\"fixing filename...\")\n filename += \"_fixed\"\n ext = os.path.splitext(filename)[1].lower()\n Settings.dev_print(\"{} -> {}.{}\".format(os.path.dirname(file.get_path()), filename, ext))\n dst = \"{}/{}.{}\".format(os.path.dirname(file), filename, ext)\n shutil.move(file.get_path(), dst)\n file.path = dst\n # add file to end of list so it gets retried\n files.append(file)\n # if this doesn't force it then it'll loop forever without a stopper\n ###\n # one last final check\n Driver.error_window_upload()\n Settings.debug_delay_check()\n Settings.dev_print(\"### Files Upload Successful ###\")\n return True\n\n #################\n ##### Users #####\n #################\n\n # returns list of accounts you follow\n @staticmethod\n def following_get():\n auth_ = Driver.auth()\n if not auth_: return False\n users = []\n try:\n Driver.go_to_page(ONLYFANS_USERS_FOLLOWING_URL)\n count = 0\n while True:\n elements = BROWSER.find_elements_by_class_name(\"m-subscriptions\")\n if len(elements) == count: break\n print_same_line(\"({}/{}) scrolling...\".format(count, len(elements)))\n count = 
len(elements)\n BROWSER.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n print()\n elements = BROWSER.find_elements_by_class_name(\"m-subscriptions\")\n for ele in elements:\n username = ele.find_element_by_class_name(\"g-user-username\").get_attribute(\"innerHTML\").strip()\n name = ele.find_element_by_class_name(\"g-user-name\").get_attribute(\"innerHTML\")\n name = re.sub(\"\", \"\", name)\n name = re.sub(\"<.*\\\">\", \"\", name)\n name = re.sub(\"\", \"\", name).strip()\n # print(\"username: {}\".format(username))\n # print(\"name: {}\".format(name))\n users.append({\"name\":name, \"username\":username}) \n Settings.maybe_print(\"Found: {}\".format(len(users)))\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failed to Find Subscriptions\")\n return users\n\n # returns list of accounts that follow you\n @staticmethod\n def users_get():\n auth_ = Driver.auth()\n if not auth_: return False\n users = []\n try:\n Driver.go_to_page(ONLYFANS_USERS_ACTIVE_URL)\n count = 0\n while True:\n elements = BROWSER.find_elements_by_class_name(\"m-fans\")\n if len(elements) == int(count): break\n print_same_line(\"({}/{}) scrolling...\".format(count, len(elements)))\n count = len(elements)\n BROWSER.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n print()\n elements = BROWSER.find_elements_by_class_name(\"m-fans\")\n for ele in elements:\n username = ele.find_element_by_class_name(\"g-user-username\").get_attribute(\"innerHTML\").strip()\n name = ele.find_element_by_class_name(\"g-user-name\").get_attribute(\"innerHTML\")\n name = re.sub(\"\", \"\", name)\n name = re.sub(\"<.*\\\">\", \"\", name)\n name = re.sub(\"\", \"\", name).strip()\n # print(\"username: {}\".format(username))\n # print(\"name: {}\".format(name))\n # start = datetime.strptime(str(datetime.now()), \"%m-%d-%Y:%H:%M\")\n users.append({\"name\":name, \"username\":username}) # ,\"id\":user_id, \"started\":start})\n Settings.maybe_print(\"Found: {}\".format(len(users)))\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failed to Find Users\")\n return users\n\n @staticmethod\n def user_get_id(username):\n auth_ = Driver.auth()\n if not auth_: return None\n user_id = None\n try:\n Driver.go_to_page(username)\n time.sleep(3) # this should realistically only fail if they're no longer subscribed but it fails often from loading\n elements = BROWSER.find_elements_by_tag_name(\"a\")\n ele = [ele.get_attribute(\"href\") for ele in elements\n if \"/my/chats/chat/\" in str(ele.get_attribute(\"href\"))]\n if len(ele) == 0: \n print(\"Warning: User Cannot Be Messaged\")\n return None\n ele = ele[0]\n ele = ele.replace(\"https://onlyfans.com/my/chats/chat/\", \"\")\n user_id = ele\n Settings.maybe_print(\"found user id: {}\".format(user_id))\n except Exception as e:\n Driver.error_checker(e)\n print(\"Error: Failed to Find User ID\")\n return user_id\n\n ################\n ##### Exit #####\n ################\n\n @staticmethod\n def exit():\n global BROWSER\n if BROWSER == None: return\n if Settings.is_save_users():\n print(\"Saving and Exiting OnlyFans\")\n User.write_users_local()\n else:\n print(\"Exiting OnlyFans\")\n BROWSER.quit()\n BROWSER = None\n print(\"Browser Closed\")\n\n##################################################################################\n\ndef parse_users(user_ids, starteds, users, usernames):\n # usernames.pop(0)\n # print(\"My User Id: {}\".format(user_ids[0]))\n # user_ids.pop(0)\n 
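# stitch the separately scraped element lists (ids, start dates, display names, usernames) back into one dict per user; the lists can disagree in length, hence the Failed flags below\n    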
Settings.dev_print(\"user_ids: \"+str(len(user_ids)))\n Settings.dev_print(\"starteds: \"+str(len(starteds)))\n useridsFailed = False\n startedsFailed = False\n if len(user_ids) == 0:\n Settings.maybe_print(\"Warning: Unable to find user ids\")\n useridsFailed = True\n if len(starteds) == 0:\n Settings.maybe_print(\"Warning: Unable to find starting dates\")\n startedsFailed = True\n users_ = []\n try:\n user_ids_ = []\n starteds_ = []\n for i in range(len(user_ids)):\n if user_ids[i].get_attribute(\"href\"):\n user_ids_.append(user_ids[i].get_attribute(\"href\"))\n for i in range(len(starteds)):\n text = starteds[i].get_attribute(\"innerHTML\")\n match = re.findall(\"Started.*([A-Za-z]{3}\\s[0-9]{1,2},\\s[0-9]{4})\", text)\n if len(match) > 0:\n starteds_.append(match[0])\n if len(user_ids_) == 0:\n Settings.maybe_print(\"Warning: Unable to find user ids\")\n useridsFailed = True\n if len(starteds_) == 0:\n Settings.maybe_print(\"Warning: Unable to find starting dates\")\n startedsFailed = True\n # Settings.maybe_print(\"ids vs starteds vs avatars: \"+str(len(user_ids_))+\" - \"+str(len(starteds_))+\" - \"+str(len(avatars)))\n Settings.maybe_print(\"users vs ids vs starteds vs usernames:\"+str(len(users))+\" - \"+str(len(user_ids_))+\" - \"+str(len(starteds_))+\" - \"+str(len(usernames)))\n # for user in usernames:\n # print(user.get_attribute(\"innerHTML\"))\n if len(usernames) > 2:\n # first 2 usernames are self\n usernames.pop(0)\n usernames.pop(0)\n if len(users) > 2:\n users.pop(0)\n users.pop(0)\n for i in range(len(users)): # the first is you and doesn't count towards total\n try:\n if not startedsFailed:\n start = starteds_[i]\n else:\n start = datetime.now().strftime(\"%b %d, %Y\")\n if not useridsFailed:\n user_id = user_ids_[i][35:] # cuts out initial chars instead of unwieldy regex\n else:\n user_id = None\n name = users[i]\n username = usernames[i]\n name = str(name.get_attribute(\"innerHTML\"))\n # print(\"name: \"+name)\n # if \"\", \"\", name)\n # print(name)\n # if \"<\" in str(name) and \">\" in str(name):\n name = re.sub(\"<.*\\\">\", \"\", name).strip()\n # print(name)\n name = re.sub(\"\", \"\", name).strip()\n # print(name)\n # name = re.sub(name, \"<.*>\", \"\").strip()\n # print(name)\n # name = re.sub(name, \"\", \"\")\n username = str(username.get_attribute(\"innerHTML\"))\n # print(\"username: \"+username)\n # if \"\", \"\", username)\n # print(username)\n # if \"<\" in str(username) and \">\" in str(username):\n username = re.sub(\"<.*\\\">\", \"\", username).strip()\n # print(username)\n username = re.sub(\"\", \"\", username).strip()\n username = username.replace(\"@\",\"\")\n # print(username)\n # username = re.sub(\"<.*>\", \"\", username).strip()\n # print(username)\n # username = re.sub(username, \"\", \"\")\n # Settings.maybe_print(\"name: \"+str(name))\n # Settings.maybe_print(\"username: \"+str(username))\n # Settings.maybe_print(\"user_id: \"+str(user_id))\n # if str(Settings.get_username()).lower() in str(username).lower():\n # Settings.maybe_print(\"(): %s = %s\" % (Settings.get_username(), username))\n # # first user is always active user but just in case find it in list of users\n # Settings.USER_ID = username\n # else:\n users_.append({\"name\":name, \"username\":username, \"id\":user_id, \"started\":start})\n except Exception as e: Settings.dev_print(e)\n except Exception as e: Driver.error_checker(e)\n return users_\n\n\n\nimport smtplib\n\ndef sendemail(from_addr, to_addr_list, cc_addr_list,\n subject, message,\n login, password,\n 
smtpserver='smtp.gmail.com:587'):\n    header = 'From: %s\\n' % from_addr\n    header += 'To: %s\\n' % ','.join(to_addr_list)\n    header += 'Cc: %s\\n' % ','.join(cc_addr_list)\n    header += 'Subject: %s\\n\\n' % subject\n    message = header + message\n    \n    server = smtplib.SMTP(smtpserver)\n    server.starttls()\n    server.login(login,password)\n    problems = server.sendmail(from_addr, to_addr_list, message)\n    server.quit()\n    return problems","sub_path":"OnlySnarf/src/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":77310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"337606353","text":"from django.shortcuts import render\nfrom urllib.parse import urlencode\nfrom django.http import HttpResponseRedirect\nimport json, requests\nfrom .models import Token\n\nrecognition_endpoint = \"https://login.salesforce.com/services/oauth2/authorize\"\ntoken_endpoint = \"https://login.salesforce.com/services/oauth2/token\"\ncallback_url = \"https://127.0.0.1:8000/callback/\"\nclient_id = \"3MVG9dZJodJWITSuD9n94bPpxTIVTnh.ciMFSXhBJ5sNNsdSqbjvy4FI6QFw2tY5AkkvcACR4U63okdFu_J7d\"\nclient_secret = \"7386651278074704985\"\n\ndef index(request):\n    content = {\n        \"response_type\": \"code\",\n        \"client_id\" : client_id,\n        \"redirect_uri\" : callback_url,\n    }\n    result = recognition_endpoint + \"?\" + urlencode(content)\n    return HttpResponseRedirect(result)\n\ndef callback(request):\n    content = {\n        \"grant_type\": \"authorization_code\",\n        \"client_secret\": client_secret,\n        \"client_id\" : client_id,\n        \"redirect_uri\" : callback_url,\n        \"code\" : request.GET['code'],\n    }\n    response_data = requests.post(token_endpoint,\n                    headers={\"Content-Type\":\"application/x-www-form-urlencoded\"},\n                    data=content)\n    decoded_data = json.loads(response_data.content)\n    save_token(decoded_data)\n    return render(request, 'oauth/result.html', {\"data_list\":decoded_data})\n\ndef save_token(decoded_data):\n    if decoded_data.get('access_token'):\n        Token.objects.create(\n            access_token = decoded_data.get('access_token'),\n            id_token = decoded_data.get('id_token'),\n            id = decoded_data.get('id'),\n            instance_url = decoded_data.get('instance_url'),\n        )\n","sub_path":"oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"54025151","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\nimport numpy as np\ndef sir(par, distr_pc, flow_pc, distr_pt, flow_pt, iterations, inf_pc):\n    \n    r = flow_pc.shape[0]\n    n = flow_pc.shape[1]\n    \n    N_pc = distr_pc[0].sum() # total population, we assume that N = sum(flow) \n    Svec_pc = distr_pc[0].copy()\n    Ivec_pc = np.zeros(n)\n    Rvec_pc = np.zeros(n)\n    \n    N_pt = distr_pt[0].sum() # total population, we assume that N = sum(flow) \n    Svec_pt = distr_pt[0].copy()\n    Ivec_pt = np.zeros(n)\n    Rvec_pt = np.zeros(n)\n    \n    if par.I0 is None:\n        initial = np.zeros(n)\n        # randomly seed inf_pc initial infections\n        for i in range(inf_pc):\n            loc = np.random.randint(n)\n            if (Svec_pc[loc] > initial[loc]):\n                initial[loc] += 1.0\n    \n    else:\n        initial = par.I0\n        assert ((Svec_pc < initial).sum() == 0)\n    \n    Svec_pc -= initial\n    Ivec_pc += initial\n    \n    res_pc = np.zeros((iterations, 4))\n    res_pc[0,:] = [Svec_pc.sum(), Ivec_pc.sum(), Rvec_pc.sum(), 0]\n    realflow_pc = flow_pc.copy() # copy!\n    \n    \n    res_pt = np.zeros((iterations, 4))\n    res_pt[0,:] = [Svec_pt.sum(), Ivec_pt.sum(), Rvec_pt.sum(), 0]\n    realflow_pt = flow_pt.copy() # copy!\n    \n    newI_pc_rp = 
np.zeros((iterations,n))\n newI_pt_rp = np.zeros((iterations,n))\n\n # The two lines below normalise the flows and then multiply them by the alpha values. \n # This is actually the \"wrong\" the way to do it because alpha will not be a *linear* measure \n # representing lockdown strength but a *nonlinear* one.\n # The normalisation strategy has been chosen for demonstration purposes of numpy functionality.\n # (Optional) can you rewrite this part so that alpha remains a linear measure of lockdown strength? :)\n realflow_pc = realflow_pc / realflow_pc.sum(axis=2)[:,:, np.newaxis] \n \n realflow_pt = realflow_pt / realflow_pt.sum(axis=2)[:,:, np.newaxis] \n \n history_pc = np.zeros((iterations, 4, n))\n history_pc[0,0,:] = Svec_pc\n history_pc[0,2,:] = Ivec_pc\n history_pc[0,3,:] = Rvec_pc\n \n eachIter_pc = np.zeros(iterations + 1)\n \n history_pt = np.zeros((iterations, 4, n))\n history_pt[0,0,:] = Svec_pt\n history_pt[0,2,:] = Ivec_pt\n history_pt[0,3,:] = Rvec_pt\n \n eachIter_pt = np.zeros(iterations + 1)\n \n # run simulation\n for iter in range(0, iterations - 1):\n realOD_pc = realflow_pc[iter % r]\n \n realOD_pt = realflow_pt[iter % r]\n \n d_pc = distr_pc[iter % r] + 1\n \n d_pt = distr_pt[iter % r] + 1\n if ((d_pc>N_pc+1).any()): #assertion!\n print(\"Miracle, we have a problem!\")\n return res_pc, history_pc,res_pt, history_pt\n # N = S + I + R\n \n p_pc = ((Ivec_pc + Ivec_pt) / (d_pc + d_pt)) * (par.R0 / par.DI)\n p_pt = Ivec_pt/d_pt * par.R0 / par.DI\n \n newI_pc = Svec_pc * p_pc\n newR_pc = Ivec_pc / par.DI\n \n newI_pt = Svec_pt * (p_pc+p_pt)\n newR_pt = Ivec_pt / par.DI\n \n Svec_pc = Svec_pc - newI_pc\n\n Ivec_pc = Ivec_pc + newI_pc - newR_pc\n \n Rvec_pc = Rvec_pc + newR_pc\n \n Svec_pt = Svec_pt - newI_pt\n \n Ivec_pt = Ivec_pt + newI_pt - newR_pt\n \n Rvec_pt = Rvec_pt + newR_pt\n \n res_pc[iter + 1,:] = [Svec_pc.sum(), Ivec_pc.sum(), Rvec_pc.sum(), 0]\n eachIter_pc[iter + 1] = newI_pc.sum()\n res_pc[iter + 1, 3] = eachIter_pc[max(0, iter - par.HospiterIters) : iter].sum() * par.HospitalisationRate\n \n history_pc[iter + 1,0,:] = Svec_pc\n history_pc[iter + 1,1,:] = Ivec_pc\n history_pc[iter + 1,2,:] = Rvec_pc\n \n res_pt[iter + 1,:] = [Svec_pt.sum(), Ivec_pt.sum(), Rvec_pt.sum(), 0]\n eachIter_pt[iter + 1] = newI_pt.sum()\n res_pt[iter + 1, 3] = eachIter_pt[max(0, iter - par.HospiterIters) : iter].sum() * par.HospitalisationRate\n \n history_pt[iter + 1,0,:] = Svec_pt\n history_pt[iter + 1,1,:] = Ivec_pt\n history_pt[iter + 1,2,:] = Rvec_pt\n \n newI_pc_rp[iter+1,:] = newI_pc\n newI_pt_rp[iter+1,:] = newI_pt\n return res_pc, history_pc,res_pt, history_pt,newI_pc_rp,newI_pt_rp\n\n","sub_path":"SIR-without-mobility-mode.py","file_name":"SIR-without-mobility-mode.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"176155222","text":"class wfc_node:\n def __init__(self, state_list, x, y):\n self._states = state_list.copy()\n self._x = x\n self._y = y\n def get_string(self):\n return ':'.join(self._states)\n\n def get_states(self):\n return self._states\n\n def is_collapsed(self):\n return len(self._states) == 1\n\n def get_len(self):\n return len(self._states)\n\n\n def get_pos(self):\n return (self._x, self._y)\n\n def collapse(self, state):\n if state in self._states:\n self._states = [state]\n else:\n raise KeyError(f\"No such state, {state} found at node\")\n\nclass wfc_map:\n def __init__(self, width, height, state_list):\n self._width = width\n self._height = height\n 
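# every cell starts in full superposition of the state list; \"entropy\" here is simply how many states a cell still allows\n        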
self._state_list = state_list\n self.map_data = []\n\n self.allowable_states = [('A', 'B'), ('A','C'), ('A', 'A'), ('B', 'C')]\n for y in range(height):\n row = []\n for x in range(width):\n row.append(wfc_node(self._state_list, x, y))\n self.map_data.append(row)\n\n def collapse_to(self, coord, state):\n x = coord[0]\n y = coord[1]\n if x >= self._width-1:\n self.map_data[y][x].collapse(state)\n else:\n next_node = self.map_data[y][x+1]\n allow = False\n for s in next_node.get_states():\n if (state, s) in self.allowable_states:\n allow = True\n break\n if allow == False:\n print(\"NONONO\")\n else:\n self.map_data[y][x].collapse(state)\n\n\n def get_lowest_entropy(self):\n m = len(self._state_list)\n ret_node = None\n\n for y in range(self._height):\n for x in range(self._width):\n node = self.map_data[y][x]\n if (not node.is_collapsed()) and (node.get_len() <= m):\n m = node.get_len()\n ret_node = node\n return ret_node\n\n\n def __str__(self):\n map_string = ''\n for y, row in enumerate(self.map_data):\n for x, node in enumerate(row):\n map_string += f'({x},{y} )'\n map_string += node.get_string()\n map_string += ',\\t'\n map_string += '\\n'\n return map_string\n\ntest_map = wfc_map(5, 5, ['A','B','C'])\ntest_map.collapse_to((1,1),'B')\n\nwhile test_map.get_lowest_entropy():\n node = test_map.get_lowest_entropy()\n test_map.collapse_to(node.get_pos(), 'A')\nprint(test_map)\n\n\n","sub_path":"wfc.py","file_name":"wfc.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"100688717","text":"#!/usr/bin/python3\n# Python script that takes in a string and sends a search request to the\n# Star Wars API Use the Star Wars API search people endpoint\n# Use the string argument as the search value of the request\n# The body response must be JSON and converted to a Python dictionary.\n# Display: Number of results: \n# Display the name of each result (see example below)\n# You must use the packages requests and sys\n# You must manage the pagination to display all results\n# You are not allowed to import packages other than requests and sys\n# You don’t need to check arguments passed to the script (number or type)\n\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n url = 'https://swapi.co/api/people/'\n search = sys.argv[1]\n r = requests.get(url, params={'search': search})\n print(\"Number of results:\", r.json().get('count'))\n for i in r.json().get('results'):\n print(i.get('name'))\n while r.json()['next'] is not None:\n r = requests.get(r.json()['next'])\n for i in r.json().get('results'):\n print(i.get('name'))\n","sub_path":"0x11-python-network_1/101-starwars.py","file_name":"101-starwars.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"331274537","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\nTHEME_COLOR = \"#375362\"\nclass QuizInterface:\n def __init__(self, quiz_brain: QuizBrain):\n self.quiz = quiz_brain\n self.window = Tk()\n self.window.title(\"Quizzler\")\n self.window.config(bg=THEME_COLOR, padx=20, pady=20)\n self.score = 0\n self.score_label = Label(text=f\"Score: {self.score}\", fg=\"#FFFFFF\", bg=THEME_COLOR)\n self.score_label.grid(row=0, column=1)\n self.canvas = Canvas(width=300, height=250, bg=\"#FFFFFF\")\n self.text = self.canvas.create_text(150, 125, width=280, text=\"Some question\", fill= THEME_COLOR, font=\"Arial 20 italic\")\n 
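# Tkinter holds no Python-side reference to PhotoImage objects, so the button images must stay referenced (e.g. bound to self) or they are garbage-collected and the buttons render blank\n        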
self.canvas.grid(row=1, column=0, columnspan=2, pady=50)\n        self.correct_button_img = PhotoImage(file=\"images/true.png\")\n        self.correct_button = Button(image=self.correct_button_img, highlightthickness=0, command=self.pressed_correct)\n        self.correct_button.grid(row=2, column=0)\n        self.wrong_button_img = PhotoImage(file=\"images/false.png\")\n        self.wrong_button = Button(image=self.wrong_button_img, highlightthickness=0, command=self.pressed_wrong)\n        self.wrong_button.grid(row=2, column=1)\n        self.get_next_question()\n        self.window.mainloop()\n\n    def get_next_question(self):\n        self.canvas.config(bg=\"white\")\n        if self.quiz.still_has_questions():\n            self.score_label.config(text=f\"Score: {self.quiz.score}\")\n            q_text = self.quiz.next_question()\n            self.canvas.itemconfig(self.text, text=q_text)\n        else:\n            self.canvas.itemconfig(self.text, text=\"You have reached the end of the quizzler\")\n            self.correct_button.config(state=\"disabled\")\n            self.wrong_button.config(state=\"disabled\")\n    def pressed_correct(self):\n        is_right = self.quiz.check_answer(\"True\")\n        self.give_feedback(is_right)\n    def pressed_wrong(self):\n        is_right = self.quiz.check_answer(\"False\")\n        self.give_feedback(is_right)\n    def give_feedback(self, is_right):\n        if is_right:\n            self.canvas.config(bg=\"green\")\n        else:\n            self.canvas.config(bg=\"red\")\n        self.window.after(1000, func=self.get_next_question)\n\n\n\n","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"581870823","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n########## RNN - Long Sentence With Batch ###################\r\nsentence = (\"영국이 낳은 세계 최고 극작가로 불리고 있는 셰익스피어는 잉글랜드 중부의 영국의 전형적인 소읍 스트랫퍼드 어폰 에이번에서 출생하였다. 
셰익스피어는 아름다운 숲과 계곡으로 둘러싸인 인구 2000명 정도의 작은 마을 스트랫퍼드에서 존 부부의 첫 번째아들로, 8남매 중 셋째로 태어났고, 이곳에서 학교를 다녔다\")\r\n\r\nchar_set = list(set(sentence))\r\nchar_dic = {w: i for i,w in enumerate(char_set)}\r\n\r\ndata_dim = len(char_dic)\r\nhidden_size = len(char_dic)\r\nnum_classes = len(char_dic)\r\nsequence_length = 40\r\nlearning_rate = 0.1\r\n\r\n# multiple X, Y sequences - as many as batch_size\r\ndataX = []\r\ndataY = []\r\n\r\nfor i in range(0,len(sentence)-sequence_length):\r\n\r\n\tx_str = sentence[i:i+sequence_length]\r\n\ty_str = sentence[i+1:i+1+sequence_length]\r\n\r\n\tx = [char_dic[c] for c in x_str]\r\n\ty = [char_dic[c] for c in y_str]\r\n\r\n\tdataX.append(x)\r\n\tdataY.append(y)\r\n\r\nbatch_size = len(dataX)\r\n\r\nX = tf.placeholder(tf.int32, [None, sequence_length])\r\nY = tf.placeholder(tf.int32, [None, sequence_length])\r\n\r\nX_one_hot = tf.one_hot(X,num_classes)\r\n\r\ndef lstm_cell():\r\n\tcell = tf.contrib.rnn.BasicLSTMCell(hidden_size, state_is_tuple=True)\r\n\treturn cell\r\n\r\nmulti_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell()] * 2, state_is_tuple=True)\r\noutputs, _state = tf.nn.dynamic_rnn(multi_cells, X_one_hot, dtype=tf.float32)\r\n\r\n## FC Layer\r\nX_for_fc = tf.reshape(outputs, [-1,hidden_size])\r\noutputs = tf.contrib.layers.fully_connected(X_for_fc, num_classes, activation_fn=None)\r\n\r\noutputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])\r\n\r\nweights = tf.ones([batch_size, sequence_length])\r\nsequence_loss = tf.contrib.seq2seq.sequence_loss(\r\n\tlogits=outputs, targets=Y, weights=weights)\r\nmean_loss = tf.reduce_mean(sequence_loss)\r\ntrain_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(mean_loss)\r\n\r\nsess = tf.Session()\r\nsess.run(tf.global_variables_initializer())\r\n\r\nfor i in range(1000):\r\n\t_, l, results = sess.run([train_op, mean_loss, outputs], feed_dict={X:dataX, Y:dataY})\r\n\r\n\tfor j, result in enumerate(results):\r\n\t\tindex = np.argmax(result, axis=1)\r\n\t\tprint (i,j,''.join([char_set[t] for t in index]))\r\n\r\nresults = sess.run(outputs, feed_dict={X:dataX})\r\nfor j, result in enumerate(results):\r\n\tindex = np.argmax(result, axis=1)\r\n\tif j == 0:\r\n\t\tprint (''.join([char_set[t] for t in index]), end='')\r\n\telse:\r\n\t\tprint (char_set[index[-1]], end='')\r\n\r\n################ RNN - if you want you ######################\r\n# sample = \"if you want you\"\r\n# idx2char = list(set(sample))\r\n# # char -> idx\r\n# char2idx = {c:i for i,c in enumerate(idx2char)}\r\n\r\n# dic_size = len(char2idx) # input dim\r\n# hidden_size = len(char2idx)\r\n# num_classes = len(char2idx)\r\n# batch_size = 1\r\n# sequence_length = len(sample)-1\r\n# learning_rate = 0.1\r\n\r\n# sample_idx = [char2idx[c] for c in sample] #[1,1,2,3,4,2,5,3]\r\n# x_data = [sample_idx[:-1]]\r\n# y_data = [sample_idx[1:]]\r\n\r\n# X = tf.placeholder(tf.int32, [None, sequence_length])\r\n# Y = tf.placeholder(tf.int32, [None, sequence_length])\r\n\r\n# x_one_hot = tf.one_hot(X,num_classes) # [1]->[010000000]\r\n\r\n# cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)\r\n# initial_state = cell.zero_state(batch_size,tf.float32)\r\n# outputs, _state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)\r\n\r\n# # FC Layer\r\n# x_for_fc = tf.reshape(outputs, [-1,hidden_size])\r\n# outputs = tf.contrib.layers.fully_connected(x_for_fc, num_classes, activation_fn=None)\r\n\r\n# outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])\r\n# weights = tf.ones([batch_size, 
sequence_length])\r\n# sequence_loss = tf.contrib.seq2seq.sequence_loss(\r\n# \t\t\t\tlogits=outputs, targets=Y, weights=weights)\r\n# loss = tf.reduce_mean(sequence_loss)\r\n# train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\r\n\r\n# prediction = tf.argmax(outputs, axis = 2)\r\n\r\n# with tf.Session() as sess:\r\n# \tsess.run(tf.global_variables_initializer())\r\n\t\r\n# \tfor i in range(100):\r\n# \t\tl, _ = sess.run([loss,train], feed_dict={X:x_data, Y:y_data})\r\n# \t\tresult = sess.run(prediction, feed_dict={X:x_data})\r\n# \t\tresult_str = [idx2char[c] for c in np.squeeze(result)] # convert to chars for readable output\r\n# \t\tprint (i, \"loss:\",l,\"Prediction:\",''.join(result_str))\r\n\r\n\r\n################ RNN - hihello ############################\r\n# #idx2char[2]->e\r\n# idx2char = ['h','i','e','l','o']\r\n# # hihell - > ihello\r\n# x_data = [[0,1,0,2,3,3]] #hihell\r\n# x_one_hot = [[[1,0,0,0,0], #h 0\r\n# \t\t\t [0,1,0,0,0], #i 1\r\n# \t\t\t [1,0,0,0,0], #h 0\r\n# \t\t\t [0,0,1,0,0], #e 2\r\n# \t\t\t [0,0,0,1,0], #l 3\r\n# \t\t\t [0,0,0,1,0]]] #l 3\r\n# y_data = [[1,0,2,3,3,4]] #ihello\r\n\r\n# num_classes = 5\r\n# input_dim = 5\r\n# hidden_size = 5\r\n# batch_size = 1\r\n# sequence_length = 6\r\n# learning_rate = 0.1\r\n\r\n# # Shape!\r\n# X = tf.placeholder(tf.float32, [None, sequence_length, input_dim])\r\n# Y = tf.placeholder(tf.int32, [None, sequence_length])\r\n\r\n# cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)\r\n# initial_state = cell.zero_state(batch_size, tf.float32)\r\n# outputs, _states = tf.nn.dynamic_rnn(cell,X,initial_state=initial_state,\r\n# \t\t\t\t\t\t\t\t\t dtype=tf.float32)\r\n\r\n# # FC Layer\r\n# x_for_fc = tf.reshape(outputs, [-1, hidden_size])\r\n# outputs = tf.contrib.layers.fully_connected(\r\n# \t\t\tinputs=x_for_fc, num_outputs=num_classes, activation_fn=None)\r\n\r\n# outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])\r\n\r\n# weights = tf.ones([batch_size, sequence_length])\r\n\r\n# # sequence loss : a cost function for sequences! \r\n# sequence_loss = tf.contrib.seq2seq.sequence_loss(\r\n# \t\t\t\t\t\tlogits=outputs, targets=Y, weights=weights)\r\n# loss = tf.reduce_mean(sequence_loss)\r\n\r\n# train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\r\n# prediction = tf.argmax(outputs, axis=2)\r\n\r\n# with tf.Session() as sess:\r\n# \tsess.run(tf.global_variables_initializer())\r\n# \tfor i in range(50):\r\n# \t\tl, _ = sess.run([loss, train], feed_dict={X:x_one_hot, Y:y_data})\r\n# \t\tresult = sess.run(prediction, feed_dict={X:x_one_hot})\r\n# \t\tprint (i, \"loss:\", l, \"prediction:\",result, \"true Y:\",y_data)\r\n\r\n# \t\tresult_str = [idx2char[c] for c in np.squeeze(result)] # build a char array\r\n# \t\tprint (\"\\t Prediction str:\",\"\".join(result_str)) # array -> string\r\n\r\n\r\n################ Ensemble CNN MNIST #######################\r\n\r\n# 0. Hyper Parameter Setting & Data Load\r\n# from tensorflow.examples.tutorials.mnist import input_data\r\n# mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\r\n\r\n# learning_rate = 0.001\r\n# training_epochs = 20\r\n# batch_size = 100\r\n\r\n# # 1. 
Create a Model class with 3 convolutional layers\r\n# class Model:\r\n\t\r\n# \tdef __init__(self, sess, name):\r\n# \t\tself.sess = sess\r\n# \t\tself.name = name\r\n# \t\tself._build_net()\r\n\r\n# \tdef _build_net(self):\r\n# \t\twith tf.variable_scope(self.name):\r\n\r\n# \t\t\t#T : training phase, F : inference phase - needed when applying dropout\r\n# \t\t\tself.training = tf.placeholder(tf.bool)\r\n\r\n# \t\t\tself.X = tf.placeholder(tf.float32, [None, 784])\r\n# \t\t\tX_img = tf.reshape(self.X, [-1,28,28,1])\r\n\r\n# \t\t\tself.Y = tf.placeholder(tf.float32, [None, 10])\r\n\r\n# \t\t\t# Conv Layer 1\r\n# \t\t\t# Image = [28,28,1]\r\n# \t\t\t# conv1 = [28,28,32]\r\n# \t\t\t# pool1 = [14,14,32]\r\n# \t\t\tconv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3,3],\r\n# \t\t\t\t\t\t\tpadding=\"SAME\", activation=tf.nn.relu)\r\n# \t\t\tpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2,2],\r\n# \t\t\t\t\t\t\tpadding=\"SAME\", strides=2)\r\n# \t\t\tdropout1 = tf.layers.dropout(inputs=pool1, rate=0.7, training=self.training)\r\n\r\n# \t\t\t# Conv Layer 2\r\n# \t\t\t# Image = [14,14,32]\r\n# \t\t\t# conv2 = [14,14,64]\r\n# \t\t\t# pool2 = [7,7,64]\r\n# \t\t\tconv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3,3],\r\n# \t\t\t\t\t\t\tpadding=\"SAME\", activation=tf.nn.relu)\r\n# \t\t\tpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2,2],\r\n# \t\t\t\t\t\t\tpadding=\"SAME\", strides=2)\r\n# \t\t\tdropout2 = tf.layers.dropout(inputs=pool2, rate=0.7, training=self.training)\r\n\r\n# \t\t\t# Conv Layer 3\r\n# \t\t\t# Image = [7,7,64]\r\n# \t\t\t# conv3 = [7,7,128]\r\n# \t\t\t# pool3 = [4,4,128]\r\n# \t\t\tconv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3,3],\r\n# \t\t\t\t\t\t\tpadding=\"SAME\", activation=tf.nn.relu)\r\n# \t\t\tpool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2,2],\r\n# \t\t\t\t\t\t\tpadding=\"SAME\", strides=2)\r\n# \t\t\tdropout3 = tf.layers.dropout(inputs=pool3, rate=0.7, training=self.training)\r\n\r\n# \t\t\t# FC(Fully Connected) Layer / Dense Layer\r\n# \t\t\tflat = tf.reshape(dropout3, [-1, 128 * 4 * 4])\r\n# \t\t\tdense4 = tf.layers.dense(inputs=flat, units=625, activation=tf.nn.relu)\r\n# \t\t\tdropout4 = tf.layers.dropout(inputs=dense4, rate=0.5, training=self.training)\r\n\r\n# \t\t\t# Logistic Layer\r\n# \t\t\tself.logits = tf.layers.dense(inputs=dropout4, units=10)\r\n\r\n# \t\tself.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\r\n# \t\t\tlogits=self.logits, labels=self.Y))\r\n# \t\tself.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\r\n\r\n# \t\tcorrect_prediction = tf.equal(tf.argmax(self.logits,1), tf.argmax(self.Y,1))\r\n# \t\tself.accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\n\r\n# \tdef predict(self,x_test, training=False):\r\n# \t\treturn self.sess.run(self.logits, feed_dict={\r\n# \t\t\tself.X : x_test, self.training: training})\r\n\r\n# \tdef get_accuracy(self, x_test, y_test, training=False):\r\n# \t\treturn self.sess.run(self.accuracy, feed_dict={\r\n# \t\t\tself.X:x_test, self.Y:y_test, self.training:training})\r\n\r\n# \tdef train(self, x_data, y_data, training=True):\r\n# \t\treturn self.sess.run([self.cost, self.optimizer], feed_dict={\r\n# \t\t\tself.X : x_data, self.Y : y_data, self.training: training})\r\n\r\n# sess = tf.Session()\r\n# m1 = Model(sess, \"m1\")\r\n\r\n# sess.run(tf.global_variables_initializer())\r\n\r\n# print (\"Learning Start!\")\r\n# for epoch in range(training_epochs):\r\n# \tavg_cost = 0\r\n# \ttotal_batch = 
int(mnist.train.num_examples / batch_size)\r\n\r\n# \tfor i in range(total_batch):\r\n# \t\tbatch_xs, batch_ys = mnist.train.next_batch(batch_size)\r\n# \t\tc, _ = m1.train(batch_xs, batch_ys)\r\n# \t\tavg_cost += c/total_batch\r\n\r\n# \tprint (\"Epoch:\",'%04d' % (epoch+1), 'cost=','{:.9f}'.format(avg_cost))\r\n\r\n# print (\"Learning Finished!\")\r\n\r\n# print (\"Accuracy:\", m1.get_accuracy(mnist.test.images, mnist.test.labels))\r\n\r\n# 2. Apply the ensemble\r\n# sess = tf.Session()\r\n\r\n# models = []\r\n# num_models = 2\r\n\r\n# for m in range(num_models):\r\n# \tmodels.append(Model(sess, \"model\"+str(m)))\r\n\r\n# sess.run(tf.global_variables_initializer())\r\n\r\n# print (\"Learning Start!\")\r\n\r\n# for epoch in range(training_epochs):\r\n# \tavg_cost_list = np.zeros(len(models))\r\n# \ttotal_batch = int(mnist.train.num_examples/batch_size)\r\n\r\n# \tfor i in range(total_batch):\r\n# \t\tbatch_xs, batch_ys = mnist.train.next_batch(batch_size)\r\n\r\n# \t\t# models = [model0, model1]\r\n# \t\tfor m_idx, m in enumerate(models):\r\n# \t\t\tc, _ = m.train(batch_xs, batch_ys)\r\n# \t\t\tavg_cost_list[m_idx] += c/total_batch\r\n\r\n# \tprint ('Epoch:','%04d' % (epoch+1),'cost=',avg_cost_list)\r\n\r\n# print (\"Learning Finished!\")\r\n# test_size = len(mnist.test.labels)\r\n# predictions = np.zeros([test_size,10])\r\n# for m_idx, m in enumerate(models):\r\n# \tprint (m_idx, 'Accuracy:', m.get_accuracy(mnist.test.images, mnist.test.labels))\r\n# \tp = m.predict(mnist.test.images)\r\n# \tpredictions += p\r\n\r\n# ensemble_correct_prediction = tf.equal(tf.argmax(predictions,1), tf.argmax(\r\n# \t\t\t\t\t\t\t\tmnist.test.labels,1))\r\n# ensemble_accuracy = tf.reduce_mean(tf.cast(ensemble_correct_prediction, tf.float32))\r\n# print (\"Ensemble Accuracy:\", sess.run(ensemble_accuracy))","sub_path":"180109.py","file_name":"180109.py","file_ext":"py","file_size_in_byte":11709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"236917339","text":"#!/usr/bin/python3\n\"\"\" module users view \"\"\"\nfrom api.v1.views import app_views, get, delete, post, put\nfrom flask import jsonify, request, abort\nfrom models import storage\n\n\n@app_views.route('/users/<user_id>', strict_slashes=False,\n methods=['GET', 'DELETE', 'PUT'])\n@app_views.route('/users', strict_slashes=False,\n methods=['GET', 'POST'])\ndef crud_users(user_id=None):\n \"\"\" Crud for users\n Return request methods\n \"\"\"\n data = {\n 'id': user_id, # id for user\n 'class': 'User', # Name class\n 'f_id': None, # foreign key value\n 'exists': ['email', 'password'], # attr that must exist\n }\n # All methods\n req_methods = {\n 'GET': get,\n 'DELETE': delete,\n 'POST': post,\n 'PUT': put\n }\n return req_methods[request.method](data)\n","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"402972973","text":"from config import config\nfrom common import unlock_user, superuser_login\n\n\ndef main():\n auth_token = superuser_login()[\"access_token\"]\n unlock_user(auth_token, \"pb.dalhousie\", \"DEV_DALHOUSIE_EMP\")\n print(\"User is unlocked\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"unlock_user.py","file_name":"unlock_user.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"438214611","text":"#!/usr/bin/env python3\n# -*- coding: 
utf-8 -*-\n\"\"\"\nCreated on Fri Mar 27 16:32:26 2020\n\n@author: aimachine\n\"\"\"\n\nimport os\nimport sys\nimport warnings\nimport numpy as np\nimport imageio\nfrom tifffile import imread, imwrite\nfrom skimage.io import imread, imshow, imread_collection, concatenate_images\nfrom skimage.transform import resize\nfrom skimage.morphology import label\nfrom keras.utils import Progbar\nfrom scipy.ndimage.morphology import binary_dilation\nfrom scipy.ndimage.morphology import binary_fill_holes\nfrom scipy.ndimage.measurements import find_objects\nfrom csbdeep.utils import normalize\nfrom csbdeep.io import Path\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage as ndi\nfrom skimage.segmentation import relabel_sequential\nfrom skimage.util import invert as invertimage\nfrom stardist.models import Config2D, StarDist2D, StarDistData2D, Config3D, StarDistData3D, StarDist3D\nfrom tqdm import tqdm\nfrom csbdeep.data import RawData, create_patches\nimport glob\nfrom stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available\nfrom stardist import Rays_GoldenSpiral\nfrom skimage.filters import sobel\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\n\n# Data Path\n\n\n\ndef _raise(e):\n raise e\n\ndef _fill_label_holes(lbl_img, **kwargs):\n lbl_img_filled = np.zeros_like(lbl_img)\n for l in (set(np.unique(lbl_img)) - set([0])):\n mask = lbl_img==l\n mask_filled = binary_fill_holes(mask,**kwargs)\n lbl_img_filled[mask_filled] = l\n return lbl_img_filled\ndef fill_label_holes(lbl_img, **kwargs):\n \"\"\"Fill small holes in label image.\"\"\"\n # TODO: refactor 'fill_label_holes' and 'edt_prob' to share code\n def grow(sl,interior):\n return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))\n def shrink(interior):\n return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)\n objects = find_objects(lbl_img)\n lbl_img_filled = np.zeros_like(lbl_img)\n for i,sl in enumerate(objects,1):\n if sl is None: continue\n interior = [(s.start>0,s.stop 1, \"not enough training data\"\n rng = np.random.RandomState(42)\n ind = rng.permutation(len(X))\n n_val = max(1, int(round(0.15 * len(ind))))\n ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n print('number of images: %3d' % len(X))\n print('- training: %3d' % len(X_trn))\n print('- validation: %3d' % len(X_val)) \n \n \n print(Config2D.__doc__)\n conf = Config2D (\n n_rays = n_rays,\n train_epochs = epochs,\n train_learning_rate = learning_rate,\n unet_n_depth = depth,\n train_batch_size= batch_size,\n train_patch_size = (256,256),\n n_channel_in = 1,\n train_checkpoint= model_dir + model_name +'.h5',\n grid = (1,1),\n train_loss_weights=(1, 0.05),\n use_gpu = use_gpu\n \n )\n print(conf)\n vars(conf)\n \n \n Starmodel = StarDist2D(conf, name=model_name, basedir=model_dir)\n if os.path.exists(model_dir + model_name + '/' + 'weights_now.h5'):\n Starmodel.load_weights( )\n \n Starmodel.train(X_trn, (Y_trn), validation_data=(X_val,(Y_val)), epochs = epochs)\n Starmodel.optimize_thresholds(X_val, Y_val)\n\n\n# Function read train images and mask return as nump array\n \n#2D training\n\ndef SliceTraining(MouseColon, directory, filename, model_name, model_dir, weights = None, PatchX=256, PatchY=256, use_gpu = True, batch_size = 1, depth = 5, kern_size = 7, n_rays = 16, epochs = 400, learning_rate = 0.0001):\n \n \n X_train = []\n 
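# assumption: every volume in Raw/ has a same-named mask in Mask/, so the slice lists below stay paired\n 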
Binary_Y_train = []\n \n \n Raw_path = os.path.join(MouseColon + '/Raw/', '*tif')\n \n \n Mask_path = os.path.join(MouseColon + '/Mask/', '*tif')\n \n \n TwoDRaw_path = MouseColon + '/2DRaw/'\n TwoDMask_path = MouseColon + '/2DMask/'\n \n \n Path(TwoDRaw_path).mkdir(exist_ok = True)\n Path(TwoDMask_path).mkdir(exist_ok = True)\n \n \n axis_norm = (0,1)\n \n filesRaw = glob.glob(Raw_path) \n for fname in filesRaw:\n \n image = imread(fname)\n for i in range(0, image.shape[0]):\n Name = os.path.basename((os.path.splitext(fname)[0]))\n sliceimage = image[i,:]\n imwrite((TwoDRaw_path + Name + str(i) + '.tif' ) , sliceimage)\n\n filesMask = glob.glob(Mask_path) \n for fname in filesMask:\n \n image = imread(fname)\n for i in range(0, image.shape[0]):\n Name = os.path.basename((os.path.splitext(fname)[0]))\n sliceimage = image[i,:]\n imwrite((TwoDMask_path + Name + str(i) + '.tif' ) , sliceimage)\n\n \n \n filesRaw = sorted(glob.glob(os.path.join(TwoDRaw_path, '*tif'))) \n print('loading Raw images')\n for fname in filesRaw:\n \n image = imread(fname)\n X_train.append(image)\n \n\n filesMask = sorted(glob.glob(os.path.join(TwoDMask_path, '*tif')))\n print('loading Mask images') \n for fname in filesMask:\n \n image = imread(fname)\n Binary_Y_train.append(image)\n \n \n \n \n raw_data = RawData.from_folder (\n basepath = MouseColon,\n source_dirs = ['2DRaw/'],\n target_dir = '2DMask/',\n axes = 'YX',\n )\n \n X, Y, XY_axes = create_patches (\n raw_data = raw_data,\n patch_size = (PatchY,PatchX),\n n_patches_per_image = 16,\n save_file = directory + filename,\n )\n axis_norm = (0,1)\n \n X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X_train)]\n Y = [label(y) for y in tqdm(Binary_Y_train)]\n \n assert len(X) > 1, \"not enough training data\"\n rng = np.random.RandomState(42)\n ind = rng.permutation(len(X))\n n_val = max(1, int(round(0.15 * len(ind))))\n ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n print('number of images: %3d' % len(X))\n print('- training: %3d' % len(X_trn))\n print('- validation: %3d' % len(X_val)) \n \n \n print(Config2D.__doc__)\n conf = Config2D (\n n_rays = n_rays,\n train_epochs = epochs,\n train_learning_rate = learning_rate,\n unet_n_depth = depth,\n train_batch_size= batch_size,\n train_patch_size = (PatchX,PatchY),\n n_channel_in = 1,\n train_checkpoint= model_dir + model_name +'.h5',\n grid = (1,1),\n train_loss_weights=(1, 0.05),\n use_gpu = use_gpu\n \n )\n print(conf)\n vars(conf)\n \n \n Starmodel = StarDist2D(conf, name=model_name, basedir=model_dir)\n if weights is not None:\n Starmodel.load_weights(weights)\n \n elif os.path.exists(model_dir + model_name + '/' + 'weights_now.h5'):\n Starmodel.load_weights(model_dir + model_name + '/' + 'weights_now.h5')\n \n Starmodel.train(X_trn, (Y_trn), validation_data=(X_val,(Y_val)), epochs = epochs)\n Starmodel.optimize_thresholds(X_val, Y_val)\n \n#3D training \ndef MembraneTraining(MouseColon, directory, filename, model_name, model_dir, PatchX=256, PatchY=256, PatchZ = 16, use_gpu = True, batch_size = 4, depth = 3, kern_size = 7, n_rays = 16, epochs = 400, learning_rate = 0.0001):\n \n # Get train and test IDs\n \n \n X_train = []\n Binary_Y_train = []\n \n \n Raw_path = os.path.join(MouseColon + '/Raw/', '*tif')\n \n \n Mask_path = os.path.join(MouseColon + '/Mask/', '*tif')\n \n filesRaw = sorted(glob.glob(Raw_path)) \n filesMask = sorted(glob.glob(Mask_path)) \n 
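# sorted() keeps raw volumes and masks aligned by index, assuming identical file naming in Raw/ and Mask/\n 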
print('loading images')\n for fname in filesRaw:\n \n image = imread(fname)\n X_train.append(image)\n print('loading masks')\n for fname in filesMask:\n \n image = imread(fname)\n Binary_Y_train.append(image)\n \n \n\n X_train = np.asarray(X_train)\n Binary_Y_train = np.asarray(Binary_Y_train)\n \n print(X_train.shape)\n \n\n raw_data = RawData.from_folder (\n basepath = MouseColon,\n source_dirs = ['Raw/'],\n target_dir = 'Mask/',\n axes = 'ZYX',\n )\n \n X, Y, XY_axes = create_patches (\n raw_data = raw_data,\n patch_size = (PatchZ,PatchY,PatchX),\n n_patches_per_image = 128,\n save_file = directory + filename,\n )\n axis_norm = (0,1,2)\n \n X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X_train)]\n Y = [label(y) for y in tqdm(Binary_Y_train)]\n \n \n assert len(X) > 1, \"not enough training data\"\n rng = np.random.RandomState(42)\n ind = rng.permutation(len(X))\n n_val = max(1, int(round(0.15 * len(ind))))\n ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n print('number of images: %3d' % len(X))\n print('- training: %3d' % len(X_trn))\n print('- validation: %3d' % len(X_val)) \n \n \n print(Config3D.__doc__)\n extents = calculate_extents(Y)\n anisotropy = tuple(np.max(extents) / extents)\n rays = Rays_GoldenSpiral(n_rays, anisotropy=anisotropy)\n conf = Config3D (\n rays = rays,\n backbone='unet',\n train_epochs = epochs,\n train_learning_rate = learning_rate,\n unet_n_depth = depth,\n train_checkpoint = model_dir + model_name +'.h5',\n unet_kernel_size = (kern_size, kern_size, kern_size),\n train_patch_size = (PatchZ, PatchX, PatchY ),\n train_batch_size = batch_size,\n train_loss_weights=(1, 0.05),\n grid = tuple(1 if a > 1.5 else 2 for a in anisotropy),\n use_gpu = use_gpu,\n n_channel_in = 1\n )\n \n print(conf)\n vars(conf)\n \n \n Starmodel = StarDist3D(conf, name=model_name, basedir=model_dir)\n \n median_size = calculate_extents(Y, np.median)\n fov = np.array(Starmodel._axes_tile_overlap('ZYX'))\n if any(median_size > fov):\n print(\"WARNING: median object size larger than field of view of the neural network.\")\n if os.path.exists(model_dir + model_name + '/' + 'weights_now.h5'):\n Starmodel.load_weights(model_dir + model_name + '/' + 'weights_now.h5')\n \n Starmodel.train(X_trn, (Y_trn), validation_data=(X_val,(Y_val)), epochs = epochs)\n Starmodel.optimize_thresholds(X_val, Y_val)\n\n\n\n\ndef CCLabels(image):\n \n labelimage = label(image)\n labelimage = ndi.maximum_filter(labelimage, size=4)\n \n nonormimg, forward_map, inverse_map = relabel_sequential(labelimage) \n\n\n return nonormimg \n\ndef BinaryDilation(Image, iterations = 1):\n\n DilatedImage = binary_dilation(Image, iterations = iterations) \n \n return DilatedImage\n ","sub_path":"Terminator/TerminatorUtils/LoadKaggle.py","file_name":"LoadKaggle.py","file_ext":"py","file_size_in_byte":14031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"638467997","text":"###\n# Copyright (c) 2016, Santiago Gil\n# Copyright (c) 2020, oddluck \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n###\n\nimport supybot.utils as utils\nfrom supybot.commands import *\nimport supybot.plugins as plugins\nimport supybot.ircutils as ircutils\nimport supybot.callbacks as callbacks\nimport json\nimport urllib.request\nimport pendulum\nimport requests\n\ntry:\n from supybot.i18n import PluginInternationalization\n _ = PluginInternationalization('NHL')\nexcept ImportError:\n # Placeholder that allows to run the plugin on a bot\n # without the i18n module\n _ = lambda x: x\n\nclass NHL(callbacks.Plugin):\n \"\"\"Get scores from NHL.com.\"\"\"\n def __init__(self, irc):\n self.__parent = super(NHL, self)\n self.__parent.__init__(irc)\n\n self._SCOREBOARD_ENDPOINT = (\"https://statsapi.web.nhl.com/api/v1/schedule?startDate={}&endDate={}\" +\n \"&expand=schedule.teams,schedule.linescore,schedule.broadcasts.all,schedule.ticket,schedule.game.content.media.epg\" +\n \"&leaderCategories=&site=en_nhl&teamId=\")\n # https://statsapi.web.nhl.com/api/v1/schedule?startDate=2016-12-15&endDate=2016-12-15\n # &expand=schedule.teams,schedule.linescore,schedule.broadcasts,schedule.ticket,schedule.game.content.media.epg\n # &leaderCategories=&site=en_nhl&teamId=\n\n self._FUZZY_DAYS = ['yesterday', 'tonight', 'today', 'tomorrow',\n 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']\n\n # These two variables store the latest data acquired from the server\n # and its modification time. It's a one-element cache.\n # They are used to employ HTTP's 'If-Modified-Since' header and\n # avoid unnecessary downloads for today's information (which will be\n # requested all the time to update the scores).\n self._today_scores_cached_url = None\n self._today_scores_last_modified_time = None\n self._today_scores_last_modified_data = None\n \n self._TEAMS_BY_TRI = self._getTeams()\n\n #pendulum.set_formatter('alternative')\n\n def nhl(self, irc, msg, args, optional_team, optional_date):\n \"\"\"[<team>] [<date>]\n Get games for a given date (YYYY-MM-DD). If none is specified, return games\n scheduled for today. 
Optionally add team abbreviation to filter\n for a specific team.\"\"\"\n\n # Check to see if there's optional input and if there is check if it's\n # a date or a team, or both.\n tz = None\n if optional_team is None:\n team = \"all\"\n if optional_date:\n if '--tz' in optional_date:\n tz = optional_date.split()[2]\n optional_date = optional_date.split()[0]\n try:\n date = self._checkDateInput(optional_date)\n #print(\"1\")\n except ValueError as e:\n irc.reply('ERROR: {0!s}'.format(e))\n return\n else:\n if optional_team == '--tz':\n tz = optional_date\n team = 'all'\n date = None\n else:\n date = self._checkDateInput(optional_team)\n #print(\"2\")\n if date: # and len(date) != 3:\n team = \"all\"\n# elif date and len(date) == 3:\n# team = date\n# date = None\n else:\n team = optional_team.upper()\n try:\n date = self._checkDateInput(optional_date)\n #print(\"3\")\n except ValueError as e:\n irc.reply('ERROR: {0!s}'.format(e))\n return\n\n if date is None:\n if not tz:\n tz = 'US/Eastern'\n games = self._getTodayGames(team, tz)\n games_string = self._resultAsString(games)\n if not games_string:\n irc.reply(\"No games found for {}\".format(team))\n return\n try:\n tdate = pendulum.from_format(games[0], 'YYYY-MM-DD').strftime('%m/%d/%y')\n games_string_date = ircutils.bold(tdate + ': ')\n except:\n games_string_date = ''\n #print(games[1]['clock'], games[1]['ended'])\n if len(games) == 2:\n if not games[1]['ended']:\n broadcasts = games[1]['broadcasts']\n games_string += ' [{}]'.format(broadcasts)\n #print(games)\n irc.reply(games_string_date + games_string)\n else:\n games = self._getGamesForDate(team, date)\n games_string = self._resultAsString(games)\n #print(games_string)\n if games_string == '':\n irc.reply(\"No games found for {}\".format(team))\n return\n try:\n tdate = pendulum.from_format(games[0], 'YYYY-MM-DD').strftime('%m/%d/%y')\n games_string_date = ircutils.bold(tdate + ': ')\n except:\n games_string_date = ''\n if len(games) == 1:\n if not games[1]['ended']:\n try:\n broadcasts = games[1]['broadcasts']\n games_string += ' [{}]'.format(broadcasts)\n except:\n pass\n #irc.reply(games_string)\n irc.reply(games_string_date + games_string)\n\n nhl = wrap(nhl, [optional('somethingWithoutSpaces'), optional('somethingWithoutSpaces')])\n \n def _getTeams(self):\n \n url = 'https://statsapi.web.nhl.com/api/v1/teams'\n try:\n data = requests.get(url).json()\n data = data['teams']\n except:\n return None\n \n teams = []\n for team in data:\n teams.append(team['abbreviation'])\n return teams\n\n def nhltv(self, irc, msg, args, optional_team, optional_date):\n \"\"\"[<team>] [<date>]\n Get television broadcasts for a given date (YYYY-MM-DD). If none is specified, return broadcasts\n scheduled for today. 
Optionally add team abbreviation to filter\n for a specific team.\"\"\"\n\n # Check to see if there's optional input and if there is check if it's\n # a date or a team, or both.\n if optional_team is None:\n team = \"all\"\n try:\n date = self._checkDateInput(optional_date)\n except ValueError as e:\n irc.reply('ERROR: {0!s}'.format(e))\n return\n else:\n date = self._checkDateInput(optional_team)\n if date:\n team = \"all\"\n else:\n team = optional_team.upper()\n try:\n date = self._checkDateInput(optional_date)\n except ValueError as e:\n irc.reply('ERROR: {0!s}'.format(e))\n return\n\n if date is None:\n games = self._getTodayTV(team)\n games_string = self._resultTVAsString(games)\n try:\n tdate = pendulum.from_format(games[0], 'YYYY-MM-DD').strftime('%m/%d/%y')\n games_string_date = ircutils.bold(tdate + ': ')\n except:\n games_string_date = ''\n #print(games[0]['clock'], games[0]['ended'])\n if len(games) == 1:\n if not games[1]['ended']:\n broadcasts = games[1]['broadcasts']\n games_string += ' [{}]'.format(broadcasts)\n irc.reply(games_string_date + games_string)\n else:\n games = self._getTVForDate(team, date)\n if isinstance(games, str):\n irc.reply(games)\n return\n games_string = self._resultTVAsString(games)\n try:\n tdate = pendulum.from_format(games[0], 'YYYY-MM-DD').strftime('%m/%d/%y')\n games_string_date = ircutils.bold(tdate + ': ')\n except:\n games_string_date = ''\n if len(games) == 1:\n if not games[1]['ended']:\n try:\n broadcasts = games[1]['broadcasts']\n games_string += ' [{}]'.format(broadcasts)\n except:\n pass\n #irc.reply(games_string)\n irc.reply(games_string_date + games_string)\n \n #if date is None:\n # irc.reply(self._getTodayTV(team))\n #else:\n # irc.reply(self._getTVForDate(team, date))\n\n nhltv = wrap(nhltv, [optional('somethingWithoutSpaces'), optional('somethingWithoutSpaces')])\n\n def _getTodayGames(self, team, tz='US/Eastern'):\n games = self._getGames(team, self._getTodayDate(), tz)\n return games\n\n def _getGamesForDate(self, team, date):\n #print(date)\n games = self._getGames(team, date)\n return games\n\n def _getTodayTV(self, team):\n games = self._getGames(team, self._getTodayDate())\n return games\n\n def _getTVForDate(self, team, date):\n #print(date)\n games = self._getGames(team, date)\n return games\n\n############################\n# Content-getting helpers\n############################\n def _getGames(self, team, date, tz='US/Eastern'):\n \"\"\"Given a date, populate the url with it and try to download its\n content. If successful, parse the JSON data and extract the relevant\n fields for each game. Returns a list of games.\"\"\"\n url = self._getEndpointURL(date)\n\n # (If asking for today's results, enable the 'If-Mod.-Since' flag)\n use_cache = (date == self._getTodayDate())\n #use_cache = False\n response = self._getURL(url, use_cache)\n if isinstance(response, str):\n return \"ERROR: Something went wrong, check input\"\n\n json = self._extractJSON(response)\n games = self._parseGames(json, team, tz)\n return games\n\n def _getEndpointURL(self, date):\n return self._SCOREBOARD_ENDPOINT.format(date, date)\n\n def _getURL(self, url, use_cache=False):\n \"\"\"Use urllib to download the URL's content. The use_cache flag enables\n the use of the one-element cache, which will be reserved for today's\n games URL. 
(In the future we could implement a real cache with TTLs).\"\"\"\n user_agent = 'Mozilla/5.0 \\\n (X11; Ubuntu; Linux x86_64; rv:45.0) \\\n Gecko/20100101 Firefox/45.0'\n header = {'User-Agent': user_agent}\n response = None\n\n # ('If-Modified-Since' to avoid unnecessary downloads.)\n if use_cache and self._haveCachedData(url):\n header['If-Modified-Since'] = self._today_scores_last_modified_time\n\n request = urllib.request.Request(url, headers=header)\n #print(url)\n\n try:\n response = urllib.request.urlopen(request)\n except urllib.error.HTTPError as error:\n if use_cache and error.code == 304: # Cache hit\n self.log.info(\"{} - 304\"\n \"(Last-Modified: \"\n \"{})\".format(url, self._cachedDataLastModified()))\n return self._cachedData()\n else:\n self.log.error(\"HTTP Error ({}): {}\".format(url, error.code))\n pass\n\n self.log.info(\"{} - 200\".format(url))\n \n if not response:\n return \"ERROR: Something went wrong, check input\"\n\n if not use_cache:\n return response.read()\n\n # Updating the cached data:\n self._updateCache(url, response)\n return self._cachedData()\n\n def _extractJSON(self, body):\n return json.loads(body.decode('utf-8'))\n\n def _parseGames(self, json, team, tz='US/Eastern'):\n \"\"\"Extract all relevant fields from NHL.com's json\n and return a list of games.\"\"\"\n games = []\n if json['totalGames'] == 0:\n return games\n games.append(json['dates'][0]['date'])\n for g in json['dates'][0]['games']:\n #print(g)\n # Starting times are in UTC. By default, we will show Eastern times.\n # (In the future we could add a user option to select timezones.)\n tbd_check = self._ISODateToEasternTime(g['gameDate'])\n #print(tbd_check)\n if '3:00 AM' in tbd_check:\n starting_time = 'TBD'\n #starting_time_TBD = True\n else:\n if 'US/Eastern' not in tz:\n starting_time = self._convertISODateToTime(g['gameDate'], tz)\n else:\n starting_time = self._ISODateToEasternTime(g['gameDate'])\n broadcasts = []\n try:\n for item in g['broadcasts']:\n broadcasts.append(item['name'])\n except:\n pass\n #print(broadcasts)\n game_info = {'home_team': g['teams']['home']['team']['abbreviation'],\n 'away_team': g['teams']['away']['team']['abbreviation'],\n 'home_score': g['teams']['home']['score'],\n 'away_score': g['teams']['away']['score'],\n 'broadcasts': '{}'.format(', '.join(item for item in broadcasts)),\n 'starting_time': starting_time,\n 'starting_time_TBD': g['status']['startTimeTBD'],\n 'pregame': (True if 'Pre-Game' in g['status']['detailedState'] else False),\n 'period': g['linescore']['currentPeriod'],\n 'clock': g['linescore'].get('currentPeriodTimeRemaining'),\n 'powerplay_h': g['linescore']['teams']['home']['powerPlay'],\n 'powerplay_a': g['linescore']['teams']['away']['powerPlay'],\n 'goaliePulled_h': g['linescore']['teams']['home']['goaliePulled'],\n 'goaliePulled_a': g['linescore']['teams']['away']['goaliePulled'],\n 'ended': (g['status']['statusCode'] == '7' or g['status']['statusCode'] == '9'),\n 'ppd': (g['status']['statusCode'] == '9'),\n 'type': g['gameType']\n }\n #print(game_info)\n if team == \"all\":\n games.append(game_info)\n else:\n if team in game_info['home_team'] or team in game_info['away_team']:\n games.append(game_info)\n else:\n pass\n return games\n\n############################\n# Today's games cache\n############################\n def _cachedData(self):\n return self._today_scores_last_modified_data\n\n def _haveCachedData(self, url):\n return (self._today_scores_cached_url == url) and \\\n (self._today_scores_last_modified_time is not None)\n\n 
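# illustrative flow: the first 200 response for today's URL is stored via _updateCache;\n # a repeat request sends 'If-Modified-Since' and a 304 reply is answered from this cache\n 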
def _cachedDataLastModified(self):\n return self._today_scores_last_modified_time\n\n def _updateCache(self, url, response):\n self._today_scores_cached_url = url\n self._today_scores_last_modified_time = response.headers['last-modified']\n self._today_scores_last_modified_data = response.read()\n\n############################\n# Formatting helpers\n############################\n def _resultAsString(self, games):\n if len(games) == 0:\n return \"No games found\"\n else:\n s = sorted(games[1:], key=lambda k: k['ended']) #, reverse=True)\n #s = [self._gameToString(g) for g in games]\n b = []\n for g in s:\n b.append(self._gameToString(g))\n #print(b)\n #print(' | '.join(b))\n #games_strings = [self._gameToString(g) for g in games]\n return ' | '.join(b)\n\n def _resultTVAsString(self, games):\n if len(games) == 0:\n return \"No games found\"\n else:\n s = sorted(games[1:], key=lambda k: k['ended']) #, reverse=True)\n #s = [self._gameToString(g) for g in games]\n b = []\n for g in s:\n b.append(self._TVToString(g))\n #print(b)\n #print(' | '.join(b))\n #games_strings = [self._gameToString(g) for g in games]\n return ' | '.join(b)\n\n def _TVToString(self, game):\n \"\"\" Given a game, format the information into a string according to the\n context. For example:\n \"MEM @ CLE 07:00 PM ET\" (a game that has not started yet),\n \"HOU 132 GSW 127 F OT2\" (a game that ended and went to 2 overtimes),\n \"POR 36 LAC 42 8:01 Q2\" (a game in progress).\"\"\"\n away_team = game['away_team']\n home_team = game['home_team']\n if game['period'] == 0: # The game hasn't started yet\n starting_time = game['starting_time'] \\\n if not game['starting_time_TBD'] \\\n else \"TBD\"\n starting_time = ircutils.mircColor('PPD', 'red') if game['ppd'] else starting_time\n return \"{} @ {} {} [{}]\".format(away_team, home_team, starting_time, ircutils.bold(game['broadcasts']))\n\n # The game started => It has points:\n away_score = game['away_score']\n home_score = game['home_score']\n\n away_string = \"{} {}\".format(away_team, away_score)\n home_string = \"{} {}\".format(home_team, home_score)\n\n # Highlighting 'powerPlay':\n if game['powerplay_h'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and not game['goaliePulled_h']:\n home_string = ircutils.mircColor(home_string, 'orange') # 'black', 'yellow')\n if game['powerplay_a'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and not game['goaliePulled_a']:\n away_string = ircutils.mircColor(away_string, 'orange') # 'black', 'yellow')\n\n # Highlighting an empty net (goalie pulled):\n if game['goaliePulled_h'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and game['clock'] != \"00:00\":\n home_string = ircutils.mircColor(home_string, 'red')\n if game['goaliePulled_a'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and game['clock'] != \"00:00\":\n away_string = ircutils.mircColor(away_string, 'red')\n\n # Bold for the winning team:\n if int(away_score) > int(home_score):\n away_string = ircutils.bold(away_string)\n elif int(home_score) > int(away_score):\n home_string = ircutils.bold(home_string)\n\n #print('got here ', game['broadcasts'])\n\n base_str = ''\n if not game['ended']:\n base_str = ' [{}]'.format(game['broadcasts'])\n\n game_string = \"{} {} {}{}\".format(away_string, home_string,\n self._clockBoardToString(game['clock'],\n game['period'],\n game['ended'],\n game['pregame'],\n game['type']),\n base_str)\n\n return game_string\n\n def 
_gameToString(self, game):\n \"\"\" Given a game, format the information into a string according to the\n context. For example:\n \"MEM @ CLE 07:00 PM ET\" (a game that has not started yet),\n \"HOU 132 GSW 127 F OT2\" (a game that ended and went to 2 overtimes),\n \"POR 36 LAC 42 8:01 Q2\" (a game in progress).\"\"\"\n away_team = game['away_team']\n home_team = game['home_team']\n if game['period'] == 0: # The game hasn't started yet\n starting_time = game['starting_time'] \\\n if not game['starting_time_TBD'] \\\n else \"TBD\"\n starting_time = ircutils.mircColor('PPD', 'red') if game['ppd'] else starting_time\n return \"{} @ {} {}\".format(away_team, home_team, starting_time)\n\n # The game started => It has points:\n away_score = game['away_score']\n home_score = game['home_score']\n\n away_string = \"{} {}\".format(away_team, away_score)\n home_string = \"{} {}\".format(home_team, home_score)\n\n # Highlighting 'powerPlay':\n if game['powerplay_h'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and not game['goaliePulled_h']:\n home_string = ircutils.mircColor(home_string, 'orange') # 'black', 'yellow')\n if game['powerplay_a'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and not game['goaliePulled_a']:\n away_string = ircutils.mircColor(away_string, 'orange') # 'black', 'yellow')\n\n # Highlighting an empty net (goalie pulled):\n if game['goaliePulled_h'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and game['clock'] != \"00:00\":\n home_string = ircutils.mircColor(home_string, 'red')\n if game['goaliePulled_a'] and game['clock'].upper() != \"END\" and game['clock'].upper() != \"FINAL\" and game['clock'] != \"00:00\":\n away_string = ircutils.mircColor(away_string, 'red')\n\n # Bold for the winning team:\n if int(away_score) > int(home_score):\n away_string = ircutils.bold(away_string)\n elif int(home_score) > int(away_score):\n home_string = ircutils.bold(home_string)\n\n game_string = \"{} {} {}\".format(away_string, home_string,\n self._clockBoardToString(game['clock'],\n game['period'],\n game['ended'],\n game['pregame'],\n game['type']))\n\n return game_string\n\n def _clockBoardToString(self, clock, period, game_ended, pregame=None, gType=None):\n \"\"\"Get a string with current period and, if the game is still\n in progress, the remaining time in it.\"\"\"\n period_number = period\n # Game hasn't started => There is no clock yet.\n if period_number == 0:\n return \"\"\n\n # Halftime\n #if period:\n # return ircutils.mircColor('Halftime', 'orange')\n\n period_string = self._periodToString(period_number, gType)\n\n # Game finished:\n if game_ended or clock.upper() == \"FINAL\":\n if period_number == 3:\n return ircutils.mircColor('F', 'red')\n else:\n return ircutils.mircColor(\"F/{}\".format(period_string), 'red')\n\n # Game in progress:\n if clock.upper() == \"END\":\n return ircutils.mircColor(\"End {}\".format(period_string), 'light blue')\n else:\n # Period in progress, show clock:\n if pregame:\n return \"{}\".format(ircutils.mircColor('Pre-Game', 'green'))\n return \"{}{}\".format(clock + ' ' if clock != '00:00' else \"\", ircutils.mircColor(period_string, 'green'))\n\n def _periodToString(self, period, gType):\n \"\"\"Get a string describing the current period in the game.\n period is an integer counting periods from 1 (so 5 would be OT1).\n The output format is as follows: {Q1...Q4} (regulation);\n {OT, OT2, OT3...} (overtimes).\"\"\"\n if period <= 3:\n return 
\"P{}\".format(period)\n\n ot_number = period - 3\n if ot_number == 1:\n return \"OT\"\n # if regular/pre season game, we have shootouts\n if gType == 'R' or gType == 'PR':\n if ot_number > 1:\n return \"SO\"\n return \"{}OT\".format(ot_number)\n\n############################\n# Date-manipulation helpers\n############################\n def _getTodayDate(self):\n \"\"\"Get the current date formatted as \"YYYYMMDD\".\n Because the API separates games by day of start, we will consider and\n return the date in the Pacific timezone.\n The objective is to avoid reading future games anticipatedly when the\n day rolls over at midnight, which would cause us to ignore games\n in progress that may have started on the previous day.\n Taking the west coast time guarantees that the day will advance only\n when the whole continental US is already on that day.\"\"\"\n today = self._pacificTimeNow().date()\n today_iso = today.isoformat()\n return today_iso #.replace('-', '')\n\n def _easternTimeNow(self):\n return pendulum.now('US/Eastern')\n\n def _pacificTimeNow(self):\n return pendulum.now('US/Pacific')\n\n def _convertISODateToTime(self, iso, target='US/Eastern'):\n \"\"\"Convert the ISO date in UTC time that the API outputs into a\n time (target timezone) formatted with am/pm. Defaults to US/Eastern.\"\"\"\n try:\n date = pendulum.parse(iso).in_tz('{}'.format(target))\n except:\n try:\n target = self._checkTarget(target)\n date = pendulum.parse(iso).in_tz('{}'.format(target))\n except:\n date = pendulum.parse(iso).in_tz('{}'.format('US/Eastern'))\n time = date.format('h:mm A zz')\n return \"{}\".format(time)\n\n def _checkTarget(self, target):\n \"\"\"check input among common tz\"\"\"\n target = target.upper()\n common = {'CT': 'US/Central',\n 'CDT': 'US/Central',\n 'CST': 'US/Central',\n 'MT': 'US/Mountain',\n 'MDT': 'US/Mountain',\n 'MST': 'US/Mountain',\n 'PT': 'US/Pacific',\n 'PDT': 'US/Pacific',\n 'PST': 'US/Pacific',\n 'ET': 'US/Eastern',\n 'EDT': 'US/Eastern',\n 'EST': 'US/Eastern',\n 'CENTRAL': 'US/Central',\n 'EASTERN': 'US/Eastern',\n 'PACIFIC': 'US/Pacific',\n 'MOUNTAIN': 'US/Mountain'}\n if target in common:\n target = common[target]\n\n return target\n\n def _ISODateToEasternTime(self, iso):\n \"\"\"Convert the ISO date in UTC time that the API outputs into an\n Eastern time formatted with am/pm. 
(The default human-readable format\n for the listing of games).\"\"\"\n date = pendulum.parse(iso).in_tz('{}'.format('US/Eastern'))\n time = date.format('h:mm A zz')\n return \"{}\".format(time) # Strip the seconds\n\n def _stripDateSeparators(self, date_string):\n return date_string.replace('-', '')\n\n def _EnglishDateToDate(self, date):\n \"\"\"Convert a human-readable like 'yesterday' to a datetime object\n and return a 'YYYYMMDD' string.\"\"\"\n if date == \"lastweek\":\n day_delta = -7\n elif date == \"yesterday\":\n day_delta = -1\n elif date == \"today\" or date ==\"tonight\":\n day_delta = 0\n elif date == \"tomorrow\":\n day_delta = 1\n elif date == \"nextweek\":\n day_delta = 7\n elif date[:3] == 'sun':\n date_string = pendulum.now('US/Pacific').next(pendulum.SUNDAY).format('YYYY-MM-DD')\n return date_string\n elif date[:3] == 'mon':\n date_string = pendulum.now('US/Pacific').next(pendulum.MONDAY).format('YYYY-MM-DD')\n return date_string\n elif date[:3] == 'tue':\n date_string = pendulum.now('US/Pacific').next(pendulum.TUESDAY).format('YYYY-MM-DD')\n return date_string\n elif date[:3] == 'wed':\n date_string = pendulum.now('US/Pacific').next(pendulum.WEDNESDAY).format('YYYY-MM-DD')\n return date_string\n elif date[:3] == 'thu':\n date_string = pendulum.now('US/Pacific').next(pendulum.THURSDAY).format('YYYY-MM-DD')\n return date_string\n elif date[:3] == 'fri':\n date_string = pendulum.now('US/Pacific').next(pendulum.FRIDAY).format('YYYY-MM-DD')\n return date_string\n elif date[:3] == 'sat':\n date_string = pendulum.now('US/Pacific').next(pendulum.SATURDAY).format('YYYY-MM-DD')\n return date_string\n # Calculate the day difference and return a string\n date_string = self._pacificTimeNow().add(days=day_delta).strftime('%Y-%m-%d')\n return date_string\n\n def _checkDateInput(self, date):\n \"\"\"Verify that the given string is a valid date formatted as\n YYYY-MM-DD. 
Also, the API seems to go back until 2014-10-04, so we\n will check that the input is not a date earlier than that.\"\"\"\n\n error_string = 'Incorrect date format, should be YYYY-MM-DD'\n\n if date is None:\n return None\n\n if date in self._FUZZY_DAYS:\n date = self._EnglishDateToDate(date)\n elif date[:3].lower() in self._FUZZY_DAYS:\n date = self._EnglishDateToDate(date.lower())\n\n if date.isdigit():\n try:\n date = pendulum.from_format(date, 'YYYYMMDD').strftime('%Y-%m-%d')\n except:\n raise ValueError('Incorrect date format, should be YYYY-MM-DD')\n elif date.replace('-','').isdigit():\n try:\n parsed_date = pendulum.from_format(date, 'YYYY-MM-DD')\n except:\n raise ValueError('Incorrect date format, should be YYYY-MM-DD')\n elif date.replace('/','').isdigit():\n if len(date.split('/')) == 2:\n year = '/' + str(pendulum.datetime.now().year)\n date += year\n elif len(date.split('/')) == 3:\n if len(date.split('/')[2]) == 2:\n date = '{}/{}/{}'.format(date.split('/')[0], date.split('/')[1], '20{}'.format(date.split('/')[2]))\n else:\n raise ValueError('Incorrect date format, should be YYYY-MM-DD')\n try:\n date = pendulum.from_format(date, 'MM/DD/YYYY').strftime('%Y-%m-%d')\n except:\n raise ValueError('Incorrect date format, should be YYYY-MM-DD')\n elif '-' not in date and date.isdigit() == False and len(date) > 3:\n if date.title() in ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']:\n return \"Incorrect date format, should be YYYY-MM-DD\"\n try:\n date = date.title()\n year = str(pendulum.datetime.now().year)\n date += year\n try:\n date = pendulum.from_format(date, 'DDMMMYYYY').strftime('%Y-%m-%d')\n except:\n date = pendulum.from_format(date, 'MMMDDYYYY').strftime('%Y-%m-%d')\n except:\n raise ValueError('Incorrect date format, should be YYYY-MM-DD')\n #return \"Incorrect date format, should be YYYY-MM-DD\"\n else:\n return None\n\n return date\n\nClass = NHL\n\n# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:\n","sub_path":"data/codefile/oddluck@limnoria-plugins__33c7a3f__NHL$plugin.py.target.py","file_name":"oddluck@limnoria-plugins__33c7a3f__NHL$plugin.py.target.py","file_ext":"py","file_size_in_byte":31983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"146542227","text":"'''\nThis file will test the Inventory Item API\n'''\n\nfrom os.path import abspath, dirname\nimport sys\n\nproject_dir = abspath(dirname(dirname(__file__)))\nsys.path.insert(0, project_dir)\nfrom django.core.urlresolvers import resolve\nfrom django.test import TestCase\nfrom django.test.client import Client\nfrom website.views.itemAPIViews import *\nfrom website.models import *\nfrom datetime import datetime\nimport json\n\n\nclass InventoryItemAPITest(TestCase):\n def setUp(self):\n generic_user = User.objects.create(pk=1, username='Test User 1')\n generic_action_type = ActionType.objects.create(pk=1, ActionTypeName=\"Action 1\")\n generic_location = Location.objects.create(pk=1,\n BuildingID=Building.objects.create(pk=1),\n RoomNumber=\"404\",\n LocationDescription=\"Generic location\")\n\n first_reservation = Reservation.objects.create(pk=1,\n CustomerID=generic_user,\n OwnerID=generic_user,\n CustomerPhone=\"\",\n CustomerEmail=\"\",\n CustomerDept=\"\",\n CustomerStatus=\"\",\n ReservationNotes=\"\",\n EventTitle=\"\")\n\n second_reservation = Reservation.objects.create(pk=2,\n CustomerID=generic_user,\n OwnerID=generic_user,\n CustomerPhone=\"\",\n CustomerEmail=\"\",\n CustomerDept=\"\",\n 
CustomerStatus=\"\",\n ReservationNotes=\"\",\n EventTitle=\"\")\n\n first_action = Action.objects.create(ActionID=1,\n AssignedOperatorID=generic_user,\n ActionTypeID=generic_action_type,\n StartTime=datetime.strptime('Jun 1 2014 1:00PM', '%b %d %Y %I:%M%p'),\n EndTime=datetime.strptime('Jun 1 2014 3:00PM', '%b %d %Y %I:%M%p'),\n Origin=generic_location,\n Destination=generic_location,\n ActionStatus=\"\",\n ActionNotes=\"This is action 1\")\n\n first_action.Reservation.add(first_reservation)\n\n second_action = Action.objects.create(ActionID=2,\n AssignedOperatorID=generic_user,\n ActionTypeID=generic_action_type,\n StartTime=datetime.strptime('Jun 1 2014 1:00PM', '%b %d %Y %I:%M%p'),\n EndTime=datetime.strptime('Jun 1 2014 3:00PM', '%b %d %Y %I:%M%p'),\n Origin=generic_location,\n Destination=generic_location,\n ActionStatus=\"\",\n ActionNotes=\"This is action 2\")\n\n second_action.Reservation.add(second_reservation)\n\n generic_category = Label.objects.create(LabelID=1,\n LabelName=\"Label\",\n ParentCategory=None)\n\n generic_collection = Collection.objects.create(CollectionID=1,\n CollectionName=\"Collection 1\",\n CollectionDescription=\"\")\n\n generic_item_brand = ItemBrand.objects.create(BrandID=1,\n BrandName=\"Unit test brand\")\n\n generic_model = ItemModel.objects.create(ModelID=1,\n ModelDesignation=\"Unit test model\")\n\n generic_status = Status.objects.create(StatusID=1,\n StatusDescription=\"Unit test\")\n\n item1 = InventoryItem.objects.create(ItemID=1,\n Description=\"Item 1\",\n CategoryID=generic_category,\n StorageLocation=generic_location,\n CollectionID=generic_collection,\n Notes=\"Created by unit test\",\n AlternateID=None,\n BrandID=generic_item_brand,\n ModelID=generic_model,\n ParentItem=None,\n StatusID=generic_status)\n\n item2 = InventoryItem.objects.create(ItemID=2,\n Description=\"Item 2\",\n CategoryID=generic_category,\n StorageLocation=generic_location,\n CollectionID=generic_collection,\n Notes=\"Created by unit test\",\n AlternateID=None,\n BrandID=generic_item_brand,\n ModelID=generic_model,\n ParentItem=None,\n StatusID=generic_status)\n\n first_action.inventoryitem_set.add(item1)\n first_action.inventoryitem_set.add(item2)\n second_action.inventoryitem_set.add(item1)\n\n def test_api_urls_resolve_correctly(self):\n found = resolve(u'/actionInventoryItems/1')\n self.assertEqual(found.func, actionInventoryItems)\n found = resolve(u'/inventoryItems/')\n self.assertEqual(found.func, inventoryItemList)\n found = resolve(u'/inventoryItems/1')\n self.assertEqual(found.func, inventoryItemDetail)\n\n def test_can_view_all_inventory_items(self):\n client = Client()\n response = client.get(u'/inventoryItems/')\n self.assertEqual(1, response.data[0][u'ItemID'])\n self.assertEqual(2, response.data[1][u'ItemID'])\n\n def test_can_add_new_inventory_item(self):\n client = Client()\n response = client.post(u'/inventoryItems/', {u'Description': u'Item 3',\n u'CategoryID': u'1',\n u'StorageLocation': u'1',\n u'CollectionID': u'1',\n u'Notes': u'Created by a unit test',\n u'Action': u'1',\n u'BrandID': u'1',\n u'ModelID': u'1',\n u'StatusID': u'1'})\n self.assertEqual(3, response.data[u'ItemID'])\n self.assertEqual(u'Created by a unit test', response.data[u'Notes'])\n self.assertEqual(201, response.status_code)\n\n def test_can_view_one_item(self):\n client = Client()\n response = client.get(u'/inventoryItems/1')\n self.assertEqual(1, response.data[u'ItemID'])\n response = client.get(u'/inventoryItems/2')\n self.assertEqual(2, response.data[u'ItemID'])\n\n def 
test_can_edit_inventory_item(self):\n client = Client()\n response = client.put(u'/inventoryItems/2',\n data=json.dumps({u'Description': u'Updated item 2',\n u'CategoryID': u'1',\n u'StorageLocation': u'1',\n u'CollectionID': u'1',\n u'Notes': u'Created by a unit test',\n u'Action': u'2',\n u'BrandID': u'1',\n u'ModelID': u'1',\n u'StatusID': u'1'}),\n content_type='application/json')\n self.assertEqual(200, response.status_code)\n response = client.get(u'/inventoryItems/2')\n self.assertEqual(u'Updated item 2', response.data[u'Description'])\n\n def test_cant_view_nonexistent_item(self):\n client = Client()\n response = client.get(u'/inventoryItems/3')\n self.assertEqual(404, response.status_code)\n\n def test_can_get_inventory_items_from_action(self):\n client = Client()\n response = client.get(u'/actionInventoryItems/1')\n self.assertEqual(response.data[0][u'Description'], u'Item 1')\n self.assertEqual(response.data[1][u'Description'], u'Item 2')\n response = client.get(u'/actionInventoryItems/2')\n self.assertEqual(response.data[0][u'Description'], u'Item 1')\n response = client.get(u'/actionInventoryItems/3')\n self.assertEqual(response.status_code, 404)\n\n def test_can_delete_item(self):\n client = Client()\n response = client.delete(u'/inventoryItems/2')\n self.assertEqual(204, response.status_code)\n\n def test_can_add_item_to_action(self):\n client = Client()\n response = client.post(u'/addInventoryItemToAction/2', {u'action': u'2'})\n self.assertEqual(201, response.status_code)\n response = client.get(u'/actionInventoryItems/2')\n self.assertEqual(response.data[0][u'Description'], u'Item 1')\n self.assertEqual(response.data[1][u'Description'], u'Item 2')\n\n def test_can_remove_item_from_action(self):\n client = Client()\n response = client.post(u'/removeInventoryItemfromAction/1', {u'action': u'1'})\n self.assertEqual(200, response.status_code)\n response = client.get(u'/actionInventoryItems/1')\n self.assertEqual(response.data[0][u'Description'], u'Item 2')","sub_path":"website/tests/inventoryItem_api_tests.py","file_name":"inventoryItem_api_tests.py","file_ext":"py","file_size_in_byte":10833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"503428933","text":"#!/usr/bin/env python3\r\n\"\"\"\r\nThis script contains the code behind the results in F3 in manuscript \r\nNEUROTRANSMITTER TRANSPORTER/RECEPTOR CO-EXPRESSION SHARES ORGANIZATIONAL TRAITS WITH BRAIN STRUCTURE AND FUNCTION\r\nhttps://doi.org/10.1101/2022.08.26.505274\r\n\"\"\"\r\nfrom enigmatoolbox import datasets\r\nfrom enigmatoolbox.utils.parcellation import parcel_to_surface\r\nimport nilearn\r\nfrom nilearn import surface\r\nfrom brainspace.null_models import SpinPermutations\r\nimport matplotlib.pyplot as plt\r\nimport plotly.express as px\r\nfrom mn_funcs import spin, schaefer_to_surf\r\nfrom plotly.graph_objs import *\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import spearmanr\r\nimport nibabel as nib\r\nfrom nilearn.input_data import NiftiLabelsMasker\r\nimport re\r\nimport os\r\n\r\nparcels = 100\r\ninput_path = 'path/to/data/'\r\nres_p = 'path/to/results/F3/'\r\n\r\nrc_g1 = np.load(input_path + 'rc_g1_{}.npy'.format(parcels))\r\nrc_g2 = np.load(input_path + 'rc_g2_{}.npy'.format(parcels))\r\nrc_g3 = np.load(input_path + 'rc_g3_{}.npy'.format(parcels))\r\n\r\n# cortical thinning\r\ndisorders = ['22q', 'adhd', 'asd', 'bipolar', 'depression', 'epilepsy', 'ocd', 'schizophrenia']\r\nd = {}\r\n\r\n# 22q\r\nsum_stats = datasets.load_summary_stats('22q')\r\nCT = sum_stats['CortThick_case_vs_controls']\r\nCT = 
CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['22q'] = ct_surf\r\n\r\n# adhd\r\nsum_stats = datasets.load_summary_stats('adhd')\r\nCT = sum_stats['CortThick_case_vs_controls_adult']\r\nCT = CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['adhd'] = ct_surf\r\n\r\n# asd\r\nsum_stats = datasets.load_summary_stats('asd')\r\nCT = sum_stats['CortThick_case_vs_controls_meta_analysis']\r\nCT = CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['asd'] = ct_surf\r\n\r\n# bipolar\r\nsum_stats = datasets.load_summary_stats('bipolar')\r\nCT = sum_stats['CortThick_case_vs_controls_adult']\r\nCT = CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['bipolar'] = ct_surf\r\n\r\n# epilepsy\r\nsum_stats = datasets.load_summary_stats('epilepsy')\r\nCT = sum_stats['CortThick_case_vs_controls_allepilepsy']\r\nCT = CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['allepilepsy'] = ct_surf\r\n\r\n# depression\r\nsum_stats = datasets.load_summary_stats('depression')\r\nCT = sum_stats['CortThick_case_vs_controls_adult']\r\nCT = CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['depression_all'] = ct_surf\r\n\r\n# ocd\r\nsum_stats = datasets.load_summary_stats('ocd')\r\nCT = sum_stats['CortThick_case_vs_controls_adult']\r\nCT = CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['ocd'] = ct_surf\r\n\r\n# schizophrenia\r\nsum_stats = datasets.load_summary_stats('schizophrenia')\r\nCT = sum_stats['CortThick_case_vs_controls']\r\nCT = CT['d_icv']\r\nct_surf = parcel_to_surface(CT, target_lab='aparc_fsa5')\r\nd['schizophrenia'] = ct_surf\r\n\r\nfsavg = nilearn.datasets.fetch_surf_fsaverage('fsaverage5')\r\nsphere_lh_fs = surface.load_surf_mesh(fsavg['sphere_left'])[0]\r\nsphere_rh_fs = surface.load_surf_mesh(fsavg['sphere_right'])[0]\r\nsp_fs = SpinPermutations(n_rep=1000, random_state=0)\r\nsp_fs.fit(sphere_lh_fs, points_rh=sphere_rh_fs)\r\n\r\n\r\ndef permute(inp):\r\n left = inp[:10242]\r\n right = inp[10242:]\r\n return np.hstack(sp_fs.randomize(left, right))\r\n\r\n\r\ng1 = schaefer_to_surf(parcels, rc_g1)\r\ng2 = schaefer_to_surf(parcels, rc_g2)\r\ng3 = schaefer_to_surf(parcels, rc_g3)\r\n\r\nenig_g1_spin = {}\r\nenig_g2_spin = {}\r\nenig_g3_spin = {}\r\nselect = ['asd', 'ocd', 'schizophrenia', 'allepilepsy', 'depression_all', '22q', 'adhd', 'bipolar']\r\nfor key, val in d.items():\r\n if key in select:\r\n print(key)\r\n permuted = permute(val)\r\n enig_g1_spin[key] = spin(g1, val, permuted)\r\n enig_g2_spin[key] = spin(g2, val, permuted)\r\n enig_g3_spin[key] = spin(g3, val, permuted)\r\n\r\n\r\ndef plot_corr(inp, fname):\r\n enigma_keys = {'DGS': inp['22q'],\r\n 'ADHD': inp['adhd'],\r\n 'ASD': inp['asd'],\r\n 'BPD': inp['bipolar'],\r\n 'EPS': inp['allepilepsy'],\r\n 'MDD': inp['depression_all'],\r\n 'OCD': inp['ocd'],\r\n 'SCZ': inp['schizophrenia']}\r\n plot = pd.DataFrame.from_dict(enigma_keys, orient='index')\r\n plot.columns = ['r', 'p_vgm']\r\n plot.sort_values(by='r', inplace=True)\r\n fig, ax = plt.subplots(1, figsize=[11, 12])\r\n color = ['lightskyblue' if x > 0.05 else 'dodgerblue' for x in plot['p_vgm']]\r\n ax.barh(range(len(plot)), plot['r'], color=color)\r\n ax.set_yticks(range(0, len(plot)))\r\n ax.set_yticklabels(plot.index, fontsize=32)\r\n ax.set_xlabel(\"spearman's r\", fontsize=32)\r\n ax.tick_params(labelsize=32)\r\n plt.xlim([-0.50, 0.50])\r\n plt.tight_layout()\r\n fig.savefig(res_p + fname)\r\n 
    return\r\n\r\n\r\nplot_corr(enig_g1_spin, 'rc_g1_enigma.png')\r\nplot_corr(enig_g2_spin, 'rc_g2_enigma.png')\r\nplot_corr(enig_g3_spin, 'rc_g3_enigma.png')\r\n\r\n
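# The mn_funcs.spin calls above compare each observed gradient/disorder-map correlation\r\n
# against correlations recomputed on spatially rotated maps. A minimal sketch of that\r\n
# p-value convention (an assumption here, mirroring vgm_matcher further below:\r\n
# two-sided, proportion of |r_spin| >= |r_obs|):\r\n
def spin_p_value(x, y, y_permuted):\r\n    r_obs = spearmanr(x, y)[0]\r\n    r_spin = np.array([spearmanr(x, perm)[0] for perm in y_permuted])\r\n    return r_obs, np.mean(np.abs(r_spin) >= np.abs(r_obs))\r\n\r\n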
# functional activation\r\n\r\n
g1 = np.load(input_path + 'rc_g1_{}.npy'.format(parcels))\r\ng2 = np.load(input_path + 'rc_g2_{}.npy'.format(parcels))\r\ng3 = np.load(input_path + 'rc_g3_{}.npy'.format(parcels))\r\n\r\n
g1_vgm = np.load(input_path + 'rc_g1_{}_vgm.npy'.format(parcels))\r\ng2_vgm = np.load(input_path + 'rc_g2_{}_vgm.npy'.format(parcels))\r\ng3_vgm = np.load(input_path + 'rc_g3_{}_vgm.npy'.format(parcels))\r\n\r\n
grads = [g1, g2, g3]\r\ngrads_vgm = [g1_vgm, g2_vgm, g3_vgm]\r\n\r\n
path_neurosynth = '/path/to/brainstat/data/'\r\nnii_files = os.listdir(path_neurosynth)\r\ndataset = nilearn.datasets.fetch_atlas_schaefer_2018(n_rois=parcels, yeo_networks=7)\r\nmask = NiftiLabelsMasker(dataset['maps'], resampling_target='data', strategy='mean').fit()\r\nnames = []\r\npat = re.compile('__([A-Za-z0-9 ]+).+z$')\r\n\r\n\r\n
def vgm_matcher(inp: list, inp_vgm: list):\r\n    n_rand = 1000\r\n    correlations = []\r\n    p = []\r\n    for i in range(len(inp)):\r\n        correlations.append(np.zeros(len(nii_files)))\r\n        p.append(np.zeros(len(nii_files)))\r\n    for idx, val in enumerate(nii_files):\r\n        names.append(re.search(pat, val)[1])\r\n        feature_data = nib.load(path_neurosynth + val)\r\n        trafo = mask.transform(feature_data).squeeze()\r\n        for i in range(len(inp)):\r\n            grad = inp[i]\r\n            grad_vgm = inp_vgm[i]\r\n            r_obs = spearmanr(grad, trafo)[0]\r\n            correlations[i][idx] = r_obs\r\n            r_spin = np.empty(n_rand)\r\n\r\n            for j, perm in enumerate(grad_vgm):\r\n                r_spin[j] = spearmanr(perm, trafo)[0]\r\n            p[i][idx] = np.mean(np.abs(r_spin) >= np.abs(r_obs))\r\n\r\n    for i in range(len(inp)):\r\n        df = pd.DataFrame({\"Spearman's r\": correlations[i], 'p_vgm': p[i]}, index=names)\r\n        df.sort_values(by=\"Spearman's r\", inplace=True)\r\n        df.to_csv(input_path + 'g{}_{}_nsynth_vgm.csv'.format(i + 1, parcels))\r\n    return\r\n\r\n\r\n
vgm_matcher(grads, grads_vgm)\r\n\r\n
n_g1 = pd.read_csv(input_path + 'g1_{}_nsynth_vgm.csv'.format(parcels), index_col=0)\r\nn_g2 = pd.read_csv(input_path + 'g2_{}_nsynth_vgm.csv'.format(parcels), index_col=0)\r\nn_g3 = pd.read_csv(input_path + 'g3_{}_nsynth_vgm.csv'.format(parcels), index_col=0)\r\n\r\n
with open(input_path + 'supp_l1.txt', 'r') as l:\r\n    interest = l.read().splitlines()\r\n\r\n
g1_interest = n_g1.transpose()[interest].transpose()\r\ng2_interest = n_g2.transpose()[interest].transpose()\r\ng3_interest = n_g3.transpose()[interest].transpose()\r\n\r\n
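# The transpose()[...].transpose() idiom above is plain row selection by term label;\r\n
# an equivalent, arguably clearer form (a sketch, same behaviour whenever every term\r\n
# from supp_l1.txt is present in the CSV index):\r\n
def select_terms(frame, terms):\r\n    return frame.loc[list(terms)]\r\n\r\n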
# g1\r\ng1_interest = g1_interest[g1_interest['p_vgm'] < 0.05]\r\n
keep_g1 = ['face recognition', 'autobiographical memory', 'mind tom', 'secondary somatosensory', 'parkinson',\r\n           'primary somatosensory', 'coordination', 'motor imagery', 'painful', 'response selection', 'anticipation',\r\n           'inhibitory control', 'executive functions', 'control network', 'adhd', 'goal directed', 'insight',\r\n           'focusing', 'illusion', 'manipulation', 'interference']\r\n\r\n
total = g1_interest.transpose()[keep_g1].transpose()\r\nfor_treemap = total.reset_index()\r\nfor_treemap['abs corr'] = np.abs(for_treemap[\"Spearman's r\"])\r\ntest = [float(\"{:.2f}\".format(x)) for x in for_treemap['abs corr']]\r\nfor_treemap['abs corr'] = test\r\n
breaks = [x.replace(' ', '<br>') for x in list(for_treemap['index'])]\r\nfor_treemap['breaks'] = breaks\r\nfor_treemap.iloc[1, 4] = 'autobio-<br>graphical<br>memory'\r\nfor_treemap.iloc[17, 4] = 'focu-<br>sing'\r\n\r\n
fig = px.treemap(for_treemap, names='breaks', path=['breaks'], values=\"abs corr\", color=\"Spearman's r\",\r\n                 range_color=[-0.7, 0.7], color_continuous_scale='rdbu_r')\r\nfig.update_layout(\r\n    uniformtext=dict(minsize=28, mode='show'), margin=dict(t=50, l=25, r=25, b=25), width=800, height=1000,\r\n    font=dict(family='arial'),\r\n    paper_bgcolor='rgba(0,0,0,0)',\r\n    plot_bgcolor='rgba(0,0,0,0)')\r\n\r\n
fig.update_layout({'plot_bgcolor': 'rgba(255, 255, 255, 255)', 'paper_bgcolor': 'rgba(255, 255, 255, 255)', })\r\nfig.update_coloraxes(colorbar_orientation='h', colorbar_thickness=10,\r\n                     colorbar_title_font_size=10, colorbar_tickfont_size=10)\r\nfig.write_image(res_p + 'g1_nsynth.png')\r\n\r\n
# g2\r\ng2_interest = g2_interest[g2_interest['p_vgm'] < 0.05]\r\n
keep_g2 = ['primary visual', 'executive functions', 'control network', 'response selection', 'belief', 'motor pre',\r\n           'intention', 'painful', 'intelligence', 'working memory', 'disorder ocd', 'parkinson', 'motor sma',\r\n           'uncertainty', 'goal', 'subtraction', 'judge', 'rules', 'secondary somatosensory', 'primary somatosensory',\r\n           'efficiency', 'risky', 'decision making', 'attention', 'cognition', 'thoughts', 'bipolar disorder', 'theory',\r\n           'self', 'mental state', 'beliefs', 'mind', 'planning']\r\n\r\n
total = g2_interest.transpose()[keep_g2].transpose()\r\nfor_treemap = total.reset_index()\r\nfor_treemap['abs corr'] = np.abs(for_treemap[\"Spearman's r\"])\r\ntest = [float(\"{:.2f}\".format(x)) for x in for_treemap['abs corr']]\r\nfor_treemap['abs corr'] = test\r\n
breaks = [x.replace(' ', '<br>') for x in list(for_treemap['index'])]\r\nfor_treemap['breaks'] = breaks\r\nfor_treemap.iloc[19, 4] = 'secondary<br>somato-<br>sensory'\r\nfor_treemap.iloc[20, 4] = 'primary<br>somato-<br>sensory'\r\nfor_treemap.iloc[25, 4] = 'cog-<br>nition'\r\nfor_treemap.iloc[24, 4] = 'atten-<br>tion'\r\n
fig = px.treemap(for_treemap, names='breaks', path=['breaks'], values=\"abs corr\", color=\"Spearman's r\",\r\n                 range_color=[-0.7, 0.7], color_continuous_scale='rdbu_r')\r\nfig.update_layout(\r\n    uniformtext=dict(minsize=28, mode='show'), margin=dict(t=50, l=25, r=25, b=25), width=800, height=1000,\r\n    font=dict(family='arial'),\r\n    paper_bgcolor='rgba(0,0,0,0)',\r\n    plot_bgcolor='rgba(0,0,0,0)')\r\n\r\n
fig.update_layout({'plot_bgcolor': 'rgba(255, 255, 255, 255)', 'paper_bgcolor': 'rgba(255, 255, 255, 255)', })\r\nfig.update_coloraxes(colorbar_orientation='h', colorbar_thickness=10,\r\n                     colorbar_title_font_size=10, colorbar_tickfont_size=10)\r\nfig.write_image(res_p + 'g2_nsynth.png')\r\n\r\n
# g3\r\ng3_interest = g3_interest[g3_interest['p_vgm'] < 0.05]\r\n
keep_g3 = ['early visual', 'navigation', 'visual attention', 'empathy', 'social cognitive', 'primary auditory',\r\n           'listening', 'consolidation', 'dementia']\r\n
total = g3_interest.transpose()[keep_g3].transpose()\r\nfor_treemap = total.reset_index()\r\nfor_treemap['abs corr'] = np.abs(for_treemap[\"Spearman's r\"])\r\ntest = [float(\"{:.2f}\".format(x)) for x in for_treemap['abs corr']]\r\nfor_treemap['abs corr'] = test\r\n
breaks = [x.replace(' ', '<br>') for x in list(for_treemap['index'])]\r\nfor_treemap['breaks'] = breaks\r\n
fig = px.treemap(for_treemap, names='breaks', path=['breaks'], values=\"abs corr\", color=\"Spearman's r\",\r\n                 range_color=[-0.7, 0.7], color_continuous_scale='rdbu_r')\r\nfig.update_layout(\r\n    uniformtext=dict(minsize=28, mode='show'), margin=dict(t=50, l=25, r=25, b=25), width=800, height=1000,\r\n    font=dict(family='arial'),\r\n    paper_bgcolor='rgba(0,0,0,0)',\r\n    plot_bgcolor='rgba(0,0,0,0)')\r\n\r\n
fig.update_layout({'plot_bgcolor': 'rgba(255, 255, 255, 255)', 'paper_bgcolor': 'rgba(255, 255, 255, 255)', })\r\nfig.update_coloraxes(colorbar_orientation='h', colorbar_thickness=10,\r\n                     colorbar_title_font_size=10, colorbar_tickfont_size=10)\r\nfig.write_image(res_p + 'g3_nsynth.png')\r\n","sub_path":"receptor_similarity/code/F3.py","file_name":"F3.py","file_ext":"py","file_size_in_byte":12385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"295000320","text":"import smtplib\n\n\ndef sendMail(FROM, TO, SUBJECT, TEXT, SERVER):\n    \"\"\"this is some test documentation in the function\"\"\"\n    message = \"\"\"\\\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n\"\"\" % (FROM, \", \".join(TO), SUBJECT, TEXT)\n    # Send the mail\n    server = smtplib.SMTP(SERVER)\n    # New part\n    server.starttls()\n    server.login('zhanj', 'engine@1')\n    server.sendmail(FROM, TO, message)\n    server.quit()\n\n\nif __name__ == '__main__':\n    sendMail('zhanjz@sondon.net', ['zhanjz2005@163.com'], 'test', 'test', 'zhanjz@sondon.net')\n","sub_path":"demo/tutorial/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
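For the mail demo above, the standard library's email.mime.text builds headers more robustly than a hand-formatted string; a sketch under that assumption (the function name and the TLS/login details are illustrative, not the original tutorial's API):

import smtplib
from email.mime.text import MIMEText

def send_mail_mime(from_addr, to_addrs, subject, text, server_host):
    # MIMEText handles header formatting and the blank line before the body
    msg = MIMEText(text)
    msg['From'] = from_addr
    msg['To'] = ', '.join(to_addrs)
    msg['Subject'] = subject
    server = smtplib.SMTP(server_host)
    try:
        server.starttls()
        server.sendmail(from_addr, to_addrs, msg.as_string())
    finally:
        server.quit()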
{"seq_id":"8975247","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom logya.compat import file_open as open\nfrom logya.compat import is3\nfrom logya.globals import allowed_exts\n\n\n
class FileWriter(object):\n    \"\"\"Class for writing site files.\"\"\"\n\n
    def get_canonical_filename(self, name):\n        \"\"\"Get file name from given path or file.\n\n        If name is not recognized as a file name a /index.html is added. To be\n        recognized as a file name it must end in one of allowed_exts.\n        Leading slashes are stripped off.\n        \"\"\"\n\n        # TODO explain this\n        if not name.startswith('/'):\n            name = '/%s' % name\n\n        # only allowed extension will be written to a file, otherwise a\n        # directory with the name is created and content written to index.html\n        fext = os.path.splitext(name)[1]\n        if not fext or fext.lstrip('.') not in allowed_exts:\n            name = os.path.join(name, 'index.html')\n\n        return name.lstrip('/')\n\n
    def getfile(self, dir_dst, path):\n        \"\"\"Determine file to create and return an open file handle for writing.\n\n        Paths pointing to a file name will be created as they are. When a path\n        points to a directory a file named index.html will be created in that\n        directory.\n        \"\"\"\n\n        filename = self.get_canonical_filename(path)\n        # create target directory if it doesn't exist\n        dir_target = os.path.join(dir_dst, os.path.dirname(filename))\n        if not os.path.exists(dir_target):\n            os.makedirs(dir_target)\n        return open(os.path.join(dir_dst, filename), 'w', encoding='utf-8')\n\n
    def write(self, fh, content):\n        \"\"\"Write content to file and close it.\"\"\"\n\n        if not is3:\n            content = content.encode('utf-8')\n        fh.write(content)\n        fh.close()\n\n\n
class DocWriter(FileWriter):\n    \"\"\"Class for writing site documents.\"\"\"\n\n
    def __init__(self, dir_dst, template):\n        \"\"\"Set required properties.\"\"\"\n\n        self.dir_dst = dir_dst\n        self.template = template\n\n
    def set_template_vars(self, doc):\n        \"\"\"Set template variables.\"\"\"\n\n        # empty doc vars dictionary to not retain previous doc values\n        self.template.empty_doc_vars()\n        for field, val in list(doc.items()):\n            if isinstance(val, str) and not is3:\n                val = val.decode('utf-8')\n            self.template.add_doc_var(field, val)\n\n
    def write(self, doc, template):\n        \"\"\"Render and write document to created file.\n\n        Returns False if template is False.\n        \"\"\"\n\n        if not template:\n            print(('Warning: doc %s has no template set and won\\'t be created.'\n                   % doc['url']))\n            return False\n\n        self.set_template_vars(doc)\n        tpl_vars = self.template.get_all_vars()\n        tpl_env = self.template.get_env()\n\n        # Set additional template variables.\n        tpl_vars['canonical'] = tpl_vars['base_url'] + tpl_vars['url']\n\n        # Pre-render doc body so Jinja2 template tags can be used in content.\n        tpl_vars['body'] = tpl_env.from_string(\n            tpl_vars.get('body', '')).render(tpl_vars)\n\n        page = tpl_env.get_template(template)\n        out = self.getfile(self.dir_dst, doc['url'])\n\n        content = page.render(tpl_vars)\n        if not is3:\n            content = content.encode('utf-8')\n\n        out.write(content)\n        out.close()\n","sub_path":"logya/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"373622196","text":"#CUSTOM FIBONACCI SEQUENCE\nimport sys\nimport time\n\nf1 = 0\nf2 = 0\nfe = f1+f2\nliste = []\n\nliste.append(f1)\nliste.append(f2)\n\ngesuchteZahl = int(input(\"Gebe die letzte Schlangenzahl ein\\n>> \"))\n\n\n
def hochstellen():\n    global f1\n    global f2\n    global fe\n    global gesuchteZahl\n    f1 = liste[0]\n    f2 = liste[1]\n    if f2 < gesuchteZahl:\n        f2 += 1\n    else:\n        f2 = 1\n        f1 += 1\n    fe = f1 + f2\n\n    if (f1 == gesuchteZahl) and (f2 == gesuchteZahl):\n        print(\"Keine Lösung gefunden\")\n        print(\"Enter drücken um zu beenden\")\n        input()\n        sys.exit()\n\n
fiblen = int(input(\"Wie groß soll die Schlange sein?\\n>> \"))\nprint(\"\\n\"*50)\n\n\n
while True:\n    liste = []\n    liste.append(f1)\n    liste.append(f2)\n    while len(liste) < fiblen:\n        liste.append(fe)\n        f1 = f2\n        f2 = fe\n        fe = f1 + f2\n    f1 = liste[0]\n    f2 = liste[1]\n    fe = f1 + f2\n\n    if liste[fiblen-1] != gesuchteZahl:\n        hochstellen()\n    else:\n        if liste[0] <= liste[1]:\n            print(liste)\n        else:\n            print(\"Fertig!\")\n            print(\"Enter drücken um zu beenden\")\n            input()\n            sys.exit()\n        hochstellen()\n","sub_path":"python/ideen/individuelle fibonacci Folge.py","file_name":"individuelle fibonacci Folge.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"509815638","text":"# scatter_graph01.py\r\n\r\n# scatter(): scatter-plot function\r\n\r\n# array-creation package\r\n#import numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n#x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n#y = np.array([9, 8, 7, 9, 8, 3, 2, 4, 3, 4])\r\n\r\nx = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\ny = [9, 8, 7, 9, 8, 3, 2, 4, 3, 4]\r\n\r\nplt.figure(figsize=(10, 6))\r\n\r\nplt.scatter(x, y\r\n            , alpha = 0.5\r\n            , s = 50)\r\n\r\nplt.show()","sub_path":"20200316/2020_03_16/scatter_graph01.py","file_name":"scatter_graph01.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
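The invlib bindings that follow lean on the ctypes argtypes/restype pattern; a self-contained sketch of that same pattern against libc (POSIX-only assumption: CDLL(None) exposes the running process's C symbols):

import ctypes as c

libc = c.CDLL(None)  # POSIX: handle to the current process's symbols
libc.strlen.argtypes = [c.c_char_p]
libc.strlen.restype = c.c_size_t

assert libc.strlen(b"invlib") == 6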
{"seq_id":"49930173","text":"import ctypes as c\nimport os\nimport numpy as np\n\n
precision_types = {np.float32 : 1,\n                   np.float64 : 2}\n\n
path = \"@LIBINVLIB_PATH@\"\ninvlib = c.CDLL(os.path.join(path, \"libinvlib.so\"))\n\n
def resolve_precision(fname, dtype):\n    \"\"\"\n    invlib supports single and double precision arithmetic. The arithmetic\n    type is used as suffix to the function name.\n\n    This function returns the invlib API function corresponding to the given\n    floating point precision.\n\n    Arguments:\n\n        fname(:code:`str`): The name of the function to resolve.\n\n        dtype(:code:`numpy.dtype`): The floating point type to use.\n\n    Raises:\n\n        Exception if a :code:`numpy.dtype` value is given that is not supported.\n    \"\"\"\n    if dtype == np.float32:\n        return getattr(invlib, fname + \"_float\")\n    elif dtype == np.float64:\n        return getattr(invlib, fname + \"_double\")\n    else:\n        raise ValueError(\"Only numpy.float32 and numpy.float64 types are \"\\\n                         \" supported by invlib.\")\n\n
strides = {np.dtype('float32') : 4,\n           np.dtype('float64') : 8}\n\n
c_types = {np.dtype('float32') : c.c_float,\n           np.dtype('float64') : c.c_double}\n\n
def get_stride(dtype):\n    return strides[dtype]\n\n
def get_c_type(dtype):\n    return c_types[dtype]\n\n
def buffer_from_memory(ptr, dtype, size):\n    f = c.pythonapi.PyBuffer_FromMemory\n    f.restype = c.py_object\n    s = strides[dtype]\n    buffer = f(ptr, s * size)\n    return buffer\n#\n# Vectors\n#\n\n
invlib.create_vector_float.argtypes = [c.c_void_p, c.c_uint64, c.c_bool]\ninvlib.create_vector_float.restype = c.c_void_p\n\n
invlib.create_vector_double.argtypes = [c.c_void_p, c.c_uint64, c.c_bool]\ninvlib.create_vector_double.restype = c.c_void_p\n\n
invlib.vector_rows_float.argtypes = [c.c_void_p]\ninvlib.vector_rows_float.restype = c.c_uint64\n\n
invlib.vector_rows_double.argtypes = [c.c_void_p]\ninvlib.vector_rows_double.restype = c.c_uint64\n\n
invlib.vector_get_data_pointer_float.argtypes = [c.c_void_p]\ninvlib.vector_get_data_pointer_float.restype = c.c_void_p\n\n
invlib.vector_get_data_pointer_double.argtypes = [c.c_void_p]\ninvlib.vector_get_data_pointer_double.restype = c.c_void_p\n\n
invlib.vector_dot_float.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.vector_dot_float.restype = c.c_float\n\n
invlib.vector_dot_double.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.vector_dot_double.restype = c.c_double\n\n
invlib.vector_add_float.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.vector_add_float.restype = c.c_void_p\n\n
invlib.vector_add_double.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.vector_add_double.restype = c.c_void_p\n\n
invlib.vector_subtract_float.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.vector_subtract_float.restype = c.c_void_p\n\n
invlib.vector_subtract_double.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.vector_subtract_double.restype = c.c_void_p\n\n
invlib.vector_scale_float.argtypes = [c.c_void_p, c.c_float]\ninvlib.vector_scale_float.restype = None\n\n
invlib.vector_scale_double.argtypes = [c.c_void_p, c.c_double]\ninvlib.vector_scale_double.restype = None\n\n
#\n# Matrices\n#\n\n
invlib.create_matrix_float.argtypes = [c.c_void_p, c.c_uint64, c.c_uint64, c.c_bool]\ninvlib.create_matrix_float.restype = c.c_void_p\n\n
invlib.create_matrix_double.argtypes = [c.c_void_p, c.c_uint64, c.c_uint64, c.c_bool]\ninvlib.create_matrix_double.restype = c.c_void_p\n\n
invlib.matrix_rows_float.argtypes = [c.c_void_p]\ninvlib.matrix_rows_float.restype = c.c_uint64\n\n
invlib.matrix_rows_double.argtypes = [c.c_void_p]\ninvlib.matrix_rows_double.restype = c.c_uint64\n\n
invlib.matrix_cols_float.argtypes = [c.c_void_p]\ninvlib.matrix_cols_float.restype = c.c_uint64\n\n
invlib.matrix_cols_double.argtypes = [c.c_void_p]\ninvlib.matrix_cols_double.restype = c.c_uint64\n\n
invlib.matrix_get_data_pointer_float.argtypes = [c.c_void_p]\ninvlib.matrix_get_data_pointer_float.restype = c.c_void_p\n\n
invlib.matrix_get_data_pointer_double.argtypes = [c.c_void_p]\ninvlib.matrix_get_data_pointer_double.restype = c.c_void_p\n\n
invlib.matrix_matrix_multiply_double.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.matrix_matrix_multiply_double.restype = c.c_void_p\n\n
invlib.matrix_matrix_multiply_float.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.matrix_matrix_multiply_float.restype = c.c_void_p\n\n
invlib.matrix_vector_multiply_double.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.matrix_vector_multiply_double.restype = c.c_void_p\n\n
invlib.matrix_vector_multiply_float.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.matrix_vector_multiply_float.restype = c.c_void_p\n\n
invlib.matrix_vector_multiply_transpose_double.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.matrix_vector_multiply_transpose_double.restype = c.c_void_p\n\n
invlib.matrix_vector_multiply_transpose_float.argtypes = [c.c_void_p, c.c_void_p]\ninvlib.matrix_vector_multiply_transpose_float.restype = c.c_void_p\n","sub_path":"src/invlib/interfaces/python/invlib/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"611694514","text":"import json\r\nimport os\r\nimport re\r\nimport time\r\nimport threading\r\nimport urllib\r\nimport requests\r\nimport demjson\r\nfrom bs4 import BeautifulSoup\r\nimport pymysql\r\n#12.38\r\n
db = pymysql.connect(\r\n    \"localhost\",\r\n    \"root\",\r\n    \"edu123456\",\r\n    \"hdkt_dev\",\r\n    use_unicode=True,\r\n    charset=\"utf8\")\r\ncursor = db.cursor()\r\n
headers = {\r\n    'Accept':\r\n    'application/json, text/javascript, */*; q=0.01',\r\n    'Accept-Encoding':\r\n    'gzip, deflate',\r\n    'Accept-Language':\r\n    'zh-CN,zh;q=0.8',\r\n    'Connection':\r\n    'keep-alive',\r\n    'Cookie':\r\n    'JSESSIONID=HWE7f-wyev4vkdVOsSby4gqx; screen_width_id=w1200; localAreaCode=58.; regFlg=0; username=360101100141720014; usertype=2; schoolStage=\"0001,0002,0003\"; gradeCode=\"\"; schoolId=36010110014172; defaultStage=0001; classId=\"\"; studentId=\"\"; schoolAreaCode=36.01.01; areacode=36.01.01; telNumber=13911635829; verifyCodePhone=13911635829; sso_login_flag=1; ut=3269c89e72fc8efd09930f8d3eceb52c76a33bc692e9de63d0e95cfbd7af904de567a391d2d2d25a; isPortal=1; phoneBuyAble=false; lastVisitTime=20171204143148; student_number=\"\"; student_name=\"\"',\r\n    'Host':\r\n    'plshdkt.jxrrt.cn',\r\n    'User-Agent':\r\n    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',\r\n    'X-Requested-With':\r\n    'XMLHttpRequest'\r\n}\r\n
proxyHost = \"http-dyn.abuyun.com\"\r\nproxyPort = \"9020\"\r\n\r\n# Proxy tunnel authentication info\r\nproxyUser = \"HP07809N4O4MEA8D\"\r\nproxyPass = \"49A1ECE7666C0CF0\"\r\n\r\n
proxyMeta = \"http://%(user)s:%(pass)s@%(host)s:%(port)s\" % {\r\n    \"host\": proxyHost,\r\n    \"port\": proxyPort,\r\n    \"user\": proxyUser,\r\n    \"pass\": proxyPass,\r\n}\r\n\r\n
proxies = {\r\n    \"http\": proxyMeta,\r\n    \"https\": proxyMeta,\r\n}\r\n\r\n\r\n
def getKnowledge(cid, sid):\r\n    url = 'http://tqmshdkt.jxrrt.cn/tqms/newhomework/topicstest/knowledge.action'\r\n    data = {\r\n        'questions.class_id': cid,\r\n        'questions.qtype_id': '',\r\n        'questions.difficulty': '',\r\n        'questions.material_id': sid,\r\n        'questions.Knowledge_id': '',\r\n        'questions.from_platform': '2',\r\n        '_dc': int(time.time())\r\n    }\r\n    text = requests.get(\r\n        url, headers=headers, params=data, proxies=proxies).text\r\n    return text\r\n\r\n\r\n
def getQuestion(cid, sid):\r\n    url = 'http://tqmshdkt.jxrrt.cn/tqms/newhomework/topicstest/allQuestions.action'\r\n    data = {\r\n        'questions.class_id': cid,\r\n        'questions.qtype_id': '',\r\n        'questions.difficulty': '',\r\n        'questions.material_id': sid,\r\n        'questions.Knowledge_id': '',\r\n        'questions.from_platform': '2',\r\n        'start': '0',\r\n        'limit': '1000',\r\n        '_dc': int(time.time())\r\n    }\r\n    text = requests.get(\r\n        url, headers=headers, params=data, proxies=proxies).text\r\n    return text\r\n\r\n\r\n
def getps(data):\r\n    db_temp = pymysql.connect(\r\n        \"localhost\",\r\n        \"root\",\r\n        \"edu123456\",\r\n        \"hdkt_dev\",\r\n        use_unicode=True,\r\n        charset=\"utf8\")\r\n    cursor_temp = db_temp.cursor()\r\n    sql = 'update temp3 set qdata=%s,ok=1 where id=%s'\r\n    for line in data:\r\n        xid = line[0]\r\n        pid = line[1]\r\n        cid = line[2]\r\n        # try:\r\n        #     kdata = getKnowledge(cid, pid)\r\n        #     qdata = getQuestion(cid, pid)\r\n        #     print(xid, pid, cid)\r\n        #     print(kdata)\r\n        #     print(qdata)\r\n        #     cursor_temp.execute(sql, (kdata, qdata, xid))\r\n        #     db_temp.commit()\r\n        # except:\r\n        #     sql2 = 'update temp set ok=0 where id=%s'\r\n        #     cursor_temp.execute(sql2, (xid))\r\n        #     db_temp.commit()\r\n        qdata = getQuestion(cid, pid)\r\n        print(xid, pid, cid)\r\n        cursor_temp.execute(sql, (qdata, xid))\r\n        db_temp.commit()\r\n\r\n\r\n
def getdata():\r\n    sql = 'SELECT id,pid,cid FROM temp2 where ok=0'\r\n    cursor.execute(sql)\r\n    data = cursor.fetchall()\r\n    return data\r\n\r\n\r\n
def work():\r\n    data = getdata()\r\n    dataLen = len(data)\r\n    print(dataLen)\r\n    tnum = 3\r\n    x = dataLen // tnum\r\n    Thead = []\r\n    for r in range(0, tnum):\r\n        t = threading.Thread(target=work3, args=(data[r * x:r * x + x], ))\r\n        Thead.append(t)\r\n    t = threading.Thread(target=work3, args=(data[x * tnum:dataLen], ))\r\n\r\n    Thead.append(t)\r\n    for t in Thead:\r\n        t.start()\r\n\r\n    for t in Thead:\r\n        t.join()\r\n\r\n\r\n
def getall(start):\r\n    print(\"start:\",start)\r\n    db_temp = pymysql.connect(\r\n        \"localhost\",\r\n        \"root\",\r\n        \"edu123456\",\r\n        \"hdkt_dev\",\r\n        use_unicode=True,\r\n        charset=\"utf8\")\r\n    cursor_temp = db_temp.cursor()\r\n    sql = 'select id,kdata from temp where id>=%s and id<%s'\r\n    cursor_temp.execute(sql,(start,start+2000))\r\n    data=cursor_temp.fetchall()\r\n    for line in data:\r\n        xid = line[0]\r\n        kdata = line[1]\r\n        print(xid)\r\n        try:\r\n            js=json.loads(kdata)\r\n        except:\r\n            continue\r\n        for x in js:\r\n            kid=x['id']\r\n            name=x['name']\r\n            sql2='insert into knowledge values(%s,%s,%s)'\r\n            cursor_temp.execute(sql2,(xid,kid,name))\r\n        db_temp.commit()\r\n        sql2 = 'update tempk set ok=3 where id=%s'\r\n        cursor_temp.execute(sql2, (xid))\r\n        db_temp.commit()\r\n\r\n
def getall2(start):\r\n    print(\"start:\", start)\r\n    db_temp = pymysql.connect(\r\n
\"localhost\",\r\n \"root\",\r\n \"edu123456\",\r\n \"hdkt_dev\",\r\n use_unicode=True,\r\n charset=\"utf8\")\r\n cursor_temp = db_temp.cursor()\r\n sql = 'select id,qdata from temp3 where ok=1 and id>=%s and id<%s'\r\n cursor_temp.execute(sql, (start,start+2000))\r\n data = cursor_temp.fetchall()\r\n for line in data:\r\n xid = line[0]\r\n qdata = line[1]\r\n\r\n if len(qdata)==0:\r\n continue\r\n print(xid)\r\n\r\n try:\r\n js = json.loads(qdata)\r\n js = js['items']\r\n except:\r\n print(qdata)\r\n continue\r\n # js = json.loads(qdata)\r\n # js = js['items']\r\n for x in js:\r\n difficulty = x['difficulty']\r\n difficultyname = x['difficultyname']\r\n grade_name = x['grade_name']\r\n sid = x['id']\r\n knowledge_content = x['knowledge_content']\r\n qcontent = x['qcontent']\r\n qcontent_main = x['qcontent_main']\r\n qtype_name = x['qtype_name']\r\n questionsList = x['questionsList']\r\n quote_num = x['quote_num']\r\n sbj_name = x['sbj_name']\r\n scores = x['scores']\r\n source = 0\r\n video_analysis = x['video_analysis']\r\n sql2 = 'insert into problemData values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'\r\n cursor_temp.execute(\r\n sql2, (xid, difficulty, difficultyname, grade_name, sid,\r\n knowledge_content, qcontent, qcontent_main, qtype_name,\r\n questionsList, quote_num, sbj_name, scores, source,\r\n video_analysis))\r\n \r\n db_temp.commit()\r\n sql2 = 'update temp3 set ok=3 where id=%s'\r\n cursor_temp.execute(sql2, (xid))\r\n db_temp.commit() \r\n\r\ndef work3(data):\r\n db_temp = pymysql.connect(\r\n \"localhost\",\r\n \"root\",\r\n \"edu123456\",\r\n \"hdkt_dev\",\r\n use_unicode=True,\r\n charset=\"utf8\")\r\n cursor_temp = db_temp.cursor()\r\n for line in data:\r\n xid = line[0]\r\n pid = line[1]\r\n cid = line[2]\r\n qdata = getQuestion(cid, pid)\r\n if len(qdata)==0:\r\n continue\r\n print(xid)\r\n try:\r\n js = json.loads(qdata)\r\n js = js['items']\r\n except:\r\n print(qdata)\r\n continue\r\n for x in js:\r\n difficulty = x['difficulty']\r\n difficultyname = x['difficultyname']\r\n grade_name = x['grade_name']\r\n sid = x['id']\r\n knowledge_content = x['knowledge_content']\r\n qcontent = x['qcontent']\r\n qcontent_main = x['qcontent_main']\r\n qtype_name = x['qtype_name']\r\n questionsList = x['questionsList']\r\n quote_num = x['quote_num']\r\n sbj_name = x['sbj_name']\r\n scores = x['scores']\r\n source = 0\r\n video_analysis = x['video_analysis']\r\n sql2 = 'insert into problemData values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'\r\n cursor_temp.execute(\r\n sql2, (xid, difficulty, difficultyname, grade_name, sid,\r\n knowledge_content, qcontent, qcontent_main, qtype_name,\r\n questionsList, quote_num, sbj_name, scores, source,\r\n video_analysis))\r\n \r\n db_temp.commit()\r\n sql2 = 'update temp2 set ok=3 where id=%s'\r\n cursor_temp.execute(sql2, (xid))\r\n db_temp.commit() \r\n\r\ndef work2():\r\n start = 19723\r\n tnum = 10\r\n x = 2000\r\n Thead = []\r\n for r in range(0, tnum):\r\n t = threading.Thread(target=getall, args=(x * r + start, ))\r\n Thead.append(t)\r\n\r\n for t in Thead:\r\n t.start()\r\n\r\n for t in Thead:\r\n t.join()\r\n\r\n\r\nif __name__ == '__main__':\r\n # start = 19723\r\n # for i in range(0,20):\r\n # getall2(start+i*1000)\r\n work()","sub_path":"教育网站/problem/getTest.py","file_name":"getTest.py","file_ext":"py","file_size_in_byte":9339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"210973906","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as 
{"seq_id":"210973906","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n
class rnn_model(nn.Module):\r\n    def __init__(self, device, word_embedding=None, n_classes=2, vocab_size=None, use_pretrained_wv=False):\r\n        super(rnn_model, self).__init__()\r\n        self.device = device\r\n        self.n_classes = n_classes\r\n\r\n        if use_pretrained_wv:\r\n            self.embedding = self._create_emb_layer(word_embedding, non_trainable=True)\r\n            self.input_size= word_embedding.shape[1]\r\n        else:\r\n            self.input_size=50\r\n            self.embedding = nn.Embedding(vocab_size,self.input_size)\r\n        self.hidden_size = self.input_size\r\n        self.rnn = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size,dropout=0.4)\r\n        self.h2o = nn.Linear(self.hidden_size,self.n_classes)\r\n        self.logsoftmax = nn.LogSoftmax()\r\n        self.dp20 = nn.Dropout(p=.2)\r\n        self.dp40 = nn.Dropout(p=.4)\r\n\r\n\r\n\r\n\r\n
    def forward(self, input,hidden,is_train=True):\r\n        embedded = self.embedding(input).view(-1, 1, self.hidden_size) #one token at a time (may be changed to one seq)\r\n        output = embedded\r\n        output, hidden = self.rnn(output, hidden)\r\n        output = output[-1] #take the last output state\r\n        if is_train:\r\n            output = self.dp20(output)\r\n        output = self.h2o(output)\r\n        logprobs = self.logsoftmax(output)\r\n        return logprobs\r\n\r\n
    def initHidden(self):\r\n        return torch.zeros(1, 1, self.hidden_size, device=self.device)\r\n\r\n
    def _create_emb_layer(self, weights_matrix, non_trainable=False):\r\n        num_embeddings, embedding_dim = weights_matrix.size()\r\n        emb_layer = nn.Embedding(num_embeddings, embedding_dim)\r\n        emb_layer.load_state_dict({'weight': weights_matrix})\r\n        if non_trainable:\r\n            emb_layer.weight.requires_grad = False\r\n\r\n        return emb_layer\r\n\r\n
if __name__ == '__main__':\r\n    pass","sub_path":"src/rnn_model.py","file_name":"rnn_model.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"478099388","text":"from __future__ import print_function, division, absolute_import, unicode_literals\n\nimport os\nimport shutil\nimport numpy as np\nfrom collections import OrderedDict\nimport logging\n\nimport tensorflow as tf\n\nimport util\nfrom layers import (weight_variable, weight_variable_devonc, bias_variable, \n                    conv2d, deconv2d, max_pool, crop_and_concat,\n                    l2_loss)\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\n\n
def create_conv_net(x, keep_prob, channels_in, channels_out,n_class, layers=1, features_root=16, filter_size=3, pool_size=2, summaries=True):\n    \"\"\"\n    Creates a new convolutional unet for the given parametrization.\n    \n    :param x: input tensor, shape [?,nx,ny,channels_in]\n    :param keep_prob: dropout probability tensor\n    :param channels_in: number of channels in the input image\n    :param channels_out: number of channels in the output image\n    :param n_class: number of output labels\n    :param layers: number of layers in the net\n    :param features_root: number of features in the first layer\n    :param filter_size: size of the convolution filter\n    :param pool_size: size of the max pooling operation\n    :param summaries: Flag if summaries should be created\n    \"\"\"\n    \n    logging.info(\"Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}\".format(layers=layers,\n                                                                                                           features=features_root,\n                                                                                                           filter_size=filter_size,\n                                                                                                           pool_size=pool_size))\n    # Placeholder for the input image\n    nx = tf.shape(x)[1]\n    ny = tf.shape(x)[2]\n    x_image = tf.reshape(x, tf.stack([-1,nx,ny,channels_in]))\n    in_node = x_image\n    batch_size = 
tf.shape(x_image)[0]\n \n weights = []\n biases = []\n convs = []\n pools = OrderedDict()\n deconv = OrderedDict()\n dw_h_convs = OrderedDict()\n up_h_convs = OrderedDict()\n \n in_size = 1000\n size = in_size\n # down layers\n for layer in range(0, layers):\n features = 2**layer*features_root\n stddev =1.5* np.sqrt(2 / (filter_size**2 * features))\n if layer == 0:\n w1 = weight_variable([filter_size, filter_size, channels_in, features], stddev)\n else:\n w1 = weight_variable([filter_size, filter_size, features//2, features], stddev)\n \n w2 = weight_variable([filter_size, filter_size, features, features], stddev)\n b1 = bias_variable([features])\n b2 = bias_variable([features])\n \n conv1 = conv2d(in_node, w1, keep_prob)\n tmp_h_conv = tf.nn.relu(conv1 + b1)\n conv2 = conv2d(tmp_h_conv, w2, keep_prob)\n dw_h_convs[layer] = tf.nn.relu(conv2 + b2)\n \n weights.append((w1, w2))\n biases.append((b1, b2))\n convs.append((conv1, conv2))\n \n size -= 4 \n if layer < layers-1:#because after it's the end of the U\n pools[layer] = max_pool(dw_h_convs[layer], pool_size)\n in_node = pools[layer]\n size /= 2\n \n in_node = dw_h_convs[layers-1]#it's the last layer the bottom of the U but it's because\n #of the definition of range we have layers -1 and not layers\n \n # up layers\n for layer in range(layers-2, -1, -1):#we don't begin at the bottom of the U\n features = 2**(layer+1)*features_root\n stddev = 1.5*np.sqrt(2 / (filter_size**2 * features))\n \n wd = weight_variable_devonc([pool_size, pool_size, features//2, features], stddev)\n bd = bias_variable([features//2]) # weights and bias for upsampling \n #from a layer to another !!\n h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd) \n #recall that in_node is the last layer\n #bottom of the U\n \n h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)#layer\n #before the bottom of the U \n deconv[layer] = h_deconv_concat\n \n w1 = weight_variable([filter_size, filter_size, features, features//2], stddev)\n w2 = weight_variable([filter_size, filter_size, features//2, features//2], stddev)\n b1 = bias_variable([features//2])\n b2 = bias_variable([features//2])\n \n conv1 = conv2d(h_deconv_concat, w1, keep_prob)\n h_conv = tf.nn.relu(conv1 + b1)\n conv2 = conv2d(h_conv, w2, keep_prob)\n in_node = tf.nn.relu(conv2 + b2)\n up_h_convs[layer] = in_node\n\n weights.append((w1, w2))\n biases.append((b1, b2))\n convs.append((conv1, conv2))\n \n size *= 2\n size -= 4\n\n # Output Map\n\n weight = weight_variable([1, 1, features_root, channels_out], stddev)\n bias = bias_variable([channels_out])\n conv = conv2d(in_node, weight, tf.constant(1.0))\n output_map = conv + bias\n up_h_convs[\"out\"] = output_map\n \n if summaries:\n for i, (c1, c2) in enumerate(convs):\n tf.summary.image('summary_conv_%02d_01'%i, get_image_summary(c1))\n tf.summary.image('summary_conv_%02d_02'%i, get_image_summary(c2))\n \n for k in pools.keys():\n tf.summary.image('summary_pool_%02d'%k, get_image_summary(pools[k]))\n \n for k in deconv.keys():\n tf.summary.image('summary_deconv_concat_%02d'%k, get_image_summary(deconv[k]))\n \n for k in dw_h_convs.keys():\n tf.summary.histogram(\"dw_convolution_%02d\"%k + '/activations', dw_h_convs[k])\n\n for k in up_h_convs.keys():\n tf.summary.histogram(\"up_convolution_%s\"%k + '/activations', up_h_convs[k])\n \n variables = []\n for w1,w2 in weights:\n variables.append(w1)\n variables.append(w2)\n \n for b1,b2 in biases:\n variables.append(b1)\n variables.append(b2)\n\n \n return output_map, variables, int(in_size - 
size)\ndef srcnn(x,keep_prob, channels_in,channels_out, layers =1,filters_nb=[64,32], filters_widths=[9,1,5], summaries=True):\n \"\"\"\n Creates a new convolutional unet for the given parametrization.\n \n :param x: input tensor, shape [?,nx,ny,channels_in]\n :param channels_in: number of channels in the input image\n :param channels_out: number of channels in the output image\n :param filters_nb: number of filters for each layer\n :param filters_widths: size of the filter for each layer\n :param summaries: Flag if summaries should be created\n \"\"\"\n \n logging.info(\"Layers {layers},Number of filters {filters_nb}, filter size {filters_widths}\".format(layers=layers,filters_nb=filters_nb,\n filters_widths=filters_widths))\n # Placeholder for the input image\n nx = tf.shape(x)[1]\n ny = tf.shape(x)[2]\n x_image = tf.reshape(x, tf.stack([-1,nx,ny,channels_in]))\n in_node = x_image\n batch_size = tf.shape(x_image)[0]\n\n \n weights = []\n biases = []\n convs = []\n h_convs = OrderedDict()\n filters_loop=np.hstack(([channels_in],filters_nb,[channels_out]))\n \n # Patch extraction and Non linear Mapping\n for layer in range(0,layers+2):\n stddev=np.sqrt(2/(filters_loop[layer+1]*filters_widths[layer]**2))\n w=weight_variable([filters_widths[layer], filters_widths[layer], filters_loop[layer], filters_loop[layer+1]], stddev)\n b = bias_variable([filters_loop[layer+1]])\n conv_ = conv2d(in_node, w,keep_prob)\n if layer>> valid_identifier(\"\")\n False\n >>> valid_identifier(\"get\")\n False\n >>> valid_identifier(\"bang!\")\n False\n >>> valid_identifier(\"_something\")\n True\n \"\"\"\n if keyword.iskeyword(name):\n return False\n if name in dir(__builtins__):\n return False\n return re.match(\"[_A-Za-z][_a-zA-Z0-9]*$\", name) is not None\n\n\ndef list_functions(function_defs):\n \"\"\"List all the functions\"\"\"\n print(u\"Available functions:\")\n for function_def in function_defs:\n print(u\" {}\".format(function_def[\"name\"]))\n\n\ndef list_workflows(workflow_defs):\n \"\"\"List all the workflows\"\"\"\n print(u\"Available workflows:\")\n for workflow_def in workflow_defs:\n print(u\" {}\".format(workflow_def[\"programmatic_name\"]))\n\n\ndef list_actions(action_defs):\n \"\"\"List all the actions (rules)\"\"\"\n print(u\"Available rules:\")\n for action_def in action_defs:\n print(u\" {}\".format(action_def[\"name\"]))\n\n\ndef list_message_destinations(message_destination_defs):\n \"\"\"List all the message destinations\"\"\"\n print(u\"Available message destinations:\")\n for message_destination_def in message_destination_defs:\n print(u\" {}\".format(message_destination_def[\"programmatic_name\"]))\n\n\ndef list_incident_fields(field_defs):\n \"\"\"List all the custom incident fields\"\"\"\n print(u\"Available incident fields:\")\n for field_def in field_defs:\n if field_def[\"type_id\"] == INCIDENT_TYPE_ID and field_def.get(\"prefix\") == \"properties\":\n print(u\" {}\".format(field_def[\"name\"]))\n\n\ndef list_datatables(datatable_defs):\n \"\"\"List all the datatables\"\"\"\n print(u\"Available datatables:\")\n for datatable_def in datatable_defs:\n if datatable_def[\"type_id\"] == DATATABLE_TYPE_ID:\n print(u\" {}\".format(datatable_def[\"type_name\"]))\n\n\ndef list_automatic_tasks(task_defs):\n \"\"\"List all the tasks (built-in and custom are not distinguished)\"\"\"\n print(u\"Available tasks:\")\n for task_def in task_defs:\n print(u\" {}\".format(task_def[\"programmatic_name\"]))\n\n\ndef list_scripts(script_defs):\n \"\"\"List all the scripts\"\"\"\n print(u\"Available 
scripts:\")\n for script_def in script_defs:\n print(u\" {}\".format(script_def[\"name\"]))\n\ndef list_artifact_types(artifact_type_defs):\n \"\"\"List all the artifact types\"\"\"\n print(u\"Available artifact types:\")\n for artifact_type_def in artifact_type_defs:\n print(u\" {}\".format(artifact_type_def[\"name\"]))\n\n\ndef clean(dictionary, keep):\n \"\"\"Remove attributes that are not in the 'keep' list\"\"\"\n for key in dictionary.copy().keys():\n if key not in keep:\n dictionary.pop(key)\n return dictionary\n\n\ndef render_file_mapping(file_mapping_dict, data, source_dir, target_dir):\n \"\"\"\n Walk each value in the \"rendered\" file-mapping dictionary, and create the target files.\n\n Nesting in the 'target' dictionary represents the target directory structure.\n Source values are the full path to a source file.\n Each source file is treated as a JINJA2 template, and rendered using the data provided.\n\n :param file_mapping_dict: {\"target\": \"source\"...}\n :param data: the data for JINJA rendering of each source file\n :param source_dir: path to the root of the source files\n :param target_dir: path where the target files and directories should be written\n \"\"\"\n for (key, value) in sorted(file_mapping_dict.items()):\n if not key:\n LOG.error(u\"Cannot render empty target for %s\", value)\n continue\n # The key is a directory-name or filename,\n # optionally followed by a '@xxx\" where 'xxx' is a variable tha the\n # template needs, such as a loop-variable. Split this out if present.\n loopvar = None\n if \"@\" in key:\n split = key.split(\"@\", 1)\n key = split[0]\n loopvar = split[1]\n data[\"loopvar\"] = loopvar\n #\n if isinstance(value, dict):\n # This is a subdirectory\n subdir = os.path.join(target_dir, key)\n try:\n os.mkdir(subdir)\n except OSError as exc:\n LOG.warn(exc)\n render_file_mapping(value, data, source_dir, subdir)\n else:\n target_file = os.path.join(target_dir, key)\n source_file = os.path.join(source_dir, value)\n if os.path.exists(target_file):\n LOG.error(u\"Not writing %s: file exists.\", target_file)\n continue\n # Render the source file as a JINJA template\n LOG.debug(u\"Writing %s from template %s\", target_file, source_file)\n LOG.info(u\"Writing %s\", target_file)\n with io.open(source_file, 'r', encoding=\"utf-8\") as source:\n source_template = source.read()\n source_rendered = template_functions.render(source_template, data)\n with io.open(target_file, mode=\"w\", encoding=\"utf-8\") as outfile:\n outfile.write(source_rendered)\n\n\ndef codegen_from_template(cmd, client, export_file, template_file_path, package,\n message_destination_names, function_names, workflow_names, action_names,\n field_names, datatable_names, task_names, script_names, artifact_type_names,\n output_dir, output_file, zip):\n \"\"\"Based on a template-file, produce the generated file or package.\n\n To codegen a single file, the template will be a JSON dict with just one entry,\n such as {\"file_to_generate.py\": \"path/to/template.jinja2\"}\n To codegen a whole directory, the template dict can have multiple values,\n including nested subdirectories.\n\n Each source (\"path/to/template.jinja2\") will be rendered using jinja2,\n then written to the target (\"file_to_generate.py\").\n\n :param cmd: 'codegen' or 'extract'\n :param client: the REST client\n :param export_file: file containing customization exports (default is to use the server's latest)\n :param template_file_path: location of templates\n :param package: name of the package to be generated\n :param 
message_destination_names: list of message desctinations; generate all the functions that use them\n :param function_names: list of named functions to be generated\n :param workflow_names: list of workflows whose customization def should be included in the package\n :param action_names: list of actions (rules) whose customization def should be included in the package\n :param field_names: list of incident fields whose customization def should be included in the package\n :param datatable_names: list of data tables whose customization def should be included in the package\n :param task_names: list of automatic tasks whose customization def should be included in the package\n :param script_names: list of scripts whose customization def should be included in the package\n :param artifact_type_names: lists of custom artifact groups to include\n :param output_dir: output location\n :param output_file: output file name, also .res file produced for 'extract'\n :param zip: True if resulting file(s) should be zipped up\n \"\"\"\n functions = {}\n function_params = {}\n message_destinations = {}\n incident_fields = {}\n action_fields = {}\n datatables = {}\n datatable_fields = {}\n phases = {}\n automatic_tasks = {}\n scripts = {}\n workflows = {}\n actions = {}\n artifact_types = {}\n\n if export_file:\n with io.open(export_file, 'r', encoding=\"utf-8\") as export:\n export_data = json.loads(export.read())\n LOG.info(u\"{} is based on the organization export from '{}'.\".format(cmd, export_file))\n else:\n # Force a recent export\n latest_export_uri = \"/configurations/exports/\"\n client.post(latest_export_uri, {\"layouts\": True, \"actions\": True, \"phases_and_tasks\": True})\n\n # Get the most recent org export that includes actions and tasks\n export_uri = \"/configurations/exports/history\"\n export_list = client.get(export_uri)[\"histories\"]\n last_date = 0\n last_id = 0\n for export in export_list:\n if export[\"options\"][\"actions\"] and export[\"options\"][\"phases_and_tasks\"]:\n if export[\"date\"] > last_date:\n last_date = export[\"date\"]\n last_id = export[\"id\"]\n if last_date == 0:\n LOG.error(u\"ERROR: No suitable export is available. \"\n u\"Create an export for code generation. 
(Administrator Settings -> Organization -> Export).\")\n return\n dt = datetime.datetime.utcfromtimestamp(last_date/1000.0)\n LOG.info(u\"{} is based on the organization export from {}.\".format(cmd, dt))\n export_uri = \"/configurations/exports/{}\".format(last_id)\n export_data = client.get(export_uri)\n\n # Get definitions for custom incident fields - used in multiple areas\n all_fields = dict((field[\"name\"], field)\n for field in export_data.get(\"fields\")\n if field[\"type_id\"] == INCIDENT_TYPE_ID and field.get(\"prefix\") == \"properties\")\n\n all_destinations = dict((dest[\"programmatic_name\"], dest)\n for dest in export_data.get(\"message_destinations\", []))\n all_destinations_2 = dict((dest[\"name\"], dest)\n for dest in export_data.get(\"message_destinations\", []))\n\n if function_names or message_destination_names:\n # Check that 'functions' are available (v30 onward)\n function_defs = export_data.get(\"functions\")\n if not function_defs:\n LOG.error(u\"ERROR: Functions are not available in this export.\")\n return\n function_names = function_names or []\n available_names = [function_def[\"name\"] for function_def in function_defs]\n if message_destination_names:\n # Build a list of all the functions that use the specified message destination(s)\n for function_def in function_defs:\n if function_def[\"destination_handle\"] in message_destination_names:\n function_names.append(function_def[\"name\"])\n\n # Check that each named function is available\n for function_name in function_names or []:\n if function_name not in available_names:\n LOG.error(u\"ERROR: Function '%s' not found in this export.\", function_name)\n list_functions(function_defs)\n return\n\n # Check that the named message destination is available\n for message_destination_name in message_destination_names or []:\n if message_destination_name not in all_destinations:\n LOG.error(u\"ERROR: Message destination '%s' not found in this export.\", message_destination_name)\n list_message_destinations(export_data.get(\"message_destinations\"))\n return\n\n if workflow_names:\n # Check that 'workflows' are available (v28 onward)\n workflow_defs = export_data.get(\"workflows\")\n if not workflow_defs:\n LOG.error(u\"ERROR: Workflows are not available in this export.\")\n return\n else:\n workflow_names = []\n\n # ensure empty lists as we may dynamically add to them\n if not task_names:\n task_names = []\n if not script_names:\n script_names = []\n if not artifact_type_names:\n artifact_type_names = []\n if not field_names:\n field_names = []\n\n if action_names:\n # Check that 'actions' are available. 
actions are rules\n action_defs = export_data.get(\"actions\")\n if not action_defs:\n LOG.error(u\"ERROR: Rules are not available in this export.\")\n return\n\n # Check that each named action is available\n actions = {action_def[\"name\"]: clean(copy.deepcopy(action_def), ACTION_ATTRIBUTES)\n for action_def in action_defs\n if action_def[\"name\"] in action_names}\n all_action_fields = dict((field[\"uuid\"], field)\n for field in export_data.get(\"fields\")\n if field[\"type_id\"] == ACTION_TYPE_ID)\n all_action_fields_2 = dict((field[\"name\"], field)\n for field in export_data.get(\"fields\")\n if field[\"type_id\"] == ACTION_TYPE_ID)\n\n for action_name in action_names:\n if action_name not in actions:\n LOG.error(u\"ERROR: Rule '%s' not found in this export.\", action_name)\n list_actions(action_defs)\n return\n action_def = actions[action_name]\n\n # Get the activity-fields for this action (if any)\n action_field_uuids = [item.get(\"content\")\n for item in action_def[\"view_items\"]\n if \"content\" in item and item.get(\"field_type\")]\n fields = []\n for field_uuid in action_field_uuids:\n field = copy.deepcopy(all_action_fields.get(field_uuid))\n if field is None:\n # v29-style export where layout indexed by field name\n field = copy.deepcopy(all_action_fields_2.get(field_uuid))\n clean(field, ACTION_FIELD_ATTRIBUTES)\n for template in field.get(\"templates\", []):\n clean(template, TEMPLATE_ATTRIBUTES)\n for value in field.get(\"values\", []):\n clean(value, VALUE_ATTRIBUTES)\n fields.append(field)\n action_fields[field[\"name\"]] = field\n\n # Get the workflow(s) for this rule (if any)\n wf_names = action_def[\"workflows\"]\n for wf_name in wf_names:\n if wf_name not in workflow_names:\n workflow_names.append(wf_name)\n\n # Get the task(s) for this rule (if any)\n for automation in action_def[\"automations\"]:\n if automation.get(\"tasks_to_create\"):\n for task_name in automation[\"tasks_to_create\"]:\n if task_name not in task_names:\n task_names.append(task_name)\n\n elif automation.get(\"scripts_to_run\"):\n script_name = automation[\"scripts_to_run\"]\n if script_name not in script_names:\n script_names.append(script_name)\n\n elif automation.get(\"field\"):\n field_name = automation[\"field\"]\n if field_name not in field_names and field_name in all_fields:\n field_names.append(field_name)\n\n # Get the message destination(s) for this rule (if any)\n dest_names = action_def[\"message_destinations\"]\n for dest_name in dest_names:\n if dest_name not in message_destinations:\n dest = copy.deepcopy(all_destinations_2[dest_name])\n clean(dest, MESSAGE_DESTINATION_ATTRIBUTES)\n message_destinations[dest_name] = dest\n\n all_functions = dict((function[\"name\"], function)\n for function in export_data.get(\"functions\") or [])\n all_function_fields = dict((field[\"uuid\"], field)\n for field in export_data.get(\"fields\")\n if field[\"type_id\"] == FUNCTION_TYPE_ID)\n\n for function_name in (function_names or []):\n # Get the function definition\n function_def = copy.deepcopy(all_functions.get(function_name))\n # Remove the attributes we don't want to serialize\n clean(function_def, FUNCTION_ATTRIBUTES)\n for view_item in function_def.get(\"view_items\", []):\n clean(view_item, VIEW_ITEM_ATTRIBUTES)\n functions[function_name] = function_def\n\n # Get the parameters (input fields) for this function\n param_names = [item.get(\"content\")\n for item in function_def[\"view_items\"]\n if \"content\" in item]\n params = []\n for param_name in param_names:\n param = 
copy.deepcopy(all_function_fields[param_name])\n clean(param, FUNCTION_FIELD_ATTRIBUTES)\n for template in param.get(\"templates\", []):\n clean(template, TEMPLATE_ATTRIBUTES)\n for value in param.get(\"values\", []):\n clean(value, VALUE_ATTRIBUTES)\n params.append(param)\n function_params[param[\"uuid\"]] = param\n\n # Get the message destination for this function\n dest_name = function_def[\"destination_handle\"]\n if dest_name not in message_destinations:\n dest = copy.deepcopy(all_destinations[dest_name])\n clean(dest, MESSAGE_DESTINATION_ATTRIBUTES)\n message_destinations[dest_name] = dest\n\n if workflow_names:\n all_workflows = dict((workflow[\"programmatic_name\"], workflow)\n for workflow in export_data.get(\"workflows\"))\n for workflow_name in workflow_names:\n # Get the workflow definition\n workflow_def = all_workflows.get(workflow_name)\n if workflow_def:\n # Remove the attributes we don't want to serialize\n workflow = clean(copy.deepcopy(workflow_def), WORKFLOW_ATTRIBUTES)\n clean(workflow[\"content\"], WORKFLOW_CONTENT_ATTRIBUTES)\n workflows[workflow_name] = workflow\n else:\n LOG.error(u\"ERROR: Workflow '%s' not found in this export.\", workflow_name)\n list_workflows(export_data.get(\"workflows\"))\n return\n\n if field_names:\n for field_name in field_names:\n fielddef = all_fields.get(field_name)\n if fielddef:\n field = clean(copy.deepcopy(fielddef), INCIDENT_FIELD_ATTRIBUTES)\n for template in field.get(\"templates\", []):\n clean(template, TEMPLATE_ATTRIBUTES)\n for value in field.get(\"values\", []):\n clean(value, VALUE_ATTRIBUTES)\n incident_fields[field[\"uuid\"]] = field\n else:\n LOG.error(u\"ERROR: Custom incident field '%s' not found in this export.\", field_name)\n list_incident_fields(export_data.get(\"fields\"))\n return\n\n if datatable_names:\n # Get datatable definitions\n all_datatables = dict((table[\"type_name\"], table)\n for table in export_data.get(\"types\")\n if table[\"type_id\"] == DATATABLE_TYPE_ID)\n for datatable_name in datatable_names:\n datatable = all_datatables.get(datatable_name)\n if datatable:\n for (fieldname, fielddef) in datatable[\"fields\"].items():\n field = clean(copy.deepcopy(fielddef), DATATABLE_FIELD_ATTRIBUTES)\n for template in field.get(\"templates\", []):\n clean(template, TEMPLATE_ATTRIBUTES)\n for value in field.get(\"values\", []):\n clean(value, VALUE_ATTRIBUTES)\n datatable_fields[field[\"uuid\"]] = field\n datatables[datatable_name] = datatable\n else:\n LOG.error(u\"ERROR: Datatable '%s' not found in this export.\", datatable_name)\n list_datatables(export_data.get(\"types\", []))\n return\n\n # Automatic tasks determine the list of phases\n phase_names = set()\n if task_names:\n # Get task definitions\n all_tasks = dict((task[\"programmatic_name\"], task)\n for task in export_data.get(\"automatic_tasks\"))\n for task_name in task_names:\n task = all_tasks.get(task_name)\n if task:\n automatic_tasks[task_name] = clean(copy.deepcopy(task), AUTOMATIC_TASK_ATTRIBUTES)\n phase_names.add(task[\"phase_id\"])\n else:\n LOG.error(u\"ERROR: Task '%s' not found in this export.\", task_name)\n list_automatic_tasks(export_data.get(\"automatic_tasks\", []))\n return\n\n if phase_names:\n # Get phase definitions\n all_phases = dict((phase[\"name\"], phase)\n for phase in export_data.get(\"phases\"))\n for phase_name in phase_names:\n # Assume phase-name is found. 
It was derived from the automatic task.\n phase = all_phases[phase_name]\n phases[phase_name] = clean(copy.deepcopy(phase), PHASE_ATTRIBUTES)\n\n if script_names:\n # Get script definitions\n all_scripts = dict((script[\"name\"], script)\n for script in export_data.get(\"scripts\"))\n for script_name in script_names:\n script = all_scripts.get(script_name)\n if script:\n scripts[script_name] = clean(copy.deepcopy(script), SCRIPT_ATTRIBUTES)\n else:\n LOG.error(u\"ERROR: Script '%s' not found in this export.\", script_name)\n list_scripts(export_data.get(\"scripts\", []))\n return\n\n if artifact_type_names:\n # get custom artifact types\n all_artifact_types = dict((artifact_type[\"programmatic_name\"], artifact_type)\n for artifact_type in export_data.get(\"incident_artifact_types\"))\n\n for artifact_type_name in artifact_type_names:\n for artifact_name in all_artifact_types:\n artifact = all_artifact_types[artifact_name]\n if artifact[\"programmatic_name\"] == artifact_type_name:\n artifact_types[artifact[\"programmatic_name\"]] = clean(copy.deepcopy(artifact), ARTIFACT_TYPE_ATTRIBUTES)\n\n # confirm we got all the custom artifact types\n for artifact_name in artifact_type_names:\n if artifact_name not in artifact_types:\n LOG.error(u\"ERROR: Artifact Type '%s' not found in this export.\", artifact_name)\n list_artifact_types(export_data.get(\"incident_artifact_types\", []))\n return\n\n # Minify the export_data\n fields_list = []\n if len(incident_fields) == 0:\n # import requires at least one, use placeholder\n fields_list.extend([\"incident/inc_training\"])\n else:\n fields_list.extend([\"incident/{}\".format(fld[\"name\"]) for fld in incident_fields.values()])\n fields_list.extend([\"actioninvocation/{}\".format(fld[\"name\"]) for fld in action_fields.values()])\n fields_list.extend([\"__function/{}\".format(fld[\"name\"]) for fld in function_params.values()])\n keep_keys = [\n \"export_date\",\n \"export_format_version\",\n \"id\",\n \"server_version\"\n ]\n minify_keys = {\n \"actions\": {\"name\": actions.keys()},\n \"automatic_tasks\": {\"programmatic_name\": automatic_tasks.keys()},\n \"fields\": {\"export_key\": fields_list},\n \"functions\": {\"name\": functions.keys()},\n \"message_destinations\": {\"programmatic_name\": message_destinations.keys()},\n \"phases\": {\"name\": phases.keys()},\n \"scripts\": {\"name\": scripts.keys()},\n \"types\": {\"type_name\": datatables.keys()},\n \"workflows\": {\"programmatic_name\": workflows.keys()},\n \"incident_artifact_types\": {\"programmatic_name\": artifact_types.keys()}\n }\n for key in export_data.keys():\n if key in keep_keys:\n pass\n elif key in minify_keys.keys():\n name = list(minify_keys[key].keys())[0] # The property we match on\n values = minify_keys[key][name] # These are the names of the things to keep\n for data in list(export_data[key]):\n if not data.get(name):\n LOG.warning(\"No %s in %s\", name, key)\n if not data.get(name) in values:\n export_data[key].remove(data)\n elif isinstance(export_data[key], list):\n export_data[key] = []\n elif isinstance(export_data[key], dict):\n export_data[key] = {}\n else:\n export_data[key] = None\n # Incident types are special, add one for this specific package\n # (because not enabled, this doesn't actually get loaded into the destination)\n t0 = int(time.time()*1000)\n export_data[\"incident_types\"] = [{\n \"update_date\": t0,\n \"create_date\": t0,\n \"uuid\": str(UUID_CODEGEN),\n \"description\": \"Customization Packages (internal)\",\n \"export_key\": \"Customization 
Packages (internal)\",\n \"name\": \"Customization Packages (internal)\",\n \"enabled\": False,\n \"system\": False,\n \"parent_id\": None,\n \"hidden\": False,\n \"id\": 0\n }]\n\n # if an extract, write the file and return\n if cmd == \"extract\":\n do_extract(output_file, export_data, zip)\n return\n\n # Prepare the dictionary of substitution values for jinja2\n # (includes all the configuration elements related to the functions)\n data = {\n \"package\": package,\n \"function_names\": function_names,\n \"output_dir\": output_dir,\n \"output_file\": output_file,\n \"functions\": functions,\n \"function_params\": function_params,\n \"message_destinations\": message_destinations,\n \"incident_fields\": incident_fields,\n \"action_fields\": action_fields,\n \"datatables\": datatables,\n \"datatable_fields\": datatable_fields,\n \"phases\": phases,\n \"automatic_tasks\": automatic_tasks,\n \"scripts\": scripts,\n \"workflows\": workflows,\n \"actions\": actions,\n \"export_data\": export_data,\n \"incident_artifact_types\": artifact_types\n }\n LOG.debug(u\"Configuration data:\\n%s\", json.dumps(data, indent=2))\n\n # Read the files/package template and render it\n # to produce the file-mapping dictionary from template-files to generated-files\n with io.open(template_file_path, 'r', encoding=\"utf-8\") as template_file:\n file_mapping_template = template_file.read()\n file_mapping = template_functions.render_json(file_mapping_template, data)\n\n LOG.debug(u\"Codegen template:\\n%s\", json.dumps(file_mapping, indent=2))\n\n # Write all the files defined in the mapping definition\n src_dir = os.path.dirname(template_file_path)\n render_file_mapping(file_mapping, data, src_dir, output_dir)\n\ndef do_extract(output_file, export_data, zip):\n \"\"\"\n either create the export file in clear text or zip up\n :param output_file:\n :param export_data:\n :param zip: True if zipping up file\n :return: None\n \"\"\"\n if sys.version_info.major >= 3:\n res_data = json.dumps(export_data, ensure_ascii=False)\n else:\n res_data = unicode(json.dumps(export_data, ensure_ascii=False))\n\n if zip:\n # get the base name of the file to create so we know what to call our file inside the zip file\n if not output_file.endswith(\".zip\"):\n output_file = \"\".join((output_file, \".zip\"))\n print (u\"Writing {}\".format(output_file))\n\n base = os.path.basename(output_file)\n filename = os.path.splitext(base)\n while '.' 
in filename[0]:\n filename = os.path.splitext(filename[0])\n # name internal to zip file\n filename = \"\".join((filename[0], \".res\"))\n\n with ZipFile(output_file, 'w') as myZip:\n myZip.writestr(filename, res_data)\n else:\n print (u\"Writing {}\".format(output_file))\n with io.open(output_file, 'w', encoding=\"utf-8\") as extract_fh:\n extract_fh.write(res_data)\n\n return\n\ndef codegen_package(client, export_file, package,\n message_destination_names, function_names, workflow_names, action_names,\n field_names, datatable_names, task_names, script_names, artifact_type_names,\n output_dir):\n \"\"\"Generate a an installable python package\"\"\"\n if not valid_identifier(package):\n LOG.error(u\"ERROR: %s is not a valid package name.\", package)\n return\n\n # Make the output directory (usually a new subdirectory of cwd)\n try:\n os.makedirs(output_dir)\n except OSError as exc:\n LOG.warn(u\"%s\", exc)\n\n template_file_path = pkg_resources.resource_filename(\"resilient_circuits\", PACKAGE_TEMPLATE_PATH)\n return codegen_from_template('codegen', client, export_file, template_file_path, package,\n message_destination_names, function_names, workflow_names, action_names,\n field_names, datatable_names, task_names, script_names, artifact_type_names,\n output_dir, None, False)\n\n\ndef codegen_functions(client, export_file, function_names, workflow_names, action_names, artifact_type_names,\n output_dir, output_file):\n \"\"\"Generate a python file that implements one or more functions\"\"\"\n message_destination_names = None\n template_file_path = pkg_resources.resource_filename(\"resilient_circuits\", FUNCTION_TEMPLATE_PATH)\n return codegen_from_template('codegen', client, export_file, template_file_path, None,\n message_destination_names, function_names, workflow_names, action_names,\n None, None, None, None, artifact_type_names,\n output_dir, output_file, False)\n\ndef extract_to_res(client, export_file,\n message_destination_names, function_names, workflow_names, action_names,\n field_names, datatable_names, task_names, script_names, artifact_types,\n output_file, zip):\n \"\"\"\n extract portions of a .res file as directed by the parameters below and save to a specified file\n :param client:\n :param export_file: .res to use otherwise the most recent one produced will be used\n :param message_destination_names:\n :param function_names:\n :param workflow_names:\n :param action_names:\n :param field_names:\n :param datatable_names:\n :param task_names:\n :param script_names:\n :param artifact_types:\n :param output_file: file to produce or overwrite\n :param zip: true if resulting file should be zipped\n :return: None\n \"\"\"\n return codegen_from_template('extract', client, export_file, None, None,\n message_destination_names, function_names, workflow_names, action_names,\n field_names, datatable_names, task_names, script_names, artifact_types,\n None, output_file, zip)\n\ndef get_customize_file_path(package):\n \"\"\"Get the location of current customize.py for this package\"\"\"\n output_base = os.path.join(os.getcwd(), package)\n customize_dir = os.path.join(output_base, package, \"util\")\n customize_file = os.path.join(customize_dir, \"customize.py\")\n return customize_file, output_base, customize_dir\n\ndef get_codegen_reload_data(package):\n \"\"\"Read the default codegen_reload_data section from the given package\"\"\"\n\n # Get the file path of the customize file for the package\n customize_file, base_dir, customize_dir = get_customize_file_path(package)\n\n # Check if 
customize.py exits. We need to get the reload commands\n # from the current customize.py and if it's not there then exit.\n if not os.path.isfile(customize_file):\n raise Exception(u\"{} does not exist. Run resilient_circuits codegen without --reload option to create it.\".format(customize_file))\n\n data = None\n\n # Dynamically load the customize module and call the codegen_reload_date routine.\n # Different pacakges are used in Python 2 and Python 3.\n if sys.version_info.major == 2: # Python 2\n try:\n customize_module = imp.load_source(\"customize\", customize_file)\n data = customize_module.codegen_reload_data()\n except Exception as e:\n LOG.error(u\"Error loading codegen_reload_data: %s\", e)\n else: # Python 3\n try:\n spec = importlib.util.spec_from_file_location(\"codegen_reload_data\", customize_file)\n customize_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(customize_module)\n data = customize_module.codegen_reload_data()\n except Exception as e:\n LOG.error(u\"Error loading codegen_reload_data for package %s\", e)\n return data or []\n\n\ndef merge_codegen_params(reload_list, arg_list):\n \"\"\"Merge the codegen reload params list additional arguments list with no duplicates\"\"\"\n\n if reload_list:\n new_reload_list = reload_list\n else:\n new_reload_list = []\n\n if arg_list:\n new_arg_list = arg_list\n else:\n new_arg_list = []\n\n # Combine the reload and new argument list without duplicates\n combined_args_list = list(set(new_reload_list).union(set(new_arg_list)))\n\n return combined_args_list\n\n\n\ndef codegen_reload_package(client, args):\n \"\"\"Generate a package using previous codegen parameters and add any new ones from the commandline.\"\"\"\n\n # Get the previous params for codegen from the customize.py\n # codegen_reload_data function.\n codegen_params = get_codegen_reload_data(args.reload)\n\n if codegen_params == None or codegen_params == []:\n raise Exception(u\"codegen_reload_data entry point returned empty list\")\n\n # Rename the old customize.py file to customize-yyyymmdd-hhmmss.bak\n customize_file, output_base, customize_dir = get_customize_file_path(args.reload)\n\n # Get time now.\n now = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n old_customize_file = os.path.join(customize_dir, \"customize-{}.bak\".format(now))\n LOG.info(u\"Renaming customize.py to %s\", old_customize_file)\n os.rename(customize_file, old_customize_file)\n\n try:\n # If there are new commandline parameters, append them to the old commandline\n # list for each param type.\n message_destinations = merge_codegen_params(codegen_params.get(\"message_destinations\", []), args.messagedestination)\n functions = merge_codegen_params(codegen_params.get(\"functions\", []), args.function)\n rules = merge_codegen_params(codegen_params.get(\"actions\", []), args.rule)\n workflows = merge_codegen_params(codegen_params.get(\"workflows\", []), args.workflow)\n incident_fields = merge_codegen_params(codegen_params.get(\"incident_fields\", []), args.field)\n datatables = merge_codegen_params(codegen_params.get(\"datatables\", []), args.datatable)\n automatic_tasks = merge_codegen_params(codegen_params.get(\"automatic_tasks\", []), args.task)\n scripts = merge_codegen_params(codegen_params.get(\"scripts\", []), args.script)\n artifact_types = merge_codegen_params(codegen_params.get(\"incident_artifact_types\", []), args.artifacttype)\n\n # Print the codegen --reload command with all arguments.\n print_codegen_reload_commandline(args.reload, args.exportfile,\n 
message_destinations,\n functions,\n workflows,\n rules,\n incident_fields,\n datatables,\n automatic_tasks,\n scripts,\n artifact_types)\n\n # Call codegen to recreate package with the new parameter list.\n codegen_package(client,\n args.exportfile,\n args.reload,\n message_destinations,\n functions,\n workflows,\n rules,\n incident_fields,\n datatables,\n automatic_tasks,\n scripts,\n artifact_types,\n output_base)\n except Exception as e:\n LOG.error(u\"Error running codegen --reload %s\", e)\n finally:\n # If no customize.py was created an error occurred somewhere in codegen.\n # Rename the saved off version back to customize.py\n if not os.path.isfile(customize_file):\n LOG.info(u\"Renaming %s back to %s\", old_customize_file, customize_file)\n os.rename(old_customize_file, customize_file)\n\ndef create_command(command, params, quotes):\n \"\"\"Create commandline substring for codegen --reload commandline \"\"\"\n result_command = command\n if len(params) > 0:\n for item in params:\n if quotes:\n result_command = result_command + u' \"{}\"'.format(item)\n else:\n result_command = result_command + u\" {}\".format(item)\n else:\n result_command = u\"\"\n return result_command\n\ndef print_codegen_reload_commandline(package, export_file, message_destinations, functions, workflows,\n rules, incident_fields, datatables, tasks, scripts, artifact_types):\n \"\"\"Print the resilient-circuits codegen --reload commandline for a given package\"\"\"\n\n # Build the commandline string\n commandline = u\"resilient-circuits codegen --reload {}\".format(package)\n if export_file:\n commandline = commandline + u\" --export {}\".format(export_file)\n commandline = commandline + create_command(u\" --messagedestination\", message_destinations, False)\n commandline = commandline + create_command(u\" --rule\", rules, True)\n commandline = commandline + create_command(u\" --workflow\", workflows, False)\n commandline = commandline + create_command(u\" --function\", functions, False)\n commandline = commandline + create_command(u\" --field\", incident_fields, False)\n commandline = commandline + create_command(u\" --datatable\", datatables, False)\n commandline = commandline + create_command(u\" --task\", tasks, False)\n commandline = commandline + create_command(u\" --script\", scripts, True)\n commandline = commandline + create_command(u\" --artifacttype\", artifact_types, True)\n\n print (commandline)","sub_path":"fn_parse_populate/lib/python3.6/site-packages/resilient_circuits/util/resilient_codegen.py","file_name":"resilient_codegen.py","file_ext":"py","file_size_in_byte":41500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"132733606","text":"import pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\ntrainset_path = \"./train.csv\"\r\ntestset_path = \"./test.csv\"\r\nencoder = LabelEncoder()\r\n\r\n# train\r\ntrainset = pd.read_csv(trainset_path)\r\ntrainset = trainset.dropna(subset=[\"Age\", \"Fare\", \"Sex\", \"Embarked\", \"Pclass\", \"SibSp\", \"Parch\"])\r\ntrainset[\"Sex\"] = encoder.fit_transform(trainset[\"Sex\"])\r\ntrainset[\"Embarked\"] = encoder.fit_transform(trainset[\"Embarked\"])\r\n\r\ninputs = trainset.loc[:, [\"Age\", \"Fare\", \"Sex\", \"Embarked\", \"Pclass\", \"SibSp\", \"Parch\"]]\r\nlabels = trainset.loc[:, [\"Survived\"]]\r\n\r\nclf = DecisionTreeClassifier(max_depth=3)\r\nclf.fit(inputs, labels)\r\n\r\n# test\r\ntestset = 
pd.read_csv(testset_path)\r\ntestset = testset.fillna(trainset.median())\r\ntestset[\"Sex\"] = encoder.fit_transform(testset[\"Sex\"])\r\ntestset[\"Embarked\"] = encoder.fit_transform(testset[\"Embarked\"])\r\n\r\ninputs = testset.loc[:, [\"Age\", \"Fare\", \"Sex\", \"Embarked\", \"Pclass\", \"SibSp\", \"Parch\"]]\r\npredicts = clf.predict(inputs)\r\n\r\n# submit\r\noutputs = pd.DataFrame({\"PassengerId\":testset[\"PassengerId\"], \"Survived\":predicts})\r\noutputs.to_csv(\"submission.csv\", index=False)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"56316924","text":"import fileinput\nimport sys\nimport re\n\nfileName = sys.argv[1]\nchkpts = {}\nrunTime = -1\n\nf = open(fileName + \"_processed\", \"w\")\niterCount = 0\n\nstartTime = None\nfor line in fileinput.input(fileName):\n start = re.search(r\"(?P[0-9]+\\.[0-9]+): start\", line)\n if start is not None:\n if runTime != -1:\n f.write(\"iter:\" + str(iterCount) + \" time:\" + str(runTime))\n f.write(\" chkpt status:\" + repr(chkpts) + \"\\n\")\n iterCount += 1\n chkpts = {}\n startTime = float(start.group(\"name\"))\n\n end = re.search(r\"(?P[0-9]+\\.[0-9]+): end\", line)\n if end is not None:\n endTime = float(end.group(\"name\"))\n if startTime is not None:\n runTime = endTime - startTime\n\n chkpt = re.search(r\"chkpt\\[(?P[0-9]+)\\]=(?P[0-9]+)\", line)\n if chkpt is not None:\n chkptId = int(chkpt.group(\"name1\"))\n unrollFactor = int(chkpt.group(\"name2\"))\n chkpts[chkptId] = unrollFactor\n\n","sub_path":"experiment/pack16/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"488767890","text":"from flask_restful import fields\n\nclass Product(object):\n\n resource_fields = {\n 'id': fields.Integer,\n 'name': fields.String,\n 'uri': fields.Url('.single_product', absolute=True)\n }\n\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\n def __repr__(self):\n return ' Product # {0} , Name: {1}'.format(self.id,self.name)","sub_path":"project/datastore/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"53721498","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n#单列表数据\r\n'''\r\nfigure(num=None, figsize=None, dpi=None, facecolor=None, edgecolor=None, \r\nframeon=True)\r\nnum:图像编号或名称,数字为编号 ,字符串为名称\r\nfigsize:指定figure的宽和高,单位为英寸;\r\ndpi:指定绘图对象的分辨率,即每英寸多少个像素,缺省80。1英寸等于2.5cm,A4纸21*30cm\r\nfacecolor:背景颜色\r\nedgecolor:边框颜色\r\nframeon:是否显示边框\r\n'''\r\nplt.figure(num=1, facecolor = \"blue\", edgecolor = \"white\")\r\nplt.plot([3,1,4,5,2]) #y轴数字,x轴默认为[0,1,2,3,4]\r\nplt.ylabel(\"Grade\")\r\nplt.xlabel(\"Level\")\r\nplt.savefig('test', dpi=600) #默认PNG格式,dpi修改输出质量\r\nplt.show\r\n\r\n#双列表数据\r\nplt.figure(num=2, facecolor = \"orange\", edgecolor = \"green\")\r\nplt.plot([0,2,4,6,8], [3,1,4,5,2]) #x轴和y轴顺序绘制\r\nplt.ylabel(\"Grade\")\r\nplt.axis([-1,10,0,6]) #x轴起始值、终止值,y轴起始值、终止值\r\nplt.show\r\n\r\n#绘图区域:plt.subplot(nrows, ncols, plot_number)\r\nplt.figure()\r\ndef f(t):\r\n return np.exp(-t) * np.cos(2*np.pi*t) #能量衰减曲线\r\na = np.arange(0.0, 5.0, 0.02)\r\nplt.subplot(211) #即plt.subplot(2,1,1)\r\nplt.plot(a,f(a))\r\nplt.subplot(2,1,2) #即plt.subplot(212)\r\nplt.plot(a,np.cos(2*np.pi*a), 'r--') 
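# Added sketch (not from the original tutorial): one self-contained example
# combining the figure() parameters documented above (num, figsize, dpi,
# facecolor) with the subplot(nrows, ncols, plot_number) grid. All concrete
# values below are illustrative assumptions, not taken from the source file.
import matplotlib.pyplot as plt
import numpy as np

plt.figure(num=4, figsize=(6, 4), dpi=100, facecolor="white")
t = np.arange(0.0, 2.0, 0.01)
plt.subplot(1, 2, 1)                      # left panel of a 1x2 grid
plt.plot(t, np.sin(2 * np.pi * t))
plt.subplot(1, 2, 2)                      # right panel of the same grid
plt.plot(t, np.cos(2 * np.pi * t), 'g:')
plt.show()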
#正弦曲线\r\nplt.show()\r\n\r\n\r\n\r\n\r\n","sub_path":"0. Matplotlib_general.py","file_name":"0. Matplotlib_general.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"202515979","text":"import argparse\nimport chainer\nfrom chainer import iterators\nfrom chainercv.utils import apply_to_iterator, ProgressHook\nimport chainermn\n\nfrom configs import cfg\nfrom utils.load_pretrained_model import load_pretrained_model\nfrom setup_helpers import setup_dataset, setup_model\nfrom evaluate import eval_coco, eval_voc\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('config', type=str,\n help='Path to the config file.')\n parser.add_argument('--batchsize', type=int, default=8,\n help='Default is 8.')\n parser.add_argument('--pretrained_model', type=str,\n help='Path to the pretrained model.')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n cfg.merge_from_file(args.config)\n cfg.freeze\n\n comm = chainermn.create_communicator('pure_nccl')\n device = comm.intra_rank\n\n model = setup_model(cfg)\n load_pretrained_model(cfg, args.config, model, args.pretrained_model)\n dataset = setup_dataset(cfg, 'eval')\n\n model.use_preset('evaluate')\n chainer.cuda.get_device_from_id(device).use()\n model.to_gpu()\n\n if not comm.rank == 0:\n apply_to_iterator(model.predict, None, comm=comm)\n return\n\n iterator = iterators.MultithreadIterator(\n dataset, args.batchsize * comm.size, repeat=False, shuffle=False)\n\n in_values, out_values, rest_values = apply_to_iterator(\n model.predict, iterator, hook=ProgressHook(len(dataset)), comm=comm)\n # delete unused iterators explicitly\n del in_values\n\n if cfg.dataset.eval == 'COCO':\n eval_coco(out_values, rest_values)\n elif cfg.dataset.eval == 'VOC':\n eval_voc(out_values, rest_values)\n else:\n raise ValueError()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"evaluate_multi.py","file_name":"evaluate_multi.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"647587424","text":"#KPL Intro to Coding Final Project\n#The Pong code is started but not complete\n#Pong in progress - Starting point for class # 7\n\n# Import Modules and Declare Global Variables\n\nimport random, sys, time, pygame\nfrom pygame.locals import *\n\n\npygame.init() # starts pygame\n\nFPS = 60 # 60 Frames per second\nfpsClock = pygame.time.Clock()\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nball_HEIGHT = 25\nball_WIDTH = 25\nball_SPEED = 5\n\nball_x_speed = ball_SPEED\nball_y_speed = ball_SPEED\nscore1 = 0 # This is the score for player 1\nscore2 = 0 # This is the score for player 2\n\n\n# Set up colors for future use\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\n\n# Starting position of the Ball\nball_x_pos = SCREEN_WIDTH//2\nball_y_pos = SCREEN_HEIGHT//2\n\n\n\n\n#Starting position of the Paddles\nPADDLE_WIDTH = 50\nPADDLE_HEIGHT = 200\nGUTTER = 50\nPaddle1_y_pos = ((SCREEN_HEIGHT//2)-(PADDLE_HEIGHT//2))\nPaddle1_x_pos = GUTTER\nPaddle2_y_pos = ((SCREEN_HEIGHT//2)-(PADDLE_HEIGHT//2))\nPaddle2_x_pos = SCREEN_WIDTH - GUTTER - PADDLE_WIDTH\n\nMOVERATE = 5\nPaddle1_down = False\nPaddle1_up = False\nPaddle2_down = False\nPaddle2_up = False\n\ngameOverMode = False\n\n# Load the graphics\ntheballGraphic = pygame.image.load('pong_ball_25_cube.png') \nright_paddle = 
pygame.image.load('pong_paddle.png')\nleft_paddle = pygame.image.load('pong_paddle.png')\n\n# Set up the Game window\nDISPLAYSURF = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Pong in Progress\") #This is the title\n\n\n#Load the Sound effects\npygame.mixer.init(44100, -16, 2, 2048)\nbeep1 = pygame.mixer.Sound('pongBlip1.ogg')\nbeep2 = pygame.mixer.Sound('pongBlip2.ogg')\n\n\n\n# Helper functions\n\n\ndef scoreBoard():\n RETROFONT = pygame.font.Font('PressStart2P.ttf', 48)\n scoreSurf = RETROFONT.render( str(score1) + ' ' + str(score2), True, WHITE)\n scoreRect = scoreSurf.get_rect()\n scoreRect.topleft = (SCREEN_WIDTH//3, SCREEN_HEIGHT //5)\n DISPLAYSURF.blit(scoreSurf, scoreRect)\n\ndef resetBall():\n global ball_x_pos, ball_y_pos\n ball_x_pos = SCREEN_WIDTH//2\n ball_y_pos = SCREEN_HEIGHT//2\n\ndef ballRandomizer():\n global ball_y_speed\n randomNumber = random.randint(-2, 2)\n ball_y_speed = ball_y_speed + randomNumber\n\n\n# The Main \"game\" loop\n\ndef main():\n global ball_x_pos, ball_y_pos, ball_x_speed, ball_y_speed, FPSCLOCK\n global theballGraphic, score1, score2, Paddle2_y_pos, Paddle1_y_pos\n global Paddle1_up, Paddle1_down, Paddle2_up, Paddle2_down\n\n DISPLAYSURF.fill(BLACK) # Paint the whole screen a certain color\n pygame.draw.line(DISPLAYSURF, WHITE, (SCREEN_WIDTH//2,0),(SCREEN_WIDTH//2,SCREEN_HEIGHT),25)\n\n ball_x_pos = ball_x_pos + ball_x_speed # Update the ball's x position\n ball_y_pos = ball_y_pos + ball_y_speed # Update the ball's y position\n\n DISPLAYSURF.blit(theballGraphic, (ball_x_pos, ball_y_pos)) #ball\n DISPLAYSURF.blit(left_paddle, (Paddle1_x_pos, Paddle1_y_pos)) #left paddle\n DISPLAYSURF.blit(right_paddle, (Paddle2_x_pos, Paddle2_y_pos)) #right paddle\n scoreBoard() # Run the scoreboard function\n\n\n # Bounce off the ceiling\n if ball_y_pos <= 0:\n ball_y_speed = - ball_y_speed #changes vertical direction\n\n # Bounce off the floor\n if ball_y_pos >= SCREEN_HEIGHT - ball_HEIGHT:\n ball_y_speed = - ball_y_speed #changes vertical direction\n\n # Bounce off the right paddle\n if ball_x_pos == SCREEN_WIDTH - GUTTER - PADDLE_WIDTH - ball_WIDTH:\n if Paddle2_y_pos - ball_HEIGHT <= ball_y_pos <= Paddle2_y_pos + PADDLE_HEIGHT:\n ball_x_speed = - ball_x_speed #changes horizontal direction\n ballRandomizer()\n beep1.play()\n\n\n # Bounce off the right wall\n if ball_x_pos == SCREEN_WIDTH - ball_WIDTH:\n ball_x_speed = - ball_x_speed #changes horizontal direction\n score1 = score1 + 1\n resetBall()\n\n # Bounce off the left paddle\n if ball_x_pos == GUTTER + PADDLE_WIDTH:\n if Paddle1_y_pos - ball_HEIGHT <= ball_y_pos <= Paddle1_y_pos + PADDLE_HEIGHT:\n ball_x_speed = - ball_x_speed #changes horizontal direction\n ballRandomizer()\n beep2.play()\n \n\n # Bounce off the left wall\n if ball_x_pos == 0:\n ball_x_speed = - ball_x_speed #changes horizontal direction\n score2 = score2 + 1\n resetBall()\n\n\n pygame.display.update() # Refreshes the display\n fpsClock.tick(FPS)\n\n # Check for a QUIT event (such as closing the window)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n \n elif event.type == KEYDOWN:\n if event.key == K_UP:\n print(\"player 2 pressed up\")\n Paddle2_up = True\n Paddle2_down = False\n if event.key == K_DOWN:\n print(\"player 2 pressed down\")\n Paddle2_up = False\n Paddle2_down = True\n if event.key == K_w:\n print(\"player 1 pressed up\")\n Paddle1_up = True\n Paddle1_down = False\n if event.key == K_s:\n print(\"player 1 pressed down\")\n Paddle1_up = False\n 
Paddle1_down = True\n \n if not gameOverMode:\n if Paddle2_up:\n Paddle2_y_pos = Paddle2_y_pos - MOVERATE\n if Paddle2_y_pos < 5:\n Paddle2_up = False\n if Paddle2_down:\n Paddle2_y_pos = Paddle2_y_pos + MOVERATE\n if Paddle2_y_pos > SCREEN_HEIGHT - PADDLE_HEIGHT -5:\n Paddle2_down = False \n if Paddle1_up:\n Paddle1_y_pos = Paddle1_y_pos - MOVERATE\n if Paddle1_y_pos < 5:\n Paddle1_up = False\n if Paddle1_down:\n Paddle1_y_pos = Paddle1_y_pos + MOVERATE \n if Paddle1_y_pos > SCREEN_HEIGHT - PADDLE_HEIGHT - 5:\n Paddle1_down = False \n\n\n\n\n\nwhile True:\n main()\n","sub_path":"PONG_SUPER_MODE_COMPLETE.py","file_name":"PONG_SUPER_MODE_COMPLETE.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"350998060","text":"\"\"\"\nBrent Fanning random turtle creation\n\"\"\"\n\n# we'll need to import random and turtle from another python file\n# and then create this object from WITHIN the other file\n# ...and create a screen using wn = turtle.Screen()\n\nimport turtle\nimport random\n\n\n#wn = turtle.Screen()\n\nclass RandT1(object):\n \"\"\" has methods that do turtle stuff \"\"\"\n\n def __init__(self):\n pass\n\n def turtlesetup(self):\n\n jimmy = turtle.Turtle()\n wn = turtle.Screen()\n\n wn.bgcolor(\"green\") # set the window background color\n\n\n\n jimmy.shape('turtle')\n\n randomColor = random.randint(1,99)\n if(randomColor < 25):\n jimmy.color(\"lightgreen\")\n if(randomColor < 50):\n jimmy.color(\"hotpink\")\n if(randomColor < 75):\n jimmy.color(\"orange\")\n else:\n jimmy.color(\"red\")\n\n jimmy.pensize(3)\n jimmy.forward(2)# here we can see the turtle\n # being placed in the center of the screen\n\n # ...and HERE we set a NEW x and y location\n starttx = random.randrange(-50,50)\n startty = random.randrange(-50,50)\n\n jimmy.penup()# ...we pull the pen UP so nothing\n # will be marked down when we move from the center\n # to our new starting x and y locations\n jimmy.setpos(starttx,startty)# ...and finally we\n # set our new starting x/y locations using \"setpos\"\n #pass\n jimmy.pendown()# ...then we put our pen back down\n # again and we're ready to start leaving trails!\n\n return(wn,jimmy)\n\n def moveturtle(self,wn,jimmy):\n #while isInScreen(wn, jimmy):\n angle = random.randrange(1, 361)\n jimmy.right(angle)\n jimmy.forward(50)\n\n def turtFuncUturn(self, jimmy):\n \"\"\"\n do a u turn with this method\n \"\"\"\n # assuming we're starting facing east\n # go forward a short distance\n # ...then turn left 90 degrees\n # ...then go straight a short distance\n\n jimmy.forward((random.randint(25,100)))#25-100\n\n # ...then turn\n jimmy.left(90)\n jimmy.forward((random.randint(25,100)))\n\n # ...and turn again\n jimmy.left(90)\n jimmy.forward((random.randint(25,100)))\n#=============================================================================\n\n # after entering:\n # import randTurt\n # ...into idle you can create a turtle object and then make it move randomly\n # in the following way:\n # enter the following line of code to start the program:\n # ...after making sure to enter this: ob1 = randTurt.RandT1()\n # ...then enter this one line:\n # y = ob1.turtlesetup()\n # ...or something like this: func = ob1.turtlesetup()\n\n # ...followed by something like this:\n # ob1.moveturtle( func[0], func[1] )\n # ...and boom! The turtle moves!!! 
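# Added usage sketch (not in the original file): the interactive steps from
# the comments above, gathered into one runnable block. It assumes this
# module is saved as randTurt.py, exactly as those comments assume.
if __name__ == "__main__":
    ob1 = RandT1()
    wn, jimmy = ob1.turtlesetup()      # returns the (screen, turtle) pair
    for _ in range(10):                # take ten random steps
        ob1.moveturtle(wn, jimmy)
    ob1.turtFuncUturn(jimmy)           # finish with a U-turn
    wn.exitonclick()                   # close the window on click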
yay!\n","sub_path":"dir2rando/randTurt.py","file_name":"randTurt.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"263124171","text":"from sklearn.utils import class_weight\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom tools.kt_utils import *\n\ndef train_and_evaluate_model(model, X_train, Y_train, X_dev, Y_dev,\n batch_size=32, num_epochs=1,\n use_class_weights=False):\n ''' \n Inputs: \n model: Keras model to train on \n X_train: the images to train on in the form (#images, height, width, ch\\\n annels) \n Y_train: the labels for the X_train in the form (#images, label) \n \n X_dev: the images to test on in the same form as X_train \n \n Y_dev: the labels for the X_dev in the same form as Y_train \n \n batch_size: number of images per minibatch \n \n num_epochs: number of epochs to train for \n \n Returns: \n \n History. A keras History object with history.history being a dictionary\n of ['acc'] ['loss'] ['val_acc'] ['val_loss'] , each of which are lists\n \n e.g. history.history['acc'][0] could produce a float with the results\n for the first epoch's accuracy \n '''\n\n\n \n\n cw = None\n if use_class_weights:\n cw = get_class_weights(Y_train)\n print('class weights: ', str(cw))\n \n history = model.fit(X_train,\n Y_train,\n batch_size=batch_size,\n epochs=num_epochs,\n class_weight=cw)\n \n \n print(\"eval: \", str(model.evaluate(X_dev, Y_dev)))\n preds = model.predict(X_dev)\n\n \n\n save_images(preds, Y_dev, X_dev, model.layers[1].name)\n\n\n return history, preds\n\n\ndef k_fold(model, X_train, Y_train, X_dev, Y_dev, batch_size, num_epochs, use_class_weights=True):\n\n # no imagedatagen being used in kfold yet.\n print(\"X_train shape: \", str(X_train.shape))\n\n\n \n cw = None\n if use_class_weights:\n cw = get_class_weights(Y_train)\n print('class weights: ', str(cw))\n model.fit(X_train, Y_train, epochs=num_epochs,\n batch_size=batch_size, class_weight=cw)\n results = model.evaluate(X_dev, Y_dev)\n preds = model.predict(X_dev)\n print('k_fold accuracy: ', str(results[1]))\n return preds, results[1] # return our preds, accuracy\n\n\ndef get_class_weights(Y_train):\n cw = dict(enumerate(class_weight.compute_class_weight('balanced',\n np.unique(Y_train),\n Y_train[:,0])))\n return cw\n\n\n\n","sub_path":"tools/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"436482643","text":"import unittest\nimport numpy as np\n\nfrom utils.grader import (\n gaussian_mixture_grad, regression_mixture_grad, regression_without_cov_grad\n)\n\n\nclass TestUtils(unittest.TestCase):\n\n def test_gaussian_mixture_grad(self):\n # X = Z * 1 + V\n # beta = 1\n X = np.array([[-1], [-1.1], [-0.9], [-1.2], [-0.8], [-1.3], [-0.95],\n [1], [1.1], [0.9], [1.2], [0.8], [1.3], [0.95], [1.12]])\n init_beta = np.array([2])\n g = gaussian_mixture_grad(X, beta=init_beta, beta0=init_beta, sigma=0.3)\n assert g.shape == X.shape\n assert np.all(g < 0)\n\n def test_regression_mixture_grad(self):\n # Y = Z(2*X)+V\n # beta = 2\n X = np.array([[0], [-0.4], [-0.2], [-0.8], [-0.3], [-0.05],\n [0.1], [0.4], [0.09], [1.2], [0.3], [0.05]])\n Y = np.array([0.1, 0.75, 0.35, -1.54, -0.58, -0.099,\n -0.2, -0.8, 0.19, 2.38, -0.62, 0.098])\n init_beta = np.array([1])\n g = regression_mixture_grad(X, Y, beta=init_beta, beta0=init_beta, sigma=0.3)\n assert g.shape == 
X.shape\n assert np.all(g > -0.2)\n\n def test_regression_without_cov_grad(self):\n # Y = 2 * X + V\n X = np.array([[0], [-0.4], [np.nan], [-0.8], [-0.3], [np.nan],\n [0.1], [np.nan], [0.09], [1.2], [np.nan], [0.05]])\n Y = np.array([0.1, -0.75, -0.35, -1.54, -0.58, -0.099,\n 0.2, 0.8, 0.19, 2.38, 0.62, 0.098])\n Z = np.logical_not(np.isnan(X))\n init_beta = np.array([1])\n g = regression_without_cov_grad(X, Y, beta=init_beta,\n beta0=init_beta, sigma=0.1)\n assert g.shape == X.shape\n assert np.all(g[Z.ravel()] > -0.1)\n\n","sub_path":"tests/test_grader.py","file_name":"test_grader.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"67768919","text":"from numpy import matrix,array,arccos,linalg,degrees,floor,gcd\nfrom copy import deepcopy\nclass atom_unit:\n\telement=''\n\tdynamics=''\n\tposition = []\n\tdef __init__(self,position=[],element='',dynamics=['T','T','T']):\n\t\tself.position = position\n\t\tself.element = element\n\t\tself.dynamics = dynamics\n\n\tdef read_poslines(self,pos_line):\n\t\tpos_list = pos_line.split()\n\t\tself.position = [float(pos_list[0]),float(pos_list[1]),float(pos_list[2])]\n\t\tif len(pos_list) >= 6:\n\t\t\tself.dynamics =[float(pos_list[3]),float(pos_list[4]),float(pos_list[5])]\n\n\tdef set_position(self,position):\n\t\tself.position=position\n\n\tdef set_position_c(self,c):\n\t\tself.position[2]=c\n\n\tdef set_element(self,element):\n\t\tself.element=element\n\n\tdef set_dynamics(self,dynamics):\n\t\tself.dynamics=dynamics\n\nclass atomic_structure:\n\tfile = ''\n\ttitle = ''\n\tfrac = 1.0\n\tbasis = []\n\telements = []\n\tnatoms = []\n\tselective_dynamics = 0\n\ttag = 'Direct'\n\tatom = []\n\tnions = 0\n\tthickness = 0\n\tdef __init__(self,file='POSCAR'):\n\t\twith open(file,'r') as fin:\n\t\t\tPOSCAR_lines = fin.readlines()\n\t\t#print(POSCAR_lines)\n\t\tself.title = POSCAR_lines[0][:-2]\n\t\tself.frac = float(POSCAR_lines[1].split()[0])\n\t\tself.basis = []\n\t\tfor i in range(3):\n\t\t\tx = float(POSCAR_lines[i+2].split()[0])\n\t\t\ty = float(POSCAR_lines[i+2].split()[1])\n\t\t\tz = float(POSCAR_lines[i+2].split()[2])\n\t\t\tself.basis.append([x,y,z])\n\t\tself.elements = POSCAR_lines[5].split()\n\t\tself.natoms = []\n\t\tfor s in POSCAR_lines[6].split():\n\t\t\tself.natoms.append(int(s))\n\t\tif POSCAR_lines[7].split()[0][0] in ['S','s']:\n\t\t\tself.selective_dynamics = 1\n\t\tif POSCAR_lines[7+self.selective_dynamics].split()[0][0] in ['D','d']:\n\t\t\tself.tag = 'Direct'\n\t\telif POSCAR_lines[7+self.selective_dynamics].split()[0][0] in ['C','c']:\n\t\t\tself.tag = 'Cartesian'\n\t\tself.nions = 0\n\t\tself.atom = []\n\t\tfor i in range(len(self.elements)):\n\t\t\tfor j in range(self.natoms[i]):\n\t\t\t\tself.nions += 1\n\t\t\t\tatom_ij = atom_unit(element=self.elements[i])\n\t\t\t\tatom_ij.read_poslines(POSCAR_lines[8+self.selective_dynamics+self.nions-1])\n\t\t\t\tself.atom.append(atom_ij)\n\t\tself.standardize()\n\n\n\n\tdef reset(self):\n\t\tself.frac = 1.0\n\t\tself.basis = []\n\t\tself.elements = []\n\t\tself.natoms = []\n\t\tself.selective_dynamics = 0\n\t\tself.tag = 'Direct'\n\t\tself.atom = []\n\t\tself.nions = 0\n\n\tdef set_basis(self,basis):\n\t\tself.basis=basis\n\n\tdef atom_in_cell(self,position):\n\t\tflag = True\n\t\ttrans=[0,0,0]\n\t\tfor i in range(3):\n\t\t\tif abs(position[i])<1e-10:\n\t\t\t\tposition[i]=0.\n\t\t\tif abs(position[i]-1)<1e-10:\n\t\t\t\tposition[i]=1.0\t\n\t\t\tif not(position[i]>=0 and 
position[i]<1.):\n\t\t\t\tflag = False\n\t\t\t\ttrans[i] = int(floor(position[i]))\n\t\treturn flag,trans\n\n\tdef add_atom(self,atom):\n\t\tif atom.element not in self.elements:\n\t\t\tself.elements.append(atom.element)\n\t\t\tself.natoms.append(0)\n\t\ti = self.elements.index(atom.element)\n\t\tpos = 0\n\t\tfor j in range(i+1):\n\t\t\tpos += self.natoms[j]\n\t\tself.nions += 1\n\t\tself.natoms[i] += 1\n\t\tself.atom.insert(pos,atom)\n\n\tdef del_atom(self,i):\n\t\tself.nions -= 1\n\t\tindex = self.elements.index(self.atom[i].element)\n\t\tself.natoms[index] -= 1\n\t\tdel(self.atom[i])\n\n\tdef cell_angle(self):\n\t\talpha = round(calc_Angle(self.basis[0],self.basis[2]),6)\n\t\tbeta = round(calc_Angle(self.basis[1],self.basis[2]),6)\n\t\tgamma = round(calc_Angle(self.basis[0],self.basis[1]),6)\n\t\treturn [alpha,beta,gamma]\n\n\tdef cell_length(self):\n\t\ta = linalg.norm(self.basis[0])\n\t\tb = linalg.norm(self.basis[1])\n\t\tc = linalg.norm(self.basis[2])\n\t\treturn [a,b,c]\n\n\tdef strain(self,strain_list):\n\t\tfor i in range(3):\n\t\t\tself.basis[i]=(array(self.basis[i])*strain_list[i]).tolist()\n\n\tdef add_vaccum(self,d):\n\t\told_c = self.basis[-1][-1]\n\t\tnew_c = old_c + d\n\t\tif self.tag=='Direct':\n\t\t\tfor i in range(self.nions):\n\t\t\t\tself.atom[i].set_position_c(self.atom[i].position[-1]*old_c/new_c)\n\t\t\tself.basis[-1][-1]=new_c\n\n\tdef standardize(self):\n\t\tc_min = 1\n\t\tc_max = -1\n\t\tfor atom in self.atom:\n\t\t\tif atom.position[-1]>c_max:\n\t\t\t\tc_max=deepcopy(atom.position[2])\n\t\t\tif atom.position[-1]0.5:\n\t\t\tself.thickness = 1-(c_max-c_min)\n\t\telse:\n\t\t\tself.thickness = c_max-c_min\n\t\tcenter = c_min+self.thickness/2\n\t\tfor atom in self.atom:\n\t\t\tatom.set_position_c((atom.position[2]+0.5-center)%1)\n\n\n\tdef transform(self,P,p=[0,0,0]):\n\t\tnewPOSCAR=deepcopy(self)\n\t\tnewPOSCAR.reset()\n\t\tif linalg.det(array(P)) == 0:\n\t\t\tprint(\"-Error: The determination of the transform matrix is 0!\")\n\t\telse:\n\t\t\tif linalg.det(array(P)) < 0:\n\t\t\t\tprint(\"-Warning: The transform matrix changes the coordinate system from right- to left-handed.\")\n\t\t\tif linalg.det(array(P)) != 1:\n\t\t\t\tprint(\"-The transform matrix changes the cell volume.\")\n\t\t\t#print(array(P))\n\t\t\tnewPOSCAR.set_basis((array(self.basis).T.dot(array(P))).T)\n\t\t\tatom_new = []\n\t\t\tinv_p = linalg.pinv(array(P)) #the atom position vector should dot this\n\t\t\t#################search atoms in the new basis#####################\n\t\t\tstack = [[0,0,0],[-1,0,0],[1,0,0],[0,-1,0],[0,1,0],[0,0,1],[0,0,-1]]\n\t\t\tsearch_map = []\n\t\t\twhile len(stack)!=0:\n\t\t\t\tn_new = 0\n\t\t\t\tfor i in range(self.nions):\n\t\t\t\t\tnewatom = deepcopy(self.atom[i])\n\t\t\t\t\tposition = ((array(self.atom[i].position)+array(stack[0])).dot(array(inv_p).T)+array(p)).tolist()\n\t\t\t\t\tflag,trans = self.atom_in_cell(position)\n\n\t\t\t\t\tif flag:\n\t\t\t\t\t\tnewatom.set_position(position)\n\t\t\t\t\t\tnewPOSCAR.add_atom(newatom)\n\t\t\t\t\t\tn_new += 1\n\n\t\t\t\tif n_new>0:\n\t\t\t\t\tfor nextcell in [[-1,0,0],[1,0,0],[0,-1,0],[0,1,0],[0,0,1],[0,0,-1]]:\n\t\t\t\t\t\tcell = array(nextcell)+array(stack[0])\n\t\t\t\t\t\tif cell.tolist() not in search_map and cell.tolist() not in stack:\n\t\t\t\t\t\t\tstack.append(cell.tolist())\n\n\t\t\t\t#for nextcell in [[-1,0,0],[1,0,0],[0,-1,0],[0,1,0],[0,0,1],[0,0,-1]]:\n\t\t\t\t#\tcell = array(nextcell)+array(stack[0])\n\t\t\t\t#\tif cell.tolist() not in search_map and cell.tolist() not in stack: 
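				# Added note (not in the original source): an illustrative example of
				# how this search-based transform is typically driven, kept in comment
				# form because it sits inside the method body. All concrete values and
				# the output file name are assumptions.
				#
				#   cell = atomic_structure('POSCAR')
				#   P = [[2, 0, 0],
				#        [0, 2, 0],
				#        [0, 0, 1]]                 # 2x2x1 supercell; det(P) = 4
				#   supercell = cell.transform(P)   # searches neighbouring cells
				#   supercell.print_POSCAR('CONTCAR_221')
				#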
\n\t\t\t\t#\t\tindex=0\n\t\t\t\t#\t\tfor corners in [[0,0,0],[1,0,0],[0,1,0],[1,1,0],[0,0,1],[1,0,1],[0,1,1],[1,1,1]]:\n\t\t\t\t#\t\t\tcornerpos = array(corners)+array(cell)\n\t\t\t\t#\t\t\tflag,trans=self.atom_in_cell(((cornerpos).dot(array(inv_p).T)).tolist())\n\t\t\t\t#\t\t\tif flag:\n\t\t\t\t#\t\t\t\tindex+=1\n\t\t\t\t#\t\tif index>0:\n\t\t\t\t#\t\t\tstack.append(cell.tolist())\n\t\t\t\tsearch_map.append(stack.pop(0))\n\t\treturn newPOSCAR\n\n\n\tdef print_POSCAR(self,file='CONTCAR'):\n\t\twith open(file,'wt') as fout:\n\t\t\tprint(self.title,file=fout)\n\t\t\tprint(\"%19.14f\"%(self.frac),file=fout)\n\t\t\tfor i in range(3):\n\t\t\t\tprint(\" %22.16f%22.16f%22.16f\"%(self.basis[i][0],self.basis[i][1],self.basis[i][2]),file=fout)\n\t\t\tfor val in self.elements:\n\t\t\t\tprint(\" %-2s\"%(val),end='',file=fout)\n\t\t\tprint(file=fout)\n\t\t\tfor val in self.natoms:\n\t\t\t\tprint(\" %-2d\"%(val),end='',file=fout) #vasp POSCAR standard \"%6d\"\n\t\t\tprint(file=fout)\n\t\t\tif self.selective_dynamics == 1:\n\t\t\t\tprint('Selective Dynamics',file=fout)\n\t\t\tprint(self.tag,file=fout)\n\t\t\tfor i in range(self.nions):\n\t\t\t\tprint(\"%20.16f%20.16f%20.16f\"%(self.atom[i].position[0],self.atom[i].position[1],self.atom[i].position[2]),end='',file=fout)\n\t\t\t\tif self.selective_dynamics == 1:\n\t\t\t\t\tprint(\" %s %s %s\"%(self.atom[i].dynamics[0],self.atom[i].dynamics[1],self.atom[i].dynamics[2]),file=fout)\n\t\t\t\telse:\n\t\t\t\t\tprint(file=fout)\n\n\ndef calc_Angle(a,b): #Calculate the angle between vectors \n\treturn degrees(arccos(array(a).dot(array(b))/(linalg.norm(a)*linalg.norm(b))))\n\ndef choost(a,b):\n\tif a>b:\n\t\treturn(b)\n\telse:\n\t\treturn(a)\n\ndef calculate_strain(mp,nq,a,b,printflag=True):\n\ta1=(-nq*b**2/(mp))**0.5\n\tb1=(-mp*a**2/(nq))**0.5\n\t#print(b**2/a**2)\n\tdelta_a = (a1-a)/a\n\tdelta_b = (b1-b)/b\n\tif abs(delta_a) 2.5E-1).any())\n\n def test_SingleOutputOne(self):\n a_in = numpy.random.uniform(-1.0, +1.0, (8,16))\n a_out = numpy.random.randint(2, size=(8,1)).astype(numpy.float32)\n a_mask = (0.0 + a_out).flatten()\n \n self.check(a_in, a_out, a_mask)\n\n def test_SingleOutputZero(self):\n a_in = numpy.random.uniform(-1.0, +1.0, (8,16))\n a_out = numpy.random.randint(2, size=(8,1)).astype(numpy.float32)\n a_mask = (1.0 - a_out).flatten()\n\n self.check(a_in, a_out, a_mask)\n\n def test_SingleOutputNegative(self):\n a_in = numpy.random.uniform(-1.0, +1.0, (8,16))\n a_out = numpy.random.randint(2, size=(8,1)).astype(numpy.float32)\n a_mask = (0.0 + a_out).flatten()\n a_out = -1.0 * 2.0 + a_out\n \n self.check(a_in, a_out, a_mask)\n \n def test_MultipleOutputRandom(self):\n a_in = numpy.random.uniform(-1.0, +1.0, (8,16))\n a_out = numpy.random.randint(2, size=(8,4)).astype(numpy.float32)\n a_mask = numpy.random.randint(2, size=(8,)).astype(numpy.float32)\n\n self.check(a_in, a_out, a_mask)\n\n\nclass TestMaskedDataClassification(unittest.TestCase):\n\n def check(self, a_in, a_out, a_mask, act='Softmax', n_iter=100):\n nn = MLPC(layers=[L(act)], learning_rule='rmsprop', n_iter=n_iter)\n nn.fit(a_in, a_out, a_mask)\n return nn.predict_proba(a_in)\n\n def test_TwoLabelsOne(self):\n # Only one sample has the value 1 with weight 1.0, but all 0s are weighted 0.0.\n a_in = numpy.random.uniform(-1.0, +1.0, (16,4))\n a_out = numpy.zeros((16,1), dtype=numpy.int32)\n a_out[0] = 1\n a_mask = (0.0 + a_out).flatten()\n \n a_test = self.check(a_in, a_out, a_mask).mean(axis=0)\n assert_greater(a_test[1], a_test[0] * 1.25)\n\n def test_TwoLabelsZero(self):\n # Only one 
sample has the value 0 with weight 1.0, but all 1s are weighted 0.0. \n a_in = numpy.random.uniform(-1.0, +1.0, (16,4))\n a_out = numpy.ones((16,1), dtype=numpy.int32)\n a_out[-1] = 0\n a_mask = (1.0 - a_out).flatten()\n \n a_test = self.check(a_in, a_out, a_mask).mean(axis=0)\n assert_greater(a_test[0], a_test[1] * 1.25)\n\n def test_FourLabels(self):\n # Only multi-label sample has weight 1.0, the others have weight 0.0. Check probabilities!\n chosen = random.randint(0,15)\n a_in = numpy.random.uniform(-1.0, +1.0, (16,4))\n a_out = numpy.random.randint(2, size=(16,4))\n a_mask = numpy.zeros((16,), dtype=numpy.int32)\n a_mask[chosen] = 1.0\n\n a_test = self.check(a_in, a_out, a_mask, act=\"Sigmoid\", n_iter=250).mean(axis=0)\n for i in range(a_out.shape[1]):\n compare = assert_greater if a_out[chosen][i]==0 else assert_less\n compare(a_test[i*2], a_test[i*2+1])\n","sub_path":"projetoAM_python/mlp/scikit-neuralnetwork-master/sknn/tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"554719667","text":"import numpy as np\nimport turtle as tt\n\ntt.shape(\"turtle\")\ndef archymedes(n):\n\ta = 20*(n - 1)*2*np.sin(np.pi/n)\n\ttt.left(180/n + 90)\n\tfor i in np.arange(1, n + 1):\n\t\ttt.forward(a)\n\t\ttt.left(360/n)\n\ttt.right(180/n + 90)\n\ttt.penup()\n\ttt.forward(20)\n\ttt.pendown()\n\t\nfor i in np.arange(3, 17):\n\tarchymedes(i)\n","sub_path":"lab_03/ex_09.py","file_name":"ex_09.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"130937791","text":"from flask import Blueprint, Flask, render_template, request\nfrom flaskext.mysql import MySQL\nfrom app.database import Database\nimport requests\nimport json\n\ndb = Database()\nbp_app = Blueprint(\"home\", __name__)\n\ndef configure(app):\n app.register_blueprint(bp_app)\n\n@bp_app.route(\"/\")\ndef index():\n return render_index()\n\n@bp_app.route(\"/edit\", methods=['POST','GET'])\ndef edit():\n if request.method == 'POST':\n send_game(request)\n return render_index()\n else:\n game_id = request.args.get('game_id')\n game = db.find_game_by_id(game_id)\n return render_template('edit.html', game = game)\n \n \n@bp_app.route(\"/create\", methods=['POST','GET'])\ndef create():\n if request.method == 'GET' :\n return render_template('create.html')\n else:\n send_game(request)\n return render_index()\n\ndef render_index():\n games = db.events_list()\n return render_template('index.html', games = games)\n\ndef send_game(request):\n send_dict = {} \n games = []\n games.append(create_game(request))\n send_dict[\"data\"] = games\n send_dict[\"email\"] = request.form['email']\n game_json = json.dumps(send_dict)\n url = 'http://desafio.logus.tech/desafio'\n requests.post(url, data = game_json)\n\ndef create_game(request):\n game_dict = {}\n game_dict[\"id\"] = request.form['game_id'] \n game_dict[\"teams\"] = request.form['teams']\n game_dict[\"home\"] = request.form['home']\n game_dict[\"away\"] = request.form['away']\n game_dict[\"date\"] = request.form['date']\n return game_dict","sub_path":"app/blueprints/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"109784372","text":"# https://leetcode.com/problems/max-stack/\nimport math\nfrom collections import deque, defaultdict\n\nfrom Helpers 
import helper as hlp\nfrom Helpers import test_class\nimport re\n\nclass Node():\n def __init__(self, val = None):\n self.next = None\n self.prev = None\n self.val = val\n\n\nclass Solution(test_class.test_class):\n\n def setUp(self):\n super().setUp()\n self.head = None\n self.tail = None\n self.maxStack = deque()\n\n def push(self, x: int) -> None:\n if not self.head:\n self.head = Node(x)\n self.tail = self.head\n else:\n node = Node(x)\n self.head.next = node\n node.prev = self.head\n self.head = node\n if not self.maxStack or (self.maxStack and self.head.val >= self.maxStack[-1].val):\n self.maxStack.append(self.head)\n elif self.maxStack and self.head.val < self.maxStack[-1].val:\n self.maxStack.appendleft(self.head)\n self.print()\n\n def pop(self) -> int:\n res = None\n if self.head:\n res = self.head.val\n if self.head.prev:\n self.head = self.head.prev\n self.head.next = None\n else:\n self.head = self.tail = None\n\n if self.maxStack and self.maxStack[-1].val == res:\n self.maxStack.pop()\n self.print()\n return res\n\n def top(self) -> int:\n return self.head.val\n\n def peekMax2(self) -> int:\n res = float('-inf')\n current = self.head\n while current:\n res = max(res, current.val)\n current = current.prev\n return res\n\n def peekMax(self) -> int:\n if self.maxStack:\n return self.maxStack[-1].val\n else:\n return None\n\n def popMax(self) -> int:\n if self.maxStack:\n maxNode = self.maxStack[-1]\n\n # remove max node\n if maxNode:\n # if head\n if maxNode == self.head:\n return self.pop()\n else:\n maxNode = self.maxStack.pop()\n if maxNode.next:\n maxNode.next.prev = maxNode.prev\n if maxNode.prev:\n maxNode.prev.next = maxNode.next\n self.print()\n return maxNode.val\n\n def popMax2(self) -> int:\n res = float('-inf')\n maxNode = None\n\n current = self.head\n while current:\n if current.val > res:\n res = max(res, current.val)\n maxNode = current\n current = current.prev\n\n # remove max node\n if maxNode:\n # if head\n if maxNode == self.head:\n return self.pop()\n else:\n if maxNode.next:\n maxNode.next.prev = maxNode.prev\n if maxNode.prev:\n maxNode.prev.next = maxNode.next\n self.print()\n return res\n\n def print(self):\n res = ''\n current = self.tail\n while current:\n res += ' ' + str(current.val)\n current = current.next\n print(res)\n\n def test_1(self):\n self.push(5),self.push(1),self.push(5)\n self.assertEqual(5, self.top())\n self.assertEqual(5, self.peekMax())\n self.assertEqual(5, self.popMax())\n self.assertEqual(1, self.top())\n self.assertEqual(5, self.peekMax())\n self.pop()\n self.assertEqual(5, self.top())\n\n def test_2(self):\n self.push(-2)\n self.assertEqual(-2, self.popMax())\n self.push(-45), self.push(-82), self.push(29)\n self.assertEqual(29, self.pop())\n self.assertEqual(-45, self.peekMax())\n self.push(40)\n self.assertEqual(40, self.pop())\n\n def test_3(self):\n self.push(5)\n self.push(1)\n self.assertEqual(5, self.popMax())\n self.assertEqual(1, self.peekMax())\n\n","sub_path":"Interviews_LNK/MaxStack.py","file_name":"MaxStack.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"98853534","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@version: 001\n@author: jianpan\n@file: test.py\n@time: 2017/6/6 22:04\n\"\"\"\n\nimport urllib\nimport urllib.parse\nimport json\nimport pandas as pd\nimport time\nimport numpy as np\nfrom multiprocessing import Process\nimport gevent.pool\nfrom gevent import 
monkey;monkey.patch_all()\nimport time\nimport threading\nfrom ProxyProvider import ProxyProvider\nimport pprint\nimport logging\nimport logging.config\n\nlogging.config.fileConfig(\"/usr/local/test/mobike_1/conf/logger.conf\")\nlogger = logging.getLogger(\"example02\")\n\nclass Crawler:\n def __init__(self):\n self.start_time = time.time()\n self.proxyProvider = ProxyProvider()\n self.headers = [{\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.mobike.com',\n 'mobileNo': '13729009386',\n 'eption': 'a7eab',\n 'lang': 'zh',\n 'uuid': '78c8a7eecf9ba3ef7f2bca96e59fbe4c',\n 'citycode': '0755',\n 'accesstoken': 'bc6066bdb5ab2bff2b9c942d0add4089'\n },\n {\n'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n'Host': 'api.mobike.com',\n'Accept-Encoding': 'gzip',\n'platform': '1',\n'eption': '99d25',\n'citycode': '0755',\n'os': '23',\n'lang': 'zh',\n'version': '5.4.1',\n'uuid': '3d6bc5f57fbe14a14c846bfe7091993c'}\n\n ]\n self.result_success = []\n self.index = 0\n self.result_fail = []\n self.req_index = 0\n\n def get_nearby_bikes(self, args):\n url = 'https://api.mobike.com/mobike-api/rent/v2/nearbyBikesInfo.do'\n self.req_index +=1\n\n reqdata = [{\n 'cityCode': '0755',\n 'biketype': '0',\n 'latitude': str(args[0]),\n 'scope': '500',\n 'sign': '68bb54c96acc635edc9fd95eb72b42a0',\n 'userid': '72182150542387824640362706',\n 'client_id': 'android',\n 'longitude': str(args[1])\n },\n {\n 'cityCode': '0755',\n 'biketype': '0',\n 'latitude': str(args[0]),\n 'scope': '500',\n 'sign': '67a80df315dc26738ba002cbfa920e26',\n 'client_id': 'android',\n 'longitude': str(args[1])\n }\n ]\n # print(reqdata)\n i = self.req_index % 2\n data = urllib.parse.urlencode(reqdata[i]).encode('utf-8')\n self.get_request(url, self.headers[i], data, args)\n\n def get_request(self, url, headers, data, args):\n proxy = self.proxyProvider.pick()\n try:\n proxies = {'http': proxy.url}\n # print(proxies)\n proxy_support = urllib.request.ProxyHandler(proxies)\n opener = urllib.request.build_opener(proxy_support)\n urllib.request.install_opener(opener)\n a = urllib.request.Request(url=url, headers=headers, data=data)\n r = urllib.request.urlopen(a).read()\n tmp = json.loads(r)\n # print(tmp)\n result = pd.DataFrame.from_dict(tmp['bike'])\n self.result_success.append(result)\n except Exception as ex:\n #print(ex)\n proxy.fatal_error()\n self.result_fail.append(\"{0}, {1}, {2}, {3}\" .format(args[0], args[1],proxy.url, ex))\n finally:\n self.index += 1\n #if self.index % 100 == 0:\n # print(self.index)\n\n def gevent_fun(self,locate_list, pool_num):\n from gevent import monkey\n monkey.patch_all(socket=True, select=True)\n from gevent.pool import Pool\n gevent_pool = Pool(pool_num)\n gevent_pool.map(self.get_nearby_bikes, locate_list)\n\n def thread_fun(self, locate_list, pool_num):\n import threadpool\n thread_pool = threadpool.ThreadPool(pool_num)\n requestsx = threadpool.makeRequests(self.get_nearby_bikes, locate_list)\n [thread_pool.putRequest(req) for req in requestsx]\n thread_pool.wait()\n\n\n def process_start(self, locate_list, PoolNum):\n pool = gevent.pool.Pool(PoolNum)\n data_list = pool.map(self.get_nearby_bikes, locate_list)\n result = []\n for i in data_list:\n if type(i) != str:\n result.append(i)\n time.strftime()\n\n # def process_start(self, locate_list):\n # for locate in locate_list:\n # self.get_nearby_bikes(locate)\n\n def init_start(self, PoolNum, now):\n left = 22.55\n top = 113.80\n right = 22.85\n bottom = 114.20\n offset = 0.002\n lat_range = np.arange(left, right, 
offset)\n lon_range = np.arange(top, bottom, offset)\n locate_range = np.transpose([np.tile(lat_range, len(lon_range)), np.repeat(lon_range, len(lat_range))])\n print(len(locate_range))\n # self.thread_fun(locate_range, PoolNum)\n self.gevent_fun(locate_range, PoolNum)\n with open(\"/usr/local/test/mobike_1/data/{0}_fail.txt\".format(t), 'w') as f:\n f.write('\\n'.join(self.result_fail))\n pd_result = pd.concat(self.result_success)\n #pd_result = self.result_success[0]\n #for i in self.result_success:\n # pd_result.append(i, ignore_index=True)\n print(len(self.result_fail))\n #pd_result = pd_result.drop_duplicates(['bikeIds'])\n t = time.strftime('%Y%m%d%H%M%S', now)\n print(len(pd_result))\n\n pd_result.to_csv('/usr/local/test/mobike_1/data/{0}_success.csv'.format(t),header=True,index=False)\n\nif __name__ == \"__main__\":\n time_start = time.time()\n \n logger.info(' ####################################')\n logger.info(' ####################################')\n logger.info(' ############## START ###############')\n now = time.localtime()\n c = Crawler()\n c.init_start(40, now)\n time_end = time.time()\n print(time_end-time_start)\n logger.info(' ############## END ############### ')\n logger.info(' ####################################')\n logger.info(' ####################################')\n","sub_path":"bin/my.py","file_name":"my.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"29537221","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2019 LG Electronics, Inc.\n#\n# This software contains code licensed as described in LICENSE.\n#\n\nimport simulation.config as config\nimport lgsvl\n\ncf = config.Config()\nsim = cf.Simulator()\ncf.LoadOrResetScene(sim, \"BorregasAve\")\n\nspawns = sim.get_spawn()\n\nstate = lgsvl.AgentState()\nstate.transform = spawns[0]\na = sim.add_agent(\"Lincoln2017MKZ (Apollo 5.0)\", lgsvl.AgentType.EGO, state)\n\nsensors = a.get_sensors()\nfor s in sensors:\n if s.name == \"Lidar\":\n s.save(\"lidar.pcd\")\n break","sub_path":"07-save-lidar-point-cloud.py","file_name":"07-save-lidar-point-cloud.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"544787595","text":"import argparse\nimport sys\n\nfrom .canvas import Canvas\n\nCOMMANDS = {\n \"C\": [\"w\", \"h\"],\n \"L\": [\"x1\", \"y1\", \"x2\", \"y2\"],\n \"R\": [\"x1\", \"y1\", \"x2\", \"y2\"],\n \"B\": [\"x\", \"y\", \"c\"]\n}\n\nparser_description = \"\"\"\nAvailable commands are:\nC w h - Create Canvas: Should create a new canvas of width w and height h.\nL x1 y1 x2 y2 - Create Line: Should create a new line from (x1,y1) to (x2,y2). 
\nR x1 y1 x2 y2 - Create Rectangle: Should create a new rectangle, whose upper left corner is (x1,y1).\nB x y c - Bucket Fill: Should fill the entire area connected to (x,y) with \"colour\" c.\n\"\"\"\n\ndef execute_command(canvas: Canvas, command):\n command, *args = command.split(\" \")\n \n if command not in COMMANDS:\n raise ValueError(\"Invalid command\")\n elif len(COMMANDS[command]) > len(args):\n missing_argument = COMMANDS[command][len(command)]\n raise ValueError(f\"Missing {missing_argument} argument\")\n elif len(COMMANDS[command]) < len(args):\n raise ValueError(\"Too many arguments\")\n elif command != \"C\" and canvas is None:\n raise ValueError(\"The canvas is empty\")\n \n try:\n if command == \"B\":\n args[:-1] = [int(arg) - 1 for arg in args[:-1]]\n elif command == \"C\":\n args = [int(arg) for arg in args]\n else:\n args = [int(arg) - 1 for arg in args]\n except ValueError:\n raise ValueError(\"Invalid argument type\")\n\n if command == \"C\":\n return Canvas(*args)\n elif command == \"L\":\n return canvas.draw_line(*args)\n elif command == \"R\":\n return canvas.draw_rectangle(*args)\n else:\n return canvas.fill(*args)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n \"Canvas\",\n description=parser_description,\n formatter_class=argparse.RawDescriptionHelpFormatter # For multiline description\n )\n parser.add_argument(\n \"file\", help=\"input file path\"\n )\n parser.add_argument(\n \"out_file\", help=\"output file path\"\n )\n args = parser.parse_args()\n\n canvas = None\n \n with open(args.file) as f, open(args.out_file, 'w+') as out_file:\n for i, line in enumerate(f):\n try:\n canvas = execute_command(canvas, line)\n canvas.print(out=out_file)\n except ValueError as e:\n print(\n f\"The following error occured at {i + 1} line:\", \n file=sys.stderr\n )\n print(e, file=sys.stderr)\n break\n except:\n print(\"Internal error\", file=sys.stderr)\n break\n\nmain()\n","sub_path":"canvas/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"220328663","text":"from pathlib import Path\nfrom sys import platform\nimport logging\nimport os\nfrom FamliFileIO.csvwrap import CSVWrap\n\nclass TagInfoFile():\n '''\n This file handles the reading/writing of the\n info csv files in the FAMLI dataset C1 studies\n '''\n file_name = 'tags.csv'\n\n replacements = { \"LI\": \"L1\",\n \"RI\": \"R1\",\n \"RI5\": \"R15\",\n \"LI5\": \"L15\",\n \"CI\": \"C1\",\n \"LO\": \"L0\",\n \"RO\": \"R0\",\n \"3DI\": \"3D1\",\n \"Undecided\": \"Unknown\",\n \"No tag\": \"Unknown\"\n }\n\n @staticmethod\n def GetActualTag(tag):\n if tag in TagInfoFile.replacements:\n return TagInfoFile.replacements[tag]\n return tag\n\n def __init__(self, study_dir):\n # study dir is the output folder where the tag file will be written\n if isinstance(study_dir, str):\n self.study_dir = Path(study_dir)\n elif isinstance(study_dir, Path):\n self.study_dir = study_dir\n else:\n raise TypeError(('Study DIR path object is of the wrong type'))\n\n self.file_name = TagInfoFile.file_name\n self.tag_info_file = self.study_dir/TagInfoFile.file_name\n self.file_tag_dict = []\n self.tag_statistic = {}\n\n def exists(self):\n return self.tag_info_file.exists()\n\n def getFilePath(self):\n \"\"\"\n Get complete path for the tag info\n \"\"\"\n return self.tag_info_file\n\n def checkKeys(self, keys_list):\n return 'File' in keys_list and 'type' in keys_list and 'tag' in keys_list\n\n def 
createTagStatistics(self):\n self.tag_statistic = {}\n for row in self.file_tag_dict:\n if row['tag'] not in self.tag_statistic:\n self.tag_statistic[row['tag']] = 0\n self.tag_statistic[row['tag']] += 1\n\n def updateTagStatistics(self, tag):\n if tag not in self.tag_statistic:\n self.tag_statistic[tag] = 0\n self.tag_statistic[tag] += 1\n\n def read(self):\n try:\n rows, keys = CSVWrap.readCSV(self.tag_info_file)\n if len(rows) > 0 and \\\n rows is not None and \\\n self.checkKeys(keys):\n # Sort the rows by file name\n newlist = sorted(rows, key=lambda k: Path(k['File']).name)\n self.file_tag_dict = newlist\n for row in self.file_tag_dict:\n row['tag'] = TagInfoFile.GetActualTag(row['tag'])\n self.tag_statistic = {}\n self.createTagStatistics()\n except Exception as e:\n logging.error(\"Error reading the tag file now: {}\".format(e))\n logging.error(e)\n self.file_tag_dict = []\n\n def write(self):\n try:\n if len(self.file_tag_dict) == 0:\n logging.warning('Nothing to write in taginfo, skipping')\n return\n CSVWrap.writeCSV(self.file_tag_dict, self.tag_info_file)\n except Exception as e:\n logging.error('Error writing the tag file')\n\n def addTag(self, abs_study_original_path, relative_to_path, file_name, filetype, tag, write=False):\n \"\"\"\n abs_study_original_path: Absolute path to the study\n relative_to_path: Path to the server. Study's relative path to the server will be stored\n file_name: Name of the file in study\n \"\"\"\n # study dir is the output folder where the tag file will be written\n if isinstance(abs_study_original_path, str):\n abs_study_original_path = Path(abs_study_original_path)\n elif not isinstance(abs_study_original_path, Path):\n raise TypeError('Input absolute study original path has to be either string or a Path')\n\n relative_study_path = abs_study_original_path.relative_to(relative_to_path)\n\n row = {}\n row['File'] = relative_study_path / file_name\n row['type'] = filetype\n row['tag'] = TagInfoFile.GetActualTag(tag)\n\n self.file_tag_dict.append(row)\n self.updateTagStatistics(row['tag'])\n if write:\n CSVWrap.writeCSV([row], self.tag_info_file, append=True)\n\n def getFileNamesWithTags(self, tag_list, tag_type=None):\n file_names = [ {'File':file_row['File'], 'tag':file_row['tag']} for file_row in self.file_tag_dict if file_row['tag'] in tag_list and (tag_type is None or file_row['type'] == tag_type)]\n return file_names\n \n def getFileNamesWithTag(self, tag):\n file_names = [ file_row['File'] for file_row in self.file_tag_dict if file_row['tag'] == tag]\n return file_names\n\n def getFileNamesWithType(self, filetype):\n file_names = [ {'File':file_row['File'], 'tag':file_row['tag']} for file_row in self.file_tag_dict if file_row['type'] == filetype]\n return file_names\n\n def getTagForFileName(self, file_name):\n for file_row in self.file_tag_dict:\n if file_row['File'].name == file_name:\n return file_row['tag']\n\n def getStudyDir(self):\n return self.study_dir\n\n def getStudyName(self):\n return self.study_dir.stem \n\n def setStudyDir(self, study_dir):\n \"\"\"\n Set the directory where the tag info file will be stored.\n \"\"\"\n if isinstance(study_dir, str):\n self.study_dir = Path(study_dir)\n elif isinstance(study_dir, Path):\n self.study_dir = study_dir\n else:\n raise TypeError(('Study DIR path object is of the wrong type'))\n\n def getNumFiles(self):\n return len(self.file_tag_dict)\n \n def getNumCines(self):\n cines = [f for f in self.file_tag_dict if f['type'] is 'cine']\n return len(cines)\n \n def getNumImages(self):\n imgs = 
[f for f in self.file_tag_dict if f['type'] is '2d image']\n return len(imgs)\n\n def clear(self):\n self.file_tag_dict=[]\n self.tag_statistic={}\n\n def getAllRows(self):\n return self.file_tag_dict\n\n def getDictWithNameKeys(self):\n dict_to_return = {}\n for row in self.file_tag_dict:\n name = Path(row['File']).name\n dict_to_return[name] = {}\n dict_to_return[name] = [row['type'], row['tag']]\n return dict_to_return\n\n def deleteTagFile(self):\n if self.tag_info_file.exists():\n logging.warning('DELETING the tag file {}'.format(self.tag_info_file))\n os.remove(self.tag_info_file)\n\n @staticmethod\n def CompileFinishedStudies(dir_with_tags):\n \"\"\"\n Method to write out a finished_studies.txt with the names of the studies (i.e. directory parents to tags.csv)\n \"\"\"\n try:\n tag_file_name = TagInfoFile.file_name\n finished_studies = list(dir_with_tags.glob(\"**/\"+tag_file_name))\n fsd = [(study.parent).name for study in finished_studies]\n with open(dir_with_tags/\"finished_studies.txt\", 'w') as f:\n for study in fsd:\n f.write(study+\"\\n\")\n except Exception as e:\n logging.error(\"Exception during getting the list of finished studies\\n\"+e)\n","sub_path":"src/py/FamliFileIO/taginfo.py","file_name":"taginfo.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"440601013","text":"\"\"\"\nA ImageWithText Idevice is one built up from an image and free text.\n\"\"\"\nimport logging\nfrom exe.engine.idevice import Idevice\nfrom exe.engine.field import TextAreaField, ImageField\nfrom exe.engine.translate import lateTranslate\nlog = logging.getLogger(__name__)\nclass ImageWithTextIdevice(Idevice):\n \"\"\"\n A ImageWithText Idevice is one built up from an image and free text.\n \"\"\"\n persistenceVersion = 6\n def __init__(self, defaultImage = None):\n Idevice.__init__(self, \n x_(u\"Image with Text\"), \n x_(u\"University of Auckland\"), \n x_(u\"\"\"
\nThe image with text iDevice can be used in a number of ways to support both\nthe emotional (affective) and learning task (cognitive) dimensions of eXe\ncontent.\n\nIntegrating visuals with verbal summaries\n\nCognitive psychologists indicate that presenting learners with a\nrepresentative image and corresponding verbal summary (that is presented\nsimultaneously) can reduce cognitive load and enhance learning retention.\nThis iDevice can be used to present an image (photograph, diagram or\ngraphic) with a brief verbal summary covering the main points relating to\nthe image. For example, if you were teaching the functions of a four-stroke\ncombustion engine, you could have a visual for each of the four positions of\nthe piston with a brief textual summary of the key aspects of each visual.\n
\"\"\"), u\"\", u\"\")\n self.emphasis = Idevice.NoEmphasis\n self.image = ImageField(x_(u\"Image\"), u\"\")\n self.image.idevice = self\n self.image.defaultImage = defaultImage\n self.text = TextAreaField(x_(u\"Text\"),\n x_(\"\"\"Enter the text you wish to \n associate with the image.\"\"\"))\n self.text.idevice = self\n self.float = u\"left\"\n self.caption = u\"\"\n self._captionInstruc = x_(u\"\"\"Provide a caption for the image \nyou have just inserted.\"\"\")\n captionInstruc = lateTranslate('captionInstruc')\n def upgradeToVersion1(self):\n \"\"\"\n Called to upgrade from 0.5 release\n \"\"\"\n self.float = u\"left\"\n def upgradeToVersion2(self):\n \"\"\"\n Called to upgrade from 0.6 release\n \"\"\"\n self.caption = u\"\"\n self.emphasis = Idevice.NoEmphasis\n def upgradeToVersion3(self):\n \"\"\"\n Upgrades v0.6 to v0.7.\n \"\"\"\n self.lastIdevice = False\n def upgradeToVersion4(self):\n \"\"\"\n Upgrades to exe v0.10\n \"\"\"\n self._upgradeIdeviceToVersion1()\n def upgradeToVersion5(self):\n \"\"\"\n Upgrades to v0.12\n \"\"\"\n log.debug(\"upgrade to version 5\")\n self._upgradeIdeviceToVersion2() \n self.image._upgradeFieldToVersion2()\n def upgradeToVersion6(self):\n \"\"\"\n Called to upgrade from 0.13 release\n \"\"\"\n self._captionInstruc = x_(u\"\"\"Provide a caption for the image \nyou have just inserted.\"\"\")\n","sub_path":"eXe/rev2283-2337/right-branch-2337/exe/engine/imagewithtextidevice.py","file_name":"imagewithtextidevice.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"25519058","text":"import json\nimport requests\nimport codecs\nfrom flask import jsonify\n\ndef get_keys():\n\tkeys = []\n\tjs = open('all_recipes.json')\n\tdata = (json.load(js)).get('data')\n\t#real_data = data.get('data')\n\tfor d in data:\n\t\tkeys.append(d.get('id'))\n\n\treturn keys\n\ndef save_request():\n\tkeys = get_keys()\n\tpath = \"http://api.vennfridge.appspot.com/recipes/\"\n\t#subkeys = [keys[0], keys[1]]\n\n\twith open('test.txt','w') as fd:\n\t\tfd.write('{\\n\"result\" : [ \\n')\n\n\tfor id in keys:\n\t\tr = requests.get(path+str(id))\n\t\twith open('test.txt','a') as fd:\n\t\t\tfd.write(r.text+',\\n')\n\n\t### DO NOT FORGET TO REMOVE LAST COMMA MANUALLY ###\n\twith open('test.txt','a') as fd:\n\t\tfd.write(']\\n}')\n\nsave_request()","sub_path":"visual_data/get_recipes.py","file_name":"get_recipes.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"479054940","text":"#!/usr/bin/env\n## Import para a pasta acima\nimport sys\nfrom datetime import datetime\nfrom flask import Blueprint, session, redirect, request, render_template\nsys.path.append(\"..\")\n\n## Imports padrão\nfrom functions import *\nfrom forms_syscadomar import *\nfrom models_syscadomar import *\nfrom models_syscadomar import Funcionarios\n\n\nrh = Blueprint('rh', __name__, template_folder='../templates/rh/')\n\n@rh.route('/recursosHumanos')\ndef recursosHumanos():\n error = \"\"\n return render_template('rh.html', error = error, title_page = 'Recursos Humanos')\n\n@rh.route('/rh/cadastroEstoque', methods=['POST', 'GET'])\ndef cadastro_estoque_rh():\n \"\"\" Cadastro de Estoque para o RH \"\"\"\n error = \"\"\n categorias = CategoriaItens.objects()\n centros_estoque = CentroEstoque.objects()\n id_item = request.args.get('id_item')\n lista_itens = Itens.objects().distinct('descricao')\n next_code = 
proximoCodigoItem()\n\n form = ItensForm()\n itens_estoque = Itens.objects(centroEstoque='RH')\n\n if id_item:\n item = Itens.objects.get(id=id_item)\n btn_text = 'Salvar'\n else:\n item = ''\n btn_text = 'Incluir'\n\n if request.method == 'POST':\n if Itens.objects(codigo=fillZerosFive(form.codigo.data)):\n error = \"Item já possui cadastro:Codigo:{}\".format(fillZerosFive(form.codigo.data))\n else:\n novo_item = Itens(\n tipoDeUnidade = form.tipoDeUnidade.data,\n codigo = fillZerosFive(form.codigo.data), \n descricao = form.descricao.data,\n qtEstoqueCritico = form.estoqueCritico.data,\n categoria = request.form.get('categoria_item'),\n valorEstoque = 'R$ 0,00',\n qtEstoque = '0',\n centroEstoque = request.form.get('centro_estoque_item'),\n )\n novo_item.save()\n return render_template('cadastroItemRH.html',next_code = next_code, lista_itens = lista_itens, centros_estoque = centros_estoque, btn_text = btn_text, item = item, itens_estoque = itens_estoque, error = error, categorias = categorias, title_page = \"Cadastro Item\", form = form)\n return render_template('cadastroItemRH.html', lista_itens = lista_itens, next_code = next_code, centros_estoque = centros_estoque, btn_text = btn_text, item = item, itens_estoque = itens_estoque, error = error, categorias = categorias, title_page = \"Cadastro Item\", form = form)\n\n@rh.route('/pontoMotoristas', methods=['POST','GET'])\ndef pontoMotoritas():\n print('testando')\n motoristas = Motoristas.objects().distinct('codigo')\n today = datetime.datetime.today()\n data_default = '{}/{}'.format(today.month,today.year)\n data = []\n form = PesquisaRelatorioPonto()\n\n if request.method == 'POST':\n data_default = form.data.data,\n data_default = data_default[0]\n\n for i in motoristas:\n temp = pontoConsolidado(filtraPonto(i,data_default), str(i))\n data.append(temp)\n\n total_valores = []\n for i in data:\n total_valores.append(i['total'])\n\n print (sumMoneyList(total_valores))\n\n context = {\n 'error':'',\n 'title_page':'Relátorio de Horas {}'.format(data_default),\n 'data':data,\n 'data_default':data_default,\n 'form':form,\n }\n\n return render_template('pontoMotoristas.html', **context)\n\n@rh.route('/recursosHumanos/novoFuncionario', methods=['POST', 'GET'])\ndef novoFuncionario():\n \"\"\"\n Rota de Cadastro de Funcionarios\n \"\"\"\n form = FuncionarioForm()\n id_funcionario = request.args.get('id_funcionario')\n btn_text = request.args.get('submit')\n\n if request.method == 'POST':\n if Funcionarios.objects(nome=form.nome.data):\n funcionario = Funcionarios.objects.get(nome=form.nome.data)\n funcionario.update(\n codigo = form.codigo.data,\n nome = form.nome.data.upper(),\n cpf = form.cpf.data,\n dataNasc = objDate(form.dataNasc.data), \n dataNascimento = form.dataNasc.data,\n ultimoASO = objDate(form.ultimoASO.data),\n ultimoASOstring = form.ultimoASO.data,\n salario = form.salario.data,\n setor = form.setor.data.upper(),\n horas_mes = form.horas_mes.data,\n estado = form.estado.data.upper(),\n cidade = form.cidade.data.upper(),\n bairro = form.bairro.data.upper(),\n rua = form.rua.data.upper(),\n numero = form.numero.data,\n observacoes = form.observacoes.data.upper(),\n )\n funcionario.save()\n return redirect('cadastroFuncionario')\n else:\n novo_funcionario = Funcionarios(\n codigo = form.codigo.data,\n nome = form.nome.data.upper(),\n cpf = form.cpf.data,\n dataNasc = objDate(form.dataNasc.data), \n dataNascimento = form.dataNasc.data,\n ultimoASO = objDate(form.ultimoASO.data),\n ultimoASOstring = form.ultimoASO.data,\n salario = 
form.salario.data,\n setor = form.setor.data.upper(),\n horas_mes = form.horas_mes.data,\n estado = form.estado.data.upper(),\n cidade = form.cidade.data.upper(),\n bairro = form.bairro.data.upper(),\n rua = form.rua.data.upper(),\n numero = form.numero.data,\n observacoes = form.observacoes.data.upper(),\n ).save()\n print (novo_funcionario)\n return '',204\n\n@rh.route('/recursosHumanos/cadastroFuncionario')\ndef cadastroFuncionario():\n form = FuncionarioForm()\n id_funcionario = request.args.get('id_funcionario')\n\n if id_funcionario:\n funcionario = Funcionarios.objects.get(id=id_funcionario)\n btn_text = 'Salvar'\n else:\n funcionario = ''\n btn_text = 'Cadastrar Novo Funcionário'\n\n context = {\n 'error':'',\n 'title_page':'Cadastro de Funcionário',\n 'lista_funcionarios':'',\n 'lista_cargos':'',\n 'form':form,\n 'btn_text':btn_text,\n 'funcionario':funcionario,\n 'funcionarios':Funcionarios.objects(),\n 'lista_cpf':Funcionarios.objects().distinct('cpf'),\n }\n return render_template('cadastro_funcionarios.html', **context)\n\n\n@rh.route('/funcionariosjson', methods=['POST','GET'])\ndef funcionariosjson():\n if session['username'] != '':\n funcionarios = Funcionarios.objects()\n funcionarios = jsonify(funcionarios)\n return funcionarios \n else:\n return '', 204 \n\n\n\n@rh.route('/relatorio_portaria', methods=['POST','GET'])\ndef relatorio_portaria():\n context = {\n 'title_page':'Relátorio da Portaria',\n 'lista_placas':Veiculos.objects().distinct('placa'),\n 'registro_veiculos': PortariaVeiculos.objects(data=objDate(dataAtual())),\n 'entradas_funcionarios':PortariaFuncionarios.objects(data=objDate(dataAtual())),\n 'entradas_visitantes':PortariaVisitantes.objects(data=objDate(dataAtual())),\n }\n\n if request.method == 'POST':\n data = request.form.get('data')\n if data == '':\n data = dataAtual()\n print (data)\n data_final = request.form.get('data_final')\n if data_final == '':\n data_final = dataAtual()\n context['registro_veiculos'] = PortariaVeiculos.objects(data__gte=objDate(data))\n context['entradas_funcionarios'] = PortariaFuncionarios.objects(data__gte=objDate(data)) \n context['entradas_visitantes'] = PortariaVisitantes.objects(data__gte=objDate(data)) \n else:\n context['registro_veiculos'] = PortariaVeiculos.objects(data__gte=objDate(data), data__lte=objDate(data_final))\n context['entradas_funcionarios'] = PortariaFuncionarios.objects(data__gte=objDate(data), data__lte=objDate(data_final))\n context['entradas_visitantes'] = PortariaVisitantes.objects(data__gte=objDate(data), data__lte=objDate(data_final))\n\n\n \n\n\n return render_template('relatorio_portaria.html', **context)\n\n\n@rh.route('/rh/relatorio_horas_extras')\ndef relatorio_horas_extras():\n return render_template('relatorio_horas_extras.html')\n\n\n","sub_path":"cadomar/modules/rh.py","file_name":"rh.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"118832663","text":"\"\"\"projectRSATU URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. 
Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path, include\n# from django.contrib.auth import views as auth_views\nfrom django.views.generic import TemplateView\n\nfrom RSATU1 import views\n\nurlpatterns = [\n # path('', views.index, name='home'),\n path('admin/', admin.site.urls),\n\n path('about', views.about),\n path('contact', views.contact),\n path('blog', views.PostListView.as_view(), name='blog'),\n re_path(r'blog/post/(?P\\d+)', views.post),\n re_path(r'tag/(?P\\w+)', views.tags),\n\n # re_path(r'^login/$', auth_views.login, name='login'),\n # re_path(r'^logout/$', auth_views.logout, name='logout'),\n # re_path(r'^admin/', admin.site.urls)\n\n re_path(r'^accounts/login/$', views.user_login, name='login'),\n re_path(r'^accounts/register/$', views.register, name='register'),\n\n path('', TemplateView.as_view(template_name='home.html'), name='home'),\n\n path('accounts/', include('django.contrib.auth.urls')),\n\n re_path(r'^chats/$', views.get_chat_list, name='chat_list'),\n re_path(r'^chats/create/(?P\\d+)/$', views.create_dialog, name='create_dialog'),\n re_path(r'^chats/(?P\\d+)/$', views.MessagesView.as_view(), name='messages'),\n]\n","sub_path":"projectRSATU/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"438538409","text":"from django.db.models import Q\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.utils.http import urlencode\nfrom django.views import View\nfrom django.views.generic import TemplateView, ListView\n\n\nclass DetailView(TemplateView):\n context_key = 'objects'\n model = None\n\n def get_context_data(self, **kwargs):\n pk = kwargs.get('pk')\n context = super().get_context_data(**kwargs)\n context[self.context_key] = get_object_or_404(self.model, pk=pk)\n return context\n\n def get_objects(self):\n return self.model.objects.all()\n\n\nclass UpdateView(View):\n form_class = None\n template_name = None\n redirect_url = ''\n model = None\n key_kwarg = 'pk'\n context_key = 'object'\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.form_class(initial=self.get_form_initial())\n context = self.make_context(form)\n return render(request, self.template_name, context=context)\n\n def get_form_initial(self):\n model_fields = [field.name for field in self.model._meta.fields]\n initial = {}\n for field in model_fields:\n initial[field] = getattr(self.object, field)\n print(initial)\n return initial\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(data=request.POST)\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n self.object = self.get_object()\n for field, value in form.cleaned_data.items():\n setattr(self.object, field, value)\n self.object.save()\n return redirect(self.get_redirect_url())\n\n def form_invalid(self, form):\n context = self.make_context(form)\n return render(self.request, self.template_name, context=context)\n\n def get_object(self):\n pk = self.kwargs.get(self.key_kwarg)\n return get_object_or_404(self.model, pk=pk)\n\n def make_context(self, form):\n return {\n 'form': form,\n self.context_key: self.object\n }\n\n def get_redirect_url(self):\n return self.redirect_url\n\n\nclass 
DeleteView(View):\n template_name = None\n model = None\n redirect_url = None\n confirmation_for_delete = None\n\n\n def get(self, request, *args, **kwargs):\n object = get_object_or_404(self.model, pk=kwargs.get('pk'))\n if self.confirmation_for_delete == True:\n context = {'object': object}\n return render(self.request, self.template_name, context)\n else:\n object.delete()\n return redirect(self.get_redirect_url())\n\n def post(self, request, *args, **kwargs):\n object = get_object_or_404(self.model, pk = kwargs.get('pk'))\n object.delete()\n return redirect(self.get_redirect_url())\n\n\n def get_redirect_url(self):\n return self.redirect_url\n\n\nclass SearchView(ListView):\n template_name = None\n model = None\n paginate_by = 10\n paginate_orphans = 1\n page_kwarg = 'page'\n form = None\n\n\n def get(self, request, *args, **kwargs):\n self.form = self.get_search_form()\n self.search_value = self.get_search_value()\n return super().get(request, *args, **kwargs)\n\n def get_search_form(self):\n return self.form(data=self.request.GET)\n\n def get_search_value(self):\n if self.form.is_valid():\n return self.form.cleaned_data['search']\n return None\n\n def get_queryset(self):\n queryset = super().get_queryset()\n if self.search_value:\n queryset = queryset.filter(\n self.get_query()\n )\n return queryset\n\n def get_query(self):\n pass\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(object_list=object_list, **kwargs)\n context['form'] = self.form\n if self.search_value:\n context['query'] = urlencode({'search' : self.search_value})\n return context","sub_path":"source/webapp/views/base_views.py","file_name":"base_views.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"113746775","text":"__author__ = 'Kozma Balazs'\nfrom msvcrt import getch\nfrom file_operator import FileOperator\nimport mysql.connector\nimport csv\n\n\nclass ListingDataBase(object):\n @staticmethod\n def listing_database(which_file):\n if FileOperator.csv_or_db() == 'db':\n ListingDataBase.listing_database_db(which_file)\n else:\n ListingDataBase.listing_database_csv(which_file)\n\n @staticmethod\n def listing_database_db(which_file):\n server_name, user_name, user_password, database_name = FileOperator.app_config_reader()\n if 'donors' in which_file:\n table_name = '.Donor'\n else:\n table_name = '.Event'\n sql_command, result, header = [], [], []\n sql_command.append(\"SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE \" + \\\n \"`TABLE_SCHEMA`='\" + database_name + \"' AND `TABLE_NAME`='\" + table_name[1:] + \"';\")\n sql_command.append(\"SELECT * FROM \" + database_name + table_name)\n dbcon = mysql.connector.connect(user=user_name, password=user_password, host=server_name, database=database_name)\n cursor = dbcon.cursor()\n for i, one_command in enumerate(sql_command):\n cursor.execute(one_command)\n for cursor_message in cursor:\n if i == 0:\n header.append(cursor_message[0])\n else:\n result.append(cursor_message)\n if len(result) != 0:\n if len(result) == 1:\n print(\"There is only one result:\")\n elif len(result) > 1:\n print(\"There are \" + str(len(result)) + \" results:\")\n print(\"-\" * 52)\n for i in range(len(result)):\n ListingDataBase.printer(i + 1, header, result[i])\n else:\n print(\"There is no data corresponding to this query...\")\n dbcon.close()\n getch()\n\n @staticmethod\n def listing_database_csv(which_file):\n print(\"-\" * 52)\n 
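# flag whether the CSV yields any data rows beyond the header\n        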
is_there_any_data = False\n with open(which_file, \"r\", encoding=\"utf-8\") as csvfile:\n filereader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n is_first_row = True\n for i, row in enumerate(filereader):\n if is_first_row:\n first_row = row\n for i, header in enumerate(first_row):\n header = header.replace(\"_\", \" \")\n first_row[i] = header[0].upper() + header[1:]\n is_first_row = False\n continue\n is_there_any_data = True\n ListingDataBase.printer(i, first_row, row)\n if not is_there_any_data:\n print(\"Sorry, the database is empty...\")\n\n @staticmethod\n def printer(i, first_row, row):\n is_first_element = True\n for (head, one_element) in zip(first_row, row):\n one_element = str(one_element)\n if is_first_element:\n print(str(i) + \".\" + \" \" * (24 - len(str(i)) - len(head)) + head + \": \" + one_element)\n is_first_element = False\n else:\n print(\" \" * (25 - len(head)) + head + \": \" + one_element)\n print(\"-\" * 52)\n if i % 2 == 0 and i != 0:\n getch()","sub_path":"listing.py","file_name":"listing.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"18657493","text":"from opengever.ogds.base.actor import Actor\nfrom opengever.ogds.models.team import Team\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zExceptions import BadRequest\n\n\nclass ListTeamMembers(BrowserView):\n \"\"\"Fetch team group and group members by team id.\"\"\"\n\n template = ViewPageTemplateFile(\"list_groupmembers.pt\")\n\n def __init__(self, context, request):\n super(ListTeamMembers, self).__init__(context, request)\n\n self.active = None\n self.members = None\n self.team_id = None\n self.group_id = None\n\n def __call__(self):\n self.team_id = self.context.REQUEST.get('team', None)\n self.members = []\n\n if not self.team_id:\n raise BadRequest('no team id')\n\n try:\n team = Team.query.filter_by(team_id=self.team_id).one()\n group = getattr(team, 'group', None)\n self.group_id = getattr(group, 'groupid', None)\n except NoResultFound:\n self.group_id = None\n\n if not self.group_id:\n raise BadRequest('no group id')\n\n self.active = getattr(group, 'active', None)\n\n actors = [Actor.user(user.userid) for user in getattr(group, 'users', [])]\n actors.sort(key=lambda actor: actor.get_label())\n self.members = [each.get_link() for each in actors]\n\n return self.template()\n","sub_path":"opengever/base/browser/list_teammembers.py","file_name":"list_teammembers.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"526450721","text":"import numpy as np\r\nimport pandas as pd\r\nimport time\r\nimport pickle\r\nimport sys\r\n\r\ndef test(w,w2,b):\r\n x_te = np.zeros((240,27),dtype = np.float)\r\n with open(sys.argv[1]) as fp:\r\n id_ = 0\r\n ids = []\r\n i=0\r\n for line in fp:\r\n vector = line.strip().split(',')\r\n if i%18 == 1:\r\n ids.append(vector[0])\r\n if vector[1] == 'PM2.5':\r\n x_te[id_,:9] = vector[2:]\r\n id_ += 1\r\n elif vector[1] == 'AMB_TEMP':\r\n x_te[id_,9:18] = vector[2:]\r\n elif vector[1] == 'PM10':\r\n x_te[id_,18:] = vector[2:]\r\n \r\n y_te = b + x_te.dot(w) + np.square(x_te).dot(w2)# + np.power(x_te,3).dot(w3)# + np.power(x_te,4).dot(w4) + np.power(x_te,5).dot(w5)\r\n with open(sys.argv[2],'w') as fp:\r\n fp.write('id,value\\n')\r\n for i in range(240):\r\n 
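# write one prediction row per test id in the \"id,value\" format declared in the header\r\n\t\t\t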
fp.write('id_' + str(i) + ',' + str(y_te[i,0]) + '\\n')\r\n fp.close()\r\nw=pickle.load(open(\"w.dat\", \"rb\"))\r\nw2=pickle.load(open(\"w2.dat\", \"rb\"))\r\nb=pickle.load(open(\"b.dat\", \"rb\"))\r\ntest(w,w2,b)\r\n\r\n\r\n\r\n\r\n","sub_path":"hw1/hw1_best.py","file_name":"hw1_best.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"578703893","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom Dialogs.superadmin.Hospitals.deleteAdmin import *\nfrom Data.States import *\nfrom Dialogs.messageBox import *\n\nclass selectAdmin(object):\n def __init__(self):\n self.last_city = ''\n self.hospital_list = []\n self.admin_list = []\n self.last_hospital = ''\n def setup(self, selectAdmin):\n selectAdmin.setObjectName(\"selectAdmin\")\n selectAdmin.resize(333, 408)\n selectAdmin.setWindowTitle(\"\")\n self.title = QtWidgets.QLabel(selectAdmin)\n self.title.setGeometry(QtCore.QRect(40, 0, 261, 51))\n self.title.setObjectName(\"title\")\n self.frame = QtWidgets.QFrame(selectAdmin)\n self.frame.setGeometry(QtCore.QRect(10, 60, 311, 291))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.cityComboBox = QtWidgets.QComboBox(self.frame)\n self.cityComboBox.setGeometry(QtCore.QRect(120, 140, 161, 27))\n self.cityComboBox.setObjectName(\"cityComboBox\")\n self.hospitalComboBox = QtWidgets.QComboBox(self.frame)\n self.hospitalComboBox.setGeometry(QtCore.QRect(120, 190, 161, 27))\n self.hospitalComboBox.setObjectName(\"hospitalComboBox\")\n self.stateComboBox = QtWidgets.QComboBox(self.frame)\n self.stateComboBox.setGeometry(QtCore.QRect(120, 90, 161, 27))\n self.stateComboBox.setObjectName(\"stateComboBox\")\n self.cityLabel = QtWidgets.QLabel(self.frame)\n self.cityLabel.setGeometry(QtCore.QRect(30, 140, 111, 31))\n self.cityLabel.setObjectName(\"cityLabel\")\n self.hospitalLabel = QtWidgets.QLabel(self.frame)\n self.hospitalLabel.setGeometry(QtCore.QRect(30, 190, 111, 31))\n self.hospitalLabel.setObjectName(\"hospitalLabel\")\n self.stateLabel = QtWidgets.QLabel(self.frame)\n self.stateLabel.setGeometry(QtCore.QRect(30, 90, 111, 31))\n self.stateLabel.setObjectName(\"stateLabel\")\n self.adminLabel = QtWidgets.QLabel(self.frame)\n self.adminLabel.setGeometry(QtCore.QRect(30, 240, 111, 31))\n self.adminLabel.setObjectName(\"adminLabel\")\n self.adminComboBox = QtWidgets.QComboBox(self.frame)\n self.adminComboBox.setGeometry(QtCore.QRect(120, 240, 161, 27))\n self.adminComboBox.setObjectName(\"adminComboBox\")\n self.ORLabel = QtWidgets.QLabel(self.frame)\n self.ORLabel.setGeometry(QtCore.QRect(110, 50, 111, 31))\n self.ORLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.ORLabel.setObjectName(\"ORLabel\")\n self.searchByID = QtWidgets.QLineEdit(self.frame)\n self.searchByID.setGeometry(QtCore.QRect(20, 20, 261, 27))\n self.searchByID.setObjectName(\"searchByID\")\n self.deleteButton = QtWidgets.QPushButton(selectAdmin)\n self.deleteButton.setGeometry(QtCore.QRect(140, 360, 80, 28))\n self.deleteButton.setObjectName(\"deleteButton\")\n\n self.retranslateUi(selectAdmin)\n QtCore.QMetaObject.connectSlotsByName(selectAdmin)\n\n def retranslateUi(self, selectAdmin):\n _translate = QtCore.QCoreApplication.translate\n selectAdmin.setWindowTitle(_translate(\"selectAdmin\", \" \"))\n 
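# populate the user-visible text for each widget in the dialog\n        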
self.title.setText(_translate(\"selectAdmin\", \"Select Admin\"))\n        self.cityLabel.setText(_translate(\"selectAdmin\", \"City :\"))\n        self.hospitalLabel.setText(_translate(\"selectAdmin\", \"Hospital :\"))\n        self.stateLabel.setText(_translate(\"selectAdmin\", \"State :\"))\n        self.adminLabel.setText(_translate(\"selectAdmin\", \"Admin :\"))\n        self.ORLabel.setText(_translate(\"selectAdmin\", \"OR
\"))\n self.searchByID.setPlaceholderText(_translate(\"selectAdmin\", \"Search by Admin Id\"))\n self.deleteButton.setText(_translate(\"selectAdmin\", \"SELECT\"))\n\n self.stateAddFunction(selectAdmin)\n self.clickEvents(selectAdmin)\n\n def clickEvents(self, parent):\n self.deleteButton.clicked.connect(lambda : self.clickOnDeleteAdmin(parent))\n\n def clickOnDeleteAdmin(self, parent):\n id = self.searchByID.text()\n if id != \"\":\n if id.isdigit():\n import requests\n URL = \"https://mdtouch.herokuapp.com/api/administrator/\" + str(id)\n r = requests.get(url= URL)\n data = r.json()\n if data == {\"detail\": \"Not found.\"}:\n self.window = messageBox()\n self.window.infoBox(\"Id Does Not Exists\")\n self.searchByID.setText(\"\")\n return\n else:\n URL = \"https://mdtouch.herokuapp.com/api/hospital/\" + str(data[\"workplace\"])\n r = requests.get(url=URL)\n hdata = r.json()\n parent.close()\n self.window = QDialog()\n self.dialog = deleteAdmin()\n self.dialog.setup(self.window,data,hdata)\n self.window.setModal(True)\n self.window.show()\n return\n\n else:\n self.window = messageBox()\n self.window.infoBox(\"Id is a Integer\")\n self.searchByID.setText(\"\")\n return\n\n if self.adminComboBox.count() == 0:\n self.window = messageBox()\n self.window.infoBox(\"Select the hospital first\")\n return\n admin_name = self.adminComboBox.currentText()\n adminData = {}\n hdata = {}\n for i in self.admin_list:\n if admin_name == i[\"firstName\"] + \" \" + i[\"lastName\"]:\n adminData = i\n break\n for i in self.hospital_list:\n if i[\"name\"] == self.hospitalComboBox.currentText():\n hdata = i\n break\n parent.close()\n self.window = QDialog()\n self.dialog = deleteAdmin()\n self.dialog.setup(self.window,adminData,hdata)\n self.window.setModal(True)\n self.window.show()\n\n\n def stateAddFunction(self,parent):\n for i in states.values():\n self.stateComboBox.addItem(i)\n for i in cities[\"Andhra Pradesh\"]:\n self.cityComboBox.addItem(i)\n self.stateComboBox.currentIndexChanged.connect(lambda : self.cityAddFunction(parent))\n self.stateComboBox.currentIndexChanged.connect(lambda :self.hospitalComboBoxAdd(parent))\n #self.stateComboBox.currentIndexChanged.connect(lambda : self.adminComboBoxAdd(parent))\n\n def cityAddFunction(self,parent):\n state = self.stateComboBox.currentText()\n i = self.cityComboBox.count()\n flag = True\n while i > 0:\n flag = False\n self.cityComboBox.removeItem(0)\n i-=1\n flag = True\n for i in cities[state]:\n flag = False\n self.cityComboBox.addItem(i)\n flag = True\n self.cityComboBox.currentIndexChanged.connect(lambda :self.hospitalComboBoxAdd(parent))\n\n def hospitalComboBoxAdd(self,parent):\n if self.last_city == self.cityComboBox.currentText() or self.cityComboBox.count() != len(cities[self.stateComboBox.currentText()]) or self.cityComboBox.itemText(self.cityComboBox.count()-1) != cities[self.stateComboBox.currentText()][-1]:\n return\n self.last_city = self.cityComboBox.currentText()\n # First Erase all Hospitals\n i = self.hospitalComboBox.count()\n while i > 0:\n i -= 1\n self.hospitalComboBox.removeItem(0)\n\n import requests\n print(self.cityComboBox.currentText())\n URL = \"https://mdtouch.herokuapp.com/api/hospital/\"\n param ={\n \"city\": self.cityComboBox.currentText()\n }\n r = requests.get(url=URL,params=param)\n l = r.json()\n print(l)\n self.hospital_list = l\n for i in l:\n self.hospitalComboBox.addItem(str(i[\"name\"]))\n self.hospitalComboBox.currentIndexChanged.connect(lambda : self.adminComboBoxAdd(parent))\n\n def 
adminComboBoxAdd(self,parent):\n i = self.adminComboBox.count()\n while i > 0:\n i -= 1\n self.adminComboBox.removeItem(0)\n workplace_id = 0\n print(self.hospital_list)\n for i in self.hospital_list:\n if i[\"name\"] == self.hospitalComboBox.currentText():\n workplace_id = i[\"id\"]\n break\n if workplace_id == 0:\n return\n\n print(workplace_id)\n import requests\n URL = \"https://mdtouch.herokuapp.com/api/administrator/\"\n param = {\n \"workplace\" : int(workplace_id)\n }\n r = requests.get(url=URL,params=param)\n self.admin_list = r.json()\n print(self.admin_list)\n for i in self.admin_list:\n self.adminComboBox.addItem(str(i[\"firstName\"]) + \" \" + i[\"lastName\"])","sub_path":"Dialogs/superadmin/Hospitals/selectAdmin.py","file_name":"selectAdmin.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"322121195","text":"import os\nfrom data import srdata\n\n\nclass face_data(srdata.SRData):\n def __init__(self, args, name='face_data', train=True, benchmark=False):\n super(face_data, self).__init__(\n args, name=name, train=train, benchmark=benchmark\n )\n\n def _set_filesystem(self, data_dir):\n super(face_data, self)._set_filesystem(data_dir)\n\n s = str(self.args.total_scale).split('.')\n hr_path = 'HR_' + s[0] + s[1]\n lr_path = 'LR_' + s[0] + s[1]\n\n self.dir_hr = os.path.join(self.apath, hr_path)\n self.dir_lr = os.path.join(self.apath, lr_path)\n","sub_path":"data/face_data.py","file_name":"face_data.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"299900602","text":"import json\n\nfrom plotly.graph_objs import Scattergeo, Layout\nfrom plotly import offline\n\nfilename = 'DataViz/shipwrecks.json'\nwith open (filename, encoding=\"utf8\") as f:\n all_ship_data = json.load(f)\n\nall_ship_dicts = all_ship_data['features']\n\ndepths, lons, lats, hover_texts = [], [], [], []\n\nfor ship_dict in all_ship_dicts:\n try: \n depth = int(ship_dict['properties']['Depth'])\n lon = ship_dict['geometry']['coordinates'][0]\n lat = ship_dict['geometry']['coordinates'][1]\n title = ship_dict['properties']['Vessel']\n except TypeError:\n print(f\"{title} missing depth info.\")\n else: \n lons.append(lon)\n lats.append(lat)\n hover_texts.append(title)\n depths.append(depth)\n\ndata = [{\n 'type': 'scattergeo',\n 'lon': lons,\n 'lat': lats,\n 'text': hover_texts,\n 'marker': {\n 'size': [int(depth)/5 for depth in depths],\n 'color': depths,\n 'colorscale': 'Reds',\n 'colorbar': {'title': 'Resting Depth'},\n }\n}]\n\nmy_layout = Layout(\n title='Shipwrecks of the Great Lakes',\n geo = dict(\n showframe = True,\n framecolor = \"rgb(82, 82, 82)\",\n framewidth = 2,\n scope='usa',\n showland = True,\n showlakes = True,\n showrivers = True,\n lakecolor = \"rgb(148, 195, 234)\",\n rivercolor = \"rgb(40, 110, 168)\",\n landcolor = \"rgb(145, 195, 138)\",\n resolution = 50,\n subunitcolor = \"rgb(217, 217, 217)\",\n countrycolor = \"rgb(217, 217, 217)\",\n countrywidth = 0.5,\n subunitwidth = 0.5,\n center=dict(\n lon = -86.59149,\n lat = 44.10808,\n ),\n projection=dict(\n scale = 2,\n ),\n ),\n )\n\nfig = {'data': data, 'layout':my_layout}\noffline.plot(fig, filename='DataViz/index.html')\n\n# readable_file = 'DataViz/shipwrecks.json'\n# with open(readable_file, 'w') as f:\n# json.dump(all_ship_data, f, 
indent=4)\n","sub_path":"DataViz/shipwrecks.py","file_name":"shipwrecks.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"364040324","text":"# 01bags\ndef findBag(w, v, c):\n dp = [ 0 for _ in range(c + 1)]\n for i in range(1, len(v) + 1):\n for j in range(c, w[i - 1] - 1, -1):\n dp[j] = max(dp[j], dp[j - w[i - 1]] + v[i - 1])\n return dp[-1]\n\n# download in desktop\npath = \"C:\\\\Users\\\\Administrator\\\\Desktop\\\\Test-Data-of-01-knapsack-problem\\\\\"\nfor i in range(10):\n pathin = path + 'beibao' + str(i) + '.in'\n pathout = path + 'beibao' + str(i) + '.out'\n w = []\n v = []\n result0 = -1\n with open(pathin, 'r') as fr:\n lines = fr.read().splitlines()\n for line in lines:\n line = line.split(' ')\n w.append(int(line[0]))\n v.append(int(line[1]))\n c = w[0]\n w = w[1:]\n v = v[1:]\n result0 = findBag(w, v, c)\n\n result2 = -2\n with open(pathout, 'r') as fr:\n lines = fr.read().splitlines()\n result2 = int(lines[0])\n \n # return true\n print(result0 == result2)","sub_path":"script for 01bags.py","file_name":"script for 01bags.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"527887272","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom numpy.testing import assert_equal\nimport astropy.units as u\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.table import Table\nfrom astropy.coordinates import Angle\nfrom ...data import DataStore, EventList\nfrom ...utils.testing import requires_dependency, requires_data\nfrom ...utils.energy import EnergyBounds, Energy\nfrom ..energy_offset_array import EnergyOffsetArray\nfrom ..fov_cube import FOVCube\n\n\ndef make_test_array(dummy_data=False):\n ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')\n offset = Angle(np.linspace(0, 2.5, 100), \"deg\")\n array = EnergyOffsetArray(ebounds, offset)\n if dummy_data is True:\n # Define an EventList with three events\n table = Table()\n table['RA'] = [0.6, 0, 2]\n table['RA'].unit = 'deg'\n table['DEC'] = [0, 1.5, 0] * u.deg\n table['ENERGY'] = [0.12, 22, 55] * u.TeV\n table.meta['RA_PNT'] = 0\n table.meta['DEC_PNT'] = 0\n events = EventList(table)\n array.fill_events([events])\n return array, events.offset, events.energy\n else:\n return array\n\n\ndef make_empty_cube():\n array = make_test_array()\n offmax = array.offset.max() / 2.\n offmin = array.offset.min()\n bins = 2 * len(array.offset)\n coordx_edges = Angle(np.linspace(offmax.value, offmin.value, bins), \"deg\")\n coordy_edges = Angle(np.linspace(offmax.value, offmin.value, bins), \"deg\")\n energy_edges = array.energy\n empty_cube = FOVCube(coordx_edges, coordy_edges, energy_edges)\n return empty_cube\n\n\n@requires_data('gammapy-extra')\ndef test_energy_offset_array_fill():\n dir = '$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2'\n data_store = DataStore.from_dir(dir)\n ev_list = data_store.load_all('events')\n array = make_test_array()\n array.fill_events(ev_list)\n # TODO: add some assert, e.g. 
counts in some bin with non-zero entries.\n\n\n@requires_dependency('scipy')\ndef test_energy_offset_array_fill_evaluate():\n array, offset, energy = make_test_array(True)\n # Test if the array is filled correctly\n bin_e = np.array([2, 78, 91])\n bin_off = np.array([23, 59, 79])\n ind = np.where(array.data.value == 1)\n assert_equal(bin_e, ind[0])\n assert_equal(bin_off, ind[1])\n # Test the evaluate method\n interpol_param = dict(method='nearest', bounds_error=False)\n for off, E in zip(offset, energy):\n res = array.evaluate(E, off, interpol_param)\n res_GeV = array.evaluate(E.to('GeV'), off, interpol_param)\n assert_equal(res, 1)\n assert_equal(res_GeV, 1)\n\n\n@requires_dependency('matplotlib')\ndef test_energy_offset_array_plot():\n array = make_test_array()\n array.plot_image()\n\n\ndef test_energy_offset_array_read_write(tmpdir):\n array = make_test_array()\n\n filename = str(tmpdir / 'data.fits')\n array.write(filename)\n array2 = EnergyOffsetArray.read(filename)\n\n assert_equal(array.data, array2.data)\n assert_equal(array.energy, array2.energy)\n assert_equal(array.offset, array2.offset)\n\n # Test if the data in the EnergyOffsetArray have an associated error\n array, event_off, event_energy = make_test_array(True)\n filename = str(tmpdir / 'data2.fits')\n array.write(filename)\n array2 = EnergyOffsetArray.read(filename)\n assert_equal(array.data_err, array2.data_err)\n assert_equal(array.data, array2.data)\n assert_equal(array.energy, array2.energy)\n assert_equal(array.offset, array2.offset)\n\n\ndef test_energy_offset_array_bin_volume():\n array = make_test_array()\n energy_bin = array.energy.bands\n offset_bin = np.pi * (array.offset[1:] ** 2 - array.offset[:-1] ** 2)\n expected_volume = energy_bin[3] * offset_bin[4].to('sr')\n bin_volume = array.bin_volume\n assert_quantity_allclose(expected_volume, bin_volume[3, 4])\n\n\n@requires_dependency('scipy')\ndef test_evaluate_at_energy():\n array, offset, energy = make_test_array(True)\n e_eval = energy[0]\n interpol_param = dict(method='nearest', bounds_error=False)\n table_energy = array.evaluate_at_energy(e_eval, interpol_param)\n assert_quantity_allclose(table_energy[\"offset\"], array.offset_bin_center)\n # Offset bin for the first event is 23\n assert_equal(table_energy[\"value\"][23], 1)\n\n\n@requires_dependency('scipy')\ndef test_evaluate_at_offset():\n array, offset, energy = make_test_array(True)\n off_eval = offset[0]\n interpol_param = dict(method='nearest', bounds_error=False)\n table_offset = array.evaluate_at_offset(off_eval, interpol_param)\n assert_quantity_allclose(table_offset[\"energy\"], array.energy.log_centers)\n # Energy bin for the first event is 2\n assert_equal(table_offset[\"value\"][2], 1)\n\n\n@requires_dependency('scipy')\ndef test_acceptance_curve_in_energy_band():\n \"\"\"\n I define an energy range on witch I want to compute the acceptance curve that has the same boundaries as the\n energyoffsetarray.energy one and I take a Nbin for this range equal to the number of bin of the\n energyoffsetarray.energy one. This way, the interpolator will evaluate at energies that are the same as the one\n that define the RegularGridInterpolator. With the method=\"nearest\" you are sure to get 1 for the energybin where\n are located the three events that define the energyoffsetarray. 
Since in this method we integrate over the energy\n and multiply by the solid angle, I check if for the offset of the three events (bin [23, 59, 79]), we get in the\n table[\"Acceptance\"] what we expect by multiplying 1 by the solid angle and the energy bin width where is situated\n the event (bin [2, 78, 91]).\n \"\"\"\n array, offset, energy = make_test_array(True)\n energ_range = Energy([0.1, 100], 'TeV')\n bins = 100\n interpol_param = dict(method='nearest', bounds_error=False)\n table_energy = array.acceptance_curve_in_energy_band(energ_range, bins, interpol_param)\n assert_quantity_allclose(table_energy[\"offset\"], array.offset_bin_center)\n assert_quantity_allclose(table_energy[\"Acceptance\"][23] * table_energy[\"Acceptance\"].unit,\n 1 * array.energy.bands[2].to('MeV'))\n assert_quantity_allclose(table_energy[\"Acceptance\"][59] * table_energy[\"Acceptance\"].unit,\n 1 * array.energy.bands[78].to('MeV'))\n assert_quantity_allclose(table_energy[\"Acceptance\"][79] * table_energy[\"Acceptance\"].unit,\n 1 * array.energy.bands[91].to('MeV'))\n\n\n@requires_dependency('scipy')\ndef test_to_cube():\n \"\"\"\n There are three events in the energyoffsetarray at three offset and energies. I define a FOVCube with the same energy\n bin as the energyoffsetarray.energy. For the event in the offset bin 23 (=0.6 degre) and in the energy bin 2\n (0.12 Tev), I check if after calling the to_cube() method, all the x and y of the new FOVCube matching with an offset\n equal to 0.6+/-0.1 are filled with 1.\n \"\"\"\n array, offset, energy = make_test_array(True)\n cube = make_empty_cube()\n interpol_param = dict(method='nearest', bounds_error=False)\n cube_model = array.to_cube(cube.coordx_edges, cube.coordy_edges, cube.energy_edges, interpol_param)\n i = np.where(cube_model.data[2, :, :] == 1)\n x = cube_model.coordx_edges\n y = cube_model.coordy_edges\n xx, yy = np.meshgrid(x, y)\n dist = np.sqrt(xx ** 2 + yy ** 2)\n assert_quantity_allclose(dist[i], 0.6 * u.deg, atol=0.1 * u.deg)\n","sub_path":"gammapy/background/tests/test_energy_offset_array.py","file_name":"test_energy_offset_array.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"319110282","text":"from django.contrib import admin\nfrom .models import Resource, Employer, Location, ReferenceSkillList, Role\n\nclass JoinAdmin(admin.ModelAdmin):\n\tclass Meta:\n\t\tmodel = Resource, Employer, Location,ReferenceSkillList, Role\n\nclass ResourceAdmin(admin.ModelAdmin):\n\tlist_display = ('preferred_name', 'last_name', 'email')\n\tsearch_fields = ('preferred_name', 'last_name')\n\nclass EmployerAdmin(admin.ModelAdmin):\n\tlist_display = ('employer_name',)\n\tsearch_fields = ('employer_name',)\n\nclass LocationAdmin(admin.ModelAdmin):\n\tlist_display = ('name', 'phone_number')\n\tsearch_fields = ('name', 'phone_number')\n\nclass ReferenceSkillListAdmin(admin.ModelAdmin):\n\tlist_display = ('skill_category', 'skill_name')\n\tsearch_fields = ('skill_category', 'skill_name')\n\nclass RoleAdmin(admin.ModelAdmin):\n\tlist_display = ('role_name', 'role_description')\n\tsearch_fields = ('role_name', 'role_description')\n\nadmin.site.register(Resource, ResourceAdmin)\nadmin.site.register(Employer, EmployerAdmin)\nadmin.site.register(Location, LocationAdmin)\nadmin.site.register(ReferenceSkillList, ReferenceSkillListAdmin)\nadmin.site.register(Role, 
RoleAdmin)","sub_path":"mikebrookes_net/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"75390720","text":"# Importing libraries\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Reading data from csv file format available\nbreastCancer = pd.read_csv('BreastCancer.csv')\n\n# Converting non-numerical data into numerical\nbreastCancer[\"diagnosis\"] = pd.Categorical(breastCancer[\"diagnosis\"])\nbreastCancer[\"diagnosis\"] = breastCancer[\"diagnosis\"].cat.codes\ncancerData = breastCancer.values\n\n# Split the data set into training and test sets\nx_train, x_test, y_train, y_test = train_test_split(cancerData[:, 2:32], cancerData[:, 1], test_size=0.2, random_state=45)\nprint(x_train.size)\nprint(x_train.shape)\n\n# Creating neural network model for breast cancer diagnosis\n# Define the model to be generated/built\nnnCancer = Sequential()\n# Provide input and neurons for first hidden dense layer\nnnCancer.add(Dense(15, input_dim=30, activation='relu'))\n# Define the output neuron\nnnCancer.add(Dense(1, activation='sigmoid'))\n\n# Fitting the neural network model on the training data set\nnnCancer.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nnnCancerModel = nnCancer.fit(x_train, y_train, epochs=100, verbose=0, initial_epoch=0)\n\n# Display the neural network identified\nprint('The summary of the neural network is', nnCancer.summary())\nprint(nnCancer.evaluate(x_test, y_test, verbose=0))\n\n","sub_path":"ICP1_DL_PK/Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"574043495","text":"import logging\n\nfrom tornado import gen\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PublishingMixin(object):\n \"\"\"This mixin adds publishing messages to RabbitMQ. It uses a\n persistent connection and channel opened when the application\n start up and automatically reopened if closed by RabbitMQ\n\n \"\"\"\n @property\n def app_id(self):\n \"\"\"Return a value to be used for the app_id AMQP message property.\n\n :rtype: str\n\n \"\"\"\n service = self.application.settings.get('service')\n version = self.application.settings.get('version')\n return '{}/{}'.format(service, version)\n\n @gen.coroutine\n def amqp_publish(self, exchange, routing_key, body, properties=None,\n mandatory=False):\n \"\"\"Publish the message to RabbitMQ\n\n Expects Correlation ID to be available via self.correlation_id,\n default value will be None if not set.\n The ``sprockets.mixins.correlation`` plugin provides this.\n\n :param str exchange: The exchange to publish the message to\n :param str routing_key: The routing key to publish the message with\n :param str,unicode,bytes body: The message body to send\n :param dict properties: An optional dict of additional properties\n to append. 
Will not override mandatory\n properties:\n app_id, correlation_id, message_id, timestamp\n :param bool mandatory: Whether to instruct the server to return an\n unqueueable message\n http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish.mandatory\n\n \"\"\"\n yield self.application.amqp.publish(\n exchange,\n routing_key,\n body,\n self.app_id,\n getattr(self, 'correlation_id', None),\n properties,\n mandatory\n )\n","sub_path":"sprockets/mixins/amqp/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"465362366","text":"import os.path as path\n\nproj_dir = path.normpath(path.dirname(path.abspath(__file__)) + \"\\\\..\")\nstr2remove = (proj_dir + \"\\\\lifelib\\\\projects\\\\simplelife\\\\\").replace(\"\\\\\", \"\\\\\\\\\")\ntarget_file = \".\\\\build\\\\html\\\\projects\\\\generated\\\\simplelife.build_input.html\"\n\ndef inplace_change(filename, oldstr, newstr):\n # Code taken from:\n # https://stackoverflow.com/questions/4128144/replace-string-within-file-contents\n\n # Safely read the input filename using 'with'\n with open(filename, mode='r', encoding='utf-8') as f:\n s = f.read()\n if oldstr not in s:\n print('\"{oldstr}\" not found in {filename}.'.format(**locals()))\n return\n\n # Safely write the changed content, if found in the file\n msg = 'Changing \"{oldstr}\" to \"{newstr}\" in {filename}'.format(**locals())\n with open(filename, mode='w', encoding='utf-8') as f:\n print(msg)\n s = s.replace(oldstr, newstr)\n f.write(s)\n\ninplace_change(target_file, str2remove, '')","sub_path":"makedocs/uddocs.py","file_name":"uddocs.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"68944119","text":"import sys, os, re, shutil, argparse\nfrom collections import defaultdict\nimport pandas as pd\n\nfrom .params import read_params\nfrom .job_queues import SlurmQueue\n\n\ndef as_compiled_re(obj):\n '''\n Compile obj as regex pattern if needed.\n '''\n return obj if hasattr(obj, 'match') else re.compile(obj)\n\n\ndef match_files_in_dir(dir, pat):\n '''\n Iterate through files in dir that match pat.\n '''\n pat = as_compiled_re(pat)\n for file in os.listdir(dir):\n m = pat.match(file)\n if m is not None:\n yield m \n\n\ndef read_stderr_file(stderr_file):\n warning_pat = re.compile(r'Warning.*')\n error_pat = re.compile(\n r'.*(Error|Exception|error|fault|failed|Errno).*'\n )\n error = None\n with open(stderr_file) as f:\n for line in f:\n if not warning_pat.match(line) and error_pat.match(line):\n error = line.rstrip()\n return error\n\n\ndef get_job_error(job_file, stderr_pat):\n '''\n Parse the latest error for job_file.\n '''\n job_dir = os.path.dirname(job_file)\n stderr_files = []\n for m in match_files_in_dir(job_dir, stderr_pat):\n stderr_file = m.group(0)\n job_id = int(m.group(1))\n stderr_files.append((job_id, stderr_file))\n\n job_id, stderr_file = sorted(stderr_files)[-1]\n stderr_file = os.path.join(job_dir, stderr_file)\n error = read_stderr_file(stderr_file)\n return error\n\n\ndef get_job_errors(job_files, stderr_pat=r'(\\d+).stderr'):\n '''\n Parse the latest errors for a set of job_files.\n '''\n errors = []\n for job_file in job_files:\n error = get_job_error(job_file, stderr_pat)\n errors.append(error)\n\n return errors\n\n\ndef get_job_metric(job_file, metric_pat):\n '''\n Read the latest output for job_file.\n '''\n job_dir = 
os.path.dirname(job_file)\n job_name = os.path.basename(job_dir)\n\n dfs = []\n for m in match_files_in_dir(job_dir, metric_pat):\n metric_file = os.path.join(job_dir, m.group(0))\n df = pd.read_csv(metric_file, sep=' ')\n df['job_name'] = job_name\n dfs.append(df)\n\n df = pd.concat(dfs)\n\n params = read_params(job_file, line_start='# ')\n for param, value in params.items():\n df[param] = value\n\n return df\n\n\ndef get_job_metrics(job_files, metric_pat=r'(\\d+).metrics'):\n '''\n Read the latest output for a set of job_files.\n '''\n dfs = []\n for job_file in job_files:\n df = get_job_metric(job_file, metric_pat)\n dfs.append(df)\n\n return pd.concat(dfs)\n\n\ndef print_array_indices(idx_set):\n s = get_array_indices_string(idx_set)\n print(s)\n\n\ndef get_array_indices_string(idx_set):\n s = ''\n last_idx = None\n skipping = False\n for idx in sorted(idx_set):\n if last_idx is None:\n s += str(idx)\n elif idx == last_idx + 1:\n skipping = True\n else: # gap\n if skipping:\n skipping = False\n s += '-' + str(last_idx)\n s += ',' + str(idx)\n last_idx = idx\n if skipping:\n s += '-' + str(last_idx)\n return s\n\n\ndef parse_array_indices_str(s):\n idx_pat = re.compile(r'^(\\d+)(-(\\d+))?$')\n indices = []\n for field in s.split(','):\n m = idx_pat.match(field)\n idx_start = int(m.group(1))\n if m.group(2):\n idx_end = int(m.group(3))\n indices.extend(range(idx_start, idx_end+1))\n else:\n indices.append(idx_start)\n return set(indices)\n\n\ndef find_job_ids(job_dir, stderr_pat):\n '''\n Find job ids that have been submitted by\n parsing stderr file names in job_dir.\n '''\n job_ids = []\n for f in os.listdir(job_dir):\n if os.path.isdir(os.path.join(job_dir, f)):\n m = re.match(r'^(\\d+)', f)\n else:\n m = re.match(stderr_pat, f)\n if m:\n job_id = int(m.group(1))\n job_ids.append(job_id)\n\n return job_ids\n\n\ndef read_job_output(job_dir, output_pat):\n '''\n Find job ids that have been submitted by\n parsing stderr file names in job_dir.\n '''\n job_dfs = []\n for m in match_files_in_dir(job_dir, output_pat):\n output_file = os.path.join(job_dir, m.group(0))\n print(output_file)\n job_df = pd.read_csv(output_file, sep=' ', error_bad_lines=False)\n job_df['job_name'] = os.path.split(job_dir)[-1]\n try:\n array_idx = int(m.group(2))\n job_df['array_idx'] = array_idx\n except:\n pass\n job_dfs.append(job_df)\n\n return job_dfs\n\n\ndef print_last_error(job_dir, stderr_pat):\n\n last_job_id = -1\n last_stderr_file = None\n for m in match_files_in_dir(job_dir, stderr_pat):\n stderr_file = os.path.join(job_dir, m.group(0))\n job_id = int(m.group(1))\n if job_id > last_job_id:\n last_job_id = job_id\n last_stderr_file = stderr_file\n\n if last_stderr_file is None:\n print('no error file')\n return\n\n error = read_stderr_file(last_stderr_file)\n print(last_stderr_file + '\\t' + str(error))\n\n\ndef find_submitted_array_indices(job_dir, stderr_pat):\n '''\n Find array indices and job ids that have been\n submitted by parsing stderr file names in job_dir.\n '''\n submitted = set()\n job_ids = []\n for m in match_files_in_dir(job_dir, stderr_pat):\n stderr_file = m.group(0)\n job_id = int(m.group(1))\n array_idx = int(m.group(2))\n job_ids.append(job_id)\n submitted.add(array_idx)\n\n return submitted, job_ids\n\n\ndef copy_back_from_scr_dir(job_dir, scr_dir, copy_back_pat):\n '''\n Copy back output files from scr_dir to job_dir.\n '''\n copied = []\n for m in match_files_in_dir(scr_dir, copy_back_pat):\n copy_back_file = m.group(0)\n src_file = os.path.join(scr_dir, copy_back_file)\n 
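# destination keeps the same file name, relocated into the job directory\n        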
dst_file = os.path.join(job_dir, copy_back_file)\n shutil.copyfile(src_file, dst_file)\n copied.append(dst_file)\n\n return copied\n\n\ndef find_completed_array_indices(job_dir, output_pat, read=False):\n '''\n Find array_indices that have completed by parsing output\n files in job_dir, also optionally read and return job dfs.\n '''\n job_dfs = []\n completed = set()\n for m in match_files_in_dir(job_dir, output_pat):\n array_idx = int(m.group(2))\n completed.add(array_idx)\n if read:\n output_file = os.path.join(job_dir, m.group(0))\n job_df = pd.read_csv(output_file, sep=' ')\n job_df['job_name'] = os.path.split(job_dir)[-1]\n job_df['array_idx'] = array_idx\n job_dfs.append(job_df)\n\n return completed, job_dfs\n\n\ndef print_errors_for_array_indices(job_dir, stderr_pat, indices):\n\n stderr_files = defaultdict(list)\n for m in match_files_in_dir(job_dir, stderr_pat):\n stderr_file = m.group(0)\n job_id = int(m.group(1))\n array_idx = int(m.group(2))\n if array_idx in indices:\n stderr_files[array_idx].append((job_id, stderr_file))\n\n for array_idx in sorted(indices):\n if not stderr_files[array_idx]:\n print('no error file for array_idx {}'.format(array_idx))\n continue\n job_id, stderr_file = sorted(stderr_files[array_idx])[-1]\n stderr_file = os.path.join(job_dir, stderr_file)\n error = read_stderr_file(stderr_file)\n print(stderr_file + '\\t' + str(error))\n\n\n\ndef parse_args(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('job_scripts', nargs='+')\n parser.add_argument('--job_type')\n parser.add_argument('--array_job', default=False, action='store_true')\n parser.add_argument('--submitted', default=None)\n parser.add_argument('--copy_back', '-c', default=False, action='store_true')\n parser.add_argument('--print_indices', '-i', default=False, action='store_true')\n parser.add_argument('--print_errors', '-e', default=False, action='store_true')\n parser.add_argument('--resub_errors', '-r', default=False, action='store_true')\n parser.add_argument('--output_file', '-o')\n return parser.parse_args(argv)\n\n\ndef main(argv):\n args = parse_args(argv)\n\n all_job_dfs = []\n for job_script in args.job_scripts:\n\n assert os.path.isfile(job_script), 'file ' + job_script + ' does not exist'\n\n if args.job_type is None: # infer from file name\n if 'fit' in job_script:\n args.job_type = 'fit'\n elif 'train' in job_script:\n args.job_type = 'train'\n\n # determine relevant files based on job type\n if args.job_type == 'train':\n job_script_pat = re.compile(r'(.*)_train.sh')\n output_ext = 'training_output'\n copy_back_exts = [\n 'model', 'solver','caffemodel', 'solverstate', 'training_output', 'png', 'pdf'\n ]\n elif args.job_type == 'fit':\n job_script_pat = re.compile(r'(.*)_fit.sh')\n output_ext = 'gen_metrics'\n copy_back_exts = [\n 'types', 'model', 'caffemodel', 'dx', 'sdf', 'channels', 'latent', 'pymol', 'gen_metrics'\n ]\n\n # for array jobs, get output for any array indices present\n if args.array_job:\n stderr_pat = re.compile(r'slurm-(\\d+)_(\\d+)\\.err$')\n output_pat = re.compile(r'(.*)_(\\d+)\\.' + output_ext + '$')\n copy_back_pat = re.compile(r'(.*)_(\\d+)\\.' + '({})$'.format('|'.join(copy_back_exts)))\n else:\n stderr_pat = re.compile(r'slurm-(\\d+)\\.err$')\n output_pat = re.compile(r'(.*)\\.' + output_ext + '$')\n copy_back_pat = re.compile(r'(.*)\\.' 
+ '({})$'.format('|'.join(copy_back_exts)))\n\n print(job_script)\n job_dir = os.path.dirname(job_script)\n\n if args.array_job:\n\n if args.submitted is not None:\n submitted = parse_array_indices_str(args.submitted)\n job_ids = find_job_ids(job_dir, stderr_pat)\n else:\n submitted, job_ids = find_submitted_array_indices(job_dir, stderr_pat)\n n_submitted = len(submitted)\n\n if n_submitted == 0:\n print('none submitted')\n continue\n\n completed, job_dfs = find_completed_array_indices(job_dir, output_pat, read=args.output_file)\n n_completed = len(completed)\n\n if args.output_file:\n all_job_dfs.extend(job_dfs)\n\n incomplete = submitted - completed\n n_incomplete = len(incomplete)\n\n if args.print_indices:\n print('n_submitted = {} ({})'.format(n_submitted, get_array_indices_string(submitted)))\n print('n_completed = {} ({})'.format(n_completed, get_array_indices_string(completed)))\n print('n_incomplete = {} ({})'.format(n_incomplete, get_array_indices_string(incomplete)))\n else:\n print('n_submitted = {}'.format(n_submitted))\n print('n_completed = {}'.format(n_completed))\n print('n_incomplete = {}'.format(n_incomplete))\n\n if args.print_errors:\n print_errors_for_array_indices(job_dir, stderr_pat, indices=incomplete)\n\n if args.copy_back:\n\n last_job_id = sorted(job_ids)[-1]\n scr_dir = os.path.join(job_dir, str(last_job_id))\n\n copied = copy_back_from_scr_dir(job_dir, scr_dir, copy_back_pat)\n n_copied = len(copied)\n print('copied {} files from {}'.format(n_copied, last_job_id))\n\n if args.resub_errors: # resubmit incomplete jobs\n\n for m in match_files_in_dir(job_dir, job_script_pat):\n job_script = os.path.join(job_dir, m.group(0))\n SlurmQueue.submit_job(\n job_script,\n work_dir=job_dir,\n array_idx=get_array_indices_string(incomplete)\n )\n\n else:\n job_ids = find_job_ids(job_dir, stderr_pat)\n\n if args.output_file:\n job_dfs = read_job_output(job_dir, output_pat)\n all_job_dfs.extend(job_dfs)\n\n if args.print_errors:\n print_last_error(job_dir, stderr_pat)\n\n if args.copy_back:\n\n for last_job_id in sorted(job_ids):\n scr_dir = os.path.join(job_dir, str(last_job_id))\n copied = copy_back_from_scr_dir(job_dir, scr_dir, copy_back_pat)\n n_copied = len(copied)\n print('copied {} files from {}'.format(n_copied, last_job_id))\n\n if args.output_file:\n if all_job_dfs:\n job_df = pd.concat(all_job_dfs)\n pd.set_option('display.max_columns', 100)\n print(job_df.groupby('job_name').mean())\n job_df.to_csv(args.output_file, sep=' ')\n print('concatenated metrics to {}'.format(args.output_file))\n else:\n print('nothing to concatenate')\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n","sub_path":"param_search/job_output.py","file_name":"job_output.py","file_ext":"py","file_size_in_byte":13074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"346390127","text":"import re, math, time\nfrom collections import Counter\nfrom functools import reduce\n\n\nD1 = \"C:\\\\Users\\\\Администратор\\\\Desktop\\\\1.txt\"\nD2 = \"C:\\\\Users\\\\Администратор\\\\Desktop\\\\2.txt\"\n\n\ndef get_vector_length(doc):\n with open(doc, 'r') as file:\n doc_words = re.findall(r'\\w+', file.read())\n words_freq = Counter(doc_words)\n\n length = 0\n for word in dict(words_freq):\n length += words_freq[word] ** 2\n return (words_freq, length)\n\nfreq1, doc1_length = get_vector_length(D1)\nfreq2, doc2_length = get_vector_length(D2)\n\nsame_w = dict(freq1 & freq2)\nD1D2 = reduce(int.__add__, [freq1[word]*freq2[word] for word in 
same_w])\n\ndistance = math.acos(D1D2 / math.sqrt(doc1_length * doc2_length))\nprint(distance)\n\n","sub_path":"Problems/DocumentDistanceProblem.py","file_name":"DocumentDistanceProblem.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"215337327","text":"\"\"\"\nThis ATS handles all incoming non-Adaptiv SWIFT messages.\n\nHistory\n=======\n2017-12-11 CHNG0005220511 Manan Gosh\t DIS go-live\n2018-05-11 CHG1000406751 Willie vd Bank Made changes for securities processing\n2018-10-11 Sadanand Upase Seperated the handling for MT54X irrespecive of ISIN\n2020-03-13 FAOPS-774 Cuen Edwards Reduce durations of built-in delays (sleeps).\n2020-03-18 FAOPS-777 Cuen Edwards Complete removal of built-in delay. Added safeguard to allow running\n of multiple ATSes. Update of RTB email address.\n2020-05-11 FAOPS-746 Cuen Edwards Addition of ACK/NACK handler for SARB security transfer MT199s and\n start of a refactor.\n\"\"\"\n\n# FIXME - This should not be necessary.\nimport sys\nsys.path.append(r'/front/arena/apps/lib64/pythonextensionlib27/pymqi')\n\nimport acm\nfrom at_email import EmailHelper\nimport at_logging\nimport CMQC\nimport datetime\nimport demat_config\nimport demat_isin_mgmt\nimport demat_settmnt_mgmt\nimport demat_trade_mgmt\nimport dis_isin_mgmt\nimport gen_mq\nimport gen_swift_functions\nimport os\nfrom pymqi import MQMIError\nimport re\nfrom SARBSecurityTransferInstructionProcessor import SARBSecurityTransferInstructionProcessor\nimport security_settlements\nimport shutil\nimport time\nimport traceback\nimport uuid\n\n\nLOGGER = at_logging.getLogger(__name__)\nLOGGING_FORMAT = '%(asctime)s,%(msecs)03d %(levelname)s %(message)s'\nLOGGING_DATE_FORMAT = '%y%m%d %H%M%S'\nat_logging.setFormat(LOGGING_FORMAT, LOGGING_DATE_FORMAT)\n\n# Refactor - config should not be DEMAT specific!\nCONFIG = demat_config.get_config()\n\nTO_PROCESS_DIRECTORY_PATH = CONFIG['swift_msg_to_process_dir']\nPROCESSED_DIRECTORY_PATH = CONFIG['swift_msg_processed_dir']\nERROR_DIRECTORY_PATH = CONFIG['swift_msg_error_dir']\nMANUAL_MESSAGE_DIRECTORY_PATH = CONFIG['swift_msg_to_process_manually_dir']\n\nMQ_MESSENGER = None\n\n\nclass MTMessage(object):\n \"\"\"\n An object encapsulating information about a received MT Message.\n \"\"\"\n\n def __init__(self, message_file_path):\n \"\"\"\n Constructor.\n \"\"\"\n self.message_file_path = message_file_path\n self.message_file_name = os.path.basename(message_file_path)\n self.message = _read_file_contents(message_file_path)\n self.message_function = gen_swift_functions.get_msg_function(self.message)\n self.message_type = gen_swift_functions.get_msg_type(self.message)\n\n\ndef start():\n \"\"\"\n Start hook for module-mode ATS.\n\n This hook is called when a module-mode ATS is started and is used\n to perform any start-up actions (e.g. connecting to an AMB, etc.).\n If the start hook returns False, then the ATS will shutdown.\n \"\"\"\n LOGGER.info('Start called at {date_time}.'.format(date_time=datetime.datetime.today()))\n _connect_to_incoming_mq()\n _perform_demat_initialisation()\n _perform_dis_initialisation()\n\n\ndef work():\n \"\"\"\n Work hook for module-mode ATS.\n\n This hook is called continuously after a module-mode ATS has been\n started and can be used to perform any periodic work. 
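As a self-contained numeric check of the cosine-distance computation in DocumentDistanceProblem.py above (toy word counts, not real documents):

    import math
    v1 = {'a': 2, 'b': 1}
    v2 = {'a': 1, 'b': 1, 'c': 1}
    dot = sum(v1[w] * v2[w] for w in v1.keys() & v2.keys())   # 2*1 + 1*1 = 3
    len1_sq = sum(c * c for c in v1.values())                 # 5 (squared length)
    len2_sq = sum(c * c for c in v2.values())                 # 3 (squared length)
    angle = math.acos(dot / math.sqrt(len1_sq * len2_sq))     # ~0.685 rad

Note that the script's `length` variables hold squared vector lengths, which is why a single `math.sqrt` of their product is enough in the final formula.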
It is\n    called at most about 10 times per second when idle.\n    \"\"\"\n    mt_message = _get_next_mt_message()\n    while mt_message is not None:\n        try:\n            _process_mt_message(mt_message)\n            _move_mt_message_file_to_processed_directory(mt_message)\n        except Exception as exception:\n            LOGGER.exception(exception)\n            _move_mt_message_file_to_error_directory(mt_message)\n        mt_message = _get_next_mt_message()\n\n\ndef stop():\n    \"\"\"\n    Stop hook for module-mode ATS.\n\n    This hook is called when a module-mode ATS is stopped and is used\n    to perform any shutdown actions (e.g. disconnecting from an AMB,\n    etc.).\n    \"\"\"\n    LOGGER.info('Stop called at {date_time}.'.format(date_time=datetime.datetime.today()))\n    _disconnect_from_incoming_mq()\n    dis_isin_mgmt.deinitialize()\n\n\ndef status():\n    \"\"\"\n    Status hook for module-mode ATS.\n\n    This hook is called to retrieve the status of a module-mode ATS.\n    \"\"\"\n    return\n\n\ndef _get_next_mt_message():\n    \"\"\"\n    Get the next MT Message to process (if any).\n\n    If there is no next MT Message to process, None is returned.\n    \"\"\"\n    next_message_file_path = _get_next_message_file_path()\n    if next_message_file_path is None:\n        return None\n    return MTMessage(next_message_file_path)\n\n\ndef _get_next_message_file_path():\n    \"\"\"\n    Get the file path of the next message file to process (if any).\n\n    If there is no next message file to process, None is returned.\n    \"\"\"\n    # Look for any manual (file) message.\n    manual_message_file_path = _get_next_manual_message_file_path()\n    if manual_message_file_path is not None:\n        # Manual message file found.\n        return manual_message_file_path\n    # No manual message found - look for any queue message.\n    queue_message = _get_next_queue_message()\n    if queue_message is not None:\n        # Queue message found.\n        return _write_queue_message_to_to_process_directory(queue_message)\n    # No next message to process.\n    return None\n\n\ndef _get_next_manual_message_file_path():\n    \"\"\"\n    Get the file path of the next manual message file to process (if\n    any).\n\n    If there is no next manual message file to process, None is\n    returned.\n    \"\"\"\n    for file_name in os.listdir(MANUAL_MESSAGE_DIRECTORY_PATH):\n        file_path = os.path.join(MANUAL_MESSAGE_DIRECTORY_PATH, file_name)\n        if os.path.isdir(file_path):\n            continue\n        # A race condition exists if multiple ATS instances are running.\n        # More than one ATS may detect and attempt to process a file.  To\n        # avoid this we attempt to rely on the semantics of the shutil\n        # move operation (that is apparently atomic for moves on the same\n        # file system).  
If the move succeeds, we can assume that this\n        # instance of the ATS is the sole processor of the file.\n        while os.path.exists(file_path):\n            try:\n                return _move_manual_message_file_to_to_process_directory(file_path)\n            except OSError:\n                # another instance claimed the file first - wait and re-check\n                time.sleep(0.2)\n    return None\n\n\ndef _get_next_queue_message():\n    \"\"\"\n    Get the next queue message to process (if any).\n\n    If there is no next queue message to process, None is returned.\n    \"\"\"\n    try:\n        return MQ_MESSENGER.Get()\n    except MQMIError as exception:\n        if exception.comp == CMQC.MQCC_FAILED and exception.reason == CMQC.MQRC_NO_MSG_AVAILABLE:\n            # Queue is empty.\n            return None\n        else:\n            # Some other MQ error.\n            _send_mq_error_mail()\n            raise\n\n\ndef _move_manual_message_file_to_to_process_directory(manual_message_file_path):\n    \"\"\"\n    Move the specified manual message file to the to_be_processed\n    directory and return the path of the message file to be\n    processed.\n    \"\"\"\n    manual_message_file_name = os.path.basename(manual_message_file_path)\n    destination_file_path = TO_PROCESS_DIRECTORY_PATH\n    destination_file_name = _generate_message_file_name()\n    to_process_message_file_path = _move_file(manual_message_file_path, destination_file_path, destination_file_name)\n    message = \"Manual message file '{manual_message_file_name}' received and written to \"\n    message += \"'{to_process_message_file_path}'.\"\n    LOGGER.info(message.format(\n        manual_message_file_name=manual_message_file_name,\n        to_process_message_file_path=to_process_message_file_path\n    ))\n    return to_process_message_file_path\n\n\ndef _write_queue_message_to_to_process_directory(queue_message):\n    \"\"\"\n    Write the specified queue message to the to_be_processed directory and\n    return the path of the message file to be processed.\n    \"\"\"\n    destination_file_path = TO_PROCESS_DIRECTORY_PATH\n    destination_file_name = _generate_message_file_name()\n    to_process_message_file_path = _write_to_file(queue_message, destination_file_path, destination_file_name)\n    LOGGER.info(\"Queue message received and written to '{to_process_message_file_path}'.\".format(\n        to_process_message_file_path=to_process_message_file_path\n    ))\n    return to_process_message_file_path\n\n\ndef _generate_message_file_name():\n    \"\"\"\n    Generate a unique file name for a message file.\n    \"\"\"\n    return str(uuid.uuid4()) + '.txt'\n\n\ndef _process_mt_message(mt_message):\n    \"\"\"\n    Process the specified MT Message.\n    \"\"\"\n    start_date_time = datetime.datetime.today()\n    message_function = mt_message.message_function\n    if message_function == 'F01':\n        _process_fin_mt_message(mt_message)\n    elif message_function == 'F21':\n        _process_ack_nack_mt_message(mt_message)\n    else:\n        raise ValueError(\"Invalid message function '{message_function}' specified.\".format(\n            message_function=message_function\n        ))\n    end_date_time = datetime.datetime.today()\n    duration = end_date_time - start_date_time\n    LOGGER.info('Processed in: {duration}'.format(duration=duration))\n\n\ndef _move_mt_message_file_to_processed_directory(mt_message):\n    \"\"\"\n    Move the file represented by the specified MT Message to the\n    processed directory.\n    \"\"\"\n    message_file_path = mt_message.message_file_path\n    message_file_name = mt_message.message_file_name\n    message_function = mt_message.message_function\n    message_type = mt_message.message_type\n    processed_directory_path = os.path.join(PROCESSED_DIRECTORY_PATH, message_function, message_type)\n    _move_file(message_file_path, processed_directory_path)\n    message = \"Message file '{message_file_name}' moved to processed 
directory \"\n message += \"'{processed_directory_path}'.\"\n LOGGER.info(message.format(\n message_file_name=message_file_name,\n processed_directory_path=processed_directory_path\n ))\n\n\ndef _move_mt_message_file_to_error_directory(mt_message):\n \"\"\"\n Move the file represented by the specified MT Message to the\n error directory.\n \"\"\"\n message_file_path = mt_message.message_file_path\n message_file_name = mt_message.message_file_name\n error_directory_path = ERROR_DIRECTORY_PATH\n _move_file(message_file_path, error_directory_path)\n message = \"Message file '{message_file_name}' moved to error directory \"\n message += \"'{error_directory_path}'.\"\n LOGGER.info(message.format(\n message_file_name=message_file_name,\n error_directory_path=error_directory_path\n ))\n\n\ndef _move_file(source_file_path, destination_directory_path, destination_file_name=None):\n \"\"\"\n Move the file identified by the source file path to the\n specified destination directory path, optionally renaming the\n file if a destination file name is specified.\n \"\"\"\n if not os.path.exists(destination_directory_path):\n os.makedirs(destination_directory_path)\n if destination_file_name is None:\n destination_file_name = os.path.basename(source_file_path)\n destination_file_path = os.path.join(destination_directory_path, destination_file_name)\n shutil.move(source_file_path, destination_file_path)\n return destination_file_path\n\n\ndef _write_to_file(file_contents, destination_directory_path, destination_file_name):\n \"\"\"\n Write the file contents to a file with the specifed destination\n name in the specified destination directory.\n \"\"\"\n if not os.path.exists(destination_directory_path):\n os.makedirs(destination_directory_path)\n destination_file_path = os.path.join(destination_directory_path, destination_file_name)\n with open(destination_file_path, 'w') as destination_file:\n destination_file.write(file_contents)\n return destination_file_path\n\n\ndef _read_file_contents(read_file_path):\n \"\"\"\n Read the contents of the file at the specified file path.\n \"\"\"\n with open(read_file_path, 'rt') as read_file:\n return read_file.read()\n\n\ndef _process_fin_mt_message(mt_message):\n \"\"\"\n Process the specified FIN MT Message.\n \"\"\"\n message_type = mt_message.message_type\n LOGGER.info('MT{message_type} message received.'.format(\n message_type=message_type\n ))\n message = mt_message.message\n # FIXME - The way messages are routed to handlers needs serious cleanup!\n # Because 901/902/903 must be handled differently for trade_mgmt messages\n if _is_demat_trade_mgmt_message(message) and message_type in ['598-901', '598-902', '598-903']:\n demat_trade_mgmt.trade_mgmt_incoming(message, message_type)\n # Separating the handling for MT54X incoming messages irrespective of ISIN\n elif message_type in ['544', '545', '546', '547', '548']:\n demat_func_for_msgtype[message_type](message, message_type)\n else:\n isin = gen_swift_functions.get_isin(message)\n if isin and isin[0:3] == 'ZAG':\n message_file_name = mt_message.message_file_name\n dis_func_for_msgtype[message_type](message, message_type, message_file_name)\n else:\n demat_func_for_msgtype[message_type](message, message_type)\n\n\ndef _process_ack_nack_mt_message(mt_message):\n \"\"\"\n Process the specified ACK/NACK MT Message.\n \"\"\"\n message_type = mt_message.message_type\n LOGGER.info('ACK/NACK message received for MT{message_type}.'.format(\n message_type=message_type\n ))\n message = mt_message.message\n # FIXME - The way 
messages are routed to handlers needs serious cleanup!\n if message_type in list(demat_ack_func_for_msgtype.keys()):\n demat_ack_func_for_msgtype[message_type](message)\n elif SARBSecurityTransferInstructionProcessor.is_handled_swift_ack_nack(message):\n SARBSecurityTransferInstructionProcessor.handle_swift_ack_nack(message)\n else:\n LOGGER.warning(\"Message not handled.\")\n\n\ndef _is_demat_trade_mgmt_message(message):\n \"\"\"\n Determine whether or not the specified message is a DEMAT trade\n management message\n \"\"\"\n try:\n return demat_trade_mgmt.is_trade_message(message)\n except:\n return False\n\n\ndef _mt548_processing(message, message_type):\n \"\"\"\n Perform processing of MT548 messages.\n \"\"\"\n FAS_548 = False\n try:\n msg_RELA_ref = gen_swift_functions.get_trans_ref_from_tag(':20C::RELA//', message)\n if 'FAS' in msg_RELA_ref[0] or re.search(r\"^[0-9]{10}$\", msg_RELA_ref[0]):\n security_settlements.process_incoming(message, message_type)\n FAS_548 = True\n except Exception as exception:\n LOGGER.exception(exception)\n\n if not FAS_548:\n demat_trade_mgmt.trade_mgmt_incoming(message, message_type)\n\n\ndef _perform_demat_initialisation():\n \"\"\"\n Perform any DEMAT initialisation.\n \"\"\"\n global demat_func_for_msgtype\n demat_func_for_msgtype = dict()\n demat_func_for_msgtype['598-154'] = demat_isin_mgmt.isin_mgmt_incoming\n demat_func_for_msgtype['598-901'] = demat_isin_mgmt.isin_mgmt_incoming # Format Rejection - ISIN Mgmt\n demat_func_for_msgtype['598-902'] = demat_isin_mgmt.isin_mgmt_incoming # Invalid Content - ISIN Mgmt\n demat_func_for_msgtype['598-171'] = demat_trade_mgmt.trade_mgmt_incoming\n demat_func_for_msgtype['564'] = demat_trade_mgmt.trade_mgmt_incoming\n demat_func_for_msgtype['566'] = demat_trade_mgmt.trade_mgmt_incoming\n demat_func_for_msgtype['298-128'] = demat_settmnt_mgmt.settmnt_mgmt_incoming\n demat_func_for_msgtype['548'] = _mt548_processing\n demat_func_for_msgtype['544'] = security_settlements.process_incoming\n demat_func_for_msgtype['545'] = security_settlements.process_incoming\n demat_func_for_msgtype['546'] = security_settlements.process_incoming\n demat_func_for_msgtype['547'] = security_settlements.process_incoming\n global demat_ack_func_for_msgtype\n demat_ack_func_for_msgtype = dict()\n demat_ack_func_for_msgtype['598-155'] = demat_isin_mgmt.process_ack_nack\n\n\ndef _perform_dis_initialisation():\n \"\"\"\n Perform any DIS initialisation.\n \"\"\"\n dis_isin_mgmt.initialize()\n global dis_func_for_msgtype\n dis_func_for_msgtype = dict()\n dis_func_for_msgtype['598-154'] = dis_isin_mgmt.isin_mgmt_incoming\n dis_func_for_msgtype['564'] = dis_isin_mgmt.trade_mgmt_incoming\n global dis_ack_func_for_msgtype\n dis_ack_func_for_msgtype = dict()\n dis_ack_func_for_msgtype['598-155'] = dis_isin_mgmt.process_ack_nack\n\n\ndef _connect_to_incoming_mq():\n \"\"\"\n Connect to the incoming MQ.\n \"\"\"\n global MQ_MESSENGER\n if MQ_MESSENGER is None:\n MQ_MESSENGER = gen_mq.MqMessenger('MeridianInCustMq', True)\n LOGGER.info('Connected to incoming MQ.')\n\n\ndef _disconnect_from_incoming_mq():\n \"\"\"\n Disconnect to the incoming MQ.\n \"\"\"\n if MQ_MESSENGER is not None:\n MQ_MESSENGER.Disconnect()\n LOGGER.info('Disconnected from incoming MQ')\n\n\ndef _send_mq_error_mail():\n \"\"\"\n Send email notification for MQ exception.\n \"\"\"\n email_from = 'ABCapITRTBAMFrontAre@absa.africa'\n email_to = CONFIG['mq_connection_notifications']\n environment = acm.FInstallationData.Select('').At(0).Name()\n email_subject = 'Front Arena 
Incoming SWIFT ATS MQ Failure - ' + environment\n email_body = traceback.format_exc()\n email_helper = EmailHelper(body=email_body, subject=email_subject, mail_to=email_to,\n mail_from=email_from, body_type=EmailHelper.BODY_TYPE_HTML, sender_type=EmailHelper\n .SENDER_TYPE_SMTP, host=EmailHelper.get_acm_host())\n email_helper.send()\n","sub_path":"Python modules/mq_swift_incoming_ats.py","file_name":"mq_swift_incoming_ats.py","file_ext":"py","file_size_in_byte":17507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"163279349","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import datasets, linear_model\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\ndf1 = pd.read_csv('DataTrain_new.csv')\ndf2 = pd.read_csv('DataTest_new.csv')\n\nX = df1.drop(['SalePrice'], axis=1)\ny = df1['SalePrice']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size = 0.4)\n\nregr = linear_model.Lasso(alpha=0.5)\nregr.fit(X_train, y_train)\ny_predicted = regr.predict(X_test)\n\nresult = regr.score(X_test, y_test)\nprint(\"Accuracy: %.2f%%\" % (result*100.0))\n\nmae = metrics.mean_absolute_error(y_test, y_predicted)\nmse = metrics.mean_squared_error(y_test, y_predicted)\nr2 = metrics.r2_score(y_test, y_predicted)\n\nprint(\"The model performance for testing set\")\nprint(\"--------------------------------------\")\nprint('MAE is {}'.format(mae))\nprint('MSE is {}'.format(mse))\nprint('R2 score is {}'.format(r2))\n\nfig, ax = plt.subplots()\nfig.suptitle('LASSO Regression', fontsize=16)\nax.scatter(y_predicted, y_test, edgecolors=(0, 0, 1))\nax.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--', lw=3)\nax.set_xlabel('Predicted')\nax.set_ylabel('Actual')\nplt.show()","sub_path":"LASSO Regression.py","file_name":"LASSO Regression.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"602114413","text":"from random import choice, randint\n\n#question 1\nx = randint(-100, 100)\nwhile x == 0: # make sure x isnt 0\n x = randint(-100, 100)\n\ny = randint(-100, 100)\nwhile y == 0: # make sure y isnt 0\n y = randint(-100, 100)\nprint(x)\nprint(y)\n\nif x > 0 and y > 0:\n print(\"both positive numbers\")\nelif x > 0 and y < 0:\n print(\"x is positive y is negative\")\nelif x < 0 and y > 0:\n print(\"x is negative y is positive\")\nelse:\n print(\"both are negative\")\n \n#question 2 - set to true if call in sick if you are sick and have sick days,\n# you are kinda sick and hate your job and sick days remaining\n\n#randomly assign values to these variables\nactually_sick = choice([True, False])\nkinda_sick = choice([True, False])\nhate_your_job = choice([True, False])\nsick_days = randint(0, 10)\n\ncalling_in_sick = None # set to True or False\ncalling_in_sick = False\n\nif actually_sick == True and sick_days < 10:\n calling_in_sick = True\nelif kinda_sick == True and hate_your_job == True and sick_days < 10:\n calling_in_sick = True\nelse:\n calling_in_sick = False\n \nprint(\"calling_in_sick {}\" .format(calling_in_sick))\n","sub_path":"Python/Python3-Course/CourseExamples/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"155328147","text":"\"\"\"Create Molecule Approved Field\n\nRevision ID: 
e3823b99982d\nRevises: 6276ee82945c\nCreate Date: 2016-03-17 19:59:48.526628\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'e3823b99982d'\ndown_revision = '6276ee82945c'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.add_column('Molecules', sa.Column('Approved', sa.Boolean(True)))\n\n\ndef downgrade():\n op.drop_column(\"Molecules\", \"Approved\")\n","sub_path":"alembic/versions/e3823b99982d_create_molecule_approved_field.py","file_name":"e3823b99982d_create_molecule_approved_field.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"609586590","text":"import math\nimport Materials\nimport Paraxial\nimport Meridional\nfrom Prepare import FAR_L\n\n\ndef first_para():\n\n name = 'first_para'\n\n if name not in Materials.lights:\n light = {'L': Materials.lens[0]['d'],\n 'U': math.degrees(math.atan(Materials.stops[0]['r'] / Materials.lens[0]['d']))}\n Materials.lights[name] = [light]\n Paraxial.paraxial(Materials.lens, Materials.lights[name])\n\n if 'ideal spot' not in Materials.basic:\n if 'first_para' in Materials.lights:\n Materials.basic['ideal spot'] = Materials.lights['first_para'][-1]['L']\n\n print(Materials.lights[name])\n\n return Materials.lights[name]\n\n\ndef second_para():\n\n name = 'lp'\n\n if name not in Materials.lights:\n if 'w' in Materials.obj:\n light = {'L': 0, 'U': Materials.obj['w']}\n else:\n light = {'L': 0,\n 'U': math.degrees(math.atan(Materials.obj['r'] / Materials.lens[0]['d']))}\n\n Materials.lights[name] = [light]\n Paraxial.paraxial(Materials.lens, Materials.lights[name])\n\n Materials.basic[name] = Materials.lights[name][-1]['L']\n\n\ndef meri_on():\n K1 = Materials.K1\n K2 = Materials.K2\n\n if Materials.lens[0]['d'] < FAR_L:\n\n name = 'infi_on_' + str(K1) + '_' + str(K2) + Materials.extend\n\n if name not in Materials.lights:\n h1 = K1 * Materials.stops[0]['r']\n sin_i = h1 / Materials.lens[0]['r']\n sin_i_pie = sin_i / Materials.lens[0]['n']\n I = math.degrees(math.asin(sin_i))\n Ipie = math.degrees(math.asin(sin_i_pie))\n U = I - Ipie\n L = Materials.lens[0]['r'] + Materials.lens[0]['r'] * sin_i_pie / \\\n math.sin(math.radians(U)) + Materials.lens[1]['d']\n\n Materials.lights[name] = [{'L': L, 'U': U}]\n Meridional.meridional(Materials.lens, Materials.lights[name], 1)\n else:\n\n name = 'limi_on_' + str(K1) + '_' + str(K2) + Materials.extend\n\n if name not in Materials.lights:\n Umax = math.atan(Materials.stops[0]['r'] / Materials.lens[0]['d'])\n L = Materials.lens[0]['d']\n sinU = K1 * math.sin(Umax)\n U = math.degrees(math.asin(sinU))\n Materials.lights[name] = [{'L': L, 'U': U}]\n\n Meridional.meridional(Materials.lens, Materials.lights[name])\n\n return Materials.lights[name]\n\n\ndef meri_off():\n K1 = Materials.K1\n K2 = Materials.K2\n\n if Materials.lens[0]['d'] < FAR_L:\n name = 'infi_off_' + str(K1) + '_' + str(K2) + Materials.extend\n\n if name not in Materials.lights:\n U = K2 * Materials.obj['w']\n L = Materials.stops[0]['d'] + K1 * Materials.stops[0]['r'] / (math.tan(math.radians(U)))\n Materials.lights[name] = [{'L': L, 'U': U}]\n\n else:\n name = 'limi_off_' + str(K1) + '_' + str(K2) + Materials.extend\n\n if name not in Materials.lights:\n ymax = Materials.obj['r']\n\n tanU = (K2 * ymax - K1 * Materials.stops[0]['r']) / (Materials.stops[0]['d'] - Materials.lens[0]['d'])\n L = Materials.stops[0]['d'] + K1 * Materials.stops[0]['r'] / tanU\n U = 
math.degrees(math.atan(tanU))\n Materials.lights[name] = [{'L': L, 'U': U}]\n\n Meridional.meridional(Materials.lens, Materials.lights[name])\n\n return Materials.lights[name]\n\n\ndef height(y=0, w=0):\n if 'height' not in Materials.basic:\n if y == 0 and w == 0:\n y = Materials.obj['r']\n w = math.radians(Materials.obj['w'])\n\n Materials.basic['height'] = Paraxial.height(Materials.lens, y, w)\n\n return Paraxial.height(Materials.lens, y, w)\n\n\ndef focal():\n if 'focal' not in Materials.basic:\n Materials.basic['focal'] = Paraxial.focal(Materials.lens)\n\n return Materials.basic['focal']\n\n\ndef all_parameters():\n first_para()\n second_para()\n height()\n focal()","sub_path":"Calculate.py","file_name":"Calculate.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"97563382","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport pickle\nimport peakutils\nfrom scipy import signal\nimport random\nimport argparse\nimport os\nimport time\nimport math\nimport multiprocessing as mp\nimport json\nimport alphatims.bruker\nimport sqlite3\nimport configparser\nfrom configparser import ExtendedInterpolation\n\n\nclass FixedDict(object):\n def __init__(self, dictionary):\n self._dictionary = dictionary\n def __setitem__(self, key, item):\n if key not in self._dictionary:\n raise KeyError(\"The key {} is not defined.\".format(key))\n self._dictionary[key] = item\n def __getitem__(self, key):\n return self._dictionary[key]\n def get_dict(self):\n return self._dictionary\n\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)\n\n# load the ms1 frame ids\ndef load_ms1_frame_ids(raw_db_name):\n db_conn = sqlite3.connect('{}/analysis.tdf'.format(raw_db_name))\n ms1_frame_properties_df = pd.read_sql_query(\"select Id,Time from Frames where MsMsType == {} order by Time\".format(FRAME_TYPE_MS1), db_conn)\n db_conn.close()\n return ms1_frame_properties_df\n\n# get the ms1 frame ids with a range of RT as a tuple\ndef get_ms1_frame_ids(rt_lower, rt_upper):\n df = ms1_frame_properties_df[(ms1_frame_properties_df.Time >= rt_lower) & (ms1_frame_properties_df.Time <= rt_upper)]\n ms1_frame_ids = tuple(df.Id)\n return ms1_frame_ids\n\ndef calculate_monoisotopic_mass_from_mz(monoisotopic_mz, charge):\n monoisotopic_mass = (monoisotopic_mz * charge) - (PROTON_MASS * charge)\n return monoisotopic_mass\n\n# takes a numpy array of intensity, and another of mz\ndef mz_centroid(_int_f, _mz_f):\n try:\n return ((_int_f/_int_f.sum()) * _mz_f).sum()\n except:\n print(\"exception in mz_centroid\")\n return None\n\n# calculate the r-squared value of series_2 against series_1, where series_1 is the original data (source: https://stackoverflow.com/a/37899817/1184799)\ndef calculate_r_squared(series_1, series_2):\n residuals = series_1 - series_2\n ss_res = np.sum(residuals**2)\n ss_tot = np.sum((series_1 - np.mean(series_1))**2)\n if ss_tot != 0:\n r_squared = 1 - (ss_res / ss_tot)\n else:\n r_squared = None\n return r_squared\n\ndef estimate_target_coordinates(row_as_series, mz_estimator, scan_estimator, rt_estimator):\n sequence_estimation_attribs_s = 
row_as_series[['theoretical_mz','experiment_rt_mean','experiment_rt_std_dev','experiment_scan_mean','experiment_scan_std_dev','experiment_intensity_mean','experiment_intensity_std_dev']]\n sequence_estimation_attribs = np.reshape(sequence_estimation_attribs_s.values, (1, -1)) # make it 2D\n\n # estimate the raw monoisotopic m/z\n mz_delta_ppm_estimated = mz_estimator.predict(sequence_estimation_attribs)[0]\n theoretical_mz = sequence_estimation_attribs_s.theoretical_mz\n estimated_monoisotopic_mz = (mz_delta_ppm_estimated / 1e6 * theoretical_mz) + theoretical_mz\n\n # estimate the raw monoisotopic scan\n estimated_scan_delta = scan_estimator.predict(sequence_estimation_attribs)[0]\n experiment_scan_mean = sequence_estimation_attribs_s.experiment_scan_mean\n estimated_scan_apex = (estimated_scan_delta * experiment_scan_mean) + experiment_scan_mean\n\n # estimate the raw monoisotopic RT\n estimated_rt_delta = rt_estimator.predict(sequence_estimation_attribs)[0]\n experiment_rt_mean = sequence_estimation_attribs_s.experiment_rt_mean\n estimated_rt_apex = (estimated_rt_delta * experiment_rt_mean) + experiment_rt_mean\n\n return {\"mono_mz\":estimated_monoisotopic_mz, \"scan_apex\":estimated_scan_apex, \"rt_apex\":estimated_rt_apex}\n\ndef get_decoy_coordinates(target_mz, target_scan, peak_width_scan, target_rt, peak_width_rt):\n # calculate decoy mz\n mz_base_offset_ppm = (1 if random.random() < 0.5 else -1) * 100 # +/- offset of 100 ppm\n mz_random_delta_ppm = random.randint(-20, +20) # random delta ppm between -20 and +20\n mz_offset_ppm = mz_base_offset_ppm + mz_random_delta_ppm\n decoy_mz = (mz_offset_ppm / 1e6 * target_mz) + target_mz\n # calculate decoy scan\n scan_base_offset = (1 if random.random() < 0.5 else -1) * 2 * peak_width_scan # +/- 2 peak widths\n scan_random_delta = random.randint(-10, +10)\n scan_offset = scan_base_offset + scan_random_delta\n decoy_scan = target_scan + scan_offset\n # calculate decoy RT\n rt_base_offset = (1 if random.random() < 0.5 else -1) * 2 * peak_width_rt # +/- 2 peak widths\n rt_random_delta = random.randint(-10, +10)\n rt_offset = rt_base_offset + rt_random_delta\n decoy_rt = target_rt + rt_offset\n return (decoy_mz, decoy_scan, decoy_rt)\n\ndef calculate_decoy_coordinates(row_as_series):\n peak_width_scan = row_as_series.experiment_scan_peak_width\n peak_width_rt = row_as_series.experiment_rt_peak_width\n estimated_monoisotopic_mz = row_as_series.target_coords['mono_mz']\n estimated_scan_apex = row_as_series.target_coords['scan_apex']\n estimated_rt_apex = row_as_series.target_coords['rt_apex']\n\n (decoy_mz, decoy_scan, decoy_rt) = get_decoy_coordinates(estimated_monoisotopic_mz, estimated_scan_apex, peak_width_scan, estimated_rt_apex, peak_width_rt)\n return {\"mono_mz\":decoy_mz, \"scan_apex\":decoy_scan, \"rt_apex\":decoy_rt}\n\n# Find the ratio of H(peak_number)/H(peak_number-1) for peak_number=1..6\n# peak_number = 0 refers to the monoisotopic peak\n# number_of_sulphur = number of sulphur atoms in the molecule\n#\n# source: Valkenborg et al, \"A Model-Based Method for the Prediction of the Isotopic Distribution of Peptides\", https://core.ac.uk/download/pdf/82021511.pdf\ndef peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur):\n MAX_NUMBER_OF_SULPHUR_ATOMS = 3\n MAX_NUMBER_OF_PREDICTED_RATIOS = 6\n\n S0_r = np.empty(MAX_NUMBER_OF_PREDICTED_RATIOS+1, dtype=np.ndarray)\n S0_r[1] = np.array([-0.00142320578040, 0.53158267080224, 0.00572776591574, -0.00040226083326, -0.00007968737684])\n S0_r[2] = np.array([0.06258138406507, 
0.24252967352808, 0.01729736525102, -0.00427641490976, 0.00038011211412])\n S0_r[3] = np.array([0.03092092306220, 0.22353930450345, -0.02630395501009, 0.00728183023772, -0.00073155573939])\n S0_r[4] = np.array([-0.02490747037406, 0.26363266501679, -0.07330346656184, 0.01876886839392, -0.00176688757979])\n S0_r[5] = np.array([-0.19423148776489, 0.45952477474223, -0.18163820209523, 0.04173579115885, -0.00355426505742])\n S0_r[6] = np.array([0.04574408690798, -0.05092121193598, 0.13874539944789, -0.04344815868749, 0.00449747222180])\n\n S1_r = np.empty(MAX_NUMBER_OF_PREDICTED_RATIOS+1, dtype=np.ndarray)\n S1_r[1] = np.array([-0.01040584267474, 0.53121149663696, 0.00576913817747, -0.00039325152252, -0.00007954180489])\n S1_r[2] = np.array([0.37339166598255, -0.15814640001919, 0.24085046064819, -0.06068695741919, 0.00563606634601])\n S1_r[3] = np.array([0.06969331604484, 0.28154425636993, -0.08121643989151, 0.02372741957255, -0.00238998426027])\n S1_r[4] = np.array([0.04462649178239, 0.23204790123388, -0.06083969521863, 0.01564282892512, -0.00145145206815])\n S1_r[5] = np.array([-0.20727547407753, 0.53536509500863, -0.22521649838170, 0.05180965157326, -0.00439750995163])\n S1_r[6] = np.array([0.27169670700251, -0.37192045082925, 0.31939855191976, -0.08668833166842, 0.00822975581940])\n\n S2_r = np.empty(MAX_NUMBER_OF_PREDICTED_RATIOS+1, dtype=np.ndarray)\n S2_r[1] = np.array([-0.01937823810470, 0.53084210514216, 0.00580573751882, -0.00038281138203, -0.00007958217070])\n S2_r[2] = np.array([0.68496829280011, -0.54558176102022, 0.44926662609767, -0.11154849560657, 0.01023294598884])\n S2_r[3] = np.array([0.04215807391059, 0.40434195078925, -0.15884974959493, 0.04319968814535, -0.00413693825139])\n S2_r[4] = np.array([0.14015578207913, 0.14407679007180, -0.01310480312503, 0.00362292256563, -0.00034189078786])\n S2_r[5] = np.array([-0.02549241716294, 0.32153542852101, -0.11409513283836, 0.02617210469576, -0.00221816103608])\n S2_r[6] = np.array([-0.14490868030324, 0.33629928307361, -0.08223564735018, 0.01023410734015, -0.00027717589598])\n\n model_params = np.empty(MAX_NUMBER_OF_SULPHUR_ATOMS, dtype=np.ndarray)\n model_params[0] = S0_r\n model_params[1] = S1_r\n model_params[2] = S2_r\n\n ratio = None\n if (((1 <= peak_number <= 3) & (((number_of_sulphur == 0) & (498 <= monoisotopic_mass <= 3915)) |\n ((number_of_sulphur == 1) & (530 <= monoisotopic_mass <= 3947)) |\n ((number_of_sulphur == 2) & (562 <= monoisotopic_mass <= 3978)))) |\n ((peak_number == 4) & (((number_of_sulphur == 0) & (907 <= monoisotopic_mass <= 3915)) |\n ((number_of_sulphur == 1) & (939 <= monoisotopic_mass <= 3947)) |\n ((number_of_sulphur == 2) & (971 <= monoisotopic_mass <= 3978)))) |\n ((peak_number == 5) & (((number_of_sulphur == 0) & (1219 <= monoisotopic_mass <= 3915)) |\n ((number_of_sulphur == 1) & (1251 <= monoisotopic_mass <= 3947)) |\n ((number_of_sulphur == 2) & (1283 <= monoisotopic_mass <= 3978)))) |\n ((peak_number == 6) & (((number_of_sulphur == 0) & (1559 <= monoisotopic_mass <= 3915)) |\n ((number_of_sulphur == 1) & (1591 <= monoisotopic_mass <= 3947)) |\n ((number_of_sulphur == 2) & (1623 <= monoisotopic_mass <= 3978))))):\n beta0 = model_params[number_of_sulphur][peak_number][0]\n beta1 = model_params[number_of_sulphur][peak_number][1]\n beta2 = model_params[number_of_sulphur][peak_number][2]\n beta3 = model_params[number_of_sulphur][peak_number][3]\n beta4 = model_params[number_of_sulphur][peak_number][4]\n scaled_m = monoisotopic_mass / 1000.0\n ratio = beta0 + (beta1*scaled_m) + beta2*(scaled_m**2) + 
beta3*(scaled_m**3) + beta4*(scaled_m**4)\n return ratio\n\n# assumes the isotope's raw points have already been flattened to a particular dimension (e.g. scan, RT, m/z) and\n# sorted by ascending order in that dimension\ndef fit_curve_to_flattened_isotope(flattened_points_df, estimated_apex, estimated_peak_width, maximum_number_of_peaks, isotope_dimension, isotope_number, sequence, charge, run_name):\n peaks_l = []\n filtered_points_d = None\n if len(flattened_points_df) > 0:\n # apply a filter to make curve fitting easier, if there are enough points\n flattened_points_df['filtered_intensity'] = flattened_points_df.intensity # set the default\n window_length = 11\n if len(flattened_points_df) > window_length:\n try:\n flattened_points_df['filtered_intensity'] = signal.savgol_filter(flattened_points_df.intensity, window_length=window_length, polyorder=3)\n filtered = True\n except:\n # print(\"Filter failed for the flattened isotope {}, dimension {}, sequence {}, charge {}, run {}\".format(isotope_number, isotope_dimension, sequence, charge, run_name))\n filtered = False\n else:\n # print(\"Not enough points to filter in the flattened isotope {}, dimension {}, sequence {}, charge {}, run {}\".format(isotope_number, isotope_dimension, sequence, charge, run_name))\n filtered = False\n if filtered:\n filtered_points_d = flattened_points_df[['x','filtered_intensity']].to_dict('records')\n else:\n filtered_points_d = None\n\n # find the peak(s)\n # the minimum distance between peaks gives the minimum mount of feature overlap we will tolerate\n peak_x_l = []\n try:\n peak_idxs = peakutils.indexes(flattened_points_df.filtered_intensity.values, thres=0.05, min_dist=estimated_peak_width/2, thres_abs=False)\n peak_x_l = flattened_points_df.iloc[peak_idxs].x.to_list()\n except:\n pass\n if len(peak_x_l) == 0:\n # get the maximum intensity point\n peak_x_l = [flattened_points_df.loc[flattened_points_df.filtered_intensity.idxmax()].x]\n # print('could not find any peaks - taking the maximum point - peak_x_l: {}'.format(peak_x_l))\n peaks_df = flattened_points_df[flattened_points_df.x.isin(peak_x_l)]\n\n # peaks_df should now contain the rows from flattened_points_df that represent the peaks\n\n # find the valleys\n # the minimum distance between valleys gives the minimum peak width\n valley_x_l = []\n try:\n valley_idxs = peakutils.indexes(-flattened_points_df.filtered_intensity.values, thres=0.05, min_dist=estimated_peak_width/8, thres_abs=False)\n valley_x_l = flattened_points_df.iloc[valley_idxs].x.to_list()\n except:\n pass\n if len(valley_x_l) == 0:\n # get the minimum and maximum x\n # print('could not find any valleys - taking the widest points')\n valley_x_l = [flattened_points_df.x.min(), flattened_points_df.x.max()]\n valleys_df = flattened_points_df[flattened_points_df.x.isin(valley_x_l)]\n\n # valleys_df should now contain the rows from flattened_points_df that represent the valleys\n\n # print('sequence {}, charge {}, number of points {}\\npoints {}\\npeaks {}\\nvalleys {}\\n'.format(sequence, charge, len(flattened_points_df), flattened_points_df, peaks_df, valleys_df))\n\n # isolate each peak and extract its attributes\n for peak_idx,peak in enumerate(peaks_df.itertuples()):\n # find the x bounds\n upper_x = valleys_df[valleys_df.x > peak.x].x.min()\n if math.isnan(upper_x):\n upper_x = flattened_points_df.x.max()\n lower_x = valleys_df[valleys_df.x < peak.x].x.max()\n if math.isnan(lower_x):\n lower_x = flattened_points_df.x.min()\n peak_points_df = 
flattened_points_df[(flattened_points_df.x >= lower_x) & (flattened_points_df.x <= upper_x)]\n peak_points_left_df = peak_points_df[peak_points_df.x <= peak.x]\n peak_points_right_df = peak_points_df[peak_points_df.x > peak.x]\n # find the standard deviation and FWHM, assuming a gaussian distribution\n std_dev = np.mean([abs(peak.x-upper_x), abs(peak.x-lower_x)]) / 3 # each side is three std devs, so we take the mean of the two\n fwhm = 2.355 * std_dev\n base_width = upper_x - lower_x\n # calculate the area under the curve by summing the intensities\n area_under_curve = peak_points_df.intensity.sum()\n # calculate the r-squared value of the fitted curve\n if filtered:\n r_squared = calculate_r_squared(series_1=peak_points_df.intensity, series_2=peak_points_df.filtered_intensity)\n else:\n r_squared = None\n # calculate the peak symmetry\n lhs_auc = peak_points_left_df.intensity.sum()\n rhs_auc = peak_points_right_df.intensity.sum()\n if rhs_auc == 0:\n symmetry = 0\n else:\n symmetry = lhs_auc / rhs_auc\n # assemble all the peak attributes\n peak_attributes = (peak.x, peak.intensity, lower_x, upper_x, std_dev, fwhm, base_width, area_under_curve, r_squared, symmetry)\n peaks_l.append(peak_attributes)\n else:\n if args.small_set_mode:\n print(\"No points were found in the flattened isotope {}, dimension {}, sequence {}, charge {}, run {}\".format(isotope_number, isotope_dimension, sequence, charge, run_name))\n\n # form a dataframe to process the peaks we detected\n isolated_peaks_df = pd.DataFrame(peaks_l, columns=['apex_x','intensity','lower_x','upper_x','std_dev','full_width_half_max','base_width','area_under_curve','r_squared','peak_symmetry'])\n isolated_peaks_found = len(isolated_peaks_df)\n\n # sort the detected peaks by their proximity to the estimated apex, and return the maximum_number_of_peaks\n isolated_peaks_df['apex_x_delta'] = abs(isolated_peaks_df.apex_x - estimated_apex)\n isolated_peaks_df.sort_values(by=['apex_x_delta'], ascending=True, inplace=True)\n isolated_peaks_df.reset_index(drop=True, inplace=True)\n isolated_peaks_df = isolated_peaks_df.loc[:maximum_number_of_peaks-1]\n isolated_peaks_returned = len(isolated_peaks_df)\n\n # print('fit_curve_to_flattened_isotope: found {} isolated peaks; returning {}, sequence {}, charge {}, run {}'.format(isolated_peaks_found, isolated_peaks_returned, sequence, charge, run_name))\n\n # collate the results\n results_d = {}\n if len(isolated_peaks_df) > 0:\n results_d['peaks'] = isolated_peaks_df.to_dict('records')\n else:\n results_d['peaks'] = None\n results_d['filtered_points'] = filtered_points_d\n return results_d\n\n# this function assumes the x-axis is a column labelled 'x', and the y-axis is a column labelled 'intensity'\ndef calculate_isotope_correlation(isotope_0_df, isotope_1_df, isotope_2_df):\n # scale the x axis so we can join them\n isotope_0_df['x_scaled'] = (isotope_0_df.x * 100).astype(int)\n isotope_1_df['x_scaled'] = (isotope_1_df.x * 100).astype(int)\n isotope_2_df['x_scaled'] = (isotope_2_df.x * 100).astype(int)\n\n # combine the isotopes by aligning the x-dimension points they have in common\n combined_df = pd.merge(isotope_0_df, isotope_1_df, on='x_scaled', how='inner', suffixes=('_0', '_1')).sort_values(by='x_scaled')\n combined_df = pd.merge(combined_df, isotope_2_df, on='x_scaled', how='inner', suffixes=('_0', '_2')).sort_values(by='x_scaled')\n combined_df.rename(columns={'intensity': 'intensity_2'}, inplace=True)\n combined_df = combined_df[['x_scaled','intensity_0','intensity_1','intensity_2']]\n\n # 
calculate the correlation coefficient\n if len(combined_df) >=3: # let's have at least three points in common between the isotopes to get a meaningful correlation\n correlation_coefficient_a = np.corrcoef(combined_df[['intensity_0','intensity_1','intensity_2']].values, rowvar=False)\n if isinstance(correlation_coefficient_a, np.ndarray):\n isotope_correlation = correlation_coefficient_a[1,0]\n else:\n isotope_correlation = correlation_coefficient_a\n print(\"np.corrcoef return a scalar {} for isotopes {}\".format(isotope_correlation, combined_df))\n else:\n isotope_correlation = 0\n return isotope_correlation\n\n# for a given set of isotopes, calculate the feature metrics\ndef calculate_feature_metrics(isotope_peaks_df, isotope_raw_points_l, estimated_mono_mz, estimated_scan_apex, estimated_rt_apex, rt_metrics_l, scan_metrics_l, expected_spacing_mz, charge):\n # ensure we have a full set of metrics, even if some are None\n metrics_names = \\\n ['delta_mz_ppm',\n 'delta_rt',\n 'delta_scan',\n 'fwhm_rt_0',\n 'fwhm_scan_0',\n 'geometric_mean_0_1',\n 'geometric_mean_0_1_2',\n 'isotope_0_1_mz_delta_ppm',\n 'isotope_0_1_rt_delta',\n 'isotope_0_1_scan_delta',\n 'isotope_0_2_mz_delta_ppm',\n 'isotope_0_2_rt_delta',\n 'isotope_0_2_scan_delta',\n 'monoisotope_auc_over_isotope_peak_auc_sum',\n 'monoisotope_int_over_isotope_peak_int_sum',\n 'mz_delta_ppm_std_dev_0',\n 'mz_delta_ppm_std_dev_1',\n 'number_of_frames_0',\n 'number_of_frames_1',\n 'number_of_frames_2',\n 'number_of_missing_frames_0',\n 'number_of_missing_frames_1',\n 'number_of_missing_frames_2',\n 'peak_base_width_rt_0',\n 'peak_base_width_scan_0',\n 'r_squared_phr',\n 'rt_isotope_correlation',\n 'rt_isotope_cv',\n 'rt_peak_symmetry_0',\n 'rt_peak_symmetry_1',\n 'rt_peak_symmetry_2',\n 'scan_isotope_correlation',\n 'scan_isotope_cv',\n 'scan_peak_symmetry_0',\n 'scan_peak_symmetry_1',\n 'scan_peak_symmetry_2']\n d = {}\n for n in metrics_names:\n d[n] = None\n feature_metrics = FixedDict(d)\n\n # Calculate the feature metrics\n calculated_monoisotopic_mz = isotope_peaks_df.iloc[0].mz_centroid\n if calculated_monoisotopic_mz is not None:\n feature_metrics['delta_mz_ppm'] = (calculated_monoisotopic_mz - estimated_mono_mz) / estimated_mono_mz * 1e6\n else:\n feature_metrics['delta_mz_ppm'] = None\n if (scan_metrics_l[0] is not None) and (scan_metrics_l[0]['apex_x'] is not None) and (estimated_scan_apex is not None):\n feature_metrics['delta_scan'] = (scan_metrics_l[0]['apex_x'] - estimated_scan_apex) / estimated_scan_apex\n else:\n feature_metrics['delta_scan'] = None\n if (rt_metrics_l[0] is not None) and (rt_metrics_l[0]['apex_x'] is not None) and (estimated_rt_apex is not None):\n feature_metrics['delta_rt'] = (rt_metrics_l[0]['apex_x'] - estimated_rt_apex) / estimated_rt_apex\n else:\n feature_metrics['delta_rt'] = None\n\n # Calculate the delta ppm of the de-isotoped first and second isotopic peaks\n monoisotopic_mz_centroid = isotope_peaks_df.iloc[0].mz_centroid # monoisotopic\n isotope_1_mz_centroid = isotope_peaks_df.iloc[1].mz_centroid # first isotope\n isotope_2_mz_centroid = isotope_peaks_df.iloc[2].mz_centroid # second isotope\n\n # get the raw points for each isotope\n mono_raw_points_df = isotope_raw_points_l[0].copy()\n isotope_1_raw_points_df = isotope_raw_points_l[1].copy()\n isotope_2_raw_points_df = isotope_raw_points_l[2].copy()\n\n # delta from where isotope 1 is detected and where it's predicted\n if (monoisotopic_mz_centroid is not None) and (isotope_1_mz_centroid is not None):\n isotope_0_1_mz_delta_ppm = 
(monoisotopic_mz_centroid - (isotope_1_mz_centroid - (1 * expected_spacing_mz))) / monoisotopic_mz_centroid * 1e6\n feature_metrics['isotope_0_1_mz_delta_ppm'] = isotope_0_1_mz_delta_ppm\n else:\n feature_metrics['isotope_0_1_mz_delta_ppm'] = None\n\n # delta from where isotope 2 is detected and where it's predicted\n if (monoisotopic_mz_centroid is not None) and (isotope_2_mz_centroid is not None):\n isotope_0_2_mz_delta_ppm = (monoisotopic_mz_centroid - (isotope_2_mz_centroid - (2 * expected_spacing_mz))) / monoisotopic_mz_centroid * 1e6\n feature_metrics['isotope_0_2_mz_delta_ppm'] = isotope_0_2_mz_delta_ppm\n else:\n feature_metrics['isotope_0_2_mz_delta_ppm'] = None\n\n # calculate the RT apex deltas from the monoisotopic peak for the first and second isotopes\n if (rt_metrics_l[1] is not None) and (rt_metrics_l[0] is not None) and (rt_metrics_l[1]['apex_x'] is not None) and (rt_metrics_l[0]['apex_x'] is not None):\n isotope_0_1_rt_delta = (rt_metrics_l[1]['apex_x'] - rt_metrics_l[0]['apex_x']) / rt_metrics_l[0]['apex_x']\n feature_metrics['isotope_0_1_rt_delta'] = isotope_0_1_rt_delta\n else:\n feature_metrics['isotope_0_1_rt_delta'] = None\n\n if (rt_metrics_l[2] is not None) and (rt_metrics_l[0] is not None) and (rt_metrics_l[2]['apex_x'] is not None) and (rt_metrics_l[0]['apex_x'] is not None):\n isotope_0_2_rt_delta = (rt_metrics_l[2]['apex_x'] - rt_metrics_l[0]['apex_x']) / rt_metrics_l[0]['apex_x']\n feature_metrics['isotope_0_2_rt_delta'] = isotope_0_2_rt_delta\n else:\n feature_metrics['isotope_0_2_rt_delta'] = None\n\n # calculate the scan apex deltas from the monoisotopic peak for the first and second isotopes\n if (scan_metrics_l[1] is not None) and (scan_metrics_l[0] is not None) and (scan_metrics_l[1]['apex_x'] is not None) and (scan_metrics_l[0]['apex_x'] is not None):\n isotope_0_1_scan_delta = (scan_metrics_l[1]['apex_x'] - scan_metrics_l[0]['apex_x']) / scan_metrics_l[0]['apex_x']\n feature_metrics['isotope_0_1_scan_delta'] = isotope_0_1_scan_delta\n else:\n feature_metrics['isotope_0_1_scan_delta'] = None\n\n if (scan_metrics_l[2] is not None) and (scan_metrics_l[0] is not None) and (scan_metrics_l[2]['apex_x'] is not None) and (scan_metrics_l[0]['apex_x'] is not None):\n isotope_0_2_scan_delta = (scan_metrics_l[2]['apex_x'] - scan_metrics_l[0]['apex_x']) / scan_metrics_l[0]['apex_x']\n feature_metrics['isotope_0_2_scan_delta'] = isotope_0_2_scan_delta\n else:\n feature_metrics['isotope_0_2_scan_delta'] = None\n\n # calculate the monoisotopic peak intensity divided by the sum of the isotope peaks\n if (isotope_peaks_df.iloc[:3].summed_intensity.sum() != 0):\n monoisotope_int_over_isotope_peak_int_sum = isotope_peaks_df.iloc[0].summed_intensity / isotope_peaks_df.iloc[:3].summed_intensity.sum()\n feature_metrics['monoisotope_int_over_isotope_peak_int_sum'] = monoisotope_int_over_isotope_peak_int_sum\n else:\n feature_metrics['monoisotope_int_over_isotope_peak_int_sum'] = None\n\n # calculate the monoisotopic peak AUC divided by the sum of the isotope peak AUCs\n if (rt_metrics_l[0] is not None) and (rt_metrics_l[1] is not None) and (rt_metrics_l[2] is not None) and (rt_metrics_l[0]['area_under_curve'] is not None) and (rt_metrics_l[1]['area_under_curve'] is not None) and (rt_metrics_l[2]['area_under_curve'] is not None):\n monoisotope_auc_over_isotope_peak_auc_sum = rt_metrics_l[0]['area_under_curve'] / (rt_metrics_l[0]['area_under_curve'] + rt_metrics_l[1]['area_under_curve'] + rt_metrics_l[2]['area_under_curve'])\n 
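The m/z comparisons throughout this function all use the same parts-per-million convention; a minimal sketch with a hypothetical helper (not part of this module) and made-up values:

    def ppm_delta(observed_mz, reference_mz):
        # positive when the observed m/z sits above the reference
        return (observed_mz - reference_mz) / reference_mz * 1e6

    print(ppm_delta(700.0007, 700.0))   # 1.0 ppm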
feature_metrics['monoisotope_auc_over_isotope_peak_auc_sum'] = monoisotope_auc_over_isotope_peak_auc_sum\n else:\n feature_metrics['monoisotope_auc_over_isotope_peak_auc_sum'] = None\n\n # calculate the theoretical and observed isotopic peak height ratios\n monoisotopic_mass = calculate_monoisotopic_mass_from_mz(monoisotopic_mz_centroid, charge)\n ratios = []\n for isotope in [1,2]: # ratio of isotopes 1:0, 2:1\n expected_ratio = peak_ratio(monoisotopic_mass=monoisotopic_mass, peak_number=isotope, number_of_sulphur=0)\n observed_ratio = isotope_peaks_df.iloc[isotope].summed_intensity / isotope_peaks_df.iloc[isotope-1].summed_intensity\n ratios.append((expected_ratio, observed_ratio))\n\n ratios_a = np.array(ratios)\n if ((None not in ratios_a[:,0]) and (None not in ratios_a[:,1])):\n r_squared_phr = calculate_r_squared(ratios_a[:,0], ratios_a[:,1])\n else:\n r_squared_phr = None\n feature_metrics['r_squared_phr'] = r_squared_phr\n\n # calculate the geometric mean of the isotope peak intensities\n if (isotope_peaks_df.iloc[0].summed_intensity > 0) and (isotope_peaks_df.iloc[1].summed_intensity > 0):\n geometric_mean_0_1 = np.log(isotope_peaks_df.iloc[0].summed_intensity * isotope_peaks_df.iloc[1].summed_intensity) / 2\n feature_metrics['geometric_mean_0_1'] = geometric_mean_0_1\n else:\n feature_metrics['geometric_mean_0_1'] = None\n if (isotope_peaks_df.iloc[0].summed_intensity > 0) and (isotope_peaks_df.iloc[1].summed_intensity > 0) and (isotope_peaks_df.iloc[2].summed_intensity > 0):\n geometric_mean_0_1_2 = np.log(isotope_peaks_df.iloc[0].summed_intensity * isotope_peaks_df.iloc[1].summed_intensity * isotope_peaks_df.iloc[2].summed_intensity) / 3\n feature_metrics['geometric_mean_0_1_2'] = geometric_mean_0_1_2\n else:\n feature_metrics['geometric_mean_0_1_2'] = None\n\n # calculate the m/z ppm standard deviation for isotopes 0 and 1\n mz_centroid_0 = isotope_peaks_df.iloc[0].mz_centroid\n mono_raw_points_df['mz_ppm_delta'] = (mono_raw_points_df.mz - mz_centroid_0) / mz_centroid_0 * 1e6\n mz_delta_ppm_std_dev_0 = np.std(mono_raw_points_df.mz_ppm_delta)\n feature_metrics['mz_delta_ppm_std_dev_0'] = mz_delta_ppm_std_dev_0\n\n mz_centroid_1 = isotope_peaks_df.iloc[1].mz_centroid\n isotope_1_raw_points_df['mz_ppm_delta'] = (isotope_1_raw_points_df.mz - mz_centroid_1) / mz_centroid_1 * 1e6\n mz_delta_ppm_std_dev_1 = np.std(isotope_1_raw_points_df.mz_ppm_delta)\n feature_metrics['mz_delta_ppm_std_dev_1'] = mz_delta_ppm_std_dev_1\n\n # calculate the symmetry of the isotopes in RT and CCS\n if (rt_metrics_l[0] is not None):\n feature_metrics['rt_peak_symmetry_0'] = rt_metrics_l[0]['peak_symmetry']\n if (rt_metrics_l[1] is not None):\n feature_metrics['rt_peak_symmetry_1'] = rt_metrics_l[1]['peak_symmetry']\n if (rt_metrics_l[2] is not None):\n feature_metrics['rt_peak_symmetry_2'] = rt_metrics_l[2]['peak_symmetry']\n\n if (scan_metrics_l[0] is not None):\n feature_metrics['scan_peak_symmetry_0'] = scan_metrics_l[0]['peak_symmetry']\n if (scan_metrics_l[1] is not None):\n feature_metrics['scan_peak_symmetry_1'] = scan_metrics_l[1]['peak_symmetry']\n if (scan_metrics_l[2] is not None):\n feature_metrics['scan_peak_symmetry_2'] = scan_metrics_l[2]['peak_symmetry']\n\n # calculate the isotopic peak correlation with each other in RT and CCS\n if ((mono_raw_points_df is not None) and (len(mono_raw_points_df) > 0) and (isotope_1_raw_points_df is not None) and (len(isotope_1_raw_points_df) > 0) and (isotope_2_raw_points_df is not None) and (len(isotope_2_raw_points_df) > 0)):\n # correlation in RT\n 
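calculate_isotope_correlation above ultimately reduces to np.corrcoef over the aligned intensity columns; a toy illustration with made-up profiles:

    import numpy as np
    a = np.array([1.0, 4.0, 9.0, 4.0, 1.0])     # one isotope's flattened profile
    b = 2.0 * a                                  # same shape, different scale
    r = np.corrcoef(np.vstack([a, b]))[1, 0]     # 1.0: perfectly correlated shapes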
rt_0_df = mono_raw_points_df.groupby(['frame_id'], as_index=False).intensity.sum()\n rt_1_df = isotope_1_raw_points_df.groupby(['frame_id'], as_index=False).intensity.sum()\n rt_2_df = isotope_2_raw_points_df.groupby(['frame_id'], as_index=False).intensity.sum()\n\n rt_0_df['x'] = rt_0_df.frame_id\n rt_1_df['x'] = rt_1_df.frame_id\n rt_2_df['x'] = rt_2_df.frame_id\n\n feature_metrics['rt_isotope_correlation'] = calculate_isotope_correlation(rt_0_df, rt_1_df, rt_2_df)\n\n # correlation in CCS\n scan_0_df = mono_raw_points_df.groupby(['scan'], as_index=False).intensity.sum()\n scan_1_df = isotope_1_raw_points_df.groupby(['scan'], as_index=False).intensity.sum()\n scan_2_df = isotope_2_raw_points_df.groupby(['scan'], as_index=False).intensity.sum()\n\n scan_0_df['x'] = scan_0_df.scan\n scan_1_df['x'] = scan_1_df.scan\n scan_2_df['x'] = scan_2_df.scan\n\n feature_metrics['scan_isotope_correlation'] = calculate_isotope_correlation(scan_0_df, scan_1_df, scan_2_df)\n else:\n feature_metrics['rt_isotope_correlation'] = None\n feature_metrics['scan_isotope_correlation'] = None\n\n # calculate the CV for isotope apexes in RT\n if (rt_metrics_l[0] is not None) and (rt_metrics_l[1] is not None) and (rt_metrics_l[2] is not None) and (rt_metrics_l[0]['apex_x'] is not None) and (rt_metrics_l[1]['apex_x'] is not None) and (rt_metrics_l[2]['apex_x'] is not None) and (np.mean([rt_metrics_l[0]['apex_x'], rt_metrics_l[1]['apex_x'], rt_metrics_l[2]['apex_x']]) != 0):\n n = [rt_metrics_l[0]['apex_x'], rt_metrics_l[1]['apex_x'], rt_metrics_l[2]['apex_x']]\n feature_metrics['rt_isotope_cv'] = np.std(n) / np.mean(n)\n else:\n feature_metrics['rt_isotope_cv'] = None\n\n # calculate the CV for isotope apexes in CCS\n if (scan_metrics_l[0] is not None) and (scan_metrics_l[1] is not None) and (scan_metrics_l[2] is not None) and (scan_metrics_l[0]['apex_x'] is not None) and (scan_metrics_l[1]['apex_x'] is not None) and (scan_metrics_l[2]['apex_x'] is not None) and (np.mean([scan_metrics_l[0]['apex_x'], scan_metrics_l[1]['apex_x'], scan_metrics_l[2]['apex_x']]) != 0):\n n = [scan_metrics_l[0]['apex_x'], scan_metrics_l[1]['apex_x'], scan_metrics_l[2]['apex_x']]\n feature_metrics['scan_isotope_cv'] = np.std(n) / np.mean(n)\n else:\n feature_metrics['scan_isotope_cv'] = None\n\n # calculate the FWHM and peak base width of the monoisotopic peak in RT and CCS dimensions\n if (rt_metrics_l[0] is not None):\n feature_metrics['fwhm_rt_0'] = rt_metrics_l[0]['full_width_half_max']\n feature_metrics['peak_base_width_rt_0'] = rt_metrics_l[0]['base_width']\n else:\n feature_metrics['fwhm_rt_0'] = None\n feature_metrics['peak_base_width_rt_0'] = None\n\n if (scan_metrics_l[0] is not None):\n feature_metrics['fwhm_scan_0'] = scan_metrics_l[0]['full_width_half_max']\n feature_metrics['peak_base_width_scan_0'] = scan_metrics_l[0]['base_width']\n else:\n feature_metrics['fwhm_scan_0'] = None\n feature_metrics['peak_base_width_scan_0'] = None\n\n # calculate the number of points and missing points in consecutive frames\n if (rt_metrics_l[0] is not None):\n rt_lower_0 = rt_metrics_l[0]['lower_x']\n rt_upper_0 = rt_metrics_l[0]['upper_x']\n ms1_frame_ids_0 = get_ms1_frame_ids(rt_lower_0, rt_upper_0)\n ms1_frame_ids_0_df = pd.DataFrame(ms1_frame_ids_0, columns=['frame_id'])\n ms1_frame_ids_0_df['intensity'] = 0\n merged_df = pd.merge(ms1_frame_ids_0_df, isotope_raw_points_l[0], on='frame_id', how='left', suffixes=('_0', '_1')).sort_values(by='frame_id')\n number_of_missing_frames_0 = merged_df.intensity_1.isna().sum()\n 
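The missing-frame count here follows a left-merge pattern that can be sketched in isolation (toy frame ids and intensities):

    import pandas as pd
    expected = pd.DataFrame({'frame_id': [1, 2, 3, 4]})              # frames in the RT window
    observed = pd.DataFrame({'frame_id': [1, 3], 'intensity': [10, 20]})
    merged = pd.merge(expected, observed, on='frame_id', how='left')
    missing = merged.intensity.isna().sum()                          # 2: frames 2 and 4 have no points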
feature_metrics['number_of_missing_frames_0'] = number_of_missing_frames_0\n feature_metrics['number_of_frames_0'] = len(ms1_frame_ids_0_df)\n else:\n feature_metrics['number_of_missing_frames_0'] = None\n feature_metrics['number_of_frames_0'] = None\n\n if (rt_metrics_l[1] is not None):\n rt_lower_1 = rt_metrics_l[1]['lower_x']\n rt_upper_1 = rt_metrics_l[1]['upper_x']\n ms1_frame_ids_1 = get_ms1_frame_ids(rt_lower_1, rt_upper_1)\n ms1_frame_ids_1_df = pd.DataFrame(ms1_frame_ids_1, columns=['frame_id'])\n ms1_frame_ids_1_df['intensity'] = 0\n merged_df = pd.merge(ms1_frame_ids_1_df, isotope_raw_points_l[1], on='frame_id', how='left', suffixes=('_0', '_1')).sort_values(by='frame_id')\n number_of_missing_frames_1 = merged_df.intensity_1.isna().sum()\n feature_metrics['number_of_missing_frames_1'] = number_of_missing_frames_1\n feature_metrics['number_of_frames_1'] = len(ms1_frame_ids_1_df)\n else:\n feature_metrics['number_of_missing_frames_1'] = None\n feature_metrics['number_of_frames_1'] = None\n\n if (rt_metrics_l[2] is not None):\n rt_lower_2 = rt_metrics_l[2]['lower_x']\n rt_upper_2 = rt_metrics_l[2]['upper_x']\n ms1_frame_ids_2 = get_ms1_frame_ids(rt_lower_2, rt_upper_2)\n ms1_frame_ids_2_df = pd.DataFrame(ms1_frame_ids_2, columns=['frame_id'])\n ms1_frame_ids_2_df['intensity'] = 0\n merged_df = pd.merge(ms1_frame_ids_2_df, isotope_raw_points_l[2], on='frame_id', how='left', suffixes=('_0', '_1')).sort_values(by='frame_id')\n number_of_missing_frames_2 = merged_df.intensity_1.isna().sum()\n feature_metrics['number_of_missing_frames_2'] = number_of_missing_frames_2\n feature_metrics['number_of_frames_2'] = len(ms1_frame_ids_2_df)\n else:\n feature_metrics['number_of_missing_frames_2'] = None\n feature_metrics['number_of_frames_2'] = None\n\n # gather the metrics\n if feature_metrics is not None and isinstance(feature_metrics, FixedDict):\n feature_metrics = feature_metrics.get_dict()\n\n return feature_metrics\n\ndef calculate_feature_attributes(isotope_raw_points_l, rt_0_metrics, scan_0_metrics, sequence, charge, run_name, estimated_mono_mz):\n intensity = 0\n inferred = False\n isotope_idx_not_in_saturation = -1 # this means there are _no_ isotopes not in saturation\n isotope_intensities_l = None\n monoisotopic_mz = None\n monoisotopic_mass = None\n number_of_isotopes = 0\n\n # join the isotope dataframes together\n isotope_raw_points_df = pd.concat(isotope_raw_points_l, axis=0, sort=False)\n\n # re-calculate the intensity of each isotope by summing its point closest to the monoisotope apex and one point either side\n isotope_intensity_l = []\n isotope_idx_not_in_saturation = -1\n for isotope_idx in range(NUMBER_OF_ISOTOPES):\n isotope_df = isotope_raw_points_df[isotope_raw_points_df.isotope_idx == isotope_idx].copy()\n if len(isotope_df) > 0:\n # find the intensity by summing the maximum point in the frame closest to the RT apex, and the frame maximums either side\n frame_maximums_l = []\n for frame_id,group_df in isotope_df.groupby('frame_id'):\n frame_maximums_l.append(group_df.loc[group_df.intensity.idxmax()])\n frame_maximums_df = pd.DataFrame(frame_maximums_l)\n frame_maximums_df.sort_values(by=['retention_time_secs'], ascending=True, inplace=True)\n frame_maximums_df.reset_index(drop=True, inplace=True)\n # find the index closest to the RT apex and the index either side\n if (rt_0_metrics is not None) and (rt_0_metrics['apex_x'] is not None):\n frame_maximums_df['rt_delta'] = np.abs(frame_maximums_df.retention_time_secs - rt_0_metrics['apex_x'])\n apex_idx = 
frame_maximums_df.rt_delta.idxmin()\n else:\n apex_idx = frame_maximums_df.intensity.idxmax()\n apex_idx_minus_one = max(0, apex_idx-1)\n apex_idx_plus_one = min(len(frame_maximums_df)-1, apex_idx+1)\n # sum the maximum intensity and the max intensity of the frame either side in RT\n summed_intensity = frame_maximums_df.loc[apex_idx_minus_one:apex_idx_plus_one].intensity.sum()\n # are any of the three points in saturation?\n isotope_in_saturation = (frame_maximums_df.loc[apex_idx_minus_one:apex_idx_plus_one].intensity.max() > SATURATION_INTENSITY)\n # keep the points used at the apex for calculating the intensity\n isotope_apex_points_l = [tuple(x) for x in frame_maximums_df[['mz','scan','frame_id','retention_time_secs','intensity']].loc[apex_idx_minus_one:apex_idx_plus_one].to_numpy()]\n # keep the raw points for each isotope, and those used at the apex for calculating the intensity\n isotope_points_l = [tuple(x) for x in isotope_df[['mz','scan','frame_id','retention_time_secs','intensity']].to_numpy()]\n # add the isotope to the list\n isotope_intensity_l.append((summed_intensity, isotope_in_saturation, isotope_points_l, isotope_apex_points_l))\n if (isotope_in_saturation == False) and (isotope_idx_not_in_saturation == -1):\n isotope_idx_not_in_saturation = isotope_idx\n else:\n # the isotope doesn't have any points so there's no point continuing\n if args.small_set_mode:\n print('calculate_feature_attributes: isotope {} doesn\\'t have any points - stopping'.format(isotope_idx))\n break\n\n # calculate the monoisotopic m/z and mass\n monoisotopic_points_a = isotope_raw_points_df[isotope_raw_points_df.isotope_idx == 0][['mz','intensity']].to_numpy()\n monoisotopic_mz = mz_centroid(monoisotopic_points_a[:,1], monoisotopic_points_a[:,0])\n monoisotopic_mass = calculate_monoisotopic_mass_from_mz(monoisotopic_mz, charge)\n monoisotopic_mz_delta_ppm = (monoisotopic_mz - estimated_mono_mz) / estimated_mono_mz * 1e6\n\n # infer the intensity of peaks made up of points in saturation\n if len(isotope_intensity_l) > 0:\n isotope_intensities_df = pd.DataFrame(isotope_intensity_l, columns=['summed_intensity','saturated','isotope_points','isotope_apex_points'])\n # set up the default values\n isotope_intensities_df['inferred_intensity'] = isotope_intensities_df.summed_intensity # set the summed intensity to be the default adjusted intensity for all isotopes\n isotope_intensities_df['inferred'] = False\n\n # adjust the monoisotopic intensity if it has points in saturation. 
We can use an isotope that's\n # not in saturation as a reference, as long as there is one\n if isotope_idx_not_in_saturation > 0:\n # using as a reference the most intense isotope that is not in saturation, derive the isotope intensities back to the monoisotopic\n Hpn = isotope_intensities_df.iloc[isotope_idx_not_in_saturation].summed_intensity\n for peak_number in reversed(range(1,isotope_idx_not_in_saturation+1)):\n phr = peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur=0)\n if phr is not None:\n Hpn_minus_1 = Hpn / phr\n isotope_intensities_df.at[peak_number-1, 'inferred_intensity'] = int(Hpn_minus_1)\n isotope_intensities_df.at[peak_number-1, 'inferred'] = True\n Hpn = Hpn_minus_1\n else:\n break\n\n intensity = int(isotope_intensities_df.iloc[0].inferred_intensity) # the inferred saturation\n inferred = int(isotope_intensities_df.iloc[0].inferred) # whether the monoisotope intensity was inferred\n\n isotope_intensities_l = [tuple(x) for x in isotope_intensities_df[['summed_intensity','saturated','inferred_intensity','inferred','isotope_points','isotope_apex_points']].to_numpy()]\n number_of_isotopes = len(isotope_intensities_df)\n else:\n isotope_intensities_l = None\n\n # calculate with the top-proportion method\n monoisotope_df = isotope_raw_points_df[isotope_raw_points_df.isotope_idx == 0]\n # find the maximum intensity by scan in each frame\n frame_ccs_cutoffs = []\n for group_name,group_df in monoisotope_df.groupby(['frame_id']):\n max_intensity = group_df.intensity.max()\n intensity_cutoff = (1.0 - TOP_CCS_PROPORTION_TO_INCLUDE) * max_intensity\n frame_ccs_cutoffs.append((group_name, intensity_cutoff))\n # trim the monoisotope according to the CCS cutoffs\n frames_l = []\n for ccs_cutoff in frame_ccs_cutoffs:\n frame_df = monoisotope_df[(monoisotope_df.frame_id == ccs_cutoff[0]) & (monoisotope_df.intensity >= ccs_cutoff[1])]\n frames_l.append(frame_df)\n monoisotope_trimmed_by_ccs_cutoff_df = pd.concat(frames_l, axis=0, sort=False) # the monoisotope points trimmed by CCS cutoff\n # find the RT cutoff\n rt_flattened_df = monoisotope_trimmed_by_ccs_cutoff_df.groupby(['frame_id','retention_time_secs'], as_index=False).intensity.sum()\n max_rt_intensity = rt_flattened_df.intensity.max()\n rt_intensity_cutoff = (1.0 - TOP_RT_PROPORTION_TO_INCLUDE) * max_rt_intensity\n # trim the RT-flattened monoisotope accordingly\n rt_flattened_with_cutoff_df = rt_flattened_df[rt_flattened_df.intensity >= rt_intensity_cutoff]\n # now sum the remaining points to calculate the intensity\n peak_proportion_intensity = rt_flattened_with_cutoff_df.intensity.sum()\n\n # package the feature attributes\n feature_attributes = {}\n feature_attributes['intensity'] = intensity # sum of the maximum intensity and the max intensity of the frame either side in RT\n feature_attributes['inferred'] = inferred # whether the mono intensity was inferred from peak height ratios\n feature_attributes['isotope_idx_not_in_saturation'] = isotope_idx_not_in_saturation # index of the first isotope that is not in saturation\n if rt_0_metrics is not None:\n feature_attributes['rt_apex'] = rt_0_metrics['apex_x'] # the RT apex of the isolated mono peak\n else:\n feature_attributes['rt_apex'] = None\n if scan_0_metrics is not None:\n feature_attributes['scan_apex'] = scan_0_metrics['apex_x'] # the CCS apex of the isolated mono peak\n else:\n feature_attributes['scan_apex'] = None\n feature_attributes['isotope_intensities_l'] = isotope_intensities_l # information about each isotope\n 
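# editor's note (hedged): worked example of the saturation inference above, using hypothetical peak_ratio\n    # values: if isotope 2 is the first isotope not in saturation with a summed intensity of 9000, and the\n    # expected ratios are I2/I1 = 0.5 and I1/I0 = 0.8, then I1 = 9000 / 0.5 = 18000 and I0 = 18000 / 0.8 = 22500,\n    # which replaces the saturated monoisotopic reading as the inferred_intensity.\n    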
feature_attributes['monoisotopic_mz_centroid'] = monoisotopic_mz # mono m/z for the isolated peak\n feature_attributes['monoisotopic_mz_delta_ppm'] = monoisotopic_mz_delta_ppm # delta m/z ppm from the estimated monoisotopic m/z\n feature_attributes['monoisotopic_mass'] = monoisotopic_mass # monoisotopic mass\n feature_attributes['number_of_isotopes'] = number_of_isotopes # the number of isotopes we found\n feature_attributes['peak_proportion_intensity'] = peak_proportion_intensity # intensity calculated by taking a proportion of the top of the peak\n feature_attributes['peak_proportions'] = {'ccs_proportion':TOP_CCS_PROPORTION_TO_INCLUDE, 'rt_proportion':TOP_RT_PROPORTION_TO_INCLUDE}\n\n return feature_attributes\n\ndef extract_feature_metrics_at_coords(coordinates_d, data_obj, run_name, sequence, charge, target_mode):\n feature_metrics_attributes_l = []\n\n estimated_mono_mz = coordinates_d['mono_mz']\n estimated_scan_apex = coordinates_d['scan_apex']\n estimated_rt_apex = coordinates_d['rt_apex']\n\n # distance for looking either side of the scan and RT apex, based on the other times this sequence has been seen in this experiment\n SCAN_WIDTH = args.max_peak_width_ccs\n RT_WIDTH = args.max_peak_width_rt\n\n # the width to use for isotopic width, in Da\n MZ_TOLERANCE_PPM = 5 # +/- this amount\n MZ_TOLERANCE_PERCENT = MZ_TOLERANCE_PPM * 10**-4\n MS1_PEAK_DELTA = estimated_mono_mz * MZ_TOLERANCE_PERCENT / 100\n\n # the expected spacing between isotopes in the m/z dimension\n expected_spacing_mz = CARBON_MASS_DIFFERENCE / charge\n\n # define the region we will look in for the feature\n feature_region_mz_lower = estimated_mono_mz - expected_spacing_mz\n feature_region_mz_upper = estimated_mono_mz + (NUMBER_OF_ISOTOPES * expected_spacing_mz) + expected_spacing_mz\n scan_lower = estimated_scan_apex - SCAN_WIDTH\n scan_upper = estimated_scan_apex + SCAN_WIDTH\n rt_lower = estimated_rt_apex - RT_WIDTH\n rt_upper = estimated_rt_apex + RT_WIDTH\n\n if args.small_set_mode:\n print('coordinates for sequence {} charge {} in {} mode:\\nestimated_mono_mz = {}\\nestimated_scan_apex = {}\\nestimated_rt_apex = {}\\n\\nfeature_region_mz_lower = {}\\nfeature_region_mz_upper={}\\nscan_lower={}\\nscan_upper={}\\nrt_lower={}\\nrt_upper={}\\n'.format(sequence, charge, \"target\" if target_mode else \"decoy\", estimated_mono_mz, estimated_scan_apex, estimated_rt_apex, feature_region_mz_lower, feature_region_mz_upper, scan_lower, scan_upper, rt_lower, rt_upper))\n\n isotope_peaks_l = []\n isotope_raw_points_l = []\n\n # load the ms1 points for this feature region\n feature_region_raw_points_df = data_obj[\n {\n \"rt_values\": slice(float(rt_lower), float(rt_upper)),\n \"mz_values\": slice(float(feature_region_mz_lower), float(feature_region_mz_upper)),\n \"scan_indices\": slice(int(scan_lower), int(scan_upper+1)),\n \"precursor_indices\": 0, # ms1 frames only\n }\n ][['mz_values','scan_indices','frame_indices','rt_values','intensity_values']]\n feature_region_raw_points_df.rename(columns={'mz_values':'mz', 'scan_indices':'scan', 'frame_indices':'frame_id', 'rt_values':'retention_time_secs', 'intensity_values':'intensity'}, inplace=True)\n # downcast the data types to minimise the memory used\n int_columns = ['frame_id','scan','intensity']\n feature_region_raw_points_df[int_columns] = feature_region_raw_points_df[int_columns].apply(pd.to_numeric, downcast=\"unsigned\")\n float_columns = ['retention_time_secs']\n feature_region_raw_points_df[float_columns] = 
feature_region_raw_points_df[float_columns].apply(pd.to_numeric, downcast=\"float\")\n\n MAXIMUM_NUMBER_OF_MONO_RT_PEAKS_FOR_TARGET_MODE = 10\n\n if len(feature_region_raw_points_df) > 0:\n # derive peaks for the monoisotopic and the isotopes\n\n # this section makes two lists for the isotopes:\n # - isotope_peaks_l is a list of tuples of the m/z centroid and the summed intensity for each isotope\n # - isotope_raw_points_l is a list of dataframes of the raw points for each isotope (that may be empty) with the raw points as (frame_id,mz,scan,intensity,retention_time_secs)\n for isotope_idx in range(NUMBER_OF_ISOTOPES):\n estimated_isotope_midpoint = estimated_mono_mz + (isotope_idx * expected_spacing_mz)\n # initial m/z isotope width\n isotope_mz_lower = estimated_isotope_midpoint - MS1_PEAK_DELTA\n isotope_mz_upper = estimated_isotope_midpoint + MS1_PEAK_DELTA\n isotope_raw_points_df = feature_region_raw_points_df[(feature_region_raw_points_df.mz >= isotope_mz_lower) & (feature_region_raw_points_df.mz <= isotope_mz_upper)].copy()\n if len(isotope_raw_points_df) == 0:\n # second attempt m/z isotope width\n MZ_TOLERANCE_PPM = 20 # +/- this amount\n MZ_TOLERANCE_PERCENT = MZ_TOLERANCE_PPM * 10**-4\n MS1_PEAK_DELTA = estimated_mono_mz * MZ_TOLERANCE_PERCENT / 100\n\n isotope_mz_lower = estimated_isotope_midpoint - MS1_PEAK_DELTA\n isotope_mz_upper = estimated_isotope_midpoint + MS1_PEAK_DELTA\n isotope_raw_points_df = feature_region_raw_points_df[(feature_region_raw_points_df.mz >= isotope_mz_lower) & (feature_region_raw_points_df.mz <= isotope_mz_upper)].copy()\n if len(isotope_raw_points_df) > 0:\n # print('found {} points for isotope {}'.format(len(isotope_raw_points_df), isotope_idx))\n isotope_raw_points_df['isotope_idx'] = isotope_idx\n # add the isotope's raw points to the list\n isotope_raw_points_l.append(isotope_raw_points_df)\n # centroid the raw points to get the peak for the isotope\n isotope_raw_points_a = isotope_raw_points_df[['mz','intensity']].values\n mz_cent = mz_centroid(isotope_raw_points_a[:,1], isotope_raw_points_a[:,0])\n # calculate the intensity\n summed_intensity = isotope_raw_points_a[:,1].sum()\n # add the peak to the list of isotopic peaks\n isotope_peaks_l.append((mz_cent, summed_intensity))\n else:\n break # we couldn't find any points where this isotope should be, so let's stop\n # print('found {} isotopes'.format(len(isotope_peaks_l)))\n isotope_peaks_df = pd.DataFrame(isotope_peaks_l, columns=['mz_centroid','summed_intensity'])\n\n # clean up\n feature_region_raw_points_df = None\n\n if len(isotope_peaks_df) >= 3: # we need at least three isotopes for this to work\n # update the monoisotopic m/z with the one we just calculated\n updated_mz_apex = isotope_peaks_df.iloc[0].mz_centroid\n\n # We have confidence in the accuracy of each dimension in decreasing order: m/z, RT, scan. 
Therefore we constrain\n # the cuboid by m/z first to find the peak in RT, then constrain the points to the RT peak's FWHM, then find\n # the peak in the scan dimension.\n\n # monoisotopic peak\n mono_raw_points_df = isotope_raw_points_l[0]\n\n # collapse the points onto the RT dimension\n rt_0_df = mono_raw_points_df.groupby(['frame_id','retention_time_secs'], as_index=False).intensity.sum()\n rt_0_df.sort_values(by=['retention_time_secs'], ascending=True, inplace=True)\n rt_0_df['x'] = rt_0_df.retention_time_secs\n rt_0_metrics_d = fit_curve_to_flattened_isotope(flattened_points_df=rt_0_df, estimated_apex=estimated_rt_apex, estimated_peak_width=RT_WIDTH,\n maximum_number_of_peaks=(MAXIMUM_NUMBER_OF_MONO_RT_PEAKS_FOR_TARGET_MODE if target_mode else 1),\n isotope_dimension='RT', isotope_number=0, sequence=sequence, charge=charge, run_name=run_name)\n if rt_0_metrics_d['peaks'] is not None:\n # we may have detected multiple peaks in RT, and rather than pick one, we will gather the attributes and metrics for them all\n candidate_peaks_l = []\n for rt_peak_idx, rt_0_metrics in enumerate(rt_0_metrics_d['peaks']):\n # monoisotopic peak\n mono_raw_points_df = isotope_raw_points_l[0]\n # update the RT apex from the estimator with the apex we determined by fitting a curve to the isolated monoisotopic peak\n if (rt_0_metrics is not None) and (rt_0_metrics['apex_x'] is not None):\n updated_rt_apex = rt_0_metrics['apex_x']\n else:\n updated_rt_apex = estimated_rt_apex\n # collapse the points onto the mobility dimension, constraining the points to the FWHM of the peak in RT\n if (rt_0_metrics['apex_x'] is not None) and (rt_0_metrics['lower_x'] is not None) and (rt_0_metrics['upper_x'] is not None):\n mono_raw_points_df = mono_raw_points_df[(mono_raw_points_df.retention_time_secs >= rt_0_metrics['lower_x']) & (mono_raw_points_df.retention_time_secs <= rt_0_metrics['upper_x'])]\n scan_0_df = mono_raw_points_df.groupby(['scan'], as_index=False).intensity.sum()\n scan_0_df.sort_values(by=['scan'], ascending=True, inplace=True)\n scan_0_df['x'] = scan_0_df.scan\n scan_0_metrics_d = fit_curve_to_flattened_isotope(flattened_points_df=scan_0_df, estimated_apex=estimated_scan_apex, estimated_peak_width=SCAN_WIDTH, maximum_number_of_peaks=1, isotope_dimension='CCS', isotope_number=0, sequence=sequence, charge=charge, run_name=run_name)\n if scan_0_metrics_d['peaks'] is not None:\n scan_0_metrics = scan_0_metrics_d['peaks'][0]\n else:\n scan_0_metrics = None\n # update the CCS apex from the estimator with the apex we determined by fitting a curve to the isolated monoisotopic peak\n if (scan_0_metrics is not None) and (scan_0_metrics['apex_x'] is not None):\n updated_scan_apex = scan_0_metrics['apex_x']\n else:\n updated_scan_apex = estimated_scan_apex\n\n # Isotope 1 peak\n isotope_1_raw_points_df = isotope_raw_points_l[1]\n\n # Collapse the points onto the RT dimension\n rt_1_df = isotope_1_raw_points_df.groupby(['frame_id','retention_time_secs'], as_index=False).intensity.sum()\n rt_1_df.sort_values(by=['retention_time_secs'], ascending=True, inplace=True)\n rt_1_df['x'] = rt_1_df.retention_time_secs\n # we want the isolated peak nearest the monoisotopic peak apex, so we use the updated apex\n rt_1_metrics_d = fit_curve_to_flattened_isotope(flattened_points_df=rt_1_df, estimated_apex=updated_rt_apex, estimated_peak_width=RT_WIDTH, maximum_number_of_peaks=1, isotope_dimension='RT', isotope_number=1, sequence=sequence, charge=charge, run_name=run_name)\n if rt_1_metrics_d['peaks'] is not None:\n rt_1_metrics = 
rt_1_metrics_d['peaks'][0]\n else:\n rt_1_metrics = None\n\n # Collapse the points onto the mobility dimension\n if (rt_1_metrics is not None) and (rt_1_metrics['apex_x'] is not None) and (rt_1_metrics['lower_x'] is not None) and (rt_1_metrics['upper_x'] is not None):\n isotope_1_raw_points_df = isotope_1_raw_points_df[(isotope_1_raw_points_df.retention_time_secs >= rt_1_metrics['lower_x']) & (isotope_1_raw_points_df.retention_time_secs <= rt_1_metrics['upper_x'])]\n scan_1_df = isotope_1_raw_points_df.groupby(['scan'], as_index=False).intensity.sum()\n scan_1_df.sort_values(by=['scan'], ascending=True, inplace=True)\n scan_1_df['x'] = scan_1_df.scan\n # we want the isolated peak nearest the monoisotopic peak apex, so we use the updated apex\n scan_1_metrics_d = fit_curve_to_flattened_isotope(flattened_points_df=scan_1_df, estimated_apex=updated_scan_apex, estimated_peak_width=SCAN_WIDTH, maximum_number_of_peaks=1, isotope_dimension='CCS', isotope_number=1, sequence=sequence, charge=charge, run_name=run_name)\n if scan_1_metrics_d['peaks'] is not None:\n scan_1_metrics = scan_1_metrics_d['peaks'][0]\n else:\n scan_1_metrics = None\n\n # Isotope 2 peak\n isotope_2_raw_points_df = isotope_raw_points_l[2]\n\n # Collapse the points onto the RT dimension\n rt_2_df = isotope_2_raw_points_df.groupby(['frame_id','retention_time_secs'], as_index=False).intensity.sum()\n rt_2_df.sort_values(by=['retention_time_secs'], ascending=True, inplace=True)\n rt_2_df['x'] = rt_2_df.retention_time_secs\n # we want the isolated peak nearest the monoisotopic peak apex, so we use the updated apex\n rt_2_metrics_d = fit_curve_to_flattened_isotope(flattened_points_df=rt_2_df, estimated_apex=updated_rt_apex, estimated_peak_width=RT_WIDTH, maximum_number_of_peaks=1, isotope_dimension='RT', isotope_number=2, sequence=sequence, charge=charge, run_name=run_name)\n if rt_2_metrics_d['peaks'] is not None:\n rt_2_metrics = rt_2_metrics_d['peaks'][0]\n else:\n rt_2_metrics = None\n\n # Collapse the points onto the mobility dimension\n if (rt_2_metrics is not None) and (rt_2_metrics['apex_x'] is not None) and (rt_2_metrics['lower_x'] is not None) and (rt_2_metrics['upper_x'] is not None):\n isotope_2_raw_points_df = isotope_2_raw_points_df[(isotope_2_raw_points_df.retention_time_secs >= rt_2_metrics['lower_x']) & (isotope_2_raw_points_df.retention_time_secs <= rt_2_metrics['upper_x'])]\n scan_2_df = isotope_2_raw_points_df.groupby(['scan'], as_index=False).intensity.sum()\n scan_2_df.sort_values(by=['scan'], ascending=True, inplace=True)\n scan_2_df['x'] = scan_2_df.scan\n # we want the isolated peak nearest the monoisotopic peak apex, so we use the updated apex\n scan_2_metrics_d = fit_curve_to_flattened_isotope(flattened_points_df=scan_2_df, estimated_apex=updated_scan_apex, estimated_peak_width=SCAN_WIDTH, maximum_number_of_peaks=1, isotope_dimension='CCS', isotope_number=2, sequence=sequence, charge=charge, run_name=run_name)\n if scan_2_metrics_d['peaks'] is not None:\n scan_2_metrics = scan_2_metrics_d['peaks'][0]\n else:\n scan_2_metrics = None\n\n # bundle up the data for the metrics calculation, which uses only the first three isotopes\n trimmed_isotopes_0_2_raw_points_l = [mono_raw_points_df, isotope_1_raw_points_df, isotope_2_raw_points_df] # the raw points for the isotopes, trimmed by the flattened curve fitting\n rt_metrics_l = [rt_0_metrics, rt_1_metrics, rt_2_metrics]\n scan_metrics_l = [scan_0_metrics, scan_1_metrics, scan_2_metrics]\n\n # calculate the feature metrics\n # - in this step we use the estimated 
apexes because we are measuring how close it was to the derived apex\n # - isotope_peaks_df is the m/z centroid and summed intensity of each 'raw' isotope\n # - trimmed_isotopes_0_2_raw_points_l is a list of dataframes containing the raw points for each isotope trimmed by the curve fitting\n feature_metrics = calculate_feature_metrics(isotope_peaks_df=isotope_peaks_df[:3], isotope_raw_points_l=trimmed_isotopes_0_2_raw_points_l, estimated_mono_mz=estimated_mono_mz, estimated_scan_apex=estimated_scan_apex, estimated_rt_apex=estimated_rt_apex, rt_metrics_l=rt_metrics_l, scan_metrics_l=scan_metrics_l, expected_spacing_mz=expected_spacing_mz, charge=charge)\n\n # bundle up the data for the attributes calculation - this uses all the isotopes we found\n # these points are trimmed to the extent of the monoisotopic\n isotope_raw_points_for_attributes_l = []\n for isotope_idx in range(len(isotope_raw_points_l)):\n isotope_raw_points_df = isotope_raw_points_l[isotope_idx]\n # # trim the isotope points by the RT extent of the monoisotopic\n # if (rt_0_metrics is not None) and (rt_0_metrics['lower_x'] is not None) and (rt_0_metrics['upper_x'] is not None):\n # isotope_raw_points_df = isotope_raw_points_df[(isotope_raw_points_df.retention_time_secs >= rt_0_metrics['lower_x']) & (isotope_raw_points_df.retention_time_secs <= rt_0_metrics['upper_x'])]\n # # trim the isotope points by the CCS extent of the monoisotopic\n # if (scan_0_metrics is not None) and (scan_0_metrics['lower_x'] is not None) and (scan_0_metrics['upper_x'] is not None):\n # isotope_raw_points_df = isotope_raw_points_df[(isotope_raw_points_df.scan >= scan_0_metrics['lower_x']) & (isotope_raw_points_df.scan <= scan_0_metrics['upper_x'])]\n isotope_raw_points_for_attributes_l.append(isotope_raw_points_df)\n\n # calculate the feature attributes\n # - isotope_raw_points_for_attributes_l is the raw points for each isotope trimmed to the extent of the monoisotopic\n feature_attributes = calculate_feature_attributes(isotope_raw_points_l=isotope_raw_points_for_attributes_l, rt_0_metrics=rt_0_metrics, scan_0_metrics=scan_0_metrics, sequence=sequence, charge=charge, run_name=run_name, estimated_mono_mz=estimated_mono_mz)\n\n # we need to make sure there's a viable number of isotopes\n if args.small_set_mode:\n print('sequence {}, charge {}, number of isotopes {}, rt apex {}, scan apex {}'.format(sequence, charge, feature_attributes['number_of_isotopes'], feature_attributes['rt_apex'], feature_attributes['scan_apex']))\n\n if (feature_attributes['number_of_isotopes'] >= MINIMUM_NUMBER_OF_ISOTOPES_FOR_VIABLE_FEATURE) and (feature_attributes['rt_apex'] is not None) and (feature_attributes['scan_apex'] is not None):\n # -------------------------\n # add in some monoisotopic peak RT attributes for visualisation and debugging\n # -------------------------\n # the isotope points flattened to the RT dimension and filtered with a Savgol filter - useful for visualisation\n if (rt_0_metrics_d is not None) and (rt_0_metrics_d['filtered_points'] is not None):\n feature_attributes[\"mono_filtered_points_l\"] = list(rt_0_metrics_d['filtered_points'])\n else:\n feature_attributes[\"mono_filtered_points_l\"] = None\n if (rt_1_metrics_d is not None) and (rt_1_metrics_d['filtered_points'] is not None):\n feature_attributes[\"isotope_1_filtered_points_l\"] = list(rt_1_metrics_d['filtered_points'])\n else:\n feature_attributes[\"isotope_1_filtered_points_l\"] = None\n if (rt_2_metrics_d is not None) and (rt_2_metrics_d['filtered_points'] is not None):\n 
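# editor's note (hedged): filtered_points is assumed to be the Savgol-smoothed (x, intensity) trace\n                            # produced by fit_curve_to_flattened_isotope, kept only for plotting and debugging.\n                            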
feature_attributes[\"isotope_2_filtered_points_l\"] = list(rt_2_metrics_d['filtered_points'])\n else:\n feature_attributes[\"isotope_2_filtered_points_l\"] = None\n # bounds of the feature in RT\n if (rt_0_metrics is not None) and (rt_0_metrics['lower_x'] is not None) and (rt_0_metrics['upper_x'] is not None):\n feature_attributes[\"mono_rt_bounds\"] = (rt_0_metrics['lower_x'], rt_0_metrics['upper_x'])\n else:\n feature_attributes[\"mono_rt_bounds\"] = None\n if (rt_1_metrics is not None) and (rt_1_metrics['lower_x'] is not None) and (rt_1_metrics['upper_x'] is not None):\n feature_attributes[\"isotope_1_rt_bounds\"] = (rt_1_metrics['lower_x'], rt_1_metrics['upper_x'])\n else:\n feature_attributes[\"isotope_1_rt_bounds\"] = None\n if (rt_2_metrics is not None) and (rt_2_metrics['lower_x'] is not None) and (rt_2_metrics['upper_x'] is not None):\n feature_attributes[\"isotope_2_rt_bounds\"] = (rt_2_metrics['lower_x'], rt_2_metrics['upper_x'])\n else:\n feature_attributes[\"isotope_2_rt_bounds\"] = None\n # bounds of the feature in CCS\n if (scan_0_metrics is not None) and (scan_0_metrics['lower_x'] is not None) and (scan_0_metrics['upper_x'] is not None):\n feature_attributes[\"mono_scan_bounds\"] = (scan_0_metrics['lower_x'], scan_0_metrics['upper_x'])\n else:\n feature_attributes[\"mono_scan_bounds\"] = None\n if (scan_1_metrics is not None) and (scan_1_metrics['lower_x'] is not None) and (scan_1_metrics['upper_x'] is not None):\n feature_attributes[\"isotope_1_scan_bounds\"] = (scan_1_metrics['lower_x'], scan_1_metrics['upper_x'])\n else:\n feature_attributes[\"isotope_1_scan_bounds\"] = None\n if (scan_2_metrics is not None) and (scan_2_metrics['lower_x'] is not None) and (scan_2_metrics['upper_x'] is not None):\n feature_attributes[\"isotope_2_scan_bounds\"] = (scan_2_metrics['lower_x'], scan_2_metrics['upper_x'])\n else:\n feature_attributes[\"isotope_2_scan_bounds\"] = None\n # add this to the list of candidate peaks\n peak_selection_attributes = (feature_metrics['rt_isotope_correlation'],feature_metrics['rt_isotope_cv'],feature_metrics['scan_isotope_correlation'],feature_metrics['scan_isotope_cv'],feature_attributes['monoisotopic_mz_delta_ppm'],feature_metrics['delta_rt'],feature_metrics['delta_scan'])\n if args.small_set_mode:\n print('peak_selection_attributes: {}'.format(peak_selection_attributes))\n if (feature_metrics is not None) and (feature_attributes is not None):\n candidate_peak_d = {}\n candidate_peak_d['feature_metrics_attributes'] = (sequence, charge, int(rt_peak_idx), feature_metrics, feature_attributes)\n candidate_peak_d['peak_selection_attributes'] = peak_selection_attributes\n candidate_peaks_l.append(candidate_peak_d)\n else:\n print('feature_metrics is None: {}, feature_attributes is None: {}'.format((feature_metrics is None), (feature_attributes is None)))\n else:\n if args.small_set_mode:\n if not (feature_attributes['number_of_isotopes'] >= MINIMUM_NUMBER_OF_ISOTOPES_FOR_VIABLE_FEATURE):\n print('feature attributes has {} isotopes'.format(feature_attributes['number_of_isotopes']))\n if not (feature_attributes['rt_apex'] is not None):\n print('feature attributes has None rt_apex')\n if not (feature_attributes['scan_apex'] is not None):\n print('feature attributes has None scan_apex')\n\n if len(candidate_peaks_l) > 0:\n # decide which candidate peak to add for this sequence\n candidate_peaks_df = pd.DataFrame([item['peak_selection_attributes'] for item in candidate_peaks_l], 
columns=['rt_isotope_correlation','rt_isotope_cv','scan_isotope_correlation','scan_isotope_cv','monoisotopic_mz_delta_ppm','delta_rt','delta_scan'])\n candidate_peaks_df.dropna(inplace=True) # drop any rows that contain None\n if len(candidate_peaks_df) > 0:\n # find the candidate peak with the best isotope correlation in RT\n candidate_peak_idx = candidate_peaks_df.rt_isotope_correlation.idxmax()\n # select the peak corresponding to this index\n feature_metrics_attributes_l.append(candidate_peaks_l[candidate_peak_idx]['feature_metrics_attributes'])\n print(\"extraction success for mode {}, sequence {}, charge {}\".format(\"target\" if target_mode else \"decoy\", sequence, charge))\n if args.small_set_mode:\n print('sequence {}, charge {}, run {}:'.format(sequence, charge, run_name))\n print(candidate_peaks_df.to_string())\n print(\"chose index {}\".format(candidate_peak_idx))\n print()\n else:\n if args.small_set_mode:\n print(\"extract_feature_metrics_at_coords failed for mode {}: no L2 candidate peaks were found, sequence {}, charge {}, run {}.\".format(\"target\" if target_mode else \"decoy\", sequence, charge, run_name))\n feature_metrics_attributes_l = []\n else:\n if args.small_set_mode:\n print(\"extract_feature_metrics_at_coords failed for mode {}: no L1 candidate peaks were found, sequence {}, charge {}, run {}.\".format(\"target\" if target_mode else \"decoy\", sequence, charge, run_name))\n feature_metrics_attributes_l = []\n else:\n if args.small_set_mode:\n print(\"extract_feature_metrics_at_coords failed for mode {}: no peaks were found in the monoisotopic peak flattened to the RT dimension, sequence {}, charge {}, run {}.\".format(\"target\" if target_mode else \"decoy\", sequence, charge, run_name))\n feature_metrics_attributes_l = []\n else:\n if args.small_set_mode:\n print(\"extract_feature_metrics_at_coords failed for mode {}: only found {} isotopes, sequence {}, charge {}, run {}.\".format(\"target\" if target_mode else \"decoy\", len(isotope_peaks_df), sequence, charge, run_name))\n feature_metrics_attributes_l = []\n else:\n if args.small_set_mode:\n print(\"extract_feature_metrics_at_coords failed for mode {}: no points were found in the feature extraction region, sequence {}, charge {}, run {}.\".format(\"target\" if target_mode else \"decoy\", sequence, charge, run_name))\n feature_metrics_attributes_l = []\n\n # time_stop = time.time()\n # print(\"sequence {}, charge {}: {} seconds\".format(sequence, charge, round(time_stop-time_start,1)))\n\n # keep some debug information\n if args.small_set_mode and target_mode:\n info_d = {}\n info_d['sequence'] = sequence\n info_d['charge'] = charge\n info_d['run_name'] = run_name\n info_d['estimated_coordinates_d'] = coordinates_d\n info_d['rt_flattened_l'] = [rt_0_df.to_dict('records'), rt_1_df.to_dict('records'), rt_2_df.to_dict('records')]\n info_d['scan_flattened_l'] = [scan_0_df.to_dict('records'), scan_1_df.to_dict('records'), scan_2_df.to_dict('records')]\n info_d['feature_metrics_attributes_l'] = feature_metrics_attributes_l\n info_d['selected_peak_index'] = candidate_peak_idx\n DEBUG_DIR = \"{}/debug\".format(EXPERIMENT_DIR)\n if not os.path.exists(DEBUG_DIR):\n os.makedirs(DEBUG_DIR)\n with open('{}/run-{}-sequence-{}-metrics.json'.format(DEBUG_DIR, run_name, sequence), 'w') as f: \n json.dump(info_d, fp=f, cls=NpEncoder)\n\n return feature_metrics_attributes_l\n\n\n####################################################################\n\nparser = argparse.ArgumentParser(description='Using the run-specific 
coordinate estimators, for each of the library sequences, from each run, extract metrics for feature targets and decoys to collate a training set.')\nparser.add_argument('-eb','--experiment_base_dir', type=str, default='./experiments', help='Path to the experiments directory.', required=False)\nparser.add_argument('-en','--experiment_name', type=str, help='Name of the experiment.', required=True)\nparser.add_argument('-rn','--run_name', type=str, help='Name of the run.', required=True)\nparser.add_argument('-ssm','--small_set_mode', action='store_true', help='A small subset of the data for testing purposes.', required=False)\nparser.add_argument('-ssms','--small_set_mode_size', type=int, default='100', help='The number of identifications to sample for small set mode.', required=False)\nparser.add_argument('-ssseq','--small_set_sequence', type=str, help='Only extract this sequence.', required=False)\nparser.add_argument('-sschr','--small_set_charge', type=int, help='The charge for the selected sequence.', required=False)\nparser.add_argument('-mpwrt','--max_peak_width_rt', type=int, default=10, help='Maximum peak width tolerance for the extraction from the estimated coordinate in RT.', required=False)\nparser.add_argument('-mpwccs','--max_peak_width_ccs', type=int, default=20, help='Maximum peak width tolerance for the extraction from the estimated coordinate in CCS.', required=False)\nparser.add_argument('-ini','--ini_file', type=str, default='./tfde/pipeline/pasef-process-short-gradient.ini', help='Path to the config file.', required=False)\nparser.add_argument('-d','--denoised', action='store_true', help='Use the denoised version of the raw database.')\nparser.add_argument('-pdm','--precursor_definition_method', type=str, choices=['pasef','3did','mq'], default='pasef', help='The method used to define the precursor cuboids.', required=False)\nargs = parser.parse_args()\n\n# Print the arguments for the log\ninfo = []\nfor arg in vars(args):\n info.append((arg, getattr(args, arg)))\nprint(info)\n\nstart_run = time.time()\n\n# check the experiment directory exists\nEXPERIMENT_DIR = \"{}/{}\".format(args.experiment_base_dir, args.experiment_name)\nif not os.path.exists(EXPERIMENT_DIR):\n print(\"The experiment directory is required but doesn't exist: {}\".format(EXPERIMENT_DIR))\n sys.exit(1)\n\n# check the INI file exists\nif not os.path.isfile(args.ini_file):\n print(\"The configuration file doesn't exist: {}\".format(args.ini_file))\n sys.exit(1)\n\n# load the INI file\ncfg = configparser.ConfigParser(interpolation=ExtendedInterpolation())\ncfg.read(args.ini_file)\n\n# set up constants\nFRAME_TYPE_MS1 = cfg.getint('common','FRAME_TYPE_MS1')\nADD_C_CYSTEINE_DA = cfg.getfloat('common','ADD_C_CYSTEINE_DA')\nPROTON_MASS = cfg.getfloat('common','PROTON_MASS')\nCARBON_MASS_DIFFERENCE = cfg.getfloat('common','CARBON_MASS_DIFFERENCE')\nSATURATION_INTENSITY = cfg.getint('common','SATURATION_INTENSITY')\nMAXIMUM_Q_VALUE = cfg.getfloat('common','MAXIMUM_Q_VALUE')\n\nMAXIMUM_Q_VALUE_FOR_CLASSIFIER_TRAINING_SET = cfg.getfloat('extraction','MAXIMUM_Q_VALUE_FOR_CLASSIFIER_TRAINING_SET')\nNUMBER_OF_ISOTOPES = cfg.getint('extraction','NUMBER_OF_ISOTOPES')\nMINIMUM_NUMBER_OF_ISOTOPES_FOR_VIABLE_FEATURE = cfg.getint('extraction','MINIMUM_NUMBER_OF_ISOTOPES_FOR_VIABLE_FEATURE')\nTOP_CCS_PROPORTION_TO_INCLUDE = cfg.getfloat('extraction','TOP_CCS_PROPORTION_TO_INCLUDE')\nTOP_RT_PROPORTION_TO_INCLUDE = cfg.getfloat('extraction','TOP_RT_PROPORTION_TO_INCLUDE')\n\n# check the raw database exists\nif 
args.denoised:\n RAW_DATABASE_BASE_DIR = \"{}/raw-databases/denoised\".format(EXPERIMENT_DIR)\nelse:\n RAW_DATABASE_BASE_DIR = \"{}/raw-databases\".format(EXPERIMENT_DIR)\nRAW_DATABASE_NAME = \"{}/{}.d\".format(RAW_DATABASE_BASE_DIR, args.run_name)\nif not os.path.exists(RAW_DATABASE_NAME):\n print(\"The raw database is required but doesn't exist: {}\".format(RAW_DATABASE_NAME))\n sys.exit(1)\n\n# create the TimsTOF object\nRAW_HDF_FILE = '{}.hdf'.format(args.run_name)\nRAW_HDF_PATH = '{}/{}'.format(RAW_DATABASE_BASE_DIR, RAW_HDF_FILE)\nif not os.path.isfile(RAW_HDF_PATH):\n print('{} doesn\\'t exist so loading the raw data from {}'.format(RAW_HDF_PATH, RAW_DATABASE_NAME))\n data = alphatims.bruker.TimsTOF(RAW_DATABASE_NAME)\n print('saving to {}'.format(RAW_HDF_PATH))\n _ = data.save_as_hdf(\n directory=RAW_DATABASE_BASE_DIR,\n file_name=RAW_HDF_FILE,\n overwrite=True\n )\nelse:\n print('loading raw data from {}'.format(RAW_HDF_PATH))\n data = alphatims.bruker.TimsTOF(RAW_HDF_PATH)\n\n# load the MS1 frame IDs\nms1_frame_properties_df = load_ms1_frame_ids(RAW_DATABASE_NAME)\n\n# set up the coordinate estimators directory\nCOORDINATE_ESTIMATORS_DIR = \"{}/coordinate-estimators\".format(EXPERIMENT_DIR)\nif not os.path.exists(COORDINATE_ESTIMATORS_DIR):\n print(\"The coordinate estimators directory is required but doesn't exist: {}\".format(COORDINATE_ESTIMATORS_DIR))\n sys.exit(1)\n\n# load the sequence library\nSEQUENCE_LIBRARY_DIR = \"{}/sequence-library-{}\".format(EXPERIMENT_DIR, args.precursor_definition_method)\nSEQUENCE_LIBRARY_FILE_NAME = \"{}/sequence-library.feather\".format(SEQUENCE_LIBRARY_DIR)\nif not os.path.isfile(SEQUENCE_LIBRARY_FILE_NAME):\n print(\"The sequences library file doesn't exist: {}\".format(SEQUENCE_LIBRARY_FILE_NAME))\n sys.exit(1)\nelse:\n library_sequences_for_this_run_df = pd.read_feather(SEQUENCE_LIBRARY_FILE_NAME)\n library_sequences_for_this_run_df = library_sequences_for_this_run_df[(library_sequences_for_this_run_df.q_value <= MAXIMUM_Q_VALUE)]\n print(\"loaded {} sequences with q-value less than {} from the library {}\".format(len(library_sequences_for_this_run_df), MAXIMUM_Q_VALUE, SEQUENCE_LIBRARY_FILE_NAME))\n # for small set mode, randomly select some sequences\n if args.small_set_mode:\n if args.small_set_sequence is None:\n library_sequences_for_this_run_df.sort_values(by=['number_of_runs_identified','q_value','experiment_intensity_mean'], ascending=[False,True,False], inplace=True)\n library_sequences_for_this_run_df.reset_index(drop=True, inplace=True)\n library_sequences_for_this_run_df = library_sequences_for_this_run_df[:args.small_set_mode_size]\n else:\n library_sequences_for_this_run_df = library_sequences_for_this_run_df[(library_sequences_for_this_run_df.sequence == args.small_set_sequence) & (library_sequences_for_this_run_df.charge == args.small_set_charge)]\n print(\"trimmed to {} sequences for small set mode\".format(len(library_sequences_for_this_run_df)))\n\n# check the target decoy classifier directory exists\nTARGET_DECOY_MODEL_DIR = \"{}/target-decoy-models\".format(EXPERIMENT_DIR)\nif not os.path.exists(TARGET_DECOY_MODEL_DIR):\n print(\"The target-decoy classifier directory does not exist: {}\".format(TARGET_DECOY_MODEL_DIR))\n\n# remove the output file if it exists\nLIBRARY_SEQUENCES_WITH_METRICS_FILENAME = '{}/library-sequences-in-run-{}.pkl'.format(TARGET_DECOY_MODEL_DIR, args.run_name)\nif os.path.isfile(LIBRARY_SEQUENCES_WITH_METRICS_FILENAME):\n os.remove(LIBRARY_SEQUENCES_WITH_METRICS_FILENAME)\n\n# set all the 
sequences with this run name to match the metrics we extract for each\nlibrary_sequences_for_this_run_df['run_name'] = args.run_name\n\nprint(\"calculating the feature metrics for the library sequences in run {}\".format(args.run_name))\n\n# load the coordinate estimators\nMZ_ESTIMATOR_MODEL_FILE_NAME = \"{}/run-{}-{}-estimator.pkl\".format(COORDINATE_ESTIMATORS_DIR, args.run_name, 'mz')\nSCAN_ESTIMATOR_MODEL_FILE_NAME = \"{}/run-{}-{}-estimator.pkl\".format(COORDINATE_ESTIMATORS_DIR, args.run_name, 'scan')\nRT_ESTIMATOR_MODEL_FILE_NAME = \"{}/run-{}-{}-estimator.pkl\".format(COORDINATE_ESTIMATORS_DIR, args.run_name, 'rt')\n\nwith open(MZ_ESTIMATOR_MODEL_FILE_NAME, 'rb') as file:\n mz_estimator = pickle.load(file)\nwith open(SCAN_ESTIMATOR_MODEL_FILE_NAME, 'rb') as file:\n scan_estimator = pickle.load(file)\nwith open(RT_ESTIMATOR_MODEL_FILE_NAME, 'rb') as file:\n rt_estimator = pickle.load(file)\n\n# calculate the target coordinates\nprint(\"calculating the target coordinates for each sequence-charge\")\nlibrary_sequences_for_this_run_df['target_coords'] = library_sequences_for_this_run_df.apply(lambda row: estimate_target_coordinates(row, mz_estimator, scan_estimator, rt_estimator), axis=1)\n\n# calculate the decoy coordinates\nprint(\"calculating the decoy coordinates for each sequence-charge\")\nlibrary_sequences_for_this_run_df['decoy_coords'] = library_sequences_for_this_run_df.apply(lambda row: calculate_decoy_coordinates(row), axis=1)\n\n# extract feature metrics from the target coordinates for each sequence in the run\nprint(\"extracting feature metrics from the target coordinates\")\ntarget_metrics_l = [extract_feature_metrics_at_coords(coordinates_d=row.target_coords, data_obj=data, run_name=args.run_name, sequence=row.sequence, charge=row.charge, target_mode=True) for row in library_sequences_for_this_run_df.itertuples()]\nflattened_target_metrics_l = [item for sublist in target_metrics_l for item in sublist] # target_metrics_l is a list of lists, so we need to flatten it\ntarget_metrics_df = pd.DataFrame(flattened_target_metrics_l, columns=['sequence','charge','peak_idx','target_metrics','attributes'])\n# merge the target results with the library sequences for this run\nlibrary_sequences_with_target_metrics_df = pd.merge(library_sequences_for_this_run_df, target_metrics_df, how='left', left_on=['sequence','charge'], right_on=['sequence','charge'])\n\n# extract feature metrics from the decoy coordinates for each sequence in the run\nprint(\"extracting feature metrics from the decoy coordinates\")\ndecoy_metrics_l = [extract_feature_metrics_at_coords(coordinates_d=row.decoy_coords, data_obj=data, run_name=args.run_name, sequence=row.sequence, charge=row.charge, target_mode=False) for row in library_sequences_for_this_run_df.itertuples()]\nflattened_decoy_metrics_l = [item for sublist in decoy_metrics_l for item in sublist] # decoy_metrics_l is a list of lists, so we need to flatten it\ndecoy_metrics_df = pd.DataFrame(flattened_decoy_metrics_l, columns=['sequence','charge','peak_idx','decoy_metrics','attributes'])\n# don't include the attributes because we're not interested in the decoy's attributes\ndecoy_metrics_df.drop(['attributes','peak_idx'], axis=1, inplace=True)\n\n# join the two together to form the target and decoy metrics for each library sequence\nlibrary_sequences_for_this_run_df = pd.merge(library_sequences_with_target_metrics_df, decoy_metrics_df, how='left', left_on=['sequence','charge'], right_on=['sequence','charge'])\n\n# remove the rubbish target_metrics and 
attributes\nlibrary_sequences_for_this_run_df = library_sequences_for_this_run_df[(library_sequences_for_this_run_df.target_metrics.notna()) & (library_sequences_for_this_run_df.attributes.notna())]\n\n# downcast the data types to minimise the memory used\nint_columns = ['charge','number_of_runs_identified','peak_idx']\nlibrary_sequences_for_this_run_df[int_columns] = library_sequences_for_this_run_df[int_columns].apply(pd.to_numeric, downcast=\"unsigned\")\nfloat_columns = ['experiment_scan_mean','experiment_scan_std_dev','experiment_scan_peak_width','experiment_rt_mean','experiment_rt_std_dev','experiment_rt_peak_width','experiment_intensity_mean','experiment_intensity_std_dev','q_value']\nlibrary_sequences_for_this_run_df[float_columns] = library_sequences_for_this_run_df[float_columns].apply(pd.to_numeric, downcast=\"float\")\n\n# save the metrics for this run\nprint(\"writing {} metrics & attributes for the library sequences to {}\".format(len(library_sequences_for_this_run_df), LIBRARY_SEQUENCES_WITH_METRICS_FILENAME))\nlibrary_sequences_for_this_run_df.to_pickle(LIBRARY_SEQUENCES_WITH_METRICS_FILENAME)\n\nstop_run = time.time()\nprint(\"total running time ({}): {} seconds\".format(parser.prog, round(stop_run-start_run,1)))\n","sub_path":"pipeline/extract-library-sequence-features-for-run.py","file_name":"extract-library-sequence-features-for-run.py","file_ext":"py","file_size_in_byte":84433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"168671406","text":"class Cajero:\n    def __init__(self, saldo_cajero=0, billete_minimo=0):\n        self.saldo_cajero = saldo_cajero\n        self.billete_minimo = billete_minimo\n\n        self.billetes = [\n            {'type': 'billete20000', 'quantity': 0, 'value_type': 20000, 'saldo': 0},\n            {'type': 'billete10000', 'quantity': 0, 'value_type': 10000, 'saldo': 0},\n            {'type': 'billete5000', 'quantity': 0, 'value_type': 5000, 'saldo': 0},\n            {'type': 'billete1000', 'quantity': 0, 'value_type': 1000, 'saldo': 0},\n        ]\n\n    def imprime_ticket_carga_billetes(self, **kwargs):\n        for k, v in kwargs.items():\n            tmp = [item for item in self.billetes if item['type'] == k][0]\n            print('Loading {} => Q={} => Total $ {}'.format(k, v, v * tmp['value_type']))\n        print(f'New ATM balance: {self.saldo_cajero}')\n\n    def carga_billetes(self, *args, **kwargs):\n        print('========LOAD BILLS==========')\n        for k,v in kwargs.items():\n            tmp = [item for item in self.billetes if item['type'] == k][0]\n            tmp['quantity'] = v\n            tmp['saldo'] = v * tmp['value_type']\n            self.saldo_cajero += tmp['saldo']\n        self.imprime_ticket_carga_billetes(**kwargs)\n\n    def saldo(self):\n        print(f'ATM balance: {self.saldo_cajero}')\n\n    def giro(self, monto_solicitado=0):\n        #monto_solicitado = int(input(f'Amount: '))\n        self.monto_solicitado = monto_solicitado\n\n        if monto_solicitado % self.billete_minimo > 0:\n            print('The requested amount must be a multiple of the smallest bill in the ATM. Please change it.')\n        else:\n            if self.saldo_cajero >= monto_solicitado:\n\n                print('========WITHDRAWAL==========')\n                print('***INITIAL ATM INFO***')\n                self.saldo()\n                print('**WITHDRAWAL SUMMARY**')\n                print(f'Requested withdrawal amount: {self.monto_solicitado}')\n\n                transaccion = []\n                def procesa_giro():\n                    for item in self.billetes:\n                        dt = {}\n                        dt[item['type']] = {'utilizado': 0, 'monto_girado': 0}\n                        while item['quantity'] > 0 and int(self.monto_solicitado / item['value_type']) >= 1:\n                            dt[item['type']]['utilizado'] += 1\n                            self.monto_solicitado -= item['value_type']\n                            item['quantity'] -= 1\n                            dt[item['type']]['monto_girado'] += item['value_type']\n                        self.saldo_cajero -= dt[item['type']]['monto_girado']\n                        transaccion.append(dt)\n\n                print('*Detail: ')\n                procesa_giro()\n\n                for detalle in transaccion:\n                    for k, v in detalle.items():\n                        print('   {} used {} = $ {:_}'.format(k, v['utilizado'], v['monto_girado']))\n\n                self.saldo()\n            else:\n                print('Insufficient balance for withdrawals.')\n\ncajero = Cajero(billete_minimo=1000)\nprint()\ncajero.carga_billetes(billete20000=4, billete10000=2, billete5000=3, billete1000=90)\nprint()\ncajero.saldo()\nprint()\ncajero.giro(50000)\nprint()\ncajero.giro(67000)\nprint()\n","sub_path":"atm_v3.py","file_name":"atm_v3.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"638229389","text":"import csv\r\nimport sys\r\nimport copy\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom export_to_csv import convert_from_txt_to_csv\r\nfrom collections import defaultdict\r\nfrom itertools import combinations\r\nfrom graph import Node\r\nimport networkx as nx\r\nfrom timeout import timeout\r\nimport time\r\n\r\nclass Counter:\r\n    def __init__(self):\r\n        self.iteration = 0\r\n\r\n\r\nclass grid:\r\n    def __init__(self, problem_file_name, optimization):\r\n        self.duplicate_dictionary = {}\r\n        self.duplicate_list = []\r\n        self.iterationcount = 0\r\n        self.problem_file_name = problem_file_name\r\n        self.componentsList = []\r\n        self.counter = 0\r\n        self.totalBoxes = 0\r\n        self.neighbor_dict = dict()\r\n        #get size of grid\r\n        total_lines = 0\r\n        total_characters = 0\r\n        with open(self.problem_file_name, 'r') as f:\r\n            for line in f:\r\n                total_characters = len(line.strip())\r\n                total_lines += 1\r\n\r\n        # Make a grid...\r\n        self.nrows = total_lines\r\n        self.ncols = total_characters\r\n        self.image = np.zeros([self.nrows, self.ncols])\r\n        count_lines = 0\r\n        count_characters = 0\r\n\r\n        # Set every cell to a number (this would be your data)\r\n        with open(self.problem_file_name, 'r') as f:\r\n            for line in f:\r\n                for letter in line.strip():\r\n                    if letter == 'B':\r\n                        self.image[count_lines][count_characters] = 0\r\n                    elif letter == 'Y':\r\n                        self.image[count_lines][count_characters] = 1\r\n                    elif letter == 'R':\r\n                        self.image[count_lines][count_characters] = 0.6\r\n\r\n                    count_characters = (count_characters + 1) % (total_characters)\r\n                count_lines = (count_lines + 1) % (total_lines)\r\n\r\n        self.initNeighborGraph()\r\n        self.new_image = copy.deepcopy(self.image)\r\n\r\n    def reset(self):\r\n        self.image = copy.deepcopy(self.new_image)\r\n        self.totalBoxes = 0\r\n\r\n    def plot(self):\r\n        row_labels = range(self.nrows)\r\n        col_labels = range(self.ncols)\r\n        plt.matshow(self.image)\r\n        plt.xticks(range(self.ncols), col_labels)\r\n        plt.yticks(range(self.nrows), row_labels)\r\n        plt.show(block=False)\r\n        plt.pause(0.5)\r\n        plt.close(\"all\")\r\n\r\n\r\n    def getGrid(self):\r\n        return 
self.image\r\n\r\n def initNeighborGraph(self):\r\n for row in range(len(self.image)):\r\n for col in range(len(self.image[0])):\r\n tile = self.image[row][col]\r\n tile_name = str(row) + \"x\" + str(col)\r\n self.neighbor_dict[tile_name] = list()\r\n\r\n #right\r\n right = col + 1;\r\n right_tile_name = str(row) + \"x\" + str(right)\r\n if right < len(self.image[0]):\r\n right_tile = self.image[row][right];\r\n if right_tile_name not in self.neighbor_dict[tile_name]:\r\n self.neighbor_dict[tile_name].append(right_tile_name)\r\n #down\r\n down = row + 1;\r\n down_tile_name = str(down) + \"x\" + str(col)\r\n if down < len(self.image):\r\n down_tile = self.image[down][col];\r\n if down_tile_name not in self.neighbor_dict[tile_name]:\r\n self.neighbor_dict[tile_name].append(down_tile_name)\r\n #left\r\n left = col - 1;\r\n left_tile_name = str(row) + \"x\" + str(left)\r\n if left >=0:\r\n left_tile = self.image[row][left];\r\n if left_tile_name not in self.neighbor_dict[tile_name]:\r\n self.neighbor_dict[tile_name].append(left_tile_name)\r\n #up\r\n up = row - 1;\r\n up_tile_name = str(up) + \"x\" + str(col)\r\n if up >= 0:\r\n temp_UpTile = self.image[up][col];\r\n if up_tile_name not in self.neighbor_dict[tile_name]:\r\n self.neighbor_dict[tile_name].append(up_tile_name)\r\n\r\n def getNeighborGraph(self, key=None):\r\n if key is None:\r\n return self.neighbor_dict\r\n else:\r\n return self.neighbor_dict[key]\r\n\r\n def change_color(self, newColor, row, col):\r\n print(\"change : \", str(row) + \"x\" + str(col), \" to \", newColor)\r\n oldColor = self.image[row][col]\r\n if oldColor == newColor:\r\n return\r\n\r\n reachableNodes = []\r\n listButton = self.getNeighborGraph(str(row) + \"x\" + str(col))\r\n self.image[row][col] = newColor\r\n\r\n\r\n while (True):\r\n for button in listButton:\r\n row_, col_ = button.split(\"x\")\r\n row_ = int(row_)\r\n col_ = int(col_)\r\n if self.image[row_][col_] == oldColor:\r\n \r\n if button not in reachableNodes:\r\n reachableNodes.append(button)\r\n self.image[row_][col_] = newColor\r\n\r\n if len(reachableNodes) < 1:\r\n break\r\n \r\n listButton = []\r\n listButton = self.getNeighborGraph(reachableNodes[0])\r\n del reachableNodes[0];\r\n if len(listButton) < 1:\r\n break;\r\n \r\n def checkConnectedColors(self, image, x, y):\r\n self.counter += 1\r\n visited = []\r\n queue = []\r\n row = x\r\n col = y\r\n color = image[row][col]\r\n\r\n visited.append(str(row) + \"x\" + str(col))\r\n queue.append(str(row) + \"x\" + str(col))\r\n image[row][col] = -1\r\n self.totalBoxes = 1\r\n\r\n while (queue):\r\n s = queue.pop(0)\r\n for button in self.getNeighborGraph(s):\r\n row_, col_ = button.split(\"x\")\r\n row_ = int(row_)\r\n col_ = int(col_)\r\n if button not in visited and image[row_][col_] == color:\r\n visited.append(button)\r\n queue.append(button)\r\n self.totalBoxes += 1\r\n image[row_][col_] = -1\r\n self.componentsList.append([visited, color])\r\n\r\n def getComponents(self):\r\n image = copy.deepcopy(self.image)\r\n self.componentsList = []\r\n self.initNeighborGraph()\r\n for row in range(self.nrows):\r\n for col in range(self.ncols):\r\n if image[row][col] != -1:\r\n self.checkConnectedColors(image, row, col)\r\n return self.componentsList\r\n\r\n def getConComponents(self):\r\n \r\n dic = defaultdict(list)\r\n dicComponent = dict()\r\n lst = self.getComponents()\r\n for combo1, combo2 in combinations(lst,2):\r\n \r\n component1, color1 = combo1\r\n component2, color2 = combo2\r\n index1 = lst.index([component1, color1])\r\n index2 = 
lst.index([component2, color2])\r\n dicComponent[index1] = [component1, color1]\r\n dicComponent[index2] = [component2, color2]\r\n \r\n for button in component1:\r\n row_, col_ = button.split(\"x\")\r\n row_ = int(row_)\r\n col_ = int(col_)\r\n listNeighborButton = self.getNeighborGraph(str(row_) + \"x\" + str(col_))\r\n for i in listNeighborButton:\r\n if i in component2:\r\n dic[index1].append(index2)\r\n dic[index1] = list(set(dic[index1]))\r\n dic[index2].append(index1)\r\n dic[index2] = list(set(dic[index2])) \r\n return [dic, dicComponent]\r\n\r\n \r\n def changeColorOfComponentGraph(self, dictionary, dicComp, component, color):\r\n dic = dictionary\r\n dicComponent = dicComp\r\n dicComponent[component][1] = color\r\n deleteList = []\r\n connectedNodes = []\r\n \r\n for key in dic.keys():\r\n neighbors = dic[key]\r\n for neighbor in neighbors:\r\n print(dicComponent[key][1], dicComponent[neighbor][1])\r\n if dicComponent[key][1] == dicComponent[neighbor][1]:\r\n if neighbor not in deleteList:\r\n deleteList.append(neighbor)\r\n if key not in deleteList:\r\n deleteList.append(key)\r\n \r\n print(\"delete: \", deleteList)\r\n \r\n \"\"\"Check if neighboring Nodes have same color. Therefore deleteList should not be empty\r\n \"\"\"\r\n if len(deleteList) > 1:\r\n for key in deleteList:\r\n print(key)\r\n for neighbor in dic[key]:\r\n print(neighbor)\r\n if neighbor not in deleteList:\r\n connectedNodes.append(neighbor)\r\n \r\n \r\n connectedNodes = list(set(connectedNodes))\r\n \r\n print(\"connected before eliminating: \", connectedNodes)\r\n connectedNodes1 = connectedNodes[:]\r\n\r\n\r\n \r\n for elem in connectedNodes:\r\n if elem in deleteList:\r\n print(\"True\")\r\n connectedNodes1.remove(elem)\r\n\r\n newKey = deleteList[0]\r\n connectedNodes = connectedNodes1\r\n #print(dic)\r\n newDic = copy.deepcopy(dic)\r\n newDicComponent = copy.deepcopy(dicComponent)\r\n for key in dic.keys():\r\n print(\"key:\", key)\r\n for neighbor in dic[key]:\r\n print(\"neigh:\", neighbor)\r\n if neighbor in deleteList:\r\n print(\"delete:\", neighbor)\r\n #newDic.pop(neighbor, None)\r\n newDic[key].remove(neighbor)\r\n \r\n if key in deleteList:\r\n del newDic[key]\r\n if key != newKey:\r\n del newDicComponent[key]\r\n print(\"connected after eliminating: \", connectedNodes)\r\n #print(\"final dic\", newDic)\r\n #print(dic)\r\n\r\n\r\n \"\"\"Add transitions for new merged Key \r\n \"\"\"\r\n \r\n mergeComponent = dicComponent[newKey][0]\r\n print(\"newKey: \", newKey, \"\\nmerged component: \", mergeComponent)\r\n newDic[newKey] = connectedNodes\r\n for key in newDic.keys():\r\n if key in connectedNodes:\r\n newDic[key].append(newKey)\r\n newDicComponent[newKey][1] = color\r\n newDicComponent[newKey][0] = mergeComponent\r\n \r\n\r\n return newDic, newDicComponent\r\n else:\r\n return dic, dicComponent\r\n\r\n \r\n \r\n def expand_node(self, node):\r\n dic = copy.deepcopy(node.dic)\r\n dicComponent = copy.deepcopy(node.dicComponent)\r\n self.iterationcount = 0\r\n if optimization in [\"dd1\", \"ssr-dd1\"]:\r\n matrix = create_matrix_for_DD(dic, dicComponent)\r\n self.duplicate_dictionary[matrix.tostring()] = True\r\n self.duplicate_dictionary[mirror_matrix(matrix).tostring()] = True\r\n elif optimization in [\"dd2\", \"dd3\"]:\r\n graph = from_dic_to_networkx_graph(dic, dicComponent)\r\n self.duplicate_list.append(graph)\r\n\r\n if len(dic.keys()) == 1:\r\n print(\"GAME OVER!!!\")\r\n return True\r\n else:\r\n lst = []\r\n solutionLst = []\r\n \r\n for component in dic.keys():\r\n for color in 
[0.0, 0.6, 1.0]:\r\n \"\"\"optimization: only look at changed states\r\n \"\"\"\r\n print(\"check: \", \"component:\", component, \"componentColor:\", copy.deepcopy(node.dicComponent)[component][1], \"color:\", color)\r\n \r\n if copy.deepcopy(node.dicComponent)[component][1] != color:\r\n \r\n if optimization == \"normal\":\r\n self.iterationcount += 1\r\n dic = copy.deepcopy(node.dic)\r\n dicComponent = copy.deepcopy(node.dicComponent)\r\n newDic, newDicComponent = self.changeColorOfComponentGraph(dic, dicComponent, component, color)\r\n lst.append([newDic, newDicComponent, dicComponent[component][0], color, self.iterationcount])\r\n elif optimization == \"dd1\":\r\n dic = copy.deepcopy(node.dic)\r\n dicComponent = copy.deepcopy(node.dicComponent)\r\n newDic, newDicComponent = self.changeColorOfComponentGraph(dic, dicComponent, component, color)\r\n newMatrix = create_matrix_for_DD(newDic, newDicComponent)\r\n if newMatrix.tostring() not in self.duplicate_dictionary:\r\n self.iterationcount += 1\r\n print(\"adding node that isnt in duplicate memory...\\n\", newMatrix)\r\n self.duplicate_dictionary[newMatrix.tostring()] = True\r\n self.duplicate_dictionary[mirror_matrix(newMatrix).tostring()] = True\r\n lst.append([newDic, newDicComponent, dicComponent[component][0], color, self.iterationcount])\r\n elif optimization == \"ssr\":\r\n dic = copy.deepcopy(node.dic)\r\n dicComponent = copy.deepcopy(node.dicComponent)\r\n newDic, newDicComponent = self.changeColorOfComponentGraph(dic, dicComponent, component, color)\r\n if len(newDic) < len(dic):\r\n self.iterationcount += 1\r\n lst.append([newDic, newDicComponent, dicComponent[component][0], color, self.iterationcount])\r\n elif optimization == \"ssr-dd1\":\r\n dic = copy.deepcopy(node.dic)\r\n dicComponent = copy.deepcopy(node.dicComponent)\r\n newDic, newDicComponent = self.changeColorOfComponentGraph(dic, dicComponent, component, color)\r\n newMatrix = create_matrix_for_DD(newDic, newDicComponent)\r\n if newMatrix.tostring() not in self.duplicate_dictionary and len(newDic) < len(dic):\r\n self.iterationcount += 1\r\n print(\"adding node that isnt in duplicate memory...\\n\", newMatrix)\r\n self.duplicate_dictionary[newMatrix.tostring()] = True\r\n self.duplicate_dictionary[mirror_matrix(newMatrix).tostring()] = True\r\n lst.append([newDic, newDicComponent, dicComponent[component][0], color, self.iterationcount])\r\n elif optimization == \"dd2\":\r\n dic = copy.deepcopy(node.dic)\r\n dicComponent = copy.deepcopy(node.dicComponent)\r\n newDic, newDicComponent = self.changeColorOfComponentGraph(dic, dicComponent, component, color)\r\n newGraph = from_dic_to_networkx_graph(newDic, newDicComponent)\r\n isomorphic = False\r\n for graph in self.duplicate_list:\r\n if nx.is_isomorphic(newGraph, graph, node_match=colors_match):\r\n isomorphic = True\r\n break\r\n if not isomorphic:\r\n self.iterationcount += 1\r\n print(\"adding node that isnt in duplicate memory...\\n\")\r\n self.duplicate_list.append(newGraph)\r\n \r\n lst.append([newDic, newDicComponent, dicComponent[component][0], color, self.iterationcount])\r\n elif optimization == \"dd3\":\r\n dic = copy.deepcopy(node.dic)\r\n dicComponent = copy.deepcopy(node.dicComponent)\r\n newDic, newDicComponent = self.changeColorOfComponentGraph(dic, dicComponent, component, color)\r\n newGraph = from_dic_to_networkx_graph(newDic, newDicComponent)\r\n isomorphic = False\r\n for graph in self.duplicate_list:\r\n if nx.faster_could_be_isomorphic(newGraph, graph):\r\n if nx.is_isomorphic(newGraph, graph, 
node_match=colors_match):\r\n isomorphic = True\r\n break\r\n if not isomorphic:\r\n self.iterationcount += 1\r\n print(\"adding node that isnt in duplicate memory...\\n\")\r\n self.duplicate_list.append(newGraph)\r\n \r\n lst.append([newDic, newDicComponent, dicComponent[component][0], color, self.iterationcount])\r\n\r\n nodeList = []\r\n for i in lst:\r\n newNodeID = node.id + \".\" + str(i[4])\r\n newNode = Node(newNodeID, i[0], i[1], i[2][0], i[3], False)\r\n nodeList.append(newNode)\r\n\r\n return nodeList\r\n\r\ndef create_matrix_for_DD(dic, dicComponent):\r\n colorsA = dicComponent\r\n matrixA = np.zeros([len(dic.keys()), len(dic.values())])\r\n \r\n for i,k in enumerate(dic.keys()):\r\n for j, kk in enumerate(dic.values()):\r\n if i != j:\r\n matrixA[i][j] = colorsA[k][1]\r\n else:\r\n matrixA[i][j] = -1\r\n return matrixA\r\n\r\ndef mirror_matrix(matrix):\r\n return np.flip(np.flip(matrix, 0), 1)\r\n\r\ndef from_dic_to_networkx_graph(dic, dicColor):\r\n\r\n G = nx.DiGraph(dic)\r\n for n in G.nodes():\r\n if dicColor[n][1] == 0.0:\r\n G.nodes[n]['color'] = 'b'\r\n elif dicColor[n][1] == 0.6:\r\n G.nodes[n]['color'] = 'r'\r\n elif dicColor[n][1] == 1.0:\r\n G.nodes[n]['color'] = 'y'\r\n\r\n return G\r\n\r\n\r\n#https://stackoverflow.com/questions/32363592/colored-graph-isomorphism\r\ndef colors_match(n1_attrib,n2_attrib):\r\n '''returns False if either does not have a color or if the colors do not match'''\r\n try:\r\n return n1_attrib['color']==n2_attrib['color']\r\n except KeyError:\r\n return False\r\n\r\n\r\ndef hComponents(dic, dicComponent):\r\n\r\n minKeyList = []\r\n\r\n \"\"\" Getting node that can eliminate a whole color\r\n \"\"\"\r\n print(\"DOING 1st STRATEGY\")\r\n totalBlue = 0\r\n totalRed = 0\r\n totalYellow = 0\r\n try1stWay = False\r\n #get number of color appearances in dictionary\r\n for key in dic.keys():\r\n color = dicComponent[key][1]\r\n if color == 0.0:\r\n totalBlue += 1\r\n elif color == 0.6:\r\n totalRed += 1\r\n elif color == 1.0:\r\n totalYellow += 1\r\n print(\"total colors: \", totalBlue, totalRed, totalYellow)\r\n #get total colors in dictionary\r\n totalColors = 0\r\n for color in [totalBlue, totalRed, totalYellow]:\r\n if color != 0:\r\n totalColors += 1\r\n print(\"total: \", totalColors)\r\n #check if key can eliminate a whole color at once\r\n for key in dic.keys():\r\n print(\"key: \", key)\r\n neighborsList = dic[key]\r\n print(\"key neighborslist: \", neighborsList)\r\n blueSum = 0\r\n redSum = 0\r\n yellowSum = 0\r\n #if key does not only have 1 neighbor. ==> if key is not a dead end.\r\n if len(neighborsList) > 1:\r\n for neighbor in neighborsList:\r\n color = dicComponent[neighbor][1]\r\n if color == 0.0:\r\n blueSum += 1\r\n elif color == 0.6:\r\n redSum += 1\r\n elif color == 1.0:\r\n yellowSum += 1\r\n \r\n print(\"not dead end! 
\", blueSum, redSum, yellowSum)\r\n if blueSum == totalBlue and totalBlue != 0:\r\n return totalColors - 1\r\n if redSum == totalRed and totalRed != 0:\r\n print(\"appending red key: \", key)\r\n return totalColors - 1\r\n if yellowSum == totalYellow and totalYellow != 0:\r\n return totalColors - 1\r\n\r\n return len(dic) - 1\r\n\r\n\r\ndef hColors(dic, dicComponent):\r\n components = int(len(dic) - 1)\r\n #return components\r\n blueValue = 0\r\n totalBlue = 0\r\n redValue = 0\r\n totalRed = 0\r\n yellowValue = 0\r\n totalYellow = 0\r\n for key in dicComponent.keys():\r\n if dicComponent[key][1] == 0.0:\r\n blueValue = 1\r\n totalBlue += 1\r\n elif dicComponent[key][1] == 0.6:\r\n redValue = 1\r\n totalRed += 1 \r\n elif dicComponent[key][1] == 1.0:\r\n yellowValue = 1\r\n totalYellow += 1\r\n\r\n totalColors = blueValue + redValue + yellowValue\r\n return int(totalColors - 1)\r\n\r\n\r\n# Check if a neighbor should be added to open list\r\ndef add_to_open(open_list, neighbor):\r\n for node in open_list:\r\n if (neighbor == node and neighbor.f >= node.f):\r\n return False\r\n return True\r\n\r\n@timeout()\r\ndef astar1(root, g):\r\n open_list = [root]\r\n closed_list = []\r\n pathDict = {}\r\n expansion = 0\r\n while len(open_list) > 0:\r\n open_list.sort()\r\n node = open_list.pop(0)\r\n \r\n closed_list.append(node)\r\n pathDict[node.id] = node.history\r\n if len(node.dic) <= 1:\r\n print(\"length queue: \", len(open_list))\r\n print(\"node expansions: \", expansion)\r\n print(\"found solution node! \", node.id, node.history)\r\n break;\r\n \r\n print(\"expanding a node for astar1!\")\r\n children = g.expand_node(node)\r\n #print(\"queue children: \", children)\r\n expansion += 1\r\n selectedChildren = []\r\n for child in children:\r\n if child in closed_list:\r\n continue\r\n \r\n child.parent = node\r\n child.g = len(child.id.split(\".\")) - 1\r\n child.h = hColors(child.dic, child.dicComponent)\r\n child.f = child.g + child.h\r\n if(add_to_open(open_list, child) == True):\r\n # If everything was okay, add child to open list\r\n open_list.append(child)\r\n \r\n \r\n \r\n path = node.id.split(\".\")\r\n print(path)\r\n optimalPath = []\r\n path_ = copy.deepcopy(path)\r\n for i in range(len(path_) - 1):\r\n nodeID = '.'.join(map(str, path)) \r\n optimalPath.append(pathDict[nodeID])\r\n path.pop(-1)\r\n \r\n optimalPathSorted = optimalPath[::-1]\r\n print(\"optimal: \", optimalPathSorted)\r\n return [len(optimalPathSorted), expansion]\r\n\r\n@timeout() \r\ndef astar2(root, g):\r\n open_list = [root]\r\n closed_list = []\r\n pathDict = {}\r\n expansion = 0\r\n while len(open_list) > 0:\r\n open_list.sort()\r\n node = open_list.pop(0)\r\n \r\n closed_list.append(node)\r\n pathDict[node.id] = node.history\r\n if len(node.dic) <= 1:\r\n print(\"length queue: \", len(open_list))\r\n print(\"node expansions: \", expansion)\r\n print(\"found solution node! 
\", node.id, node.history)\r\n break;\r\n \r\n print(\"expanding a node with astar2!\")\r\n children = g.expand_node(node)\r\n #print(\"queue children: \", children)\r\n expansion += 1\r\n selectedChildren = []\r\n for child in children:\r\n if child in closed_list:\r\n continue\r\n \r\n child.parent = node\r\n child.g = len(child.id.split(\".\")) - 1\r\n child.h = hComponents(child.dic, child.dicComponent)\r\n child.f = child.g + child.h\r\n if(add_to_open(open_list, child) == True):\r\n # If everything is okay, add child to open list\r\n open_list.append(child) \r\n \r\n path = node.id.split(\".\")\r\n print(path)\r\n optimalPath = []\r\n path_ = copy.deepcopy(path)\r\n for i in range(len(path_) - 1):\r\n nodeID = '.'.join(map(str, path)) \r\n optimalPath.append(pathDict[nodeID])\r\n path.pop(-1)\r\n \r\n optimalPathSorted = optimalPath[::-1]\r\n print(\"optimal: \", optimalPathSorted)\r\n return [len(optimalPathSorted), expansion]\r\n\r\n@timeout()\r\ndef bfs(root, g):\r\n queue = [root]\r\n pathDict = {}\r\n expansion = 0\r\n while True:\r\n \r\n node = queue.pop(0)\r\n \r\n pathDict[node.id] = node.history\r\n if len(node.dic) <= 1:\r\n print(\"length queue: \", len(queue))\r\n print(\"node expansions: \", expansion)\r\n print(\"found solution node! \", node.id, node.history)\r\n break;\r\n g.duplicate_list = []\r\n children = g.expand_node(node)\r\n print(\"expanding a node with bfs!\")\r\n expansion += 1\r\n if expansion == 1:\r\n print(\"children: \", [[child.id, len(child.dic)] for child in children])\r\n queue.extend(children)\r\n \r\n path = node.id.split(\".\")\r\n print(path)\r\n optimalPath = []\r\n path_ = copy.deepcopy(path)\r\n for i in range(len(path_) - 1):\r\n nodeID = '.'.join(map(str, path)) \r\n optimalPath.append(pathDict[nodeID])\r\n path.pop(-1)\r\n \r\n optimalPathSorted = optimalPath[::-1]\r\n print(\"optimal: \", optimalPathSorted)\r\n return [len(optimalPathSorted), expansion]\r\n\r\n\r\n# A function to perform a Depth-Limited search \r\n# from given source 'src' \r\ndef DLS(src,maxDepth, counter, g):\r\n g.duplicate_dictionary = {}\r\n g.duplicate_list = []\r\n if len(src.dic) == 1 :\r\n optimalPath = []\r\n while src.parent is not None:\r\n optimalPath.append(src.history)\r\n src = src.parent\r\n \r\n optimalPathSorted = optimalPath[::-1]\r\n print(\"optimal: \", optimalPathSorted)\r\n return [len(optimalPathSorted), counter.iteration]\r\n\r\n # If reached the maximum depth, stop recursing. 
\r\n if maxDepth <= 0 : return 'cutoff'\r\n \r\n cutoff_occured = False\r\n children = g.expand_node(src)\r\n print(\"expanding....\")\r\n print(\"children: \", children)\r\n counter.iteration += 1\r\n if counter.iteration == 1:\r\n print(\"children: \", [[child.id, len(child.dic)] for child in children])\r\n # Recur for all the vertices adjacent to this vertex \r\n for child in children:\r\n child.parent = src\r\n result = DLS(child, maxDepth-1, counter, g)\r\n if(result == 'cutoff'):\r\n cutoff_occured = True\r\n elif result is not None:\r\n return result\r\n return 'cutoff' if cutoff_occured else 'Not found'\r\n \r\n@timeout()\r\ndef iddfs(src, g):\r\n counter = Counter()\r\n maxDepth = len(src.dic)\r\n if maxDepth == 0 :\r\n return [0, 0]\r\n for depth in range(maxDepth):\r\n g.duplicate_dictionary = {}\r\n g.duplicate_list = []\r\n print(\"Checking with depth: \", depth)\r\n result = DLS(src, depth, counter, g)\r\n if result == 'cutoff': \r\n print (\"Target is NOT reachable from source \" +\r\n \"within max depth \" + str(depth))\r\n #maxDepth += 1\r\n else:\r\n print (\"Target is reachable from source \" +\r\n \"within max depth \" + str(depth))\r\n return result\r\n \r\n \r\n\r\n\r\ndef main(algorithm, optimization):\r\n optimal_moves = [0, 1, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4]\r\n alg_moves = []\r\n node_expansions = []\r\n correct = 0\r\n timeList = []\r\n\r\n f = open('run_time_%s-%s-PROBLEM16.txt' % (algorithm, optimization), \"w\")\r\n f1 = open('node_expansions_%s-%s-PROBLEM16.txt' % (algorithm, optimization), \"w\")\r\n\r\n correct = 0\r\n for integer in range(16):\r\n integer += 1\r\n problem_file = \"problem%scomp.txt\" % (integer)\r\n integer -= 1\r\n g = grid(problem_file, optimization)\r\n print(\"\\n\", problem_file)\r\n print(\"==============================\")\r\n print(g.getGrid())\r\n g.initNeighborGraph()\r\n print(\"\\n\")\r\n dic, dicComponent = g.getConComponents()\r\n \r\n root = Node(\"0\", dic, dicComponent, 0, 0, True)\r\n \r\n start = round(time.time()* 1000) \r\n \r\n try:\r\n #algorithm\r\n run = eval(algorithm)\r\n shortest_length = run(root, g)\r\n alg_moves.append(shortest_length[0])\r\n node_expansions.append(shortest_length[1])\r\n string = str(shortest_length[1]) + \"\\n\"\r\n f1.write(string)\r\n \r\n if optimal_moves[integer] == shortest_length[0]:\r\n correct += 1\r\n totalTime = round(time.time()* 1000) - start\r\n timeList.append(totalTime)\r\n string = str(totalTime) + \"\\n\"\r\n f.write(string)\r\n except:\r\n shortest_length = [\"\", \"\"]\r\n alg_moves.append(shortest_length[0])\r\n node_expansions.append(shortest_length[1])\r\n string = str(shortest_length[1]) + \"\\n\"\r\n f1.write(string)\r\n \r\n if optimal_moves[integer] == shortest_length[0]:\r\n correct += 1\r\n totalTime = 600 * 1000\r\n timeList.append(totalTime)\r\n string = str(totalTime) + \"\\n\"\r\n f.write(string)\r\n pass\r\n\r\n averageTime = sum(timeList) / len(optimal_moves)\r\n f.close()\r\n f1.close()\r\n\r\n convert_from_txt_to_csv('run_time_%s-%s-PROBLEM16.txt' % (algorithm, optimization), 'run_time')\r\n\r\n convert_from_txt_to_csv('node_expansions_%s-%s-PROBLEM16.txt' % (algorithm, optimization), 'node_expansions')\r\n\r\n winRate = (correct / len(optimal_moves)) * 100\r\n\r\n\r\n print(\"%s Win rate: \" % (algorithm), winRate, \"%\")\r\n print(\"%s moves: \" % (algorithm), alg_moves)\r\n print(\"%s node_expansions: \"% (algorithm), node_expansions)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) == 3:\r\n algorithm = sys.argv[1]\r\n 
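# NOTE: main() resolves this name with eval(algorithm), so it must match one of the search functions defined above (astar1, astar2, bfs or iddfs).\r\n        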
optimization = sys.argv[2]\r\n        main(algorithm, optimization)\r\n    else:\r\n        print(\"Usage: python3.6 grid-kami [algorithm] [optimization]\")","sub_path":"PROBLEM16-dataset/grid-kami.py","file_name":"grid-kami.py","file_ext":"py","file_size_in_byte":30992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"15744464","text":"from API_call_for_GPX import project_name, project_directory\nimport gpxpy\nimport csv\nimport os\nimport re\n\n# create a csv file named after the project in the working directory and give column names y, x & t\nwith open(project_name + '.csv', 'a') as f:\n    writer = csv.writer(f, quoting=csv.QUOTE_NONE, escapechar=' ', lineterminator='\\n')\n    writer.writerow('yxt')\n\n# create a folder for your files manually\nsource_dir = project_directory + '/data/'\ntry:\n    for file in os.listdir(source_dir):\n        print(file)\n        # Sometimes there are hidden or other files in the directory, we need to ignore them\n        if not file.endswith(\".gpx\"):\n            continue\n        # Sometimes an empty GPX file gets included and crashes the program.\n        if os.stat(source_dir + file).st_size == 0:\n            print('File size zero!')\n            continue\n        filePath = source_dir + file\n        gpx_file = open(filePath, 'r')\n        gpx = gpxpy.parse(gpx_file)\n        count = 0\n\n        # iterate through rows and append each gpx row to merged csv\n        for track in gpx.tracks:\n            for segment in track.segments:\n                for point in segment.points:\n                    fields = ['{0},{1},{2}'.format(point.latitude, point.longitude, point.time)]\n                    # Here double whitespace is removed so QGIS accepts the time format\n                    fields[0] = re.sub(' +', ' ', fields[0])\n                    # Graphhopper creates quite a lot of GPX points and for this purpose every second is enough.\n                    count += 1\n                    if count % 2 == 0:\n                        with open(project_directory + '/GPX_merged_' + project_name + '.csv', 'a') as f:\n                            writer = csv.writer(f, quoting=csv.QUOTE_NONE, escapechar=' ', lineterminator='\\n')\n                            writer.writerow(fields)\nexcept Exception as e:\n    print(e)\n\n","sub_path":"Parse_GPX.py","file_name":"Parse_GPX.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"568595490","text":"X = \"X\"\r\nO = \"O\"\r\nEMPTY = \" \"\r\nTIE = \"Ничья\"\r\nNUM_SQUARES = 9\r\n\r\ndef instructions():\r\n    \"\"\"Выводит на экран инструкцию\"\"\"\r\n    print(\r\n    \"\"\"Добро пожаловать на ринг грандиознейших интеллектуальных состязаний всех времен.\r\nТвой мозг и мой процессор сойдутся в схватке за доской игры \"Крестики-нолики\".\r\nЧтобы сделать ход, введи число от 0 до 8. Числа однозначно соответствуют полям доски - так, как показано ниже:\r\n\r\n                    0 | 1 | 2\r\n                    ---------\r\n                    3 | 4 | 5\r\n                    ---------\r\n                    6 | 7 | 8\r\n\r\nПриготовься к бою, жалкий белковый человечешка. Вот-вот начнется решающее сражение.\\n\"\"\")\r\n\r\ndef ask_yes_no(question):\r\n    \"\"\"Задает вопрос с ответом да/нет\"\"\"\r\n    response = None\r\n    while response not in (\"y\",\"n\"):\r\n        response = input(question).lower()\r\n    return response\r\n\r\ndef ask_number(question, low, hight):\r\n    \"\"\"Просит ввести число из диапазона\"\"\"\r\n    response = None\r\n    while response not in range(low,hight):\r\n        response = int(input(question))\r\n    return response\r\n\r\ndef pieces():\r\n    \"\"\"Определяет принадлежность первого хода\"\"\"\r\n    go_first = ask_yes_no(\"Хочешь оставить первый ход за собой? (y/n): \")\r\n    if go_first == \"y\":\r\n        print(\"\\nНу что ж, даю тебе фору: играй крестиками.\")\r\n        human = X\r\n        computer = O\r\n    else:\r\n        print(\"\\nТвоя удаль тебя погубит... 
Буду начинать я.\")\r\n computer = O\r\n human = X\r\n return computer, human\r\n\r\ndef new_board():\r\n \"\"\"Создает новую игровую доску\"\"\"\r\n board = []\r\n for square in range(NUM_SQUARES):\r\n board.append(EMPTY)\r\n return board\r\n\r\ndef display_board(board):\r\n \"\"\"Отображает игровую доску на экране\"\"\"\r\n print(\"\\n\\t\",board[0],\"|\",board[1],\"|\",board[2])\r\n print(\"\\t\",\"---------\")\r\n print(\"\\n\\t\",board[3],\"|\",board[4],\"|\",board[5])\r\n print(\"\\t\",\"---------\")\r\n print(\"\\n\\t\",board[6],\"|\",board[7],\"|\",board[8],\"\\n\")\r\n\r\ndef legal_moves(board):\r\n \"\"\"Создать список доступных ходов\"\"\"\r\n moves = []\r\n for square in range(NUM_SQUARES):\r\n if board[square] == EMPTY:\r\n moves.append(square)\r\n return moves\r\n\r\ndef winner(board):\r\n \"\"\"Определяет победителя игре\"\"\"\r\n WAYS_TO_WIN = ((0,1,2),\r\n (3,4,5),\r\n (6,7,8),\r\n (0,3,6),\r\n (1,4,7),\r\n (2,5,8),\r\n (0,4,8),\r\n (2,4,6))\r\n for row in WAYS_TO_WIN:\r\n if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:\r\n winner = board[row[0]]\r\n return winner\r\n if EMPTY not in board:\r\n return TIE\r\n return None\r\n\r\ndef human_move(board,human):\r\n \"\"\"Получает ход человека\"\"\"\r\n legal = legal_moves(board)\r\n move = None\r\n while move not in legal:\r\n move = ask_number(\"Твой ход. Выбери одно из полей (0-8):\",0,NUM_SQUARES)\r\n if move not in legal:\r\n print(\"\\nСмешной человек! Это поле уже занято. Выбери другое.\\n\")\r\n print(\"Ладно...\")\r\n return move\r\n\r\ndef computer_move(board,computer,human):\r\n \"\"\"Делает ход за компьютерного противника\"\"\"\r\n board = board[:]\r\n BEST_MOVES = (4,0,2,6,8,1,3,5,7)\r\n print(\"Я выберу поле номер\")\r\n for move in legal_moves(board):\r\n board[move] = computer\r\n if winner(board) == computer:\r\n print(move)\r\n return move\r\n board[move] = EMPTY\r\n for move in legal_moves(board):\r\n board[move] = human\r\n if winner(board) == human:\r\n print(move)\r\n return move\r\n board[move] = EMPTY\r\n for move in BEST_MOVES:\r\n if move in legal_moves(board):\r\n print(move)\r\n return move\r\n\r\ndef next_turn(turn):\r\n \"\"\"Осуществляет переход хода\"\"\"\r\n if turn == X:\r\n return O\r\n else:\r\n return X\r\n\r\ndef congrat_winner(the_winner, computer, human):\r\n \"\"\"Поздравляет победителя игры\"\"\"\r\n if the_winner != TIE:\r\n print(\"Три \",the_winner,\" в ряд!\\n\")\r\n else:\r\n print(\"Ничья!\\n\")\r\n if the_winner == computer:\r\n print(\"Как я и предсазывалб победа в очередной раз осталась за мной.\\nВо всем.\")\r\n elif the_winner == human:\r\n print(\"О нет, этого не может быть! Неужели ты как-то сумел перехитрить меня, белковый?\\nКлянусь: я, компьютер, не допущу этого больше никогда!\")\r\n elif the_winner == TIE:\r\n print(\"Тебе несказанно повезло, дружок: ты сумел свести игру вничью.\\nРадуйся же сегодняшнему успеху! 
Завтра тебе уже не суждено его повторить.\")\r\n\r\ndef main():\r\n    instructions()\r\n    computer, human = pieces()\r\n    turn = X\r\n    board = new_board()\r\n    display_board(board)\r\n    while not winner(board):\r\n        if turn == human:\r\n            move = human_move(board, human)\r\n            board[move] = human\r\n        else:\r\n            move = computer_move(board,computer,human)\r\n            board[move] = computer\r\n        display_board(board)\r\n        turn = next_turn(turn)\r\n    the_winner = winner(board)\r\n    congrat_winner(the_winner,computer,human)\r\n\r\nmain()\r\ninput(\"Enter \")\r\n","sub_path":"X_0.py","file_name":"X_0.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"554683389","text":"import os\nimport sys\nimport shelve\nimport librosa\nimport threading\nimport multiprocessing as MP\nimport json\nimport time\nimport math\n\nimport encode_process as EP\nimport config\n\n'''\n    Creates an event database out of a sound file. Depending on the\n    parameters used, this data can be enormous. The idea is to create\n    one event data file and use that for all other visualizations.\n'''\n\nclass event():\n\n    def __init__(self, input_file, output_file=None, sample_rate=44100, fps=140, procs=6,\n                 freq_bins=512, freq_max=22050, freq_min=0, frame_size=600, chunk_sec=60):\n        self.config = config.config.get_config()\n        self.input_file = input_file\n        self.output_file = \"{}.db\".format(input_file) if (not output_file) else output_file\n        self.chunk_sec = chunk_sec          # How many seconds per chunk to FFT\n        self.frame_size = frame_size        # How much data is read in at one time\n        self.sample_rate = sample_rate      # Sample rate of audio\n        self.fps = fps                      # How many FPS to encode with\n        self.hop_length = int(sample_rate/fps)  # Size of each frame\n        self.freq_bins = freq_bins          # How many frequency bins for the FFT\n        self.freq_max = freq_max            # Max cutoff frequency\n        self.freq_min = freq_min            # Min cutoff frequency\n        self.procs = procs                  # How many processes to spawn\n        self.manager = None                 # Processes manager\n        self.pool = None                    # Process pool\n        self.proc_list = []                 # List of procs\n        self.event_keys = []                # List of all top level keys in the event file\n        self.event_db = None                # The persistent DB object\n        self.clock_sec = time.time()        # Just a timer\n        self.queue = MP.Queue()             # Queue for IPC\n\n    # Close down the DB\n    def close(self):\n        if (self.event_db):\n            self.event_db.close()\n    \n    # Simply opens the database for playing.\n    # DB must exist, of course\n    # Returns the event db\n    def load(self):\n        print(\"Loading event file: {}\".format(self.output_file))\n        if (not os.path.exists(self.output_file)):\n            print(\"Error: Could not load event file.\")\n            return None\n\n        # Open and load event file\n        self.event_db = shelve.open(self.output_file, writeback=self.config['WRITEBACK'])\n        \n        if ('-1:0' not in self.event_db):\n            print(\"Error: Could not find information entry in event file.\")\n            return None\n\n        event_info = self.event_db['-1:0']\n        self.freq_bins = event_info['freq_bins']\n        self.sample_rate = event_info['sample_rate']\n        self.fps = event_info['fps']\n        self.freq_max = event_info['freq_max']\n        self.freq_min = event_info['freq_min']\n        self.event_keys = event_info['event_keys']\n\n        print(\"Event file loaded:\")\n        print(\" - Sample Rate: {}\".format(self.sample_rate))\n        print(\" - Recorded FPS: {}\".format(self.fps))\n        print(\" - Freq bins: {}\".format(self.freq_bins))\n        print(\" - Freq min/max: {}/{}\".format(self.freq_min, self.freq_max))\n        print(\" - Hz per bin: {} hz\".format((self.freq_max 
- self.freq_min)/self.freq_bins))\n        print(\" - Frames: {}\".format(len(self.event_keys)))\n\n        return True\n\n    # Encode using multi-processing\n    def create_proced(self):\n        print(\"Starting multiprocess encoding...\")\n        print(\"Event DB file: {} -> {}\".format(self.input_file, self.output_file))\n        print(\" - Sample Rate: {}\".format(self.sample_rate))\n        print(\" - Recording FPS: {}\".format(self.fps))\n        print(\" - Freq sample size: {}\".format(self.sample_rate/self.fps))\n        print(\" - Freq bins: {}\".format(self.freq_bins))\n        print(\" - Freq cut off high: {}\".format(self.freq_max))\n        print(\" - Freq cut off low: {}\".format(self.freq_min))\n        print(\" - Frame size: {}s\".format(self.frame_size))\n        print(\" - Chunk size: {}s\".format(self.chunk_sec))\n\n        # First let's check if the input file is available\n        if (not os.path.exists(self.input_file)):\n            print(\"\\nError: Source file '{}' does not exist.\".format(self.input_file))\n            sys.exit(1)\n        \n        # Create a new db file\n        print(\"\\nClearing old data... May take a few.\")\n\n        if (os.path.exists(self.output_file)):\n            print(\"***\")\n            print(\"*** This database exists. Hit return if you REALLY want to re-encode it.\")\n            print(\"***\")\n            sys.stdin.readline()\n            os.remove(self.output_file)\n\n        print(\"Creating new DB file\")\n        # Open our PO DB file\n        self.event_db = shelve.open(self.output_file, writeback=self.config['WRITEBACK'])\n        print(\"Creating processing pool\")\n        self.manager = MP.Manager()\n        self.queue = self.manager.Queue()\n        self.pool = MP.Pool(self.procs)\n\n        # Create our event header information\n        event_info = {\n            \"filename\" : self.input_file,\n            \"sample_rate\" : self.sample_rate,\n            \"fps\" : self.fps,\n            \"freq_bins\" : self.freq_bins,\n            \"freq_max\" : self.freq_max,\n            \"freq_min\" : self.freq_min,\n            \"event_keys\" : []\n        }\n        self.event_db['-1:0'] = event_info\n\n        # Start encoding\n        sec_offset = 0\n        done = False\n        self.clock_sec = time.time()\n        while (not done):\n            print(\"Reading {}s [{}K - {}K] @ {} seconds\".format(self.frame_size,\n                sec_offset * self.sample_rate / 1024,\n                ((sec_offset + self.frame_size) * self.sample_rate) / 1024,\n                sec_offset))\n            y_data, sr = librosa.core.load(self.input_file, self.sample_rate, offset=sec_offset, duration=self.frame_size)\n            y_data_num = len(y_data)\n            print(\"Read {}K Sec: {}\".format(y_data_num/1024, y_data_num/self.sample_rate))\n            if (y_data_num <= 0):\n                print(\"No more samples\")\n                done = True\n            else:\n                print(\"Splitting {} buffer into {} second chunks\".format(y_data_num, self.chunk_sec))\n                # Start splitting procs\n                time_index = 0\n                f_index = 0\n                for data_index in range(0, y_data_num, (self.chunk_sec * self.sample_rate)):\n                    data_index_next = min(data_index + (self.chunk_sec * self.sample_rate), y_data_num)\n                    data_slice = y_data[data_index:data_index_next]\n                    p_name = \"{}\".format(f_index)\n                    # Add in the new proc\n                    new_proc = self.pool.apply_async(start_proc, (sec_offset + time_index, data_slice, event_info, p_name,))\n                    self.proc_list.append(new_proc)\n                    time_index += self.chunk_sec\n                    print(\"Data index: [{} - {}] - Slice len: [{}]\".format(data_index, data_index_next, len(data_slice)))\n                    f_index += 1\n                print(\"Created {} procs. 
Starting...\".format(len(self.proc_list)))\n print(\"-\" * 80)\n\n self.run_procs()\n sec_offset += self.frame_size\n\n print(\"Saving event info...\")\n event_info['event_keys'] = self.event_keys\n self.event_db['-1:0'] = event_info\n self.event_db.sync()\n print(\"Convert time {}s\".format( time.time() - self.clock_sec))\n self.load()\n return self.event_db\n\n def run_procs(self):\n \"\"\"Run all the procs we just created\"\"\"\n index = 0\n # A timekeeper\n start_time = time.time()\n\n counter = 0\n # Loop until proc list is empty\n while (len(self.proc_list) > 0):\n # If we're not syncing\n # Are we running enough procs.\n for l_proc in self.proc_list:\n try:\n a_res = l_proc.get(timeout=.01)\n print(\"Proc finished: {}\".format(len(a_res)))\n self.proc_list.remove(l_proc)\n print(\"[ ()s ] Procs left: {}\".format(time.time() - self.clock_sec, len(self.proc_list)))\n for a_key in a_res:\n self.event_db[a_key] = a_res[a_key]\n if not (counter % 100):\n print(\"Syncing events: {:.2%}\".format(0.0 if len(a_res) == 0 else (counter/float(len(a_res)))))\n counter += 1\n self.event_keys += a_res.keys()\n counter = 0\n except Exception as e:\n# print(\"Exception: {}\".format(e))\n pass\n counter = 0\n\n # Some accessor stuff\n def get_event_keys(self):\n return sorted(self.event_keys)\n \n def get_input_file(self):\n return self.input_file\n\n def get_output_file(self):\n return self.output_file\n\n def get_frame_size(self):\n return self.frame_size\n\n def get_sample_rate(self):\n return self.sample_rate\n\n def get_hop_length(self):\n return self.hop_length\n\n def get_fps(self):\n return self.fps\n\n def get_freq_bins(self):\n return self.freq_bins\n\n def get_freq_max(self):\n return self.freq_max\n\n def get_freq_min(self):\n return self.freq_min\n\n def get_event_db(self):\n return self.event_db\n\n def get_event_info(self):\n if self.event_db.has_key('-1:0'):\n return self.event_db['-1:0']\n return {}\n\n def set_event_info(self, event_info):\n self.event_db['-1:0'] = event_info\n return event_info\n\ndef start_proc(sec_offset, slice_data, event_info, name):\n p_encoding = EP.Encode(sec_offset, slice_data, event_info, name)\n return p_encoding.run()\n \n \n","sub_path":"libs/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":9957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"357922068","text":" # BIRTHDAY MONTH\n\nbirth = {'Austin':'Jan 1', 'Alice':'Feb 29', 'Arial':'Nov 29'}\n\nwhile True:\n print('Enter your name to see your birthday...(or enter blank to quit')\n name = input()\n if name == '': # if name is empty then it's true...\n break\n \n if name in birth:\n print(birth[name] + ' is the brithday of ' + name)\n else:\n print('I dont have information about' + name)\n print('when is ' + name + 'birthday?')\n bd = input()\n birth[name] = bd # here is where we add a new dictionary value but the value of the varaible should not be the same in the dictionary like eve:'Jan 1' <- this is a no no...\n print('data updated!!')\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"dictionary and structuring data/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"515703101","text":"import os,warnings\nimport pandas as pd\nfrom sqlalchemy.sql import not_\nfrom igf_data.utils.dbutils import read_dbconf_json\nfrom igf_data.task_tracking.igf_slack import IGF_slack\nfrom igf_data.task_tracking.igf_asana import 
IGF_asana\nfrom igf_data.igfdb.baseadaptor import BaseAdaptor\nfrom igf_data.igfdb.pipelineadaptor import PipelineAdaptor\nfrom igf_data.igfdb.igfTables import Project,Sample,Seqrun,Experiment,Run,Collection,File,Pipeline,Pipeline_seed\n\n\nclass Modify_pipeline_seed:\n  '''\n  A class for changing pipeline run status in the pipeline_seed table\n  '''\n  def __init__(self,igf_id_list,table_name,pipeline_name,dbconfig_file,\n               log_slack=True,log_asana=True,slack_config=None,\n               asana_project_id=None,asana_config=None,clean_up=True,):\n    '''\n    :param igf_id_list: A list of igf ids to uniquely identify the entity\n    :param table_name: A database table name to look for the igf id\n                        available options are 'project','sample','experiment','run',\n                        'file','seqrun','collection'\n    :param pipeline_name: A pipeline name to change the status of the seed\n    :param dbconfig_file: A file containing the database configuration\n    :param log_slack: A boolean flag for toggling Slack messages, default True\n    :param log_asana: A boolean flag for toggling Asana message, default True\n    :param slack_config: A file containing Slack tokens, default None\n    :param asana_config: A file containing Asana tokens, default None\n    :param asana_project_id: A numeric Asana project id, default is None\n    :param clean_up: Clean up input file once it's processed, default True\n    '''\n    try:\n      self.igf_id_list=igf_id_list\n      if table_name not in ('project','sample','experiment','run','file','seqrun','collection'):\n        raise ValueError('Table {0} not supported for pipeline seed'.\\\n                         format(table_name))\n      self.table_name=table_name\n      self.pipeline_name=pipeline_name\n      self.clean_up=clean_up\n      dbparams = read_dbconf_json(dbconfig_file)\n      self.base_adaptor=BaseAdaptor(**dbparams)\n      self.log_slack=log_slack\n      self.log_asana=log_asana\n      if log_slack and slack_config is None:\n        raise ValueError('Missing slack config file')\n      elif log_slack and slack_config:\n        self.igf_slack = IGF_slack(slack_config)                                # add slack object\n\n      if log_asana and \\\n         (asana_config is None or \\\n          asana_project_id is None):\n        raise ValueError('Missing asana config file or asana project id')\n      elif log_asana and asana_config and asana_project_id:\n        self.igf_asana=IGF_asana(asana_config,asana_project_id)                 # add asana object\n    except:\n      raise\n\n  def _fetch_pipeline_seed_entry(self,igf_id,select_seed_status=None,restrict_seed_status=None):\n    '''\n    An internal method for fetching unique pipeline seed entry from database\n    :param igf_id: An igf id to uniquely select pipe seed data\n    :param select_seed_status: A list of seed status to include from the query, default None\n    :param restrict_seed_status: A list of seed status to exclude from the query, default None\n    '''\n    try:\n      query=None\n      if self.table_name =='seqrun':\n        query=self.base_adaptor.session.\\\n              query(Pipeline_seed).\\\n              join(Seqrun,Pipeline_seed.seed_id==Seqrun.seqrun_id).\\\n              join(Pipeline).\\\n              filter(Seqrun.seqrun_igf_id==igf_id).\\\n              filter(Pipeline_seed.seed_table==self.table_name).\\\n              filter(Pipeline.pipeline_id==Pipeline_seed.pipeline_id).\\\n              filter(Pipeline.pipeline_name==self.pipeline_name)               # get base query for seqrun table\n      else:\n        raise ValueError('Table {0} not supported for pipeline status reset'.\\\n                         format(self.table_name))\n\n      if select_seed_status is not None and \\\n         isinstance(select_seed_status,list) and \\\n         len(select_seed_status) > 0:\n        query=query.filter(Pipeline_seed.status.in_(select_seed_status))        # add generic select filter\n\n      if restrict_seed_status is not None and \\\n         
isinstance(restrict_seed_status,list) and \\\n len(restrict_seed_status)>0:\n query=query.filter(not_(Pipeline_seed.status.in_(restrict_seed_status))) # add generic restrict filter\n\n pipeseed_data=self.base_adaptor.fetch_records(query,\\\n output_mode='one_or_none') # fetch unique value for pipeline seed\n return pipeseed_data\n except:\n raise\n\n def reset_pipeline_seed_for_rerun(self,seeded_label='SEEDED',\n restricted_status_list=('SEEDED','RUNNING')):\n '''\n A method for setting the pipeline for re-run if the first run has failed or aborted\n This method will set the pipeline_seed.status as 'SEEDED' only if its not already\n 'SEEDED' or 'RUNNING'\n :param seeded_label: A text label for seeded status, default SEEDED\n :param restricted_status_list: A list of pipeline status to exclude from the search,\n default ['SEEDED','RUNNING']\n '''\n try:\n db_connected=False\n restricted_status_list = list(restricted_status_list)\n input_id_list=self._read_input_list(igf_id_list=self.igf_id_list) # get input ids from file\n failed_ids=list() # define empty list of failed ids\n pass_list=list() # required for logging in asana\n base=self.base_adaptor\n base.start_session() # connect to database\n db_connected=True\n for igf_id in input_id_list:\n pipe_seed_data=self._fetch_pipeline_seed_entry(igf_id=igf_id,\n restrict_seed_status=restricted_status_list) # get pipe seed data for igf id\n if pipe_seed_data is None:\n failed_ids.append(igf_id) # add igf id to failed list\n else:\n pl=PipelineAdaptor(**{'session':base.session}) # connect to pipeline adaptor\n updated_seed_data=[{'pipeline_id':pipe_seed_data.pipeline_id,\n 'seed_id':pipe_seed_data.seed_id,\n 'seed_table':pipe_seed_data.seed_table,\n 'status':seeded_label}] # set data for seed update\n pl.update_pipeline_seed(data=updated_seed_data, autosave=False) # update data to pipeline seed table\n pass_list.append(igf_id)\n base.commit_session() # save data to database after all changes\n base.close_session() # close database connection\n db_connected=False\n if self.clean_up:\n self._clear_input_list(file_path=self.igf_id_list,\n igf_list=failed_ids) # over write input list and add failed ids for next try\n message='Overwriting pipeseed input list {0}'.format(self.igf_id_list)\n if self.log_slack:\n self.igf_slack.post_message_to_channel(message, reaction='pass') # comment to slack for file over writing\n if len(pass_list)>0:\n for id_line in pass_list:\n message='Changed pipeline seed for id {0}, pipeline {1}, to {2}'.\\\n format(id_line,self.pipeline_name,seeded_label)\n if self.log_slack:\n self.igf_slack.post_message_to_channel(message, reaction='pass') # comment to slack channel\n if self.log_asana:\n self.igf_asana.comment_asana_task(task_name=id_line,\n comment=message) # comment on asana task\n except Exception as e:\n if db_connected:\n base.rollback_session()\n base.close_session()\n message='Failed to update pipeline seed, Error: {0}'.format(e)\n warnings.warn(message)\n if self.log_slack:\n self.igf_slack.post_message_to_channel(message, reaction='fail')\n raise\n\n @staticmethod\n def _clear_input_list(file_path,igf_list):\n '''\n A static method for clearing the seqrun list file\n :param seqrun_igf_list: A file containing the sequencing run ids\n '''\n try:\n if not os.path.exists(file_path):\n raise IOError('File {0} not found'.format(file_path))\n\n with open(file_path,'w') as fwp:\n fwp.write('\\n'.join(igf_list)) # over write input list file\n except:\n raise\n\n @staticmethod\n def _read_input_list(igf_id_list):\n '''\n A 
static method for reading list of ids from an input file\n to a list\n :param igf_id_list: A file containing the input igf ids\n :return list: A list of ids from the input file\n '''\n try:\n if not os.path.exists(igf_id_list):\n raise IOError('File {0} not found'.format(igf_id_list))\n\n id_list=list() # define an empty list of igf ids\n with open(igf_id_list,'r') as fp:\n id_list=[i.strip() for i in fp] # add ids to the list\n return id_list\n except:\n raise","sub_path":"igf_data/process/pipeline/modify_pipeline_seed.py","file_name":"modify_pipeline_seed.py","file_ext":"py","file_size_in_byte":9552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"251943266","text":"__author__ = 'qcp'\r\n# -*- coding:utf-8 -*-\r\nimport tensorflow as tf\r\nimport os\r\nfrom numpy.random import RandomState\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n'''\r\nv1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])\r\nv2 = tf.constant([[5.0, 6.0], [7.0, 8.0]])\r\n# with tf.Session() as sess:\r\n# print(sess.run(v1*v2))\r\n# print((v1*v2).eval())\r\n#print(sess.run(tf.reduce_mean(v1).eval()))\r\n#with tf.InteractiveSession() as sess1:\r\nsess = tf.InteractiveSession()\r\nprint(tf.greater(v1, v2).eval())\r\nprint(tf.where(tf.greater(v1, v2), v1, v2).eval())\r\n#tf.select ���经替换为 tf.where\r\n#loss = tf.reduce_sum(tf._select(tf.greater(v1,v2),(v1-v2)*a,(v2-v1)*b))\r\n'''\r\nbatch_size = 8\r\n#2个输入节点\r\nx = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='x-input')\r\n#1个输出节点\r\ny_ = tf.placeholder(dtype=tf.float32, shape=(None, 1), name='y-input')\r\n#单层神经网络前向传播过程,简单加权和\r\nw1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))\r\ny = tf.matmul(x, w1)\r\n\r\nloss_less = 10\r\nloss_more = 1\r\nloss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\r\n\r\n# 定义学习率\r\nlearning_rate = 0.001\r\n\r\n# 定义反向传播算法来优化神经网络中的参数\r\ntrain_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\r\n\r\n#通过随机数生成一个模拟数据集\r\nrdm = RandomState(1)\r\ndataset_size = 128\r\nX = rdm.rand(dataset_size, 2)\r\n\r\n#加入随机偏移\r\nY = [[x1 + x2 + rdm.rand() / 10.0 - 0.05] for (x1, x2) in X]\r\n\r\nwith tf.Session() as sess:\r\n init_op = tf.global_variables_initializer()\r\n sess.run(init_op)\r\n STEPS = 5000\r\n for i in range(STEPS):\r\n start = (i * batch_size) % dataset_size\r\n end = min(start + batch_size, dataset_size)\r\n sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})\r\n print(sess.run(w1))\r\n\r\n\"\"\"\r\nw2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))\r\n# 通过参数seed设置随机数种子,保证每次运行结果是一样的\r\n\r\n# x = tf.constant([[0.7, 0.9]])\r\n# 定义placeholder作为存放数据的地方。维度不一定要定义,但如果维度是确定的,定义可以减少出错的可能。\r\n# 在shape的一个维度上使用None可以方便地使用不大的batch。\r\n# 在训练时需要把数据分成较小的batch,但是在测试时可以一次使用全部数据。\r\n# 当数据集比较小时,这样可以方便测试,但数据集比较大时,将大量数据放入一个batch可能导致内存溢出。\r\nx = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='x-input')\r\ny_ = tf.placeholder(dtype=tf.float32, shape=(None, 1), name='y-input')\r\n\r\n# 定义神经网络前向传播过程\r\n\r\ny = tf.matmul(a, w2)\r\n\r\n# 定义损失函数\r\ncross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))\r\n\r\n# 定义学习率\r\nlearning_rate = 0.001\r\n\r\n# 定义反向传播算法来优化神经网络中的参数\r\ntrain_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)\r\n\r\n\r\n\r\n#定义规则来给出样本标签\r\n#在这里,x1 + x2 < 1的样例被认为是正样本,用1表示,其他为负样本,用0表示\r\n\r\n\r\n\r\n '''\r\n 训练前的神经网络参数:\r\n w1=[[-0.8113182 1.4845988 0.06532937]\r\n [-2.4427042 0.0992484 0.5912243 ]]\r\n w2=[[-0.8113182 ]\r\n [ 1.4845988 ]\r\n [ 0.06532937]]\r\n 
'''\r\n\r\n #设定训练轮数\r\n STEPS = 5000\r\n for i in range(STEPS):\r\n #每次选取batch_size个样本进行训练\r\n start = (i * batch_size) % dataset_size\r\n end = min(start + batch_size, dataset_size)\r\n\r\n #通过选取的样本训练神经网络并更新参数\r\n sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})\r\n\r\n #每隔一段时间计算在所有数据上的交叉熵\r\n if i % 1000 == 0:\r\n total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})\r\n print('After %d training steps, cross entropy on all data is %g' % (i, total_cross_entropy))\r\n '''\r\n 输出结果:\r\n After 0 training steps, cross entropy on all data is 0.0674925\r\n After 1000 training steps, cross entropy on all data is 0.0163385\r\n After 2000 training steps, cross entropy on all data is 0.00907547\r\n After 3000 training steps, cross entropy on all data is 0.00714436\r\n After 4000 training steps, cross entropy on all data is 0.00578471\r\n '''\r\n print(sess.run(w1))\r\n print(sess.run(w2))\r\n '''\r\n 训练后的神经网络参数:\r\n w1=[[-1.9618275 2.582354 1.6820377]\r\n [-3.4681718 1.0698231 2.11789 ]]\r\n w2=[[-1.824715 ]\r\n [ 2.6854665]\r\n [ 1.418195 ]]\r\n '''\r\n #print(sess.run(y))\r\n #输出[[3.957578]]\r\n #print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))\r\n\r\n #weights = tf.Variable(tf.random_normal([2, 3], stddev=2)) # stddev=标准差\r\n #biases = tf.Variable(tf.zeros([3]))\r\n # w2 = tf.Variable(weights.initialized_value())\r\n #w3 = tf.Variable(weights.initialized_value() * 2)\r\n # print(weights.eval(session=sess))\r\n #print(sess.run(weights))\r\n\"\"\"","sub_path":"4lossFunction.py","file_name":"4lossFunction.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"520636625","text":"def list_to_str(some_list):\n list_str = ''\n for i in range(len(some_list) - 1):\n list_str += str(some_list[i])\n list_str += \", \"\n list_str += \"and \" + str(some_list[-1])\n return list_str\n\n\nmy_list = [1, 2, 3, 4, 5]\nprint(list_to_str(my_list) + '\\n')\n\nmy_test_list = ['apples', 'bananas', 'tofu', 'cats']\nprint(list_to_str(my_test_list))\n","sub_path":"Chapter4/CommaCode.py","file_name":"CommaCode.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"69029080","text":"#!/usr/bin/env python3\n# -*- coding: utf8 -*-\n__author__ = 'wjhao'\n\nfrom pypinyin import pinyin, lazy_pinyin\nimport time\nimport jieba\nimport pymysql\nimport os\n\ndb = pymysql.connect(\"47.110.226.117\", \"build\", \"build@117\", \"build\", use_unicode=True, charset=\"utf8\")\n# db = pymysql.connect(\"120.27.21.176\", \"root\", \"zjzy@123\", \"build\", use_unicode=True, charset=\"utf8\")\ncursor = db.cursor()\nsql = \"\"\"select id from dpi_netsafetyplatform where wname='%s' \"\"\"\nsql_type = \"\"\"insert into dpi_protocoltype (typeName, code, status, platformCode_id) values ('%s', '%s', 0, %s)\"\"\"\nsql_app = \"\"\"insert into dpi_protocolapplication (appName, code, status, platformCode_id, typeCode_id) VALUES ('%s', '%s', 0, %s, %s)\"\"\"\nsql_ana = \"\"\"select * from dpi_enterprisecoding where name = '%s'\"\"\"\nsql_ono = \"\"\"select * from dpi_enterprisecoding where name like '%s'\"\"\"\nsql_ini = \"\"\"insert into dpi_appprocode (ns_appname, ns_code, vi_ec_id, platformCode_id, user, create_time, update_time) VALUES ('%s', '%s', %s, 1, 'auto create', '%s', '%s')\"\"\"\npath = os.path.dirname(os.path.abspath(__file__))\npath_qy = os.path.join(path, 'wa')\npt = os.listdir(path_qy)\n\nfor i 
in pt:\n    cursor.execute(sql % i)\n    t_d = cursor.fetchone()\n    if t_d is None:\n        code = ''.join(lazy_pinyin(i)).upper()\n        cursor.execute(\"\"\"insert into dpi_netsafetyplatform (wname, code, status) VALUES ('%s', '%s', '0')\"\"\" % (i, code))\n        db.commit()\n        cursor.execute(sql % i)\n        t_d = cursor.fetchone()[0]\n    else:\n        t_d = t_d[0]\n    wapt = os.path.join(path_qy, i)\n    for j in os.listdir(wapt):\n        with open(os.path.join(wapt, j), 'r', encoding='gbk') as fp:\n            data = fp.readlines()\n\n        for j_b in data[1:]:\n            jb = j_b.replace('\\n', '').split('\\t')\n            cursor.execute(sql_ana % jb[1])\n\n            da = cursor.fetchone()\n            # if da is None:\n            #     seg = list(jieba.cut(jb[1]))\n            #     print(seg)\n            #     cursor.execute(\"\"\"select * from dpi_enterprisecoding where name like '%%%s%%'\"\"\" % seg[0])\n            #     t = cursor.fetchall()\n            #     print(t)\n            if da:\n                cd = time.strftime('%Y-%m-%d %H:%M:%S')\n                cursor.execute(sql_ini % (jb[1].replace(' ', ''), jb[0], da[0], cd, cd))\n                db.commit()\n                # print('%s==========%s======%s' % (jb[1].replace(' ', ''), jb[0], da[0]))\n                # pass\n            # else:\n            #     try:\n            #         seg = list(jieba.cut(jb[1]))\n            #         cursor.execute(\"\"\"select * from dpi_enterprisecoding where name like '%%%s%%'\"\"\" % seg[0])\n            #         t = cursor.fetchall()\n            #         print('%s================%s' % (''.join(seg), '-'.join([i[1] for i in t])))\n            #     except IndexError:\n            #         print(jb[1], j)\n            # print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n            # break\ndb.close()\n\n\nclass InsertIData:\n    def __init__(self, ids):\n        \"\"\"\n        ids : 1 - import into the test database, 2 - import into the production database\n        :param ids:\n        \"\"\"\n        self.file = None\n        self.path = None\n        if ids == 1:\n            self.db = pymysql.connect(\"120.27.21.176\", \"root\", \"zjzy@123\", \"build\", use_unicode=True, charset=\"utf8\")\n        elif ids == 2:\n            self.db = pymysql.connect(\"47.110.226.117\", \"build\", \"build@117\", \"build\", use_unicode=True, charset=\"utf8\")\n        self.cursor = self.db.cursor()\n\n    def get_file_path(self, file = None):\n        self.path = os.path.dirname(os.path.abspath(__file__))\n        self.path = os.path.join(self.path, 'wa')\n        if file is None:\n            self.file = os.listdir(self.path)\n        else:\n            self.file = file if isinstance(file, (list, tuple)) else [file]\n\n    def insert_into_data(self, file = None):\n        sql_path = \"\"\"select id from dpi_netsafetyplatform where wname='%s' \"\"\"\n        self.get_file_path(file=file)\n        for p_d in self.file:\n            self.cursor.execute(sql_path % p_d)\n            t_d = self.cursor.fetchone()\n            if t_d is None:\n                code = ''.join(lazy_pinyin(p_d)).upper()\n                self.cursor.execute(\n                    \"\"\"insert into dpi_netsafetyplatform (wname, code, status) VALUES ('%s', '%s', '0')\"\"\" % (p_d, code))\n                self.db.commit()\n                self.cursor.execute(\"\"\"select id from dpi_netsafetyplatform where wname='%s' \"\"\" % p_d)\n                t_d = self.cursor.fetchone()[0]\n            else:\n                t_d = t_d[0]\n\n            wapt = os.path.join(self.path, p_d)\n            for j in os.listdir(wapt):\n                with open(os.path.join(wapt, j), 'r', encoding='gbk') as fp:\n                    data = fp.readlines()\n\n                for j_b in data[1:]:\n                    jb = j_b.replace('\\n', '').split('\\t')\n                    self.cursor.execute(\"\"\"select * from dpi_enterprisecoding where name = '%s'\"\"\" % jb[1])\n\n                    da = self.cursor.fetchone()\n\n                    if da:\n                        cd = time.strftime('%Y-%m-%d %H:%M:%S')\n                        self.cursor.execute(\"\"\"insert into dpi_appprocode (ns_appname, ns_code, vi_ec_id, platformCode_id, user, create_time, update_time) VALUES ('%s', '%s', %s, 1, 'auto create', '%s', '%s')\"\"\" % (jb[1].replace(' ', ''), jb[0], da[0], cd, cd))\r\n                        
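# commit right away so each matched row is persisted before the next lookup\r\n                        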
self.db.commit()","sub_path":"dpi/lib/oneOnone.py","file_name":"oneOnone.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"251300762","text":"# Version 3.6.1\nimport requests\nimport json\nfrom tia.log import *\nfrom tia.auth import *\nfrom datetime import datetime, date, timedelta\nimport quote as quoteUtil\nimport history as histUtil\n\n\ntoday = date.today()\ntoday = datetime(year=today.year,month=today.month,day=today.day,)\ntimedelta = timedelta(weeks=26)\nstart_date = today - timedelta\nprint (\"Start Date :\", start_date)\nhist = histUtil.getHistory(\"WISH\", 'daily', start_date)\nhist = hist['history']['day']\nhigh = 0.0\nlow = 999999.0\navg_vol = 0\nidx = 1\nres = []\ncloses = []\ngems = []\nfor h in hist:\n    high = max(high, h['high'])\n    low = min(low, h['low'])\n    avg_vol += h['volume']\n    idx +=1\n    dt = datetime.strptime(h['date'], \"%Y-%m-%d\" )\n    if ( dt == today):\n        continue\n    op = h['open']\n    h['high'] = h['high'] * 100 / op\n    h['low'] = h['low'] * 100 / op\n    h['close'] = h['close'] * 100 / op\n    h['open'] = h['open'] * 100 / op\n    gem = ( (h['high']- h['low'] ) * h['close']) / h['open']\n    gems.append(gem)\n    closes.append(h['close'])\n    res.append(h)\n    print (h)\n    print (gem)\nprint (len(res))\navg_vol = avg_vol / idx\nprint (avg_vol)\n# Get 52 week high and low and volume\n\n\nimport plotly.graph_objects as go\nimport pandas as pd\nimport plotly.express as px\n\npd.options.plotting.backend = \"plotly\"\ndf = pd.DataFrame(dict(GEM=gems[0:-1],CLOSES=closes[1:]))\nfig = df.plot()\nfig.show()\n\nimport numpy as np\nprint(np.corrcoef(gems[0:-1], closes[1:]))\n","sub_path":"histData.py","file_name":"histData.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"437574707","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom utility.perspectiveTransform import Perspective\n\n# Used code from Udacity online course 18 : Detect lane pixels and fit to find the lane boundary\nclass Boundary():\n    def __init__(self):\n        # Set the half window width (+/- margin) for the sliding-window lane search\n        self.margin = 100\n        # Choose the number of sliding windows\n        self.nwindows = 9\n        # Set minimum number of pixels found to recenter window\n        self.minpix = 50\n\n    ####################################\n    # Helper functions\n    ####################################\n    def y_to_x(self, y):\n        '''\n        Calculate x = ay**2 + by + c\n        self.left_fit and self.right_fit contains [a,b,c]\n        :param y: y coordinate to find corresponding x coordinate\n        '''\n        self.left_fit_x = self.left_fit[0] * (y ** 2) + self.left_fit[1] * y + self.left_fit[2]\n        self.right_fit_x = self.right_fit[0] * (y ** 2) + self.right_fit[1] * y + self.right_fit[2]\n\n    @staticmethod\n    def polynomial_to_points(left_x, right_x, y):\n        '''\n        convert x,y coordinates to list of points for opencv plotting\n        :param left_x: x coordinates for left lane\n        :param right_x: x coordinates for right lane\n        :param y: y coordinates\n        '''\n        left_vertices = np.array([np.transpose(np.vstack([left_x, y]))], dtype=np.int32)\n        # flip the points on the right edge of the left traffic lane, so the points are ordered for fillPoly\n        # 1  6\n        # 2  5\n        # 3  4\n        # left window  right window\n        right_vertices = np.array([np.flipud(np.transpose(np.vstack([right_x, y])))], dtype=np.int32)\n        pts = np.hstack((left_vertices, right_vertices))\n\n        return 
pts, left_vertices, right_vertices\n\n\n    def histogram_peaks(self, outdir, img):\n        '''\n        Find x coordinate for left and right lane near bottom of image using peaks for histogram\n        of pixel intensities in the lower half of the binary image\n        :param outdir: directory to save histogram\n        :param img: input binary image with value between 0 and 255\n        :return:\n        '''\n        self.img = img\n        assert len(np.unique(self.img)) <= 2, \"input to histogram_peaks must be binary image, with values 0 or 255\"\n        # take a histogram along all the columns in the lower half of the image\n        self.img_h, self.img_w = self.img.shape[0:2]\n        histogram = np.sum(self.img[self.img_h // 2:, :], axis=0)\n        plt.plot(histogram)\n        plt.xlabel('Pixel Positions')\n        plt.ylabel('Counts')\n\n        if (outdir):\n            plt.savefig(outdir + \"histogram.jpg\")\n        # Find the peak of the left and right halves of the histogram\n        # These will be the starting point for the left and right lines\n        midpoint = int(histogram.shape[0] // 2)\n        self.leftx_base = np.argmax(histogram[:midpoint])\n        self.rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n        plt.close()\n\n    def calc_curvature(self):\n        '''\n        Calculate curvature of left and right lanes\n        :return: radius of curvature for left and right lane\n        '''\n        y = self.img_h - 10\n        # value taken from Udacity course\n        # Define conversions in x and y from pixels space to meters\n        ym_per_pix = 30 / 720  # meters per pixel in y dimension\n        self.xm_per_pix = 3.7 / 700  # meters per pixel in x dimension\n        # Fit new polynomials to x,y in world space\n        left_fit_cr = np.polyfit(self.lefty * ym_per_pix, self.leftx * self.xm_per_pix, 2)\n        right_fit_cr = np.polyfit(self.righty * ym_per_pix, self.rightx * self.xm_per_pix, 2)\n        # Calculate the new radii of curvature at a particular y coordinate\n        left_curverad = ((1 + (2 * left_fit_cr[0] * y * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])\n        right_curverad = ((1 + (2 * right_fit_cr[0] * y * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])\n        # Now our radius of curvature is in meters\n        return left_curverad, right_curverad\n\n    def calc_dist_center(self):\n        '''\n        :return: Distance of the vehicle center to road center\n        '''\n        # assume center of image is along the center line of vehicle\n        center_car_x = int(self.img_w / 2)\n        # only take the x value for the left and right line polynomial towards bottom of frame\n        center_lane_x = int((self.left_fit_x[-1] + self.right_fit_x[-1])/2)\n        dist = (center_lane_x - center_car_x) * self.xm_per_pix\n        return dist\n\n    def create_window(self, i):\n        '''\n        Helper function for slide_window, find the boundary for windows for left and right lanes\n        :param i: ith window\n        '''\n        # Set height of windows (image height divided by number of sliding windows)\n        window_height = np.int(self.img_h // self.nwindows)\n\n        # Identify window boundaries in y\n        self.win_y_low = self.img_h - (i + 1) * window_height\n        self.win_y_high = self.img_h - i * window_height\n        # Identify window boundaries in x for left lane\n        self.win_xleft_low = self.leftx_current - self.margin\n        self.win_xleft_high = self.leftx_current + self.margin\n        # Identify window boundaries in x for right lane\n        self.win_xright_low = self.rightx_current - self.margin\n        self.win_xright_high = self.rightx_current + self.margin\n        # visualize location of current windows\n        cv2.rectangle(self.out_img, (self.win_xleft_low, self.win_y_low), (self.win_xleft_high, self.win_y_high),\n                      (0, 255, 0), 2)\n        cv2.rectangle(self.out_img, (self.win_xright_low, 
self.win_y_low), (self.win_xright_high, self.win_y_high),\n (0, 255, 0), 2)\n\n def recenter_window(self):\n '''\n Recenter window according to average x coordinate of non-zero pixels within the window\n '''\n # Part 1: find indices for pixels within the current window that have nonzero intensity\n # bool_in_window_left[i] is true when self.nonzeroy[i] and self.nonzerox[i] is a point in window\n bool_in_window_left = ((self.nonzeroy >= self.win_y_low) & (self.nonzeroy < self.win_y_high) &\n (self.nonzerox >= self.win_xleft_low) & (self.nonzerox < self.win_xleft_high))\n # convert from [False True ....] to [1 ...]\n idx_in_window_left = np.nonzero(bool_in_window_left)\n # array of one list, take 0th list to reduce the dimension\n idx_in_window_left = idx_in_window_left[0]\n # Append these indices to the lists\n self.left_lane_inds.append(idx_in_window_left)\n\n idx_in_window_right = ((self.nonzeroy >= self.win_y_low) & (self.nonzeroy < self.win_y_high) &\n (self.nonzerox >= self.win_xright_low) & (self.nonzerox < self.win_xright_high)).nonzero()[0]\n self.right_lane_inds.append(idx_in_window_right)\n\n # Part 2: recenter window for left or right lane\n\n # If you found > minpix pixels, recenter next window on their mean position\n if len(idx_in_window_left) > self.minpix:\n # x coordinates for pixels that has nonzero intensity (belong to lane) and also lies inside current window\n left_nonzero_in_window_x_coords = self.nonzerox[idx_in_window_left]\n # recenter next window on their mean position\n self.leftx_current = np.int(np.mean(left_nonzero_in_window_x_coords))\n\n if len(idx_in_window_right) > self.minpix:\n right_nonzero_in_window_x_coords = self.nonzerox[idx_in_window_right]\n self.rightx_current = np.int(np.mean(right_nonzero_in_window_x_coords))\n\n\n def slide_window(self):\n '''\n Find pixels belonging to left and right lanes by sliding window from bottom of image upward\n '''\n # Current positions to be updated for each window\n self.leftx_current = self.leftx_base\n self.rightx_current = self.rightx_base\n\n # Step through the windows one by one\n # Draw the windows on the visualization image\n self.out_img = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)\n for i in range(self.nwindows):\n self.create_window(i)\n self.recenter_window()\n\n # Concatenate the arrays of indices:\n # left_lane_inds num window rows, each row has num valid pixels in the window\n # turn this into a flat numpy array 1x total number of valid pixels in all windows\n self.left_lane_inds = np.concatenate(self.left_lane_inds)\n self.right_lane_inds = np.concatenate(self.right_lane_inds)\n\n\n\n\n def fit_use_prev(self):\n '''\n Find indices for pixels belonging to left and right lane by searching around\n the lane lines found in previous frame\n '''\n # Assume you now have a new warped binary image\n # from the next frame of video (also called \"binary_warped\")\n # You don't need to do a blind search again, but instead you can just search\n # in a margin around the previous line position l\n self.y_to_x(self.nonzeroy)\n\n self.left_lane_inds = ((self.nonzerox > self.left_fit_x - self.margin)\n & (self.nonzerox < self.left_fit_x + self.margin))\n\n self.right_lane_inds = ((self.nonzerox > (self.right_fit_x - self.margin))\n & (self.nonzerox < self.right_fit_x + self.margin))\n\n ####################################\n # fit_lane and visualize_lane are called by pipeline.py\n ####################################\n # this calls slide window or fit using previous result depends\n def fit_lane(self, birdeye_binary, 
idx):\n        '''\n        Find lane lines in a birdeye view binary image and fit a second order polynomial to each line\n        :param birdeye_binary: birdeye view binary image\n        :param idx: index of the frame in a video\n        '''\n        # Identify the x and y positions of all nonzero pixels in the image\n        self.img = birdeye_binary\n        nonzero_pix = self.img.nonzero()\n        self.nonzeroy = np.array(nonzero_pix[0])  # y coordinate of non zero pixel (row)\n        self.nonzerox = np.array(nonzero_pix[1])  # x coordinate of non zero pixel (col)\n\n        # Create empty lists to receive left and right lane pixel indices\n        self.left_lane_inds = []\n        self.right_lane_inds = []\n\n        if idx == 0:\n            self.slide_window()\n        else:\n            self.fit_use_prev()\n\n        # Extract left and right line pixel positions\n        self.leftx = self.nonzerox[self.left_lane_inds]\n        self.lefty = self.nonzeroy[self.left_lane_inds]\n        self.rightx = self.nonzerox[self.right_lane_inds]\n        self.righty = self.nonzeroy[self.right_lane_inds]\n\n        # Fit a second order polynomial to each\n        # use function x(y) instead of y(x)\n        self.left_fit = np.polyfit(self.lefty, self.leftx, 2)\n        self.right_fit = np.polyfit(self.righty, self.rightx, 2)\n\n\n\n\n    def visualize_lane(self, outdir, original_img, to_front_matrix, blend_alpha=0.5):\n        '''\n        Calculate the curvature and distance to center, visualize the lane lines\n        :param outdir: path to store visualization\n        :param original_img: original frame from video\n        :param to_front_matrix: matrix to warp birdeye view to front view\n        :param blend_alpha: parameter for blending lane lines with the original image\n        '''\n        # Part 1: Create an image with lanes in bird-eye view, then warp it to front view\n        # Generate x and y values for plotting\n        self.ploty = np.linspace(0, original_img.shape[0] - 1, original_img.shape[0])\n        self.y_to_x(self.ploty)\n\n        # input: 3 channel warped image\n        color_birdeye_mask = np.zeros_like(original_img)\n        pts, left_vertices, right_vertices = self.polynomial_to_points(self.left_fit_x, self.right_fit_x, self.ploty)\n        # draw lane boundaries on the warped image\n        cv2.fillPoly(color_birdeye_mask, [pts], (0, 255, 0))\n        cv2.polylines(color_birdeye_mask, [left_vertices], isClosed=False, color=(255, 0, 0), thickness=20)\n        cv2.polylines(color_birdeye_mask, [right_vertices], isClosed=False, color=(0, 0, 255), thickness=20)\n        # warp the mask back to the original image (front view)\n        color_front_mask = cv2.warpPerspective(color_birdeye_mask, to_front_matrix, (self.img_w, self.img_h))\n\n        # Part 2: blend the original image with the lanes\n        img_blend = np.zeros_like(original_img)\n        blend_beta = 1 - blend_alpha\n        result_img = cv2.addWeighted(color_front_mask, blend_alpha, original_img, blend_beta, 0.0, img_blend)\n\n        # Part 3: show curvature and distance to center\n\n        # calculate curvature and center dist\n        left_curverad, right_curverad = self.calc_curvature()\n        center_dist = self.calc_dist_center()\n\n        # put text on the image\n        curv_left_text = \"Left lane curvature radius = {0:.2f} m\".format(left_curverad)\n        curv_right_text = \"Right lane curvature radius = {0:.2f} m\".format(right_curverad)\n        center_dist_text = \"Distance to center = {0:.2f} m\".format(center_dist)\n\n        cv2.putText(result_img, curv_left_text, (10, 50), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n        cv2.putText(result_img, curv_right_text, (10, 100), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n        cv2.putText(result_img, center_dist_text, (10, 150), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n        if outdir:\n            cv2.imwrite(outdir + 
\"lane_border_birdeye.jpg\", color_birdeye_mask)\n cv2.imwrite(outdir + \"lane_border_front.jpg\", color_front_mask)\n cv2.imwrite(outdir + \"blend.jpg\", result_img)\n\n return result_img\n","sub_path":"P4_lane/code/utility/laneBoundary.py","file_name":"laneBoundary.py","file_ext":"py","file_size_in_byte":13995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"115753926","text":"import numpy as num\nimport matplotlib.pyplot as plt\n\ndatos = num.loadtxt('dat.txt')\n\nx = datos[:,0]\ny = datos[:,1]\nz = num.exp(x)\n\n\nplt.figure()\nplt.scatter(x, y)\nplt.plot(x, z)\nplt.savefig('Funcion ex')\n","sub_path":"ejercicio18.py","file_name":"ejercicio18.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"204651590","text":"import re\n\nhandle = open('..\\Files\\mbox-short.txt')\nnumbers = list()\n\nfor line in handle:\n line = line.rstrip()\n number = re.findall('^New Revision: ([0-9]+)', line)\n\n if len(number) > 0:\n num = int(number[0])\n numbers.append(num)\n\nif len(numbers) > 0:\n print(round(sum(numbers)/len(numbers)))\nelse:\n print(0.0)\n","sub_path":"RegEx/Average.py","file_name":"Average.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"643851883","text":"# -*- coding: utf-8 -*-\n\nfrom django.http import Http404\nfrom django.contrib import messages\nfrom django.contrib.admin.models import ADDITION, CHANGE, DELETION\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.db.models import Q, Sum\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.utils.translation import ugettext_lazy as __\nfrom django.views.generic import base, detail, edit, list\nfrom django.shortcuts import get_object_or_404, redirect\n\nfrom charge import forms, models\nfrom charge.utils import create_log_entry, login_required\n\n\n### Mixins ####################################################################\n\nclass FilterCreatorMixin(object):\n \"\"\"\n Limit a User to only obtain their own data.\n \"\"\"\n def get_queryset(self):\n \"\"\" Limit the queryset to the requesting user. 
\"\"\"\n base_qs = super(FilterCreatorMixin, self).get_queryset()\n current_user = self.request.user\n return base_qs.filter(creator=current_user)\n\n\n### BaseViews #################################################################\n\nclass BaseCreateView(edit.CreateView):\n \"\"\"\n The used Model should have a creator and name field.\n \"\"\"\n success_message = __('{name} created successfully')\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n self.object = form.save()\n\n # Display success message\n name = form.cleaned_data['name']\n msg = self.success_message.format(name=name)\n messages.success(self.request, msg)\n\n create_log_entry(self.object, self.request.user, ADDITION)\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass BaseUpdateView(FilterCreatorMixin, edit.UpdateView):\n \"\"\"\n The used Model should have a creator and name field.\n \"\"\"\n success_message = __('{name} updated successfully')\n\n def form_valid(self, form):\n self.object = form.save()\n\n # Display success message\n name = self.object.name\n msg = self.success_message.format(name=name)\n messages.success(self.request, msg)\n\n create_log_entry(self.object, self.request.user, CHANGE)\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass BaseDeleteView(FilterCreatorMixin, edit.DeleteView):\n \"\"\"\n DeleteView with user filter and redirect to Overview.\n\n The used Model should have a creator and name field.\n \"\"\"\n success_message = __('{name} deleted successfully')\n template_name = 'charge/object_confirm_delete.html'\n\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n\n # Display success message\n name = self.object.name\n msg = self.success_message.format(name=name)\n messages.success(self.request, msg)\n\n create_log_entry(self.object, self.request.user, DELETION)\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass Index(base.TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n context = super(Index, self).get_context_data(**kwargs)\n # FIXME convert to base currency\n context['total_costs'] = models.Item.objects.aggregate(\n total_costs=Sum('cost'))['total_costs']\n return context\n\n\n### Event Related #############################################################\n\n@login_required\nclass EventCreate(BaseCreateView):\n model = models.Event\n form_class = forms.EventForm\n success_url = reverse_lazy('overview')\n\n def get_initial(self):\n initial = super(EventCreate, self).get_initial()\n # request user is default participant\n initial['participants'] = [self.request.user.pk]\n return initial\n\n\n@login_required\nclass EventDetail(detail.DetailView):\n \"\"\"\n Represents an individual Event object.\n \"\"\"\n model = models.Event\n\n def get_context_data(self, **kwargs):\n context = super(EventDetail, self).get_context_data(**kwargs)\n context['items'] = models.Item.objects.filter(event=self.object)\n participants = self.object.participants.all();\n for participant in participants:\n payments = self.object.payment_set.filter(user=participant)\n if (payments.count() >= 1):\n participant.payment = payments[0]\n context['participants'] = participants\n return context\n\n\n@login_required\nclass EventUpdate(BaseUpdateView):\n model = models.Event\n form_class = forms.EventForm\n\n def get_success_url(self):\n return reverse_lazy('event', args=[self.object.pk])\n\n\n@login_required\nclass EventDelete(BaseDeleteView):\n model = models.Event\n 
success_url = reverse_lazy('overview')\n\ndef event_bill(request, pk):\n    event = models.Event.objects.get(pk=pk)\n    if request.user != event.creator:\n        raise Http404()\n    event.bill()\n    return redirect(event.get_absolute_url())\n\ndef event_unbill(request, pk):\n    event = models.Event.objects.get(pk=pk)\n    if request.user != event.creator:\n        raise Http404()\n    event.unbill()\n    return redirect(event.get_absolute_url())\n\n@login_required\nclass EventHistory(list.ListView):\n    model = models.EventLogEntry\n    template_name = 'charge/event_history.html'\n\n    def get_queryset(self):\n        event_pk = self.kwargs['pk']\n        item_ids = models.Item.objects.filter(event=event_pk).values_list(\n            'id', flat=True)\n        event_ct = ContentType.objects.get_for_model(models.Event)\n        event_filter = Q(content_type=event_ct, object_id=event_pk)\n        item_ct = ContentType.objects.get_for_model(models.Item)\n        item_filter = Q(content_type=item_ct, object_id__in=item_ids)\n        qs = self.model.objects.filter(event_filter | item_filter)\n        return qs\n\n\n### Item related ##############################################################\n\nclass ItemSuccessUrlMixin(object):\n    \"\"\"\n    Redirects to the corresponding event.\n    \"\"\"\n    def get_success_url(self):\n        return reverse_lazy('event', args=[self.object.event.pk])\n\n\n@login_required\nclass ItemCreate(ItemSuccessUrlMixin, BaseCreateView):\n    \"\"\"\n    Create an Item for the given Event.\n    \"\"\"\n    model = models.Item\n    form_class = forms.ItemForm\n\n    def post(self, request, *args, **kwargs):\n        # the request user must participate in the related Event\n        self.event = get_object_or_404(models.Event,\n            pk=self.kwargs['event_pk'], participants=self.request.user)\n        return super(ItemCreate, self).post(request, *args, **kwargs)\n\n    def form_valid(self, form):\n        \"\"\" Assigns the event to the form. \"\"\"\n        form.instance.event = self.event\n        return super(ItemCreate, self).form_valid(form)\n\n\n@login_required\nclass ItemUpdate(ItemSuccessUrlMixin, BaseUpdateView):\n    model = models.Item\n    form_class = forms.ItemForm\n\n\n@login_required\nclass ItemDelete(ItemSuccessUrlMixin, BaseDeleteView):\n    model = models.Item\n\n\n@login_required\nclass Overview(list.ListView):\n    model = models.Event\n    template_name = 'charge/overview.html'\n\n    def get_queryset(self):\n        \"\"\" Users should only see their own objects. 
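An event is listed when the current user is its creator or one of its participants. 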
\"\"\"\n base_qs = super(Overview, self).get_queryset()\n current_user = self.request.user\n queryset = base_qs.filter(Q(creator=current_user) |\n Q(participants=current_user)).distinct()\n for event in queryset:\n user_payments = event.payment_set.filter(user=current_user)\n event.user_inbound_payments = event.user_open_inbound_payments(current_user)\n event.user_outbound_payments = event.user_open_outbound_payments(current_user)\n return queryset\n\n@login_required\nclass Logout(base.View):\n def get(self, request, *args, **kwargs):\n \"\"\"\n Signs out the user and adds a success message.\n \"\"\"\n messages.success(request, _('You have been signed out.'),\n fail_silently=True)\n login_url = reverse('auth_login')\n return auth_views.logout(request, next_page=login_url, *args, **kwargs)\n\ndef payment_mark(request, pk):\n payment = models.Payment.objects.get(pk=pk)\n if not request.user == payment.receiver():\n raise Http404()\n payment.is_paid = True\n payment.save()\n return redirect(payment.event.get_absolute_url())\n\ndef payment_unmark(request, pk):\n payment = models.Payment.objects.get(pk=pk)\n if not request.user == payment.receiver():\n raise Http404()\n payment.is_paid = False\n payment.save()\n return redirect(payment.event.get_absolute_url())\n\ndef user(request, user):\n if user == request.user.username:\n return redirect('/overview/')\n else:\n raise Http404()\n","sub_path":"src/charge/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"135827838","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom nav_msgs.msg import Odometry\nfrom lab4.msg import PIDInput\nfrom ackermann_msgs.msg import AckermannDriveStamped\nfrom math import exp\n\n\ndef clipped_linear(x, x0, y0, x1, y1):\n \"\"\"mx+b but clipped\n\n :param x: current input\n :param x0: minimum clipping input\n :param y0: minimum clipped output\n :param x1: maximum clipping input\n :param y1: maximum clipped output\n \"\"\"\n if x <= x0:\n return y0\n elif x >= x1:\n return y1\n else:\n return (y1 - y0) / (x1 - x0) * (x - x0) + y0\n\n\nclass CarControl(object):\n\n def __init__(self, Kp, Kd, min_speed, max_speed, d_error_alpha):\n rospy.init_node('car_control')\n\n self.Kp = Kp\n self.Kd = Kd\n self.min_speed = min_speed\n self.max_speed = max_speed\n self._d_error_alpha = d_error_alpha\n\n self._last_error = None\n self._last_time = None\n self._smoothed_error = None\n self._smoothed_d_error = None\n self._smoothed_period = None\n self._front_distance = 0.0\n self._smoothed_front_distance = 0.0\n self._speed = 0.0\n\n self.drive_msg = AckermannDriveStamped()\n self.drive_pub = rospy.Publisher('/drive', AckermannDriveStamped, queue_size=10)\n\n self.pid_input_sub = rospy.Subscriber('pid_input', PIDInput, self.pid_input_callback)\n\n self.scan_sub = rospy.Subscriber('/scan', LaserScan, self.scan_callback)\n\n self.odom_subscriber = rospy.Subscriber('/odom', Odometry, self.odom_callback)\n\n rospy.loginfo('car_control is running')\n\n rospy.spin()\n\n def pid_input_callback(self, data):\n time = rospy.get_time()\n if self._smoothed_error is not None and self._smoothed_period is not None:\n update_rate = 100. # Hz\n alpha = (1. 
- exp(-self._smoothed_period * update_rate))\n self._smoothed_error += alpha * (data.error - self._smoothed_error)\n else:\n self._smoothed_error = data.error\n if self._last_error is not None:\n # estimate time between calls for time derivatives\n # otherwise derivative is very noisy.\n # use an exponential running average, where alpha is actually\n # computed dynamically\n curr_period = time - self._last_time\n if self._smoothed_period is not None:\n alpha = (1. - exp(-self._smoothed_period))\n self._smoothed_period += alpha * (curr_period - self._smoothed_period)\n else:\n self._smoothed_period = curr_period\n\n # estimate derivative of error\n d_error = (data.error - self._last_error) / self._smoothed_period\n if self._smoothed_d_error is not None:\n update_rate = 20. # Hz\n alpha = (1. - exp(-self._smoothed_period * update_rate))\n self._smoothed_d_error += alpha * (d_error - self._smoothed_d_error)\n else:\n self._smoothed_d_error = d_error\n\n if self._last_time is not None and int(time*10.) > int(self._last_time*10.):\n rospy.loginfo('d_error: {:10.4f}'.format(d_error))\n rospy.loginfo('\\tdelta_error: {:10.4f}'.format(d_error * self._smoothed_period))\n rospy.loginfo('smoothed_d_error: {:10.4f}'.format(self._smoothed_d_error))\n\n Kp = self.Kp\n # if self._speed > 3.:\n # Kp /= self._speed * 3.0\n u = Kp * (self._smoothed_error + self.Kd * self._smoothed_d_error)\n self.drive_msg.drive.steering_angle = u\n # self.drive_msg.drive.speed = clipped_linear(\n # min(self._front_distance, self._smoothed_front_distance),\n # .5, # distance at which to travel minimum speed\n # self.min_speed, # minimum speed\n # 8., # distance at which to travel maximum speed\n # self.max_speed, # maximum speed\n # )\n self.drive_msg.drive.speed = (self.min_speed + self.max_speed) * .5\n self.drive_pub.publish(self.drive_msg)\n self._last_error = data.error\n self._last_time = time\n\n def angle2range(self, laser_scan, angle):\n \"\"\"Extracts the range at the given angle from the LaserScan message\n\n :param laser_scan: a LaserScan message\n :param angle: angle in radians\n \"\"\"\n i = int(round((angle - laser_scan.angle_min) / laser_scan.angle_increment))\n return laser_scan.ranges[i]\n \n def scan_callback(self, data):\n self._front_distance = self.angle2range(data, 0)\n update_rate = 1. # Hz\n alpha = (1. 
- exp(-self._smoothed_period * update_rate))\n td = (self._front_distance - self._smoothed_front_distance)\n self._smoothed_front_distance += alpha * td\n \n def odom_callback(self, data):\n self._speed = data.twist.twist.linear.x\n\n\nif __name__ == '__main__':\n CarControl(\n# Kp=1.2, # single-sided wall following\n# Kd=.008, # single-sided wall following\n Kp=1.8, # double-sided wall following\n Kd=.03, # double-sided wall following\n min_speed=1.0,\n max_speed=3.0,\n d_error_alpha=.1,\n )\n","sub_path":"lab4/scripts/car_control.py","file_name":"car_control.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"372081303","text":"import numpy as np\nfrom scipy import fftpack\nimport os\nimport csv\nimport tensorflow as tf\nfrom sklearn.metrics.pairwise import cosine_similarity, euclidean_distances\nimport heapq\nnp.random.seed(1)\ntf.set_random_seed(2)\n\nwindow_length = 500\ndct_length = 60\nincrement_ratio = 1\ndata_path = '/Users/anjanawijekoon/Data/SELFBACK/activity_data_34/merge_9/'\nimus = [1, 2]\n\nclasses = [\"jogging\", \"sitting\", \"standing\", \"walkfast\", \"walkmod\", \"walkslow\", \"upstairs\", \"downstairs\", \"lying\"]\nids = range(len(classes))\nclassDict = dict(zip(classes, ids))\n\n\ndef write_data(results_path, data):\n if os.path.isfile(results_path):\n f = open(results_path, 'a')\n f.write(data + '\\n')\n else:\n f = open(results_path, 'w')\n f.write(data + '\\n')\n f.close()\n\n\ndef read_data(path):\n person_data = {}\n files = os.listdir(path)\n for f in [ff for ff in files if ff != '.DS_Store']:\n temp = f.split(\"_\")\n user = temp[0]\n activity = temp[1]\n data = []\n reader = csv.reader(open(os.path.join(path, f), \"r\"), delimiter=\",\")\n for row in reader:\n data.append(row)\n\n activity_data = {}\n if user in person_data:\n activity_data = person_data[user]\n activity_data[activity] = data\n else:\n activity_data[activity] = data\n person_data[user] = activity_data\n\n return person_data\n\n\ndef extract_features(data):\n people = {}\n for person in data:\n person_data = data[person]\n activities = {}\n for activity in person_data:\n df = person_data[activity]\n ws, ts = split_windows(df)\n act = classDict[activity]\n dct_ws = dct(ws)\n dct_ts = dct(ts)\n activities[act] = [dct_ws, dct_ts]\n people[person] = activities\n return people\n\n\ndef split_windows(data):\n _w = []\n _t = []\n i = 0\n N = len(data)\n increment = int(window_length * increment_ratio)\n while i + window_length < N:\n start = i\n end = start + window_length\n w = [a[:3] for a in data[start:end]]\n t = [a[3:] for a in data[start:end]]\n i = int(i + increment)\n _w.append(w)\n _t.append(t)\n return _w, _t\n\n\ndef dct(windows):\n dct_window = []\n for tw in windows:\n x = [t[0] for t in tw]\n y = [t[1] for t in tw]\n z = [t[2] for t in tw]\n\n dct_x = np.abs(fftpack.dct(x, norm='ortho'))\n dct_y = np.abs(fftpack.dct(y, norm='ortho'))\n dct_z = np.abs(fftpack.dct(z, norm='ortho'))\n\n v = np.array([])\n v = np.concatenate((v, dct_x[:dct_length]))\n v = np.concatenate((v, dct_y[:dct_length]))\n v = np.concatenate((v, dct_z[:dct_length]))\n\n dct_window.append(v)\n return dct_window\n\n\ndef read():\n user_data = read_data(data_path)\n feature_data = extract_features(user_data)\n return feature_data\n\n\ndef cos_knn(k, test_data, test_labels, train_data, train_labels):\n cosine = cosine_similarity(test_data, train_data)\n top = [(heapq.nlargest(k, range(len(i)), i.take)) for i in cosine]\n top 
= [[train_labels[j] for j in i[:k]] for i in top]\n    pred = [max(set(i), key=i.count) for i in top]\n    pred = np.array(pred)\n    correct = 0\n    for j in range(len(test_labels)):\n        if test_labels[j] == pred[j]:\n            correct += 1\n    return correct/float(len(test_labels))\n\n\ndef ed_knn(k, test_data, test_labels, train_data, train_labels):\n    distances = euclidean_distances(test_data, train_data)\n    # nearest neighbours have the smallest euclidean distances\n    top = [(heapq.nsmallest(k, range(len(i)), i.take)) for i in distances]\n    top = [[train_labels[j] for j in i[:k]] for i in top]\n    pred = [max(set(i), key=i.count) for i in top]\n    pred = np.array(pred)\n    correct = 0\n    for j in range(len(test_labels)):\n        if test_labels[j] == pred[j]:\n            correct += 1\n    return correct/float(len(test_labels))\n\n\ndef split(_data, _test_ids):\n    train_data_ = {key: value for key, value in _data.items() if key not in _test_ids}\n    test_data_ = {key: value for key, value in _data.items() if key in _test_ids}\n    return train_data_, test_data_\n\n\ndef flatten(_data):\n    flatten_w_data = []\n    flatten_t_data = []\n    flatten_labels = []\n\n    for subject in _data:\n        activities = _data[subject]\n        for activity in activities:\n            activity_data = activities[activity]\n            flatten_w_data.extend(activity_data[0])\n            flatten_t_data.extend(activity_data[1])\n            flatten_labels.extend([activity for i in range(len(activity_data[0]))])\n    return flatten_w_data, flatten_t_data, flatten_labels\n\n","sub_path":"selfback_9/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"111655239","text":"import binascii\nimport hashlib\nimport hmac\nimport threading\nimport logging\nimport socket\nimport time\nimport json\n\nfrom PyQt5.QtCore import pyqtSignal, QObject\n\nfrom internals.exceptions import ServerError\nfrom internals.utils import send_message, get_message\nimport client.utils\nimport internals.config as CONFIG\n\nlogger = logging.getLogger('client')\nsocket_lock = threading.Lock()\n\n\nclass Transport(threading.Thread, QObject):\n    new_message = pyqtSignal(dict)\n    connection_lost = pyqtSignal()\n    message_205 = pyqtSignal()\n\n    def __init__(self, port, host, db, name, password, keys):\n        threading.Thread.__init__(self)\n        QObject.__init__(self)\n\n        self.db = db\n        self.name = name\n        self.password = password\n        self.keys = keys\n\n        self.transport = self.setup_transport(port, host)\n        self.setup_database()\n\n        self.is_running = True\n\n    def setup_transport(self, port, host):\n        transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        transport.settimeout(5)\n\n        max_attempts = 5\n        retry_delay = 1\n        connected = False\n\n        for i in range(max_attempts):\n            logger.info(f'Attempt #{i + 1} to connect to server')\n            try:\n                transport.connect((host, port))\n            except (OSError, ConnectionRefusedError):\n                pass\n            else:\n                connected = True\n                break\n            time.sleep(retry_delay)\n\n        if not connected:\n            connection_error_text = 'Can\\'t establish a connection to server'\n            logger.critical(connection_error_text)\n            raise ServerError(connection_error_text)\n\n        logger.debug('Connected to host')\n\n        password_bytes = self.password.encode('utf-8')\n        salt = self.name.lower().encode('utf-8')\n        password_hash = hashlib.pbkdf2_hmac('sha512', password_bytes, salt, 10000)\n        password_hash_string = binascii.hexlify(password_hash)\n\n        public_key = self.keys.publickey().export_key().decode('ascii')\n\n        try:\n            with socket_lock:\n                request = client.utils.create_presence_request(self.name, public_key)\n                send_message(transport, request)\n                response = 
get_message(transport)\n # self.process_response(response)\n\n if CONFIG.RESPONSE in response:\n if response[CONFIG.RESPONSE] == 400:\n raise ServerError(response[CONFIG.ERROR])\n elif response[CONFIG.RESPONSE] == 511:\n response_data = response[CONFIG.DATA]\n client_accept_hash = hmac.new(password_hash_string, response_data.encode('utf-8'))\n digest = client_accept_hash.digest()\n\n acceptance_request = CONFIG.RESPONSE_511\n acceptance_request[CONFIG.DATA] = binascii.b2a_base64(digest).decode('ascii')\n send_message(transport, acceptance_request)\n\n auth_response = get_message(transport)\n self.process_response(auth_response)\n\n except (OSError, json.JSONDecodeError):\n logger.critical('Connection lost')\n raise ServerError('Connection lost')\n\n logger.info('Connection successfully established')\n return transport\n\n def setup_database(self):\n connection_lost_message = 'Server connection lost'\n try:\n self.load_users()\n self.load_contacts()\n except OSError as err:\n if err.errno:\n logger.critical(connection_lost_message)\n raise ServerError(connection_lost_message)\n logger.error('Connection timeout while loading contacts')\n except json.JSONDecodeError:\n logger.critical(connection_lost_message)\n raise ServerError(connection_lost_message)\n\n def load_users(self):\n logger.debug(f'Requesting all the chat users for {self.name}')\n request = client.utils.create_user_list_request(self.name)\n with socket_lock:\n send_message(self.transport, request)\n response = get_message(self.transport)\n if CONFIG.RESPONSE in response and response[CONFIG.RESPONSE] == 202:\n self.db.add_users(response[CONFIG.DATA_LIST])\n else:\n logger.error('Error while loading known users')\n\n def load_contacts(self):\n self.db.clear_contacts()\n logger.debug(f'Requesting contacts for {self.name}')\n request = client.utils.create_contact_list_request(self.name)\n with socket_lock:\n send_message(self.transport, request)\n response = get_message(self.transport)\n if CONFIG.RESPONSE in response and response[CONFIG.RESPONSE] == 202:\n for contact in response[CONFIG.DATA_LIST]:\n self.db.add_contact(contact)\n else:\n logger.error('Error while loading known users')\n\n def process_response(self, server_response):\n logger.debug(f'Server response processing: {server_response}')\n\n def is_new_message(response):\n is_action_correct = CONFIG.ACTION in response and response[CONFIG.ACTION] == CONFIG.ACTION_MESSAGE\n has_from_and_to = CONFIG.SENDER in response and CONFIG.DESTINATION in response\n has_message_text = CONFIG.MESSAGE_TEXT in response\n is_to_correct = response[CONFIG.DESTINATION] == self.name\n return is_action_correct and has_from_and_to and has_message_text and is_to_correct\n\n if CONFIG.RESPONSE in server_response:\n if server_response[CONFIG.RESPONSE] == 200:\n return\n elif server_response[CONFIG.RESPONSE] == 400:\n raise ServerError(f'{server_response[CONFIG.ERROR]}')\n elif server_response[CONFIG.RESPONSE] == 205:\n self.load_users()\n self.load_contacts()\n self.message_205.emit()\n else:\n logger.debug(f'Accepted an unknown http code {server_response[CONFIG.RESPONSE]}')\n\n elif is_new_message(server_response):\n message_sender = server_response[CONFIG.SENDER]\n message_text = server_response[CONFIG.MESSAGE_TEXT]\n logger.debug(f'Received a new message from {message_sender}:{message_text}')\n self.db.save_message(message_sender, 'in', message_text)\n self.new_message.emit(server_response)\n\n def run(self):\n logger.debug('Process started, listening for new messages')\n while 
self.is_running:\n\n            # Wait for the socket to be free for other operations\n            time.sleep(1)\n            with socket_lock:\n                try:\n                    self.transport.settimeout(0.5)\n                    response = get_message(self.transport)\n                except OSError as err:\n                    if err.errno:\n                        logger.critical('Server connection lost')\n                        self.is_running = False\n                        self.connection_lost.emit()\n                except (\n                        ConnectionError, ConnectionAbortedError,\n                        ConnectionRefusedError, json.JSONDecodeError,\n                        TypeError\n                ):\n                    logger.debug('Server connection lost')\n                    self.is_running = False\n                    self.connection_lost.emit()\n                else:\n                    logger.debug(f'Accepted new message from server {response}')\n                    self.process_response(response)\n                finally:\n                    self.transport.settimeout(5)\n\n    def shutdown(self):\n        self.is_running = False\n        request = client.utils.create_exit_message(self.name)\n        with socket_lock:\n            try:\n                send_message(self.transport, request)\n            except OSError:\n                pass\n        logger.debug('Transport is shutting down')\n        time.sleep(0.5)\n\n    def add_contact(self, new_contact_name):\n        logger.debug(f'Adding new contact: {new_contact_name}')\n        request = client.utils.create_add_contact_request(self.name, new_contact_name)\n        with socket_lock:\n            send_message(self.transport, request)\n            response = get_message(self.transport)\n            self.process_response(response)\n\n    def remove_contact(self, contact_to_remove):\n        logger.debug(f'Removing contact: {contact_to_remove}')\n        request = client.utils.create_remove_contact_request(self.name, contact_to_remove)\n        with socket_lock:\n            send_message(self.transport, request)\n            response = get_message(self.transport)\n            self.process_response(response)\n\n    def send_message(self, to, message_text):\n        request = client.utils.create_message_request(self.name, to, message_text)\n        logger.debug(f'Prepared message to send: {request}')\n\n        with socket_lock:\n            send_message(self.transport, request)\n            response = get_message(self.transport)\n            self.process_response(response)\n            logger.info(f'Sent message to the user {to}')\n\n    def key_request(self, user):\n        logger.debug(f'Request public key for {user}')\n        request = client.utils.create_public_key_request(user)\n\n        with socket_lock:\n            send_message(self.transport, request)\n            response = get_message(self.transport)\n        if CONFIG.RESPONSE in response and response[CONFIG.RESPONSE] == 511:\n            return response[CONFIG.DATA]\n        else:\n            logger.error(f'Error requesting public key for {user}')\n","sub_path":"client/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":9687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"211575527","text":"#!/usr/bin/python3\n\nimport ipcalc\nimport requests\nimport json\nimport subprocess\nimport os.path as path\nimport fileinput\nfrom getmac import get_mac_address\n\n\nr = requests.post('https://tuportal.tunerd.mx/api/aps/show?mac_ap=12:42:C5:02:02:0D')\n#Get the MAC address\n#wlan_mac = get_mac_address(interface=\"wlan0\").upper()\n#url = 'https://tuportal.tunerd.mx/api/aps/show?mac_ap=' + wlan_mac\n#r = requests.post(url)\ntest = json.loads(r.text)\n\n\n\n#Assign values from the json response\n\nip = test['ip']\nmask = test['mask']\ndomain = test['domain']\ndns_server = test['dns_server']\nname = test['name']\nmac_ap = test['mac_ap']\nap_name = test['ap_name']\nrange_start = test['range_start']\nrange_end = test['range_end']\nname_wifi = test['name_wifi']\n\n#Subnetting\naddr = ipcalc.IP(ip, mask=mask)\nnetwork_with_cidr = str(addr.guess_network())\nbare_network = network_with_cidr.split('/')[0]\nbare_network_cidr = network_with_cidr.split('/')[1]\n\nstart_list = 
range_start.split('.')\nend_list = range_end.split('.')\nmax_pool = abs(int(start_list[3]) - int(end_list[3]))\n\n\n#Variables\ninterface = \"wlan0\"\n\n#Functions\ndef createFile():\n    subprocess.run([\"touch\", \"/etc/config/data\"])\n    subprocess.run([\"chmod\", \"775\", \"/etc/config/data\"])\n    file = open(\"/etc/config/data\", 'w')\n    try:\n        print(file.write(ip + \"\\n\"))\n        print(file.write(name_wifi + \"\\n\"))\n        print(file.write(start_list[3] + \"\\n\"))\n        print(file.write(str(max_pool)))\n    finally:\n        file.close()\n    return True\n\ndef saveData():\n    file = open(\"/etc/dhcp/data\", 'w')\n    try:\n        print(file.write(ip + \"\\n\"))\n        print(file.write(name_wifi + \"\\n\"))\n        print(file.write(start_list[3] + \"\\n\"))\n        print(file.write(str(max_pool)))\n    finally:\n        file.close()\n    return True\n\ndef updateSsid():\n    with fileinput.FileInput(\"c:/Users/hugov/Documents/Azul School/Python Course/Wifidog/prueba.txt\", inplace=True) as file:\n        target = False\n        for line in file:\n            if not target and line.startswith(\"config wifi-iface 'wifinet0'\"):\n                target = True\n            elif target and line.startswith(\" option ssid\"):\n                line = f\" option ssid '{name_wifi}'\\n\"\n                target = False\n            print(line, end=\"\")\n    return True\n\ndef updateSsidTest():\n    with fileinput.input(\"c:/Users/hugov/Documents/Azul School/Python Course/Wifidog/prueba.txt\", inplace=True) as file:\n        print(type(file))\n        for line in file:\n            print(line)\n    return True\n\n\ndef updateNetwork():\n    with fileinput.FileInput(\"c:/Users/hugov/Documents/Azul School/Python Course/Wifidog/prueba.txt\", inplace=True) as file:\n        target = False\n        for line in file:\n            if not target and line.startswith(f\"config interface '{interface}'\"):\n                target = True\n            elif target and line.startswith(\" option ipaddr\"):\n                line = f\" option ipaddr '{ip}'\"\n                target = False\n            print(line, end=\"\")\n    return True\n\ndef updateDhcp():\n    with fileinput.FileInput(\"c:/Users/hugov/Documents/Azul School/Python Course/Wifidog/prueba.txt\", inplace=True) as file:\n        target = False\n        for line in file:\n            if not target and line.startswith(f\"config dhcp '{interface}'\"):\n                target = True\n            elif target and line.startswith(\" option start\"):\n                line = f\" option start '{start_list[3]}'\\n\"\n            elif target and line.startswith(\" option limit\"):\n                line = f\" option limit '{max_pool}'\\n\"\n                target = False\n            print(line, end=\"\")\n    return True\n\ndef mainFunction():\n    file = open(\"/etc/dhcp/data\", 'r')\n    try:\n        anbu1 = file.readline().rstrip('\\n')\n        anbu2 = file.readline().rstrip('\\n')\n        anbu3 = file.readline().rstrip('\\n')\n        anbu4 = file.readline().rstrip('\\n')\n\n        if ip != anbu1:\n            updateNetwork()\n        elif name_wifi != anbu2:\n            updateSsid()\n        elif start_list[3] != anbu3 or str(max_pool) != anbu4:\n            updateDhcp()\n            subprocess.run([\"reboot\"])\n    finally:\n        file.close()\n    return True\n\n'''\nif path.exists('/etc/config/data'):\n    mainFunction()\n    saveData()\nelse:\n    createFile()\n    saveData()\n    mainFunction()\n'''\n\nupdateSsidTest()","sub_path":"conf_linksys.py","file_name":"conf_linksys.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"74947453","text":"from collections import deque\nclass SnakeGame:\n    def __init__(self, width, height, foods):\n        self.board = [[0 for i in range(width)] for j in range(height)]\n        self.width = width\n        self.height = height\n\n        for food in foods:\n            self.board[food[0]][food[1]] = 1\n\n        self.snake = deque([(0, 0)])\n        self.snake_body = set()\n        
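# snake_body mirrors the deque so self-collision membership tests are O(1)\n        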
self.snake_body.add((0,0))\n\n        self.score = 0\n\n    def move(self, direction):\n        # assumes row 0 is the top row, so 'U' decreases the row index\n        dx = [-1, 1, 0, 0]\n        dy = [0, 0, -1, 1]\n        action = 0\n        if direction == 'U':\n            action = 0\n        elif direction == 'D':\n            action = 1\n        elif direction == 'L':\n            action = 2\n        elif direction == 'R':\n            action = 3\n        else:\n            return -999\n\n        head = self.snake[0]\n        head_next = (head[0] + dx[action], head[1] + dy[action])\n\n        # out of wall\n        if head_next[0] < 0 or head_next[0] >= self.height or head_next[1] < 0 or head_next[1] >= self.width:\n            return -1\n\n        # hit self\n        if head_next in self.snake_body and head_next != self.snake[-1]:\n            return -1\n\n        # safe, move on\n        # no food\n        if self.board[head_next[0]][head_next[1]] == 0:\n            # drop the tail cell first, then advance the head\n            tail = self.snake.pop()\n            self.snake_body.remove(tail)\n            self.snake.appendleft(head_next)\n            self.snake_body.add(head_next)\n        # eat food\n        else:\n            self.snake.appendleft(head_next)\n            self.snake_body.add(head_next)\n            self.board[head_next[0]][head_next[1]] = 0\n            self.score += 1\n        return self.score\n\n","sub_path":"snake game.py","file_name":"snake game.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"394766328","text":"import PySimpleGUI as sg\nimport urx\nimport time\nimport socket\n\n\n\n\ndef moveIt(arg):\n    if arg == 'w' or arg == 'Forward':\n        print('fwd')\n        moveRobL(x=-1)\n    elif arg == 's' or arg == 'Backward':\n        print('bwd')\n        moveRobL(x=1)\n    elif arg == 'a' or arg == 'Left':\n        print('left')\n        moveRobL(y=-1)\n    elif arg == 'd' or arg == 'Right':\n        print('right')\n        moveRobL(y=1)\n    elif arg == 'q' or arg == 'RotLeft':\n        print('rotate l')\n        moveRobL(x=1, y=-1)\n    elif arg == 'e' or arg == 'RotRight':\n        print('rotate r')\n        moveRobL(x=-1, y=1)\n    elif arg == 'r' or arg == 'Up':\n        print('up')\n        moveRobL(z=1)\n    elif arg == 'f' or arg == 'Down':\n        print('down')\n        moveRobL(z=-1)\n    else:\n        pass\n\n    \"\"\"\n    rotate x,y\n    x′=xcosθ−ysinθ\n    y′=ycosθ+xsinθ\n    \"\"\"\n\ndef moveRobL(x=0, y=0, z=0):\n    if not rob.is_program_running():\n        rob.movel((x, y, z, 0, 0, 0), relative=True, wait=False, vel=10)\n\n\n\n\n\nif __name__ == \"__main__\":\n    connect = True\n\n\n    #Start Connection\n    layout = [\n        [sg.Text('Connect to robot:'), sg.InputText(), sg.Button('Connect')],\n        [sg.Quit(button_color=('black', 'orange'))]\n    ]\n    windowStart = sg.Window('Connect to robot', auto_size_text=True).Layout(layout)\n\n\n    while True:\n        event, values = windowStart.Read()\n        if event == 'Quit':\n            connect = False\n        if event == 'Connect':\n            robotAdd = values[0]\n            try:\n                print(robotAdd)\n                rob = urx.Robot(robotAdd)\n                connect = True\n            except socket.gaierror:\n                print(\"Cannot connect to \" + robotAdd)\n                connect = False\n\n        break\n    windowStart.CloseNonBlocking()\n    #//Start Connection\n\n\n    #GUI Layout\n\n    if connect:\n\n        gui_rows = [[sg.Text('Robot URX Control')],\n                    [sg.Text('Robot: ' + robotAdd, size=(15, 1)), sg.RealtimeButton('Connect')],\n                    [sg.RealtimeButton('RotLeft', size=(10, 1)), sg.RealtimeButton('Forward', size=(10, 1)), sg.RealtimeButton('RotRight', size=(10, 1))],\n                    [sg.RealtimeButton('Left', size=(10, 1)), sg.T(' ' * 21), sg.RealtimeButton('Right', size=(10, 1))],\n                    [sg.RealtimeButton('Up', size=(10, 1)), sg.RealtimeButton('Backward', size=(10, 1)), sg.RealtimeButton('Down', size=(10, 1))],\n                    [sg.Quit(button_color=('black', 'orange'))]\n                    ]\n\n        #create window\n        window = sg.Window('Robot URX Control', return_keyboard_events=True, use_default_focus=False, auto_size_text=True).Layout(gui_rows)\n        lastEvent = '---'\n\n        \"\"\"\n        
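# optional pre-motion setup (left commented out in the original): set_tcp defines the tool\n        # center point and set_payload declares the attached mass before jogging the robot\n        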
rob.set_tcp((0, 0, 0.1, 0, 0, 0))\n rob.set_payload(2, (0, 0, 0.1))\n time.sleep(0.2) #leave some time to robot to process the setup commands\n \"\"\"\n\n #loop window app\n try: \n while True: \n event, values = window.ReadNonBlocking()\n print(event)\n if event == 'Connect' and lastEvent != event: \n print('Need create pop up for connection')\n if event is not None and lastEvent != event:\n moveIt(event)\n if event is None and rob is not None:\n rob.stopl()\n \n if event == 'Quit' or values is None:\n break\n lastEvent = event \n finally:\n window.CloseNonBlocking()\n rob.close()\n\n","sub_path":"urx-controll.py","file_name":"urx-controll.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"441194463","text":"from PIL import Image\nimport json\nimport os\nimport pandas as pd\nimport numpy as np\nfrom random import shuffle\n\n\ndef prepare_category_dict(root_path=\"\", is_training=True):\n if is_training:\n training_csv_path = os.path.join(root_path, 'train_modified.csv')\n else:\n training_csv_path = os.path.join(root_path, 'vali_modified.csv')\n print(\"reading csv file: \", training_csv_path)\n training_csv = pd.read_csv(training_csv_path, usecols=['image_path', 'category']).as_matrix()\n\n training_dict = {}\n\n for i in range(len(training_csv)):\n key = training_csv[i, 0]\n val = training_csv[i, 1]\n training_dict[key] = val\n\n return training_dict\n\n\ndef convert_to_coco_bbox(coors):\n x1 = float(coors[0])\n y1 = float(coors[1])\n x2 = float(coors[2])\n y2 = float(coors[3])\n W = x2 - x1\n H = y2 - y1\n return [x1, y1, W, H]\n\ndef get_synthetic_categories():\n categories = []\n for index in range(48):\n dic = {\n 'id': index,\n 'name': '{}'.format(index),\n 'supercategory': 'fashion'\n }\n categories.append(dic)\n return categories\n\ndef get_categories(num_categories=5):\n complete_categories = []\n for i in range(num_categories+1):\n dic = {\n 'id': i,\n 'name': '{}'.format(i),\n 'supercategory': 'fashion'\n }\n complete_categories.append(dic)\n return complete_categories\n\ndef dump_annotation_file(IS_TRAINING=True):\n # SAVE_PATH = 'instances_fashion_train2018.json' if IS_TRAINING else 'instances_fashion_test2018.json'\n\n SAVE_PATH = 'annotations/instances_cs231_train_2018.json' if IS_TRAINING else 'annotations/instances_cs231_test_2018.json'\n\n images, anns, categories = [], [], []\n images_to_annos = {}\n # put all your fashion data here img/Anno needs to be here.\n # root_path = 'tf-faster-rcnn/data/'\n # coco_path = '/afs/cs.stanford.edu/u/xw1/fashion_recommendation/tf-faster-rcnn/data/coco/annotations/'\n # coco_path = '/home/feiliu/Desktop/cs231N_Spring_2018/final_project/fashion_recommendation/tensorpack/examples/data/coco/'\n coco_path = '/home/feiliu/Desktop/cs231N_Spring_2018/final_project/fashion_recommendation/deep_fashion_data/'\n # coco_path = '/Users/feiliu/Desktop/CS231N/final_project/fashion_recommendation/tensorpack/examples/data/coco/'\n # root_path = '/afs/cs.stanford.edu/u/xw1/fashion_recommendation/tf-faster-rcnn/data/fashion/'\n # root_path = '/home/feiliu/Desktop/cs231N_Spring_2018/final_project/deep_fashion_data/'\n # docker_image = root_path #'/cs231_project/tf-faster-rcnn/data/deep_fashion_data/'\n root_path = '/home/feiliu/Desktop/cs231N_Spring_2018/final_project/fashion_recommendation/deep_fashion_data/'\n # root_path = '/Users/feiliu/Desktop/CS231N/final_project/fashion_recommendation/deep_fashion_data/'\n # root_path = 
'/afs/cs.stanford.edu/u/xw1/fashion_recommendation/tf-faster-rcnn/data/fashion/'\n # docker_image = '/cs231_project/tf-faster-rcnn/data/deep_fashion_data/'\n # docker_image = '/afs/cs.stanford.edu/u/xw1/fashion_recommendation/tf-faster-rcnn/data/fashion/'\n docker_image = root_path\n category_file_path = root_path+\"Anno/list_category_img.txt\"\n json_output_path = os.path.join(coco_path, SAVE_PATH)\n # json_output_path = '/home/feiliu/Desktop/cs231N_Spring_2018/final_project/fashion_recommendation/tf-faster-rcnn/data/coco/annotations/instances_fashion_train2018.json'\n # json_output_path = '/afs/cs.stanford.edu/u/xw1/fashion_recommendation/tf-faster-rcnn/data/coco/annotations/instances_fashion_train2018.json'\n bbox_file_path = root_path+\"Anno/list_bbox.txt\"\n subsample_limit = 10 #600000000\n\n categorical_dict = prepare_category_dict(root_path, IS_TRAINING)\n\n\n with open(category_file_path, 'r') as f:\n content = f.readlines()\n content = content[2:]\n shuffle(content)\n bbox_map = {}\n\n with open(bbox_file_path, 'r') as f:\n bbox_content = f.readlines()\n bbox_content = bbox_content[2:]\n for bbox_line in bbox_content:\n pair = bbox_line.split()\n image_path = docker_image + pair[0]\n bbox_coors =pair[1:]\n bbox_map[image_path]=convert_to_coco_bbox(bbox_coors)\n\n i = 0\n category_map = {}\n category_set = set()\n for line in content:\n if i > subsample_limit:\n break\n pair = line.split()\n image_read_path = root_path + pair[0]\n image_path = docker_image + pair[0]\n\n \n \n img = Image.open(image_read_path)\n width, height = img.size\n\n if pair[0] in categorical_dict:\n category_map[image_path]=categorical_dict[pair[0]]\n dic = {'file_name': pair[0], 'id': image_path, 'height': height, 'width': width}\n images.append(dic)\n i += 1\n # categories.append(categorical_dict[pair[0]])\n category_set.add(categorical_dict[pair[0]])\n\n else:\n continue\n print(\"category set:\", category_set)\n ann_index = 0\n\n for image_dic in images:\n image_path = image_dic['id']\n bbox_coors = bbox_map[image_path]\n dic2 = {'segmentation': [], 'area': bbox_coors[2]*bbox_coors[3],\n 'iscrowd': 0, 'image_id': image_dic['id'], 'bbox': bbox_coors,\n 'category_id': category_map[image_path], 'id': ann_index}\n ann_index+=1\n anns.append(dic2)\n\n\n assert len(images) == len(anns)\n # assert len(images) == len(categories)\n print(len(images))\n\n data = {'images':images, 'annotations':anns, 'categories':get_categories()}\n\n with open(json_output_path, 'w+') as outfile:\n json.dump(data, outfile)\n\nif __name__=='__main__':\n dump_annotation_file(True)\n dump_annotation_file(False)\n # prepare_category_dict(root_path=\"\", is_training=True)\n\n","sub_path":"deep_fashion_to_coco.py","file_name":"deep_fashion_to_coco.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"341539478","text":"'''Trains a simple binarize CNN on the FMNIST dataset.\nModified from keras' examples/mnist_mlp.py\nGets to 98.98% test accuracy after 20 epochs using tensorflow backend\n'''\n\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\n#np.random.seed(1337) # for reproducibility\n\nimport tensorflow.keras\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.datasets import fashion_mnist\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Input, Dense, Dropout, Activation\nfrom tensorflow.keras.layers import 
BatchNormalization, MaxPooling2D, AveragePooling2D, Conv2D\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.optimizers import SGD, Adam, RMSprop\nfrom tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping, CSVLogger, ReduceLROnPlateau, TensorBoard\nfrom keras.utils import to_categorical\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.metrics import top_k_categorical_accuracy\n#from binary_ops import binary_tanh as binary_tanh_op\n#from binary_layers import BinaryDense, BinaryConv2D\n\nfrom tensorflow.keras.utils import plot_model\n#from keras.utils import model_to_dot\n\n#from IPython.display import SVG\n#import graphviz\n#import pydot_ng as pydot\n\n##################################################################################################################################\n# Def new functions\ndef relu6(x):\n \"\"\"Relu 6\n \"\"\"\n return K.relu(x, max_value=6.0)\n\ndef top_3_accuracy(y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=3)\n##################################################################################################################################\nH = 1.\nkernel_lr_multiplier = 'Glorot'\n\n# nn\nbatch_size = 50\nepochs = 5\ntotal_epochs = 15\nchannels = 1\nimg_rows = 28\nimg_cols = 28\nfilters = 32\nkernel_size = (3, 3)\npool_size = (2, 2)\nhidden_units = 128\nclasses = 10\nuse_bias = False\n\n# learning rate schedule\nlr_start = 1e-3\nlr_end = 1e-4\nlr_decay = (lr_end / lr_start)**(1. / epochs)\n\n# BN\nepsilon = 1e-6\nmomentum = 0.9\n\n# dropout\np1 = 0.25\np2 = 0.5\n\n##################################################################################################################################\n# the data, shuffled and split between train_f and test_f sets\n(X_train_f, y_train_f), (X_test_f, y_test_f) = fashion_mnist.load_data()\n\nX_train_f = X_train_f.reshape(60000, 1, 28, 28)\nX_test_f = X_test_f.reshape(10000, 1, 28, 28)\nX_train_f = X_train_f.astype('float32')\nX_test_f = X_test_f.astype('float32')\nX_train_f /= 255\nX_test_f /= 255\nprint(X_train_f.shape[0], 'train_f samples')\nprint(X_test_f.shape[0], 'test_f samples')\n\n# convert class vectors to binary class matrices\n#Y_train_f = np_utils.to_categorical(y_train_f, classes) * 2 - 1 # -1 or 1 for hinge loss\n#Y_test_f = np_utils.to_categorical(y_test_f, classes) * 2 - 1\n\nY_train_f = to_categorical(y_train_f, classes)\nY_test_f = to_categorical(y_test_f, classes)\n\n##################################################################################################################################\ninput = Input(shape=(1,28,28))\n\n# conv1\nconv1 = Conv2D(32, kernel_size, strides=(1, 1), padding='valid', data_format='channels_first',\n dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',\n bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, name='conv1')(input)\npool21= MaxPooling2D(pool_size=pool_size, name='pool21', data_format='channels_first')(conv1)\nbn1 = BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn1')(pool21)\nact1 = Activation(relu6, name='act1')(bn1)\n\n# conv2\nconv2 = Conv2D(64, kernel_size, strides=(1, 1), padding='valid', data_format='channels_first',\n dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',\n 
bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, name='conv2')(act1)\npool22= MaxPooling2D(pool_size=pool_size, name='pool22', data_format='channels_first')(conv2)\nbn2 = BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn2')(pool22)\nact2 = Activation(relu6, name='act2')(bn2)\n\n\n\n# conv4\nconv3 = Conv2D(64, kernel_size, strides=(1, 1), padding='valid', data_format='channels_first',\n dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',\n bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, name='conv4')(act2)\n#bpool1= MaxPooling2D(pool_size=pool_size, name='bpool1', data_format='channels_first')(conv4)\nbn3 = BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn4')(conv3)\nact3 = Activation(relu6, name='act4')(bn3)\n\n# conv5\nconv4 = Conv2D(128, kernel_size, strides=(1, 1), padding='valid', data_format='channels_first',\n dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',\n bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, name='conv5')(act3)\n#bpool2= MaxPooling2D(pool_size=pool_size, name='bpool2', data_format='channels_first')(conv5)\nbn4 = BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn5')(conv4)\nact4 = Activation(relu6, name='act5')(bn4)\n\n\n#flatten_2\nflat2= Flatten()(act4)\n#Dense_21\ndns21 = Dense(128, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,\n bias_constraint=None, name='dns21')(flat2)\nbn_d3 = BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn_d3')(dns21)\nact_d3= Activation(relu6, name='act_d3')(bn_d3)\n\n#Dense_22\ndns22 = Dense(classes, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,\n bias_constraint=None, name='dns22')(act_d3)\nbn_d4 = BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn_d4')(dns22)\nb_dro = Dropout(0.01)(bn_d4)\nact_d4= Activation('softmax', name='act_d4')(b_dro)\n\n##################################################################################################################################\n# model generation\nmerged2 = Model(inputs=[input],outputs=[act_d4])\n\n##################################################################################################################################\n# model compilation\n#opt = Adam(lr=lr_start)\nopt = Adam()\nearlystop = EarlyStopping(monitor='val_acc', patience=30, verbose=0, mode='auto')\nmerged2.compile(loss=tensorflow.keras.losses.categorical_crossentropy, optimizer=opt, metrics=['accuracy', top_3_accuracy])\n\n#merged2.compile(loss= keras.losses.categorical_crossentropy, optimizer= keras.optimizers.Adadelta(), metrics=['accuracy'])\n\n#merged2.compile(loss='squared_hinge', optimizer=opt, metrics=['accuracy'])\n\n##################################################################################################################################\n# model 
details\nmerged2.summary()\n\n##################################################################################################################################\n# model visualization\n\n##################################################################################################################################\n# model fit\n##### Begin Batch Train #####\n\n#lr_scheduler = LearningRateScheduler(lambda e: lr_start * lr_decay ** e)\nfmnist = 'training_fmnist' + '_baseline_' + '.log'\ncsv_logger_f = CSVLogger(fmnist)\nreduce_lr_f = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n                                patience=2, min_lr=0.0001)\n\nprint(\"#################### Training Fashion MNIST Dataset ####################\")\nhistory1 = merged2.fit(X_train_f, Y_train_f,\n                       batch_size=batch_size, epochs=epochs,\n                       verbose=1, validation_data=(X_test_f, Y_test_f),\n                       callbacks=[reduce_lr_f])\n##### End Batch Train #####\n\n##################################################################################################################################\n# Test Scoring\nscore2 = merged2.evaluate(X_test_f, Y_test_f, verbose=0)\n\n##################################################################################################################################\n# model details\nmerged2.summary()\n\nprint('Test score baseline fashion mnist:', score2[0])\nprint('Test accuracy baseline fashion mnist:', score2[1])\n\n##################################################################################################################################\n## model saving\nmerged2.save('fmnist_baseline.h5')\n\n##################################################################################################################################\n","sub_path":"tensorflow_examples/fashionMNIST/trainFashionMNIST_CMPSC497_modified.py","file_name":"trainFashionMNIST_CMPSC497_modified.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"389037269","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n   File Name:     nre\n   Description :\n   Author :       chenhao\n   date:          2019-10-08\n-------------------------------------------------\n   Change Activity:\n                   2019-10-08:\n-------------------------------------------------\n\"\"\"\n\n# copy and numpy are used below (copy.deepcopy, np.array) and must be imported explicitly\nimport copy\n\nimport numpy as np\nfrom collections import defaultdict\n\nfrom keras import Model\nfrom keras.layers import *\nfrom keras.losses import sparse_categorical_crossentropy\nfrom keras.regularizers import l1_l2\n\nfrom eigen_nltk.classify import ClassifyContext\nfrom eigen_nltk.core import ModelEstimator\nfrom eigen_nltk.model_utils import pick_slice, get_seq_embedding_model, get_base_customer_objects\nfrom eigen_nltk.optimizer import get_optimizer_cls, AccumOptimizer\nfrom eigen_nltk.trans import DataParser\nfrom eigen_nltk.utils import padding_seq, add_offset, get_major_element, get_distribution\n\nEntityClsContext = ClassifyContext\n\n\nclass EntityClsDataParser(DataParser):\n    def __init__(self, context):\n        assert isinstance(context, EntityClsContext)\n        self.context = context\n        self.tokenizer = context.tokenizer\n\n\nclass EntityClsEstimator(ModelEstimator):\n    customer_objects = get_base_customer_objects()\n\n    def __init__(self, name, context, max_len, logger_level=\"INFO\"):\n        self.context = context\n        self.data_parser = EntityClsDataParser(context)\n        self.max_len = max_len\n        self.vocab_size = self.context.vocab_size\n        self.label_size = self.context.label_size\n        super().__init__(name, self.data_parser, logger_level)\n\n    def 
_build_model(self, use_bert=True, fine_tune_bert=False, use_lstm=False,\n word_embedding_dim=16, lstm_dim=16, freeze_layer_num=0, drop_rate=0.2, l1=0, l2=0.01,\n bert_ckpt_path=None, bert_keras_path=None, **kwargs):\n entity_input = Input(shape=(self.max_len, 1), dtype='float32', name='e1')\n\n seq_embedding_model = get_seq_embedding_model(self.max_len, self.vocab_size,\n freeze_layer_num, word_embedding_dim, lstm_dim,\n use_bert, fine_tune_bert, use_lstm,\n bert_ckpt_path, bert_keras_path)\n\n words_input, seg_input = seq_embedding_model.inputs\n feature = seq_embedding_model.output\n\n entity_feature = Lambda(pick_slice)([feature, entity_input])\n entity_feature = Concatenate()([entity_feature, Lambda(lambda x: x[:, 0, :])(feature)])\n final_feature = Dropout(drop_rate)(entity_feature)\n out = Dense(self.label_size, activation=\"sigmoid\", kernel_regularizer=l1_l2(l1, l2))(final_feature)\n model = Model([words_input, seg_input, entity_input], out)\n return model\n\n def _compile_model(self, optimizer_name, optimizer_args, acc_num=1, **kwargs):\n opt_cls = get_optimizer_cls(optimizer_name)\n optimizer = opt_cls(**optimizer_args)\n if acc_num > 1:\n self.logger.info(\"get soft batch with acc_num = {}\".format(acc_num))\n optimizer = AccumOptimizer(optimizer, acc_num)\n self.training_model.compile(optimizer, loss=sparse_categorical_crossentropy, metrics=[\"accuracy\"])\n return self.training_model\n\n def _get_model_train_input(self, train_data):\n x = []\n seg = []\n entity_pos = []\n y = []\n for item in train_data:\n x.append(padding_seq(item['x'], self.max_len))\n seg.append(padding_seq(item['seg'], self.max_len))\n entity, start, end = item['entity_info']\n entity_tag = [0] * self.max_len\n entity_tag[start] = 1\n entity_pos.append(entity_tag)\n if 'label' in item.keys():\n label = item['label']\n y.append(self.context.label2id[label])\n\n x = np.array(x)\n seg = np.array(seg)\n entity_pos = np.array(entity_pos)[:, :, np.newaxis]\n if y:\n y = np.array(y)\n return [x, seg, entity_pos], y\n\n def create_model(self, model_args):\n model_args[\"max_len\"] = self.max_len\n super().create_model(model_args)\n\n # add more information to the origin data\n def _get_enhanced_data(self, data):\n short_data = self._get_short_data(data)\n rs_data = []\n for idx, item in enumerate(short_data):\n text = item['content']\n token_input = self.data_parser.get_token_input(text)\n char2token_mapping = token_input['char2token_mapping']\n item.update(**token_input)\n entity_list = item['entity_list']\n offset = item['offset']\n for entity, entity_type, span_list in entity_list:\n span_list = [add_offset(span, -offset) for span in span_list]\n for s, e in span_list:\n if s < 0 or e >= len(char2token_mapping):\n continue\n start, end = char2token_mapping[s], char2token_mapping[e]\n tmp_item = copy.deepcopy(item)\n tmp_item, start, end = add_entity_tag(tmp_item, start, end)\n tmp_item[\"entity_info\"] = (entity, start, end)\n if entity_type:\n tmp_item[\"label\"] = entity_type\n rs_data.append(tmp_item)\n self.logger.info(\"get {0} enhanced data from {1} origin data\".format(len(rs_data), len(data)))\n\n return rs_data\n\n def _get_short_data(self, data):\n return self.data_parser.get_short_data(data, self.max_len - 2)\n\n def train_model(self, train_data, dev_data, train_args, compile_args):\n \"\"\"\n\n :param dev_data:\n :param train_data: [{\"title\":\"test\", \"content\":\"The band performs with a high level of musicality , energy and spirit while combining sensitive group interplay with dynamic solo 
improvisations.\",\n \"entity_list\":[[\"余盆网\",\"1\",[[1,4],[101,104]]],[\"懒财网\",\"1\",[[32,35],[82,85],[87,90]]]]]\n :param train_args:\n :return: model\n \"\"\"\n return super().train_model(train_data, dev_data, train_args, compile_args)\n\n def _get_predict_data_from_model_output(self, origin_data, enhanced_data, pred_data, show_detail=False,\n return_distribute=False):\n rs_dict = defaultdict(dict)\n pred_hard = np.argmax(pred_data, axis=-1)\n if show_detail:\n print(\"raw ner output:\\n{}\".format(pred_hard))\n for rel, item in zip(pred_hard, enhanced_data):\n idx = item['idx']\n rel_name = self.context.id2label[rel]\n entity_name = item['entity_info'][0]\n tmp_dict = rs_dict[idx]\n if entity_name in tmp_dict.keys():\n tmp_dict[entity_name].append(rel_name)\n else:\n tmp_dict[entity_name] = [rel_name]\n rs_list = []\n for idx in range(len(origin_data)):\n tmp_dict = rs_dict[idx]\n if return_distribute:\n tmp_list = [(k, get_distribution(v)) for k, v in tmp_dict.items()]\n else:\n tmp_list = [(k, get_major_element(v)) for k, v in tmp_dict.items()]\n rs_list.append(tmp_list)\n return rs_list\n\n\nENTITY_START = ['[s]', 10]\nENTITY_END = ['[e]', 11]\n\n\ndef add_entity_tag(item, start, end):\n token = item['token']\n x = item['x']\n token.insert(start, ENTITY_START[0])\n x.insert(start, ENTITY_START[1])\n end += 1\n token.insert(end, ENTITY_END[0])\n x.insert(end, ENTITY_END[1])\n end += 1\n item['seg'] = item['seg'] + [0] * 2\n return item, start, end\n","sub_path":"eigen_nltk/entity_cliassify.py","file_name":"entity_cliassify.py","file_ext":"py","file_size_in_byte":7715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"454866187","text":"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# ==============================================================================\n# Copyright (C) 2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n# ==============================================================================\n# Modified from tensorflow object detection examples:\n# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/label_image/label_image.py\n# https://github.com/mystic123/tensorflow-yolo-v3/blob/master/utils.py\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport openvino_tensorflow as ovtf\nimport time\nfrom PIL import Image, ImageFont, ImageDraw\n\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.compat.v1.GraphDef()\n assert os.path.exists(model_file), \"Could not find model path\"\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\n\ndef letter_box_image(image_path, input_height, 
input_width,\n fill_value) -> np.ndarray:\n assert os.path.exists(image_path), \"Could not find image path\"\n image = Image.open(image_path)\n height_ratio = float(input_height) / image.size[1]\n width_ratio = float(input_width) / image.size[0]\n fit_ratio = min(width_ratio, height_ratio)\n fit_height = int(image.size[1] * fit_ratio)\n fit_width = int(image.size[0] * fit_ratio)\n fit_image = np.asarray(\n image.resize((fit_width, fit_height), resample=Image.BILINEAR))\n\n fill_value = np.full(fit_image.shape[2], fill_value, fit_image.dtype)\n to_return = np.tile(fill_value, (input_height, input_width, 1))\n pad_top = int(0.5 * (input_height - fit_height))\n pad_left = int(0.5 * (input_width - fit_width))\n to_return[pad_top:pad_top + fit_height, pad_left:pad_left +\n fit_width] = fit_image\n\n return to_return, image\n\n\ndef load_coco_names(label_file):\n names = {}\n assert os.path.exists(label_file), \"could not find label file path\"\n with open(label_file) as f:\n for id, name in enumerate(f):\n names[id] = name\n return names\n\n\ndef letter_box_pos_to_original_pos(letter_pos, current_size,\n ori_image_size) -> np.ndarray:\n letter_pos = np.asarray(letter_pos, dtype=np.float)\n current_size = np.asarray(current_size, dtype=np.float)\n ori_image_size = np.asarray(ori_image_size, dtype=np.float)\n final_ratio = min(current_size[0] / ori_image_size[0],\n current_size[1] / ori_image_size[1])\n pad = 0.5 * (current_size - final_ratio * ori_image_size)\n pad = pad.astype(np.int32)\n to_return_pos = (letter_pos - pad) / final_ratio\n return to_return_pos\n\n\ndef convert_to_original_size(box, size, original_size, is_letter_box_image):\n if is_letter_box_image:\n box = box.reshape(2, 2)\n box[0, :] = letter_box_pos_to_original_pos(box[0, :], size,\n original_size)\n box[1, :] = letter_box_pos_to_original_pos(box[1, :], size,\n original_size)\n else:\n ratio = original_size / size\n box = box.reshape(2, 2) * ratio\n return list(box.reshape(-1))\n\n\ndef draw_boxes(boxes, img, cls_names, detection_size, is_letter_box_image):\n draw = ImageDraw.Draw(img)\n for cls, bboxs in boxes.items():\n color = (256, 256, 256)\n for box, score in bboxs:\n box = convert_to_original_size(box, np.array(detection_size),\n np.array(img.size),\n is_letter_box_image)\n draw.rectangle(box, outline=color)\n draw.text(\n box[:2],\n '{} {:.2f}%'.format(cls_names[cls], score * 100),\n fill=color)\n\n\ndef iou(box1, box2):\n b1_x0, b1_y0, b1_x1, b1_y1 = box1\n b2_x0, b2_y0, b2_x1, b2_y1 = box2\n\n int_x0 = max(b1_x0, b2_x0)\n int_y0 = max(b1_y0, b2_y0)\n int_x1 = min(b1_x1, b2_x1)\n int_y1 = min(b1_y1, b2_y1)\n\n int_area = (int_x1 - int_x0) * (int_y1 - int_y0)\n\n b1_area = (b1_x1 - b1_x0) * (b1_y1 - b1_y0)\n b2_area = (b2_x1 - b2_x0) * (b2_y1 - b2_y0)\n\n iou = int_area / (b1_area + b2_area - int_area + 1e-05)\n\n return iou\n\n\ndef non_max_suppression(predictions_with_boxes,\n confidence_threshold,\n iou_threshold=0.4):\n conf_mask = np.expand_dims(\n (predictions_with_boxes[:, :, 4] > confidence_threshold), -1)\n predictions = predictions_with_boxes * conf_mask\n\n result = {}\n for i, image_pred in enumerate(predictions):\n shape = image_pred.shape\n non_zero_idxs = np.nonzero(image_pred)\n image_pred = image_pred[non_zero_idxs]\n image_pred = image_pred.reshape(-1, shape[-1])\n\n bbox_attrs = image_pred[:, :5]\n classes = image_pred[:, 5:]\n classes = np.argmax(classes, axis=-1)\n\n unique_classes = list(set(classes.reshape(-1)))\n\n for cls in unique_classes:\n cls_mask = classes == cls\n cls_boxes = 
bbox_attrs[np.nonzero(cls_mask)]\n cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]]\n cls_scores = cls_boxes[:, -1]\n cls_boxes = cls_boxes[:, :-1]\n\n while len(cls_boxes) > 0:\n box = cls_boxes[0]\n score = cls_scores[0]\n if not cls in result:\n result[cls] = []\n result[cls].append((box, score))\n cls_boxes = cls_boxes[1:]\n # iou threshold check for overlapping boxes\n ious = np.array([iou(box, x) for x in cls_boxes])\n iou_mask = ious < iou_threshold\n cls_boxes = cls_boxes[np.nonzero(iou_mask)]\n cls_scores = cls_scores[np.nonzero(iou_mask)]\n\n return result\n\n\nif __name__ == \"__main__\":\n image_file = \"examples/data/grace_hopper.jpg\"\n model_file = \"examples/data/yolo_v3_darknet.pb\"\n label_file = \"examples/data/coco.names\"\n input_height = 416\n input_width = 416\n input_mean = 0\n input_std = 255\n input_layer = \"inputs\"\n output_layer = \"output_boxes\"\n backend_name = \"CPU\"\n output_dir = \".\"\n conf_threshold = 0.6\n iou_threshold = 0.5\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--graph\", help=\"Optional. Path to graph/model to be executed.\")\n parser.add_argument(\"--input_layer\", help=\"Optional. Name of input layer.\")\n parser.add_argument(\n \"--output_layer\", help=\"Optional. Name of output layer.\")\n parser.add_argument(\n \"--labels\", help=\"Optional. Path to labels mapping file.\")\n parser.add_argument(\n \"--image\", help=\"Optional. Input image to be processed.\")\n parser.add_argument(\n \"--input_height\",\n type=int,\n help=\"Optional. Specify input height value.\")\n parser.add_argument(\n \"--input_width\", type=int, help=\"Optional. Specify input width value.\")\n parser.add_argument(\n \"--input_mean\", type=int, help=\"Optional. Specify input mean value.\")\n parser.add_argument(\n \"--input_std\", type=int, help=\"Optional. Specify input std value.\")\n parser.add_argument(\n \"--backend\",\n help=\"Optional. Specify the target device to infer on;\"\n \"CPU, GPU, MYRIAD, or VAD-M is acceptable. Default value is CPU.\")\n parser.add_argument(\n \"--output_dir\",\n help=\"Optional. Directory that stores the output\"\n \" image with bounding boxes. Default is directory from where this sample is launched.\"\n )\n parser.add_argument(\n \"--conf_threshold\",\n type=float,\n help=\"Optional. Specify confidence threshold. Default is 0.6.\")\n parser.add_argument(\n \"--iou_threshold\",\n type=float,\n help=\"Optional. Specify iou threshold. Default is 0.5.\")\n parser.add_argument(\n \"--disable_ovtf\",\n help=\"Optional. 
Disable openvino_tensorflow pass and run on stock TF\",\n        action='store_true')\n    args = parser.parse_args()\n    if args.graph:\n        model_file = args.graph\n        if not args.input_layer:\n            raise Exception(\"Specify input layer for this network\")\n        else:\n            input_layer = args.input_layer\n        if not args.output_layer:\n            raise Exception(\"Specify output layer for this network\")\n        else:\n            output_layer = args.output_layer\n    if args.labels:\n        label_file = args.labels\n    # keep the default label file when --labels is absent so 'classes' is always defined for draw_boxes\n    if args.image:\n        image_file = args.image\n    if args.input_height:\n        input_height = args.input_height\n    if args.input_width:\n        input_width = args.input_width\n    if args.input_mean:\n        input_mean = args.input_mean\n    if args.input_std:\n        input_std = args.input_std\n    if args.backend:\n        backend_name = args.backend\n    if args.output_dir:\n        output_dir = args.output_dir\n    if args.conf_threshold:\n        conf_threshold = args.conf_threshold\n    if args.iou_threshold:\n        iou_threshold = args.iou_threshold\n\n    # Load graph and process input image\n    graph = load_graph(model_file)\n    img_resized, img = letter_box_image(image_file, input_height, input_width,\n                                        128)\n    img_resized = img_resized.astype(np.float32)\n\n    # Load label file\n    if label_file:\n        classes = load_coco_names(label_file)\n\n    input_name = \"import/\" + input_layer\n    output_name = \"import/\" + output_layer\n    input_operation = graph.get_operation_by_name(input_name)\n    output_operation = graph.get_operation_by_name(output_name)\n\n    if not args.disable_ovtf:\n        # Print list of available backends\n        print('Available Backends:')\n        backends_list = ovtf.list_backends()\n        for backend in backends_list:\n            print(backend)\n        ovtf.set_backend(backend_name)\n    else:\n        ovtf.disable()\n\n    # Initialize session and run\n    config = tf.compat.v1.ConfigProto()\n    with tf.compat.v1.Session(graph=graph, config=config) as sess:\n        # Warmup\n        detected_boxes = sess.run(output_operation.outputs[0],\n                                  {input_operation.outputs[0]: [img_resized]})\n        # Run ('time' is already imported at the top of the file)\n        start = time.time()\n        detected_boxes = sess.run(output_operation.outputs[0],\n                                  {input_operation.outputs[0]: [img_resized]})\n        elapsed = time.time() - start\n        print('Inference time in ms: %.2f' % (elapsed * 1000))\n\n    # apply non-max suppression, draw boxes and save updated image\n    filtered_boxes = non_max_suppression(detected_boxes, conf_threshold,\n                                         iou_threshold)\n    draw_boxes(filtered_boxes, img, classes, (input_width, input_height), True)\n    if output_dir:\n        img.save(os.path.join(output_dir, \"detections.jpg\"))\n    else:\n        img.save(\"detections.jpg\")\n","sub_path":"examples/object_detection_sample.py","file_name":"object_detection_sample.py","file_ext":"py","file_size_in_byte":11798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"419457154","text":"import os\n\nfrom pipenv.project import Project\nfrom pipenv.utils import temp_environ, normalize_drive, get_windows_path\nfrom pipenv.vendor import delegator\n\nimport pytest\n\n\n@pytest.mark.dotvenv\ndef test_venv_in_project(PipenvInstance, pypi):\n    with temp_environ():\n        os.environ['PIPENV_VENV_IN_PROJECT'] = '1'\n        with PipenvInstance(pypi=pypi) as p:\n            c = p.pipenv('install requests')\n            assert c.return_code == 0\n            assert normalize_drive(p.path) in p.pipenv('--venv').out\n\n\n@pytest.mark.dotvenv\ndef test_venv_at_project_root(PipenvInstance):\n    with temp_environ():\n        with PipenvInstance(chdir=True) as p:\n            os.environ['PIPENV_VENV_IN_PROJECT'] = '1'\n            c = p.pipenv('install')\n            assert c.return_code == 0\n            assert normalize_drive(p.path) in 
p.pipenv('--venv').out\n            del os.environ['PIPENV_VENV_IN_PROJECT']\n            os.mkdir('subdir')\n            os.chdir('subdir')\n            # should still detect installed\n            assert normalize_drive(p.path) in p.pipenv('--venv').out\n\n\n@pytest.mark.dotvenv\ndef test_reuse_previous_venv(PipenvInstance, pypi):\n    with PipenvInstance(chdir=True, pypi=pypi) as p:\n        os.mkdir('.venv')\n        c = p.pipenv('install requests')\n        assert c.return_code == 0\n        assert normalize_drive(p.path) in p.pipenv('--venv').out\n","sub_path":"tests/integration/test_dot_venv.py","file_name":"test_dot_venv.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"625959697","text":"from statistics import median_low as median\n\ndef quickSort(toSort):\n    listLen = len(toSort)\n    result = toSort\n    if listLen == 2:\n        first = toSort[0]\n        second = toSort[1]\n        if first > second:\n            result = [second, first]\n    elif listLen > 2:\n        # median_low always returns an element of the list (plain median would try to\n        # average the two middle values, which fails for strings, and a two-way\n        # partition could leave one side empty and recurse forever when items\n        # equal the median)\n        medianVal = median(toSort)\n        firstHalf = []\n        middle = []\n        secondHalf = []\n        for item in toSort:\n            if item < medianVal:\n                firstHalf.append(item)\n            elif item > medianVal:\n                secondHalf.append(item)\n            else:\n                middle.append(item)\n        firstSorted = quickSort(firstHalf)\n        secondSorted = quickSort(secondHalf)\n        result = firstSorted + middle + secondSorted\n    return result\n\n\ndef formatList(toSort):\n    toSortLen = len(toSort)\n    for index in range(toSortLen):\n        currentItem = toSort[index]\n        currentStr = str(currentItem)\n        toSort[index] = currentStr\n    return toSort\n\ndef main():\n    testList = [\"apple\", \"jelly\", \"banana\", \"animal\", 3]\n    formattedTestList = formatList(testList)\n    quickSorted = quickSort(formattedTestList)\n    print(quickSorted)\n\nmain()","sub_path":"Projects/sortProject.py","file_name":"sortProject.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"408106121","text":"from gpaw import GPAW, PW, FermiDirac\nfrom gpaw.response.g0w0 import G0W0\nfrom ase.build import mx2\nfrom ase.parallel import world\nfrom pathlib import Path\n\n\ndef get_bandrange(calc):\n    \"\"\"lower and upper band range\n    \"\"\"\n    # bands (-8,4)\n    lb, ub = max(calc.wfs.nvalence // 2 - 8, 0), calc.wfs.nvalence // 2 + 4\n    return lb, ub\n\n\ndef write_gpw_file():\n    \"\"\"write gs dft calculation\n    \"\"\"\n    # start by cleaning up a bit\n    if world.rank == 0:\n        for name in Path().glob('*.npy'):\n            Path(name).unlink()\n        for name in Path().glob('*.tmp.*.pckl'):\n            Path(name).unlink()\n\n    world.barrier()\n    params = dict(\n        mode=PW(400),\n        xc='PBE',\n        basis='dzp',\n        kpts={'size': (6, 6, 1), 'gamma': True},\n        occupations=FermiDirac(width=0.05))\n\n    slab = mx2('MoSS', '2H', 3.184, 3.127)\n    slab.center(vacuum=8, axis=2)\n    slab.pbc = (1, 1, 0)\n    slab.calc = GPAW(txt='gs.txt', **params)\n    slab.get_forces()\n    slab.get_stress()\n    slab.calc.write('gs.gpw')\n\n\ndef get_nblocks(size):\n    s2n = {24: 12, 36: 12, 40: 10, 48: 12, 64: 8}\n    return s2n.get(size, size // 2)\n\n\ndef gw_calc(kptsize=12, ecut=200.0, gwg_and_gw=False, gw_only=True):\n    \"\"\"Calculate the gw bandstructure\"\"\"\n\n    calc = GPAW('gs.gpw', txt=None)\n    kpts = {'size': (kptsize, kptsize, 1), 'gamma': True}\n\n    calc.set(kpts=kpts,\n             fixdensity=True,\n             txt='gs_gw.txt')\n    calc.get_potential_energy()\n    calc.diagonalize_full_hamiltonian(ecut=ecut)\n    calc.write('gs_gw_nowfs.gpw')\n    calc.write('gs_gw.gpw', mode='all')\n    lb, ub = get_bandrange(calc)\n\n    calc = G0W0(calc='gs_gw.gpw',\n                bands=(lb, ub),\n                ecut=ecut,\n                ecut_extrapolation=True,\n                truncation='2D',\n                nblocks=get_nblocks(world.size),\n                
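# q0_correction applies GPAW's analytic q->0 correction, the recommended companion of truncation='2D' above\n                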
q0_correction=True,\n filename='g0w0',\n restartfile='g0w0.tmp',\n savepckl=True)\n\n calc.calculate()\n\n\nif __name__ == '__main__':\n write_gpw_file()\n gw_calc(kptsize=6, ecut=200) # cut down size\n","sub_path":"benchmarks/GW-benchmark.py","file_name":"GW-benchmark.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"261006912","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 3 16:58:36 2019\n\n@author: DELL-1\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei'] \nmatplotlib.rcParams['font.family']='sans-serif'\nmatplotlib.rcParams['axes.unicode_minus'] = False\n\ndef finance_salary(get_all_data):\n finance_salary = {}\n finance = list(get_all_data['company_finance'].value_counts().index)\n for i in range(len(finance)):\n get_finance = get_all_data[False^get_all_data['company_finance'].str.contains(finance[i])]\n finance_salary[finance[i]] = ((get_finance['salary_max']+get_finance['salary_min'])/2).mean()\n finances = [\n '不需要融资',\n '未融资',\n '天使轮',\n 'A轮',\n 'B轮',\n 'C轮',\n 'D轮及以上',\n '上市公司'\n ]\n\n dicts = {}\n for f in finances:\n dicts[f] = finance_salary[f]\n\n attr = dicts.keys()\n value = dicts.values()\n plt.figure(figsize=(10,10))\n color = ['red',\n 'orange',\n 'yellow',\n 'green',\n '#48D1CC',\n 'blue',\n 'purple',\n '#4A4A4A'] \n plt.bar(attr,\n value,\n width = 0.6,\n color = color,\n edgecolor='black')\n for a,b in zip(attr,value):\n plt.text(a,b+0.2,'%.2f'%b,ha='center',va='bottom')\n plt.xlabel('公司融资类型')\n plt.ylabel('平均薪资')\n plt.title('融资情况与平均薪资的关系')\n plt.savefig(\"C:/WeSite/DataCharts/薪资关联/融资情况与薪资水平-100dpi.jpg\")\n plt.show()","sub_path":"数据可视化/finance_salary.py","file_name":"finance_salary.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"217510467","text":"from tti_propagators import *\nfrom model import *\nfrom sources import *\nimport numpy as np\nfrom devito import norm\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\nfrom AzureUtilities import read_h5_model\n# Shot number (get from batch)\nshot_no = 0\npathfile = '/app/model_phhil/'\n# Read models\nrho = read_h5_model(pathfile+'rho_with_salt_2D.h5')\nepsilon = read_h5_model(pathfile+'epsilon_with_salt_2D.h5')\ndelta = read_h5_model(pathfile+'delta_with_salt_2D.h5')\ntheta = read_h5_model(pathfile+'theta_with_salt_2D.h5')\nm0 = read_h5_model(pathfile+'migration_velocity_2D.h5')\ndm = read_h5_model(pathfile+'perturbation_2D.h5')\n\n\nn = (801, 267)\no = (0., 0.)\nd = (12.5, 12.5)\nso = 8\ndt_full = .64\n\nmodel = Model(shape=n, origin=o, spacing=d, vp=np.sqrt(1./m0), space_order=so,\n epsilon=epsilon, delta=delta, theta=theta, nbpml=40,\n dm=dm, rho=rho, dt=dt_full)\n\n# Time axis\nt0 = 0.\ntn = 300.\ndt = model.critical_dt\ntime = TimeAxis(start=t0, step=dt_full, stop=tn)\nprint(time.num)\nsrc = RickerSource(name='src', grid=model.grid, f0=0.020, time_range=time, npoint=1)\nsrc.coordinates.data[:, 0] = 5000.\nsrc.coordinates.data[:, 1] = 350.\n\nnrec = 501\nrec_coords = np.empty((nrec, 2))\nrec_coords[:, 0] = np.linspace(0., 10000., nrec)\nrec_coords[:, 1] = 6.\n####### RUN #########\n# Forward\nop = TTIPropagators(model, space_order=so)\n\nrec, u, v = op.forward(src, rec_coords, save=True)\ngrad = op.gradient(rec, u, v, isic=True)\n\nlinD, u, v, summary = op.born(src, rec_coords, sub=(4, 1), autotune=('aggressive', 
'runtime'))\ngrad2 = op.gradient(linD, u, v, sub=(4, 1), isic=True, autotune=('basic', 'runtime'))\n\nprint(norm(grad))\nprint(norm(grad2))\nprint(norm(rec))\nprint(norm(linD))\n","sub_path":"src/AzureBatch/docker/tti_image/tti/run_rev2d.py","file_name":"run_rev2d.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"228591786","text":"import mysql.connector\nfrom ConectarMsql import Conectar\n#dato={\n# 'user':'root',\n# 'password':'',\n# 'database':'proyectopy',\n# 'host':'127.0.0.1'\n#}\n#\n#mi_conexion=mysql.connector.connect(** dato)\n#mi_cursor=mi_conexion.cursor()\n\nc=Conectar()\nc.mi_cursor.execute(\"SELECT * FROM usuario1\")\nasd=c.mi_cursor.fetchall()\nfor i in asd:\n print(i[4])\nc.mi_conexion.commit()\nc.mi_conexion.close()","sub_path":"prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"339918312","text":"import PyPDF2\n\nwith open('C:\\\\Users\\\\JR0544\\\\PycharmProjects\\\\ZeroToMastery\\\\PDFs\\\\2020ViaBenefitsCostSummary.pdf', 'rb') as file:\n reader = PyPDF2.PdfFileReader(file)\n page = reader.getPage(0)\n # print(reader.getNumPages())\n # print(reader.getPage(0))\n print(page.rotateClockwise(90))\n writer = PyPDF2.PdfFileWriter()\n writer.addPage(page)\n with open('tilt.pdf', 'wb') as file1:\n writer.write(file1)\n","sub_path":"ZeroToMastery/pdf/readpdf.py","file_name":"readpdf.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"306976225","text":"from flask import Flask , url_for , request, render_template\napp = Flask(__name__)\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\n\n#code start from here\n\n@app.route('/', methods =['GET','POST'])\ndef main():\n\tif request.method == 'GET':\n\t\treturn render_template(\"main.html\")\n\n\telif request.method == 'POST':\n\t\ttitle = request.form['title'];\n\t\treturn render_template(\"donat.html\")\n\n'''\nif __name__ == '__main__':\n import os\n HOST = os.environ.get('SERVER_HOST', 'localhost')\n try:\n PORT = int(os.environ.get('SERVER_PORT', '5555'))\n except ValueError:\n PORT = 5555\n app.run(HOST, PORT, debug = True)\n\n'''\n\n\n\n\n\n\n\n","sub_path":"prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"156974531","text":"# Using oracle coaching (initialize by neural network)\n\nimport random\n\nimport numpy as np\nimport optparse\nimport sys\nimport csv\n\nfrom scipy.stats import ks_2samp\n\nNodeSplit = 0\nNodeLeaf = 1\nNodeFringe = 2\nActionDimension = -1\nHOME = 0\nAWAY = 1\nMAX_DEPTH = 40\nLOW_COLOR = 40\nHIGH_COLOR = 220\n\n\nclass CUTree:\n def __init__(self, gamma, n_actions, dim_sizes, dim_names, max_hist, max_back_depth=1, minSplitInstances=20,\n significance_level=0.999, is_episodic=0):\n \n self.node_id_count = 0\n self.root = UNode(self.genId(), NodeLeaf, None, n_actions, 1)\n self.n_actions = n_actions\n self.max_hist = max_hist\n self.max_back_depth = max_back_depth\n self.gamma = gamma\n self.history = []\n self.n_dim = len(dim_sizes)\n self.dim_sizes = dim_sizes\n self.dim_names = dim_names\n self.minSplitInstances = minSplitInstances\n self.significanceLevel = significance_level\n \n self.nodes = 
{self.root.idx: self.root} # root_id:root_node\n \n self.term = UNode(self.genId(), NodeLeaf, None, 1, 1) # dummy terminal node with 0 value\n self.start = UNode(self.genId(), NodeLeaf, None, 1, 1)\n self.nodes[self.term.idx] = self.term # term_id:term_node\n self.nodes[self.start.idx] = self.start\n \n # def getBestAction(self, currentObs):\n # t = self.getTime()\n # i = Instance(t, currentObs, None, currentObs, None, None, np.zeros(2))\n # self.insertInstance(i)\n # next_state = self.getInstanceLeaf(i)\n # bestInst = None\n # maxCorr = 1000000\n # # currentObs = np.transpose(currentObs)\n # for inst in next_state.instances:\n # sub = np.subtract(currentObs, inst.currentObs)\n # diff = np.var(sub)\n # if maxCorr > diff and inst.action is not None:\n # maxCorr = diff\n # bestInst = inst\n # self.popInstance()\n # return bestInst.action\n \n def getBestAction(self, currentObs):\n if random.randint(0, 100) % 5 ==0:\n # seed = random.randint(0, 100) % 2\n # if seed:\n # return [0, 1]\n # else:\n return [1, 0]\n else:\n t = self.getTime()\n # action 0\n i = Instance(t, currentObs, 0, currentObs, None, None, None)\n self.insertInstance(i)\n next_state_0 = self.getInstanceLeaf(i)\n self.popInstance()\n # action 1\n i = Instance(t, currentObs, 1, currentObs, None, None, None)\n self.insertInstance(i)\n next_state_1 = self.getInstanceLeaf(i)\n self.popInstance()\n if next_state_1.utility(HOME) >= next_state_0.utility(HOME):\n return [0, 1]\n else:\n return [1, 0]\n \n def tocsvFile(self, filename):\n '''\n Store a record of U-Tree in file, make it easier to rebuild tree\n :param filename: the path of file to store the record\n :return:\n '''\n with open(filename, 'w', newline='') as csvfile:\n fieldname = ['idx', 'dis', 'dis_value', 'par', 'q']\n writer = csv.writer(csvfile)\n writer.writerow(fieldname)\n for i, node in self.nodes.items():\n if node.nodeType == NodeSplit:\n writer.writerow([node.idx,\n node.distinction.dimension,\n node.distinction.continuous_divide_value if node.distinction.continuous_divide_value else None,\n node.parent.idx if node.parent else None,\n None,\n None])\n else:\n writer.writerow([node.idx,\n None,\n None,\n node.parent.idx if node.parent else None,\n node.qValues])\n # node.qValues_home,\n # node.qValues_away])\n \n def tocsvFileComplete(self, filename):\n '''\n Store a record of U-Tree in file including the instances in each leaf node,\n make it easier to rebuild tree\n :param filename: the path of file to store record\n :return:\n '''\n with open(filename, 'w', newline='') as csvfile:\n fieldnamecomplete = ['idx', 'dis', 'dis_value', 'par', 'q_home', 'q_away', 'instances']\n writer = csv.writer(csvfile)\n writer.writerow(fieldnamecomplete)\n for i, node in self.nodes.items():\n if node.nodeType == NodeSplit:\n writer.writerow([node.idx,\n node.distinction.dimension,\n node.distinction.continuous_divide_value if node.distinction.continuous_divide_value else None,\n node.parent.idx if node.parent else None,\n None,\n None,\n None])\n else:\n writer.writerow([node.idx,\n None,\n None,\n node.parent.idx if node.parent else None,\n # node.qValues_home,\n # node.qValues_away,\n [inst.timestep for inst in node.instances]])\n \n def fromcsvFile(self, filename):\n '''\n Load U-Tree structure from csv file\n :param filename: the path of file to load record\n :return:\n '''\n with open(filename, 'r') as csvfile:\n fieldname = ['idx', 'dis', 'dis_value', 'par', 'q']\n reader = csv.reader(csvfile)\n self.node_id_count = 0\n for record in reader:\n if not record:\n continue\n 
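# each remaining row rebuilds one node: split rows carry (dis, dis_value), leaf rows carry the stored qValues, mirroring the layout written by tocsvFile\n                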
if record[0] == fieldname[0]: # idx determines header or not\n continue\n if not record[4]: # qValues determines NodeSplit or NodeLeaf\n node = UNode(int(record[0]), NodeSplit, self.nodes[int(record[3])] if record[3] else None,\n self.n_actions, self.nodes[int(record[3])].depth + 1 if record[3] else 1)\n node.distinction = Distinction(dimension=int(record[1]),\n back_idx=0,\n dimension_name=self.dim_names[int(record[1])]\n if int(record[1]) > -1 else 'actions',\n iscontinuous=True if record[2] else False,\n continuous_divide_value=float(record[2]) if record[\n 2] else None) # default back_idx is 0\n else:\n node = UNode(int(record[0]), NodeLeaf, self.nodes[int(record[3])] if record[3] else None,\n self.n_actions, self.nodes[int(record[3])].depth + 1 if record[3] else 1)\n node.qValues = np.array(list(map(float, record[4][1:-1].split())))\n # node.qValues_home = float(record[4])\n # node.qValues_away = float(record[5])\n if node.parent:\n self.nodes[int(record[3])].children.append(node)\n if node.idx == 1:\n self.root = node\n elif node.idx == 2:\n self.term = node\n self.nodes[int(node.idx)] = node\n self.node_id_count += 1\n \n def print_tree(self):\n \"\"\"\n print U tree\n :return:\n \"\"\"\n self.print_tree_recursive(\"\", self.root)\n \n def print_tree_recursive(self, blank, node):\n '''\n recursively print tree from root to leaves\n :param node: the node to be expand\n :return:\n '''\n if node.nodeType == NodeSplit:\n print(blank + \"idx={}, dis={}, par={}\".format(node.idx,\n node.distinction.dimension,\n node.parent.idx if node.parent else None))\n for child in node.children:\n self.print_tree_recursive(blank + \" \", child)\n else:\n print(blank + \"idx={}, q={}, par={}\".\n format(node.idx,\n # node.transitions_home_home,\n # node.transitions_home_away,\n # node.transitions_away_home,\n # node.transitions_away_away,\n node.qValues,\n # node.qValues_home,\n # node.qValues_away,\n node.parent.idx if node.parent else None))\n \n def getInstanceQvalues(self, instance, reward):\n \"\"\"\n get the Q-value from instance, q(I,a)\n :return: state's maximum Q\n \"\"\"\n self.insertInstance(instance)\n # set goal's q_value as equal to previous shot\n next_state = self.getInstanceLeaf(instance)\n self.popInstance()\n return next_state.utility(True)\n # return next_state.utility(home_identifier=True), \\\n # next_state.utility(home_identifier=False)\n \n def getInstanceFigure(self, instance):\n self.insertInstance(instance)\n # set goal's q_value as equal to previous shot\n fig = self.getInstanceLeafWithColor(instance)\n self.popInstance()\n return fig\n \n def getTime(self):\n \"\"\"\n :return: length of history\n \"\"\"\n return len(self.history)\n \n def updateCurrentNode(self, instance, beginflag):\n \"\"\"\n add the new instance ot LeafNode\n :param instance: instance to add\n :return:\n \"\"\"\n old_state = self.getLeaf(previous=1) # get the leaf\n if old_state == self.term: # if leaf is the dummy terminal node\n return\n self.insertInstance(instance) # add the new instance to U-Tree history\n new_state = self.getLeaf() # get the leaf of next state\n new_state.addInstance(instance, self.max_hist) # add the instance to leaf node\n if old_state != self.start: # last instance is not goal\n old_state.updateModel(new_state=new_state.idx,\n action=self.history[-2].action,\n reward=self.history[-2].reward) # update state by adding action reward, adding action count and recording transition states\n if instance.nextObs[0] == -1: # this instance lead to goal\n 
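# route the goal transition to the dummy start node, whose utility is zero, so value sweeps do not propagate returns across episode boundaries\n            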
new_state.updateModel(new_state=self.start.idx,\n action=instance.action,\n reward=instance.reward)\n elif instance.nextObs[0] == 0:\n new_state.updateModel(new_state=self.term.idx,\n action=instance.action,\n reward=instance.reward)\n \n def sweepLeaves(self):\n '''\n Serve as a public function calls sweepRecursive\n :return:\n '''\n return self.sweepRecursive(self.root, self.gamma)\n \n def sweepRecursive(self, node, gamma):\n \"\"\"\n Apply single step of value iteration to leaf node\n or recursively to children if it is a split node\n :param node: target node\n :param gamma: gamma in dynamic programming\n :return:\n \"\"\"\n if node.nodeType == NodeLeaf:\n # home team\n for action, reward in enumerate(node.rewards):\n c = float(node.count[action]) # action count\n if c == 0:\n continue\n exp = 0\n for node_to, t in node.transitions[action].items():\n if reward[node_to] > 0:\n exp += reward[node_to] / c\n if node.idx != node_to:\n exp += gamma * (self.nodes[node_to].utility(True) * t) / c\n node.qValues[action] = exp\n \n # # assert is just for debugging, replace all assert to comment\n # assert node.nodeType == NodeSplit\n for c in node.children:\n self.sweepRecursive(c, gamma)\n \n def insertInstance(self, instance):\n \"\"\"\n append new instance to history\n :param instance: current instance\n :return:\n \"\"\"\n self.history.append(instance)\n # if len(self.history)>self.max_hist:\n # self.history = self.history[1:]\n \n def popInstance(self):\n self.history.pop(len(self.history) - 1)\n \n def nextInstance(self, instance):\n \"\"\"\n get the next instance\n :param instance: current instance\n :return: the next instance\n \"\"\"\n # assert instance.timestep + 1 < len(self.history)\n return self.history[instance.timestep + 1]\n \n def transFromInstances(self, node, n_id, action):\n \"\"\"\n compute transition probability from current node to n_id node when perform action\n Formula (7) in U tree paper\n :param node: current node\n :param n_id: target node\n :param action: action to perform\n :return: transition probability\n \"\"\"\n count = 0\n total = 0\n \n for inst in node.instances:\n if inst.action == action:\n leaf_to = self.getInstanceLeaf(inst, previous=1)\n if leaf_to.idx == n_id:\n count += 1\n total += 1\n \n if total:\n return count / total\n else:\n return 0\n \n def rewardFromInstances(self, node, action):\n \"\"\"\n compute reward of perform action on current node\n Formula (6) in U tree paper\n :param node: current node\n :param action: action to perform\n :return: reward computed\n \"\"\"\n rtotal = 0\n total = 0\n \n for inst in node.instances:\n if inst.action == action:\n rtotal += inst.reward\n total += 1\n if total:\n return rtotal / total\n else:\n return 0\n \n def modelFromInstances(self, node):\n \"\"\"\n rebuild model for leaf node, with newly added instance\n :param node:\n :return:\n \"\"\"\n node.rewards = [{} for i in range(self.n_actions)]\n node.count = np.zeros(self.n_actions) # re-initialize count\n node.transitions = [{} for i in range(self.n_actions)] # re-initialize transition\n\n for inst in node.instances:\n leaf_to = self.getInstanceLeaf(inst, previous=1) # get the to node\n # update the node, add action reward, action count and transition states\n node.updateModel(leaf_to.idx, inst.action, inst.reward)\n \n def getLeaf(self, previous=0):\n '''\n Get leaf corresponding to current history\n :param previous: 0 is not check goal, 1 is check it\n :return:\n '''\n idx = len(self.history) - 1\n node = self.root\n \n if previous == 1:\n if idx == -1 
or self.history[idx].nextObs[0] == -1:\n return self.start\n \n while node.nodeType != NodeLeaf:\n # assert node.nodeType == NodeSplit\n child = node.applyDistinction(self.history, idx)\n node = node.children[child] # go the children node\n return node\n \n def getInstanceLeafWithColor(self, inst):\n idx = inst.timestep\n fig = inst.currentObs[:14400]\n node = self.root\n while node.nodeType != NodeLeaf: # iteratively find children\n child = node.applyDistinction(self.history, idx)\n dis_num = node.distinction.dimension\n if dis_num < 14400 and dis_num != -1:\n if child == 0:\n fig[dis_num] = LOW_COLOR\n else:\n fig[dis_num] = HIGH_COLOR\n node = node.children[child]\n return fig\n \n def getInstanceLeaf(self, inst, ntype=NodeLeaf, previous=0):\n \"\"\"\n Get leaf that inst records a transition from\n previous=0 indicates transition_from, previous=1 indicates transition_to\n :param inst: target instance\n :param ntype: target node type\n :param previous: previous=0 indicates present inst, previous=1 indicates next inst\n :return:\n \"\"\"\n idx = inst.timestep + previous\n \n if previous == 1:\n if idx >= len(self.history):\n return self.term\n elif inst.nextObs[0] == -1:\n return self.start\n \n node = self.root\n while node.nodeType != ntype: # iteratively find children\n # keep applying node's distinction until we find ntype node, where the instance should belong\n child = node.applyDistinction(self.history, idx)\n node = node.children[child]\n return node\n \n def genId(self):\n \"\"\"\n :return: a new ID for node\n \"\"\"\n self.node_id_count += 1\n return self.node_id_count\n \n def reduceId(self, count):\n '''\n After splitFringe(maybe something else), reduce to normal\n :param count: the reduce number\n :return:\n '''\n self.node_id_count -= count\n \n def split(self, node, distinction):\n \"\"\"\n split decision tree on nodes\n :param node: node to split\n :param distinction: distinction to split\n :return:\n \"\"\"\n # assert node.nodeType == NodeLeaf\n # assert distinction.back_idx >= 0\n node.nodeType = NodeSplit\n node.distinction = distinction\n \n # Add children\n if distinction.dimension == ActionDimension:\n for i in range(self.n_actions):\n idx = self.genId()\n n = UNode(idx, NodeLeaf, node, self.n_actions, node.depth + 1)\n # n.qValues_home = np.copy(node.qValues_home)\n # n.qValues_away = np.copy(node.qValues_away)\n self.nodes[idx] = n\n node.children.append(n)\n elif distinction.iscontinuous == False:\n for i in range(self.dim_sizes[distinction.dimension]):\n idx = self.genId()\n n = UNode(idx, NodeLeaf, node, self.n_actions, node.depth + 1)\n # n.qValues_home = np.copy(node.qValues_home)\n # n.qValues_away = np.copy(node.qValues_away)\n self.nodes[idx] = n\n node.children.append(n)\n else:\n for i in range(2):\n idx = self.genId()\n n = UNode(idx, NodeLeaf, node, self.n_actions, node.depth + 1)\n # n.qValues_home = np.copy(node.qValues_home)\n # n.qValues_away = np.copy(node.qValues_away)\n self.nodes[idx] = n\n node.children.append(n)\n \n # Add instances to children\n for inst in node.instances:\n n = self.getInstanceLeaf(inst, previous=0)\n # assert n.parent.idx == node.idx, \"node={}, par={}, n={}\".format(node.idx, n.parent.idx, n.idx)\n n.addInstance(inst, self.max_hist)\n \n # Rebuild is essential, yes, since all the transitions will change.\n for i, n in self.nodes.items():\n if n.nodeType == NodeLeaf:\n self.modelFromInstances(n)\n \n node.instances = []\n # update Q-values for children\n for n in node.children:\n self.sweepRecursive(n, self.gamma)\n \n def 
splitToFringe(self, node, distinction):\n \"\"\"\n Create fringe nodes instead of leaf nodes after splitting; these nodes\n aren't used in the agent's model\n :param node: node to split\n :param distinction: distinction used for splitting\n :return:\n \"\"\"\n # assert distinction.back_idx >= 0\n node.distinction = distinction\n \n # Add children\n if distinction.dimension == ActionDimension: # ActionDimension = -1, means use action to split\n for i in range(self.n_actions):\n idx = self.genId() # generate new id for new node\n fringe_node = UNode(idx, NodeFringe, node, self.n_actions, node.depth + 1)\n node.children.append(fringe_node) # append new children to node\n elif distinction.iscontinuous == False:\n for i in range(self.dim_sizes[distinction.dimension]):\n idx = self.genId()\n fringe_node = UNode(idx, NodeFringe, node, self.n_actions, node.depth + 1)\n node.children.append(fringe_node)\n else:\n for i in range(2):\n idx = self.genId()\n fringe_node = UNode(idx, NodeFringe, node, self.n_actions, node.depth + 1)\n node.children.append(fringe_node)\n \n # Add instances to children\n for inst in node.instances:\n n = self.getInstanceLeaf(inst, ntype=NodeFringe, previous=0)\n # assert n.parent.idx == node.idx, \"idx={}\".format(n.idx)\n n.addInstance(inst, self.max_hist) # add instance to children\n \n def unsplit(self, node):\n \"\"\"\n Unsplit node\n :param node: the node to unsplit\n :return:\n \"\"\"\n node.distinction = None\n self.reduceId(len(node.children))\n if node.nodeType == NodeSplit:\n # assert len(node.children) > 0\n node.nodeType = NodeLeaf\n for c in node.children:\n del self.nodes[c.idx]\n for i, n in self.nodes.items():\n if n.nodeType == NodeLeaf:\n self.modelFromInstances(n)\n \n # clear children\n node.children = []\n \n def testFringe(self):\n \"\"\"\n Tests fringe nodes for viable splits, splits nodes if they're found\n :return: how many real splits it takes\n \"\"\"\n return self.testFringeRecursive(self.root) # starting from root\n \n def testFringeRecursive(self, node):\n \"\"\"\n recursively perform test in fringe, until return total number of split\n :param node: node to test\n :return: number of splits\n \"\"\"\n if node.depth >= MAX_DEPTH:\n return 0\n if node.nodeType == NodeLeaf and node.modified: # NodeSplit = 0 NodeLeaf = 1 NodeFringe = 2\n node.modified = False\n d = self.getUtileDistinction(node) # test is performed here\n if d: # if find distinction, use distinction to split\n self.split(node, d) # please use break point to see how to split here\n return 1 + self.testFringeRecursive(node)\n return 0\n \n # assert node.nodeType == NodeSplit\n total = 0\n for c in node.children:\n total += self.testFringeRecursive(c)\n return total\n \n def getUtileDistinction(self, node):\n \"\"\"\n Different kinds of tests are performed here\n 1. find all the possible distinction\n 2. try to split node according to distinction and get expected future discounted returns\n 3. perform test until find the proper distinction, otherwise, return None\n \"\"\"\n # assert node.nodeType == NodeLeaf\n if len(node.instances) < self.minSplitInstances:\n return None\n cds = self.getCandidateDistinctions(node) # Get all the candidate distinctions\n return self.ksTest(node, cds)\n \n def mseTest(self, node, cds):\n \"\"\"\n Mean squared-error test is performed here.\n It is less theoretically based. 
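Roughly, it compares the variance of the parent's EFDRs with the weighted variances of the children.\n        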
So it's free to choose threshold.\n :param node: the node to test\n :param cds: a list of candidate distinction to choose\n :return: the best distinction or None is not found\n \"\"\"\n # Get all expected future discounted returns for all instances in a node\n root_utils = [self.getEFDRs(node, index) for index in [HOME, AWAY]]\n root_len = len(root_utils[0])\n # calculate MSE of root\n root_predict = [sum(r) ** 2 / root_len for r in root_utils]\n root_mse = [(sum(root_val ** 2 for root_val in root_utils[index]) - root_predict[index] / (root_len - 1))\n for index in [HOME, AWAY]]\n # get the best distinction\n dist_min = self.significanceLevel / root_len\n cd_min = None\n \n for cd in cds: # test all possible distinctions until find the one satisfy the test\n self.splitToFringe(node, cd) # split to fringe node with split candidate\n # record\n stop = 0\n child_mse_home = []\n child_mse_away = []\n for c in node.children:\n # give action a chance to split first\n if len(c.instances) < self.minSplitInstances and cd.dimension != ActionDimension:\n stop = 1\n break\n if len(c.instances) <= 1: # goal state\n continue\n # Get all expected future discounted returns for all instances in a children\n child_util_home = self.getEFDRs(c, HOME)\n child_util_away = self.getEFDRs(c, AWAY)\n # calculate MSE(weighted) of child\n child_len = len(child_util_home)\n child_predict_home = sum(child_util_home) ** 2 / child_len\n child_predict_away = sum(child_util_away) ** 2 / child_len\n child_weight = float(child_len) / root_len\n child_mse_home.append(sum(child_val ** 2 for child_val in child_util_home) - child_predict_home /\n (child_len - 1) * child_weight)\n child_mse_away.append(sum(child_val ** 2 for child_val in child_util_away) - child_predict_away /\n (child_len - 1) * child_weight)\n self.unsplit(node) # delete split fringe node\n child_mse = [child_mse_home, child_mse_away]\n # if not enough instance in a node, stop split\n if stop == 1:\n continue\n \n # calculate difference between parents and students\n p = max(abs(sum(child_mse[index]) - root_mse[index]) for index in [HOME, AWAY])\n if p > dist_min:\n print(\"MSE passed, p={}, d={}, back={}\".format(p, cd.dimension, cd.back_idx))\n dist_min = p\n cd_min = cd\n \n # print the best\n if cd_min:\n print(\"Will be split, p={}, d={}, back={}\".format(dist_min, cd_min.dimension, cd_min.back_idx))\n return cd_min\n \n def ksTest(self, node, cds):\n \"\"\"\n KS test is performed here\n :param node: the node to test\n :param cds: a list of candidate distinction to choose\n :return: the best distinction or None is not found\n \"\"\"\n root_utils = self.getEFDRs(node, HOME) # Get all expected future discounted returns for all instances in a node\n # get the best distinction\n dist_min = self.significanceLevel\n cd_min = None\n \n for cd in cds: # test all possible distinctions until find the one satisfy KS test\n self.splitToFringe(node, cd) # split to fringe node with split candidate\n # record\n stop = 0\n child_utils = []\n for c in node.children:\n if len(c.instances) < self.minSplitInstances and cd.dimension != ActionDimension:\n stop = 1\n break\n # Get all expected future discounted returns for all instances in a children\n child_utils.append(self.getEFDRs(c, HOME))\n self.unsplit(node) # delete split fringe node\n # if not enough instance in a node, stop split\n if stop == 1:\n continue\n \n # Computes the Kolmogorov-Smirnov statistic between parent EFDR and child EFDR\n for i, cu in enumerate(child_utils):\n k, p = ks_2samp(root_utils, cu)\n if p < 
dist_min: # significance_level=0.00005, if p below it, this distinction is significant\n dist_min = p\n cd_min = cd\n print(\"KS passed, p={}, d={}, back={}\".format(p, cd.dimension, cd.back_idx))\n \n # print the best\n if cd_min:\n print(\"Will be split, p={}, d={}, back={}\".format(dist_min, cd_min.dimension, cd_min.back_idx))\n return cd_min\n \n def getEFDRs(self, node, index):\n \"\"\"\n Get all expected future discounted returns for all instances in a node\n (q-value is just the average EFDRs)\n :param: index: if index is home, calculate based on Q_home, else Q_away\n \"\"\"\n efdrs = np.zeros(len(node.instances))\n for i, inst in enumerate(node.instances):\n next_state = self.getInstanceLeaf(inst, previous=1) # Get leaf that inst records a transition from\n # split home and away\n efdrs[i] = inst.reward\n if node.parent and next_state != node.parent and next_state != self.term:\n next_state_util = next_state.utility(index == HOME) # maximum Q value\n efdrs[i] += self.gamma * next_state_util # r + gamma * maxQ\n return efdrs\n \n # def getEFDRs(self, node, index):\n # \"\"\"\n # Get all expected future discounted returns for all instances in a node\n # (q-value is just the average EFDRs)\n # :param: index: if index is home, calculate based on Q_home, else Q_away\n # \"\"\"\n # efdrs = np.zeros(len(node.instances))\n # for i, inst in enumerate(node.instances):\n # next_state = self.getInstanceLeaf(inst, previous=1) # Get leaf that inst records a transition to\n # if inst.action == AbsorbAction:\n # efdrs[i] = inst.reward if index == HOME else -inst.reward\n # else:\n # if node.parent == next_state:\n # efdrs[i] = inst.reward if index == HOME else -inst.reward\n # else:\n # if index == HOME:\n # next_home_state_util = next_state.utility(True) # maximum Q value\n # efdrs[i] = inst.reward + self.gamma * next_home_state_util # r + gamma * maxQ\n # else:\n # next_away_state_util = next_state.utility(False) # maximum Q value\n # efdrs[i] = -inst.reward + self.gamma * next_away_state_util # r + gamma * maxQ\n #\n # return efdrs\n \n def ksTestonQ(self, node, cds, diff_significanceLevel=float(0.001)):\n \"\"\"\n KS test is performed here\n 1. find all the possible distinction\n 2. try to split node according to distinction and get expected future discounted returns\n 3. 
perform ks test until find the proper distinction, otherwise, return None\n :param diff_significanceLevel:\n :param node:\n :return:\n \"\"\"\n assert node.nodeType == NodeLeaf\n diff_significanceLevel = self.significanceLevel\n if diff_significanceLevel < 0.0001:\n diff_significanceLevel = 0.0001\n print(\"Sig:\" + str(diff_significanceLevel))\n root_utils = self.getQs(node)\n variance = np.var(root_utils)\n # root_utils_home, root_utils_away = self.getQs(node)\n # variance_home = np.var(root_utils_home)\n # variance_away = np.var(root_utils_away)\n diff_max = float(0)\n cd_split = None\n for cd in cds:\n self.splitToFringe(node, cd)\n stop = 0\n child_qs = []\n for c in node.children:\n if len(c.instances) < self.minSplitInstances:\n stop = 1\n break\n child_qs.append(self.getQs(c))\n \n self.unsplit(node)\n if stop == 1:\n continue\n \n for i, cq in enumerate(child_qs):\n \n if (len(cq) == 0):\n # if len(cq[0]) == 0 or len(cq[1]) == 0:\n continue\n else:\n variance_child = np.var(cq)\n # variance_child_home = np.var(cq[0])\n \n diff = abs(variance - variance_child)\n # diff_home = abs(variance_home - variance_child_home)\n # diff_away = abs(variance_away - variance_child_away)\n # diff = diff_home if diff_home > diff_away else diff_away\n if diff > diff_significanceLevel and diff > diff_max:\n diff_max = diff\n cd_split = cd\n print('vanriance test passed, diff=', diff, ',d=', cd.dimension)\n \n # hand split action\n if cd.dimension == ActionDimension and cd_split is not None:\n break\n \n if cd_split:\n print('Will be split, p=', diff_max, ',d=', cd_split.dimension_name)\n return cd_split\n else:\n return cd_split\n \n # def varDiff(self, listA=[], listB=[], diff=0):\n # if len(listA) == 0 or len(listB) == 0:\n # return diff - 1\n # mean_a = sum(listA) / len(listA)\n # var_a = float(0)\n # for number_a in listA:\n # var_a += (number_a - mean_a) ** 2\n #\n # mean_b = sum(listB) / len(listB)\n # var_b = float(0)\n # for number_b in listB:\n # var_b += (number_b - mean_b) ** 2\n #\n # return abs(var_a / len(listA) - var_b / len(listB))\n \n def getQs(self, node):\n \"\"\"\n Get all expected future discounted returns for all instances in a node\n (q-value is just the average EFDRs)\n \"\"\"\n efdrs = np.zeros(len(node.instances))\n # efdrs_home = np.zeros(len(node.instances))\n # efdrs_away = np.zeros(len(node.instances))\n for i, inst in enumerate(node.instances):\n efdrs[i] = inst.qValue\n # efdrs_home[i] = inst.qValue[0]\n # efdrs_away[i] = inst.qValue[1]\n \n return efdrs\n # return [efdrs_home, efdrs_away]\n \n def getCandidateDistinctions(self, node, select_interval=100):\n \"\"\"\n construct all candidate distinctions\n :param node: target nodes\n :return: all candidate distinctions\n \"\"\"\n p = node.parent\n anc_distinctions = []\n while p:\n # assert p.nodeType == NodeSplit\n anc_distinctions.append(p.distinction)\n p = p.parent # append all the parent nodes' distinction to anc_distinctions list\n \n candidates = []\n for i in range(self.max_back_depth):\n for j in range(-1, self.n_dim): # no action here\n if j > -1 and self.dim_sizes[j] == 'continuous':\n # value=sum([inst.currentObs[j] for inst in node.instances])/len(node.instances)\n # d = Distinction(dimension=j, back_idx=i, dimension_name=self.dim_names[j],\n # iscontinuous=True, continuous_divide_value=value)\n # if d in anc_distinctions:\n # continue\n # candidates.append(d)\n count = 0\n for inst in sorted(node.instances, key=lambda inst: inst.currentObs[j]):\n count += 1\n # choose one from 30\n if count % 
select_interval != 0:\n continue\n d = Distinction(dimension=j,\n back_idx=i,\n dimension_name=self.dim_names[j],\n iscontinuous=True,\n continuous_divide_value=inst.currentObs[j])\n # we don't need duplicate distinction\n if d in anc_distinctions:\n continue\n candidates.append(d)\n else:\n d = Distinction(dimension=j,\n back_idx=i,\n dimension_name=self.dim_names[j] if j > -1 else 'actions')\n if d in anc_distinctions:\n continue\n candidates.append(d)\n \n return candidates\n\n\nclass UNode:\n def __init__(self, idx, nodeType, parent, n_actions, depth):\n self.idx = idx\n self.nodeType = nodeType\n self.parent = parent\n \n self.children = []\n \n # reward in instances maybe negative, but reward in node must be positive\n self.rewards = [{} for i in range(n_actions)] # r_home(s, a, s')\n self.count = np.zeros(n_actions)\n self.transitions = [{} for i in range(n_actions)]\n # self.qValues_home = 0\n # self.qValues_away = 0\n self.qValues = np.zeros(n_actions)\n \n self.distinction = None\n self.instances = []\n \n self.depth = depth\n \n self.modified = False\n \n def utility(self, home_identifier):\n \"\"\"\n :param: index: if index is HOME, return Q_home, else return Q_away\n :return: maximum Q value\n \"\"\"\n return max(self.qValues)\n # return self.qValues_home if home_identifier else self.qValues_away\n \n def addInstance(self, instance, max_hist):\n \"\"\"\n add new instance to node instance list\n if instance length exceed maximum history length, select most recent history\n :param instance:\n :param max_hist:\n :return:\n \"\"\"\n # assert (self.nodeType == NodeLeaf or self.nodeType == NodeFringe)\n self.instances.append(instance)\n if not self.modified:\n self.modified = True\n if len(self.instances) > max_hist:\n self.instances = self.instances[1:]\n \n def updateModel(self, new_state, action, reward):\n \"\"\"\n 1. add action reward\n 2. add action count\n 3. 
record transition states\n :param new_state: new transition state\n :param action: new action\n :param reward: reward of action\n :param home_identifier: identify home and away\n :return:\n \"\"\"\n # self.qValues_home = (self.count * self.qValues_home + qValue[0]) \\\n # / (self.count + 1)\n # self.qValues_away = (self.count * self.qValues_away + qValue[1]) \\\n # / (self.count + 1)\n self.count[action] += 1 # summation of the number of actions\n if new_state not in self.transitions[action]:\n self.transitions[action][new_state] = 1 # record transition\n self.rewards[action][new_state] = reward\n else:\n self.transitions[action][new_state] += 1\n self.rewards[action][new_state] += reward\n \n # if new_state not in self.transitions:\n # self.transitions[new_state] = 1\n # else:\n # self.transitions[new_state] += 1\n \n def applyDistinction(self, history, idx, previous=0):\n \"\"\"\n :param history: history of instances\n :param idx: the idx of instance to apply distinction\n :return: the index of children\n \"\"\"\n # assert self.nodeType != NodeFringe\n # assert len(history) > self.distinction.back_idx\n # assert len(history) > idx\n # assert self.distinction.back_idx >= 0\n \n # if back_idx is too far for idx, pick the first child\n # if self.distinction.back_idx > idx:\n # return 0\n \n # find the instance from history, may back trace to former instance\n inst = history[idx - self.distinction.back_idx]\n \n if self.distinction.dimension == ActionDimension:\n return inst.action # action distinction\n # assert self.distinction.dimension >= 0\n # previous 0: current node, previous 1: last node\n if previous == 0:\n if self.distinction.iscontinuous:\n if inst.currentObs[self.distinction.dimension] <= self.distinction.continuous_divide_value:\n return 0\n else:\n return 1\n else:\n return int(inst.currentObs[self.distinction.dimension] != 0)\n else:\n if self.distinction.iscontinuous:\n if inst.nextObs[self.distinction.dimension] <= self.distinction.continuous_divide_value:\n return 0\n else:\n return 1\n else:\n return int(inst.nextObs[self.distinction.dimension])\n\n\nclass Instance:\n \"\"\"\n records the transition as an instance\n \"\"\"\n \n def __init__(self, timestep, currentObs, action, nextObs, reward, home_identifier, qValue):\n self.timestep = int(timestep)\n self.action = action\n self.reward = reward\n self.nextObs = nextObs # record the state data\n self.currentObs = currentObs # record the state data\n # self.reward = reward # reserve for getEFDR only\n # self.home_identifier = home_identifier\n self.qValue = qValue\n\n\nclass Distinction:\n \"\"\"\n For split node\n \"\"\"\n \n def __init__(self, dimension, back_idx, dimension_name='unknown', iscontinuous=False, continuous_divide_value=None):\n \"\"\"\n initialize distinction\n :param dimension: split of the node is based on the dimension\n :param back_idx: history index, how many time steps backward from the current time this feature will be examined\n :param dimension_name: the name of dimension\n :param iscontinuous: continuous or not\n :param continuous_divide_value: the value of continuous division\n \"\"\"\n self.dimension = dimension\n self.back_idx = back_idx\n self.dimension_name = dimension_name\n self.iscontinuous = iscontinuous\n self.continuous_divide_value = continuous_divide_value\n \n def __eq__(self, distinction):\n return self.dimension == distinction.dimension and self.back_idx == distinction.back_idx \\\n and self.continuous_divide_value == 
distinction.continuous_divide_value\n","sub_path":"utree_training/C_UTree_CUT.py","file_name":"C_UTree_CUT.py","file_ext":"py","file_size_in_byte":37619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"277999314","text":"from django_redis import get_redis_connection\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom spit.models import Spit\nfrom spit.serializer import SpitSerializer\n\n\nclass SpitViewSet(ModelViewSet):\n    queryset = Spit.objects.all()\n    serializer_class = SpitSerializer\n    # Disable DRF authentication so that anonymous users can post\n    def perform_authentication(self, request):\n        pass\n\n    def retrieve(self, request, pk):\n        spit = self.get_object()\n        spit.visits += 1\n        spit.save()\n        try:\n            user = self.request.user\n        except Exception:\n            user = None\n\n        if user is not None and user.is_authenticated:\n            redis_conn = get_redis_connection('spit')\n            flag_collect = redis_conn.hget(\"spit_collect_%s\" % user.id, str(spit.id))\n            flag_thumbup = redis_conn.hget(\"spit_thumbup_%s\" % user.id, str(spit.id))\n            if flag_collect:\n                spit.collected = True\n            if flag_thumbup:\n                spit.hasthumbup = True\n\n        ser = self.get_serializer(instance=spit)\n        return Response(ser.data)\n\n    # Get all top-level posts\n    def list(self, request):\n\n        spitList = self.get_queryset().filter(parent=None).order_by(\"-publishtime\")\n        retSpitList = []\n\n        try:\n            user = self.request.user\n        except Exception:\n            user = None\n\n        if user is not None and user.is_authenticated:\n            redis_conn = get_redis_connection('spit')\n            for spit in spitList:\n                flag_collect = redis_conn.hget(\"spit_collect_%s\" % user.id, str(spit.id))\n                flag_thumbup = redis_conn.hget(\"spit_thumbup_%s\" % user.id, str(spit.id))\n                if flag_collect:\n                    spit.collected = True\n                if flag_thumbup:\n                    spit.hasthumbup = True\n\n                retSpitList.append(spit)\n        else:\n            retSpitList = spitList\n\n        ser = self.get_serializer(instance=retSpitList, many=True)\n        return Response(ser.data)\n\n    # Get the comments under a post\n    @action(methods=[\"GET\"], detail=True, url_path=\"children\")\n    def get_children(self, request, pk):\n\n        spitList = self.get_queryset().filter(parent=pk).order_by(\"-publishtime\")\n        retSpitList = []\n\n        try:\n            user = self.request.user\n        except Exception:\n            user = None\n\n        if user is not None and user.is_authenticated:\n            redis_conn = get_redis_connection('spit')\n            for spit in spitList:\n                flag_collect = redis_conn.hget(\"spit_collect_%s\" % user.id, str(spit.id))\n                flag_thumbup = redis_conn.hget(\"spit_thumbup_%s\" % user.id, str(spit.id))\n                if flag_collect:\n                    spit.collected = True\n                if flag_thumbup:\n                    spit.hasthumbup = True\n\n                retSpitList.append(spit)\n        else:\n            retSpitList = spitList\n\n        ser = self.get_serializer(instance=retSpitList, many=True)\n        return Response(ser.data)\n\n    # Like and unlike\n    @action(methods=[\"PUT\"], detail=True, url_path=\"updatethumbup\")\n    def update_thumbup(self, request, pk):\n        try:\n            user = request.user\n        except Exception:\n            user = None\n\n        spit = self.get_object()\n\n        if user is not None and user.is_authenticated:\n            redis_conn = get_redis_connection('spit')\n            flag = redis_conn.hget(\"spit_thumbup_%s\" %user.id, pk)\n            if flag:\n                spit.thumbup -= 1\n                spit.save()\n                redis_conn.hdel('spit_thumbup_%s' % user.id, pk)\n                return Response({'success': True, 'message': 'Unliked successfully'})\n            else:\n                redis_conn.hset(\"spit_thumbup_%s\" % user.id, pk, 1)\n                spit.thumbup += 1\n                spit.save()\n                return Response({'success': True, 'message': 'Liked successfully'})\n        else:\n            return Response({'success': False, 'message': 'Not logged in'},status=400)\n\n    # Collect (bookmark) and un-collect\n    @action(methods=[\"PUT\"], detail=True, url_path=\"collect\")\n    def collect(self, request, pk):\n        try:\n            user = request.user\n        except Exception:\n            user = None\n\n        if user is not None and user.is_authenticated:\n            redis_conn = get_redis_connection('spit')\n            flag = redis_conn.hget(\"spit_collect_%s\" %user.id, pk)\n            if flag:\n                redis_conn.hdel('spit_collect_%s' % user.id, pk)\n                return Response({'success': True, 'message': 'Un-collected successfully'})\n            else:\n                redis_conn.hset(\"spit_collect_%s\" % user.id, pk, 1)\n                return Response({'success': True, 'message': 'Collected successfully'})\n        else:\n            return Response({'success': False, 'message': 'Not logged in'},status=400)\n\n","sub_path":"tenpowwer/spit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"290308654","text":"# -*- coding: utf-8 -*-\n# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto  # type: ignore\n\n\n__protobuf__ = proto.module(\n    package=\"google.ads.googleads.v12.errors\",\n    marshal=\"google.ads.googleads.v12\",\n    manifest={\"MediaFileErrorEnum\",},\n)\n\n\nclass MediaFileErrorEnum(proto.Message):\n    r\"\"\"Container for enum describing possible media file errors.\n    \"\"\"\n\n    class MediaFileError(proto.Enum):\n        r\"\"\"Enum describing possible media file errors.\"\"\"\n        UNSPECIFIED = 0\n        UNKNOWN = 1\n        CANNOT_CREATE_STANDARD_ICON = 2\n        CANNOT_SELECT_STANDARD_ICON_WITH_OTHER_TYPES = 3\n        CANNOT_SPECIFY_MEDIA_FILE_ID_AND_DATA = 4\n        DUPLICATE_MEDIA = 5\n        EMPTY_FIELD = 6\n        RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 7\n        FIELD_NOT_SUPPORTED_FOR_MEDIA_SUB_TYPE = 8\n        INVALID_MEDIA_FILE_ID = 9\n        INVALID_MEDIA_SUB_TYPE = 10\n        INVALID_MEDIA_FILE_TYPE = 11\n        INVALID_MIME_TYPE = 12\n        INVALID_REFERENCE_ID = 13\n        INVALID_YOU_TUBE_ID = 14\n        MEDIA_FILE_FAILED_TRANSCODING = 15\n        MEDIA_NOT_TRANSCODED = 16\n        MEDIA_TYPE_DOES_NOT_MATCH_MEDIA_FILE_TYPE = 17\n        NO_FIELDS_SPECIFIED = 18\n        NULL_REFERENCE_ID_AND_MEDIA_ID = 19\n        TOO_LONG = 20\n        UNSUPPORTED_TYPE = 21\n        YOU_TUBE_SERVICE_UNAVAILABLE = 22\n        YOU_TUBE_VIDEO_HAS_NON_POSITIVE_DURATION = 23\n        YOU_TUBE_VIDEO_NOT_FOUND = 24\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/ads/googleads/v12/errors/types/media_file_error.py","file_name":"media_file_error.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"194847070","text":"\nfrom tkinter import *\nimport tkinter as tk\nfrom tkinter import messagebox\nimport student_gui\nimport student_func\n\nclass ParentWindow(Frame):\n    def __init__(self, master, *args, **kwargs):\n        Frame.__init__(self, master, *args, **kwargs)\n\n        # define our master frame configuration\n        self.master = master\n        self.master.minsize(500,400) # (width, height)\n        self.master.maxsize(500,400)\n        # This CenterWindow method will center our app on the user's screen\n        student_func.center_window(self,500,300)\n        
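# center_window lives in the separate student_func module (its source is not\n        # shown here); a minimal sketch of what such a centering helper typically\n        # does, assuming hypothetical (win_width, win_height) arguments:\n        #   x = (self.master.winfo_screenwidth() - win_width) // 2\n        #   y = (self.master.winfo_screenheight() - win_height) // 2\n        #   self.master.geometry('%dx%d+%d+%d' % (win_width, win_height, x, y))\n        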
self.master.title(\"Student Tracker\")\n self.master.configure(bg=\"#F0F0F0\")\n # This protocol method is a tkinter built-in method to catch if \n # the user clicks the upper corner, \"X\" on Windows OS.\n self.master.protocol(\"WM_DELETE_WINDOW\", lambda: student_func.ask_quit(self))\n arg = self.master\n\n # load in the GUI widgets from a separate module, \n # keeping your code comparmentalized and clutter free\n student_gui.load_gui(self)\n \n # Instantiate the Tkinter menu dropdown object\n # This is the menu that will appear at the top of our window\n menubar = Menu(self.master)\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_separator()\n filemenu.add_command(label=\"Exit\", underline=1,accelerator=\"Ctrl+Q\",command=lambda: student_func.ask_quit(self))\n menubar.add_cascade(label=\"File\", underline=0, menu=filemenu)\n helpmenu = Menu(menubar, tearoff=0) # defines the particular drop down colum and tearoff=0 means do not separate from menubar\n helpmenu.add_separator()\n helpmenu.add_command(label=\"How to use this program\")\n helpmenu.add_separator()\n helpmenu.add_command(label=\"About This Student Tracker\") # add_command is a child menubar item of the add_cascde parent item\n menubar.add_cascade(label=\"Help\", menu=helpmenu) # add_cascade is a parent menubar item (visible heading)\n \"\"\"\n Finally, we apply the config method of the widget to display the menu\n From here we could also pass in additional aprams for additional \n functionalityor appearances such as a borderwidth.\n \"\"\"\n self.master.config(menu=menubar, borderwidth='2')\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n App = ParentWindow(root)\n root.mainloop()\n","sub_path":"Student_Tracker/student_main.py","file_name":"student_main.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"250665223","text":"#!/usr/bin/python\n\nimport time\nimport math\n\n# Converts a decimal number to its binary representation\ndef dtob(n):\n # Get the largest power of 2 that fits into the number\n mag = int(math.log(n, 2))\n\n # Then going from this max, we find if the current power of 2 fits\n # If it does then add one to the binary representation.\n # Each loop multiply the binary representation by 10 as we are going\n # through the 2^3, kinda like 10^3 bases, and placeholders need to be\n # there.\n b = 0\n for i in range(mag, -1, -1):\n base = 2 ** i\n b *= 10\n \n if n >= base:\n n -= base\n b += 1\n\n return b\n\ndef reverse(n):\n if n < 10:\n return n\n else:\n magnitude = int(math.log10(n))\n\n rem = n % 10\n n /= 10\n\n return rem * 10 ** magnitude + reverse(n)\n\nprint(\"Finds the sum of all double-base palindromes less than n\")\nn = int(raw_input(\"Enter n > \"))\n\nstart = time.time()\n\nres = 0\nfor i in range(1, n):\n binary = dtob(i)\n if i == reverse(i) and binary == reverse(binary):\n res += i\n\nend = time.time()\n\nprint(\"Result: {}\".format(res))\nprint(\"{} s\".format(end - start))\n","sub_path":"36/Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"481131361","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport efficient_net\nimport imagenet_classes\n\nif __name__ == \"__main__\":\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--images\", 
default=['rottweiler.jpg'], nargs=\"+\", type=str, help=\"Files to classify.\")\n    parser.add_argument(\"--seed\", default=42, type=int, help=\"Random seed.\")\n    parser.add_argument(\"--threads\", default=1, type=int, help=\"Maximum number of threads to use.\")\n    parser.add_argument(\"--verbose\", default=False, action=\"store_true\", help=\"Verbose TF logging.\")\n    args = parser.parse_args([] if \"__file__\" not in globals() else None)\n\n\n    # Fix random seeds and threads\n    np.random.seed(args.seed)\n    tf.random.set_seed(args.seed)\n    tf.config.threading.set_inter_op_parallelism_threads(args.threads)\n    tf.config.threading.set_intra_op_parallelism_threads(args.threads)\n\n    # Report only errors by default\n    if not args.verbose:\n        os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n    # Load EfficientNet-B0\n    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(include_top=True)\n\n    for image_path in args.images:\n        # Load the file\n        with open(image_path, \"rb\") as image_file:\n            image = tf.image.decode_image(image_file.read(), channels=3, dtype=tf.float32)\n\n        # Resize to 224,224\n        image = tf.image.resize(image, size=(224, 224))\n\n        # Compute the prediction\n        start = time.time()\n        [prediction], *_ = efficientnet_b0.predict(tf.expand_dims(image, 0))\n        print(\"Image {} [{} ms]: label {}\".format(\n            image_path,\n            1000 * (time.time() - start),\n            imagenet_classes.imagenet_classes[tf.argmax(prediction)]\n        ))\n\n","sub_path":"image_classification.py","file_name":"image_classification.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"545110532","text":"from utils import *\r\nimport argparse\r\n\r\n\r\ndef evaluation(prd_dir, g_dir):\r\n    dir_precision(g_dir, prd_dir)\r\n    dir_recall(g_dir, prd_dir)\r\n    dir_acc(g_dir, prd_dir)\r\n    dir_dice(g_dir, prd_dir)\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--prd_dir')\r\nparser.add_argument('--g_dir', default='/gdata/chenzw/TRUE_DATA/val_data/val_label')\r\nCONFIGS, unparsed = parser.parse_known_args()\r\nevaluation(CONFIGS.prd_dir, CONFIGS.g_dir)\r\n","sub_path":"UNet_test/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"511218498","text":"from django.shortcuts import render, redirect, get_object_or_404, reverse\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom Blog.models import BlogPost, Comment\nfrom Blog.forms import PostForm, CommentForm\nfrom django.forms import modelformset_factory\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.utils import timezone\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\n\n\nclass BlogPostList(ListView):\n    queryset = BlogPost.objects.filter(status=1).order_by('-created_on')\n    template_name = 'blog/blogs.html' \n    paginate_by = 5\n\n\n@login_required\ndef createBlogPost(request):\n\n    if request.method == 'POST': \n        postForm = PostForm(request.POST)\n\n        if postForm.is_valid():\n            post_form = postForm.save(commit=False)\n            post_form.author = request.user\n            post_form.status = 1\n            post_form.save() \n            messages.success(request,\n                             \"Posted!\")\n            return redirect(\"blog:posts\")\n        else:\n            messages.error(request, \"Some errors occurred\")\n    else:\n        postForm = PostForm()\n    return render(request, 'blog/blogpost_form.html',\n                  {'form': 
postForm})\n\n\n@login_required\ndef blogpost_detail(request, slug):\n    post = get_object_or_404(BlogPost, slug=slug)\n    comments = post.comments_on_blog.filter(active=True, parent__isnull=True)\n    is_liked = False\n    if post.likes.filter(id=request.user.id).exists():\n        is_liked = True\n    if request.method == 'POST':\n        comment_form = CommentForm(request.POST)\n        if comment_form.is_valid():\n            parent_obj = None\n\n            try:\n                parent_id = int(request.POST.get('parent_id'))\n            except:\n                parent_id = None\n\n\n            if parent_id:\n                parent_obj = Comment.objects.get(id=parent_id)\n                if parent_obj:\n                    reply_comment = comment_form.save(commit=False)\n                    reply_comment.name = request.user\n                    reply_comment.parent = parent_obj\n            new_comment = comment_form.save(commit=False) \n            new_comment.post = post\n            new_comment.name = request.user\n            \n            new_comment.save()\n            return redirect(post.get_absolute_url())\n    else:\n        comment_form = CommentForm()\n    return render(request,\n                  'blog/blogpost_detail.html',\n                  {'blogpost': post,\n                   'comments': comments,\n                   'is_liked':is_liked,\n                   'comment_form': comment_form})\n\nclass BPUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n    model = BlogPost\n    fields = ['title', 'content', 'category', 'image']\n    success_url = \"\"\n    template_name = 'blog/blogpost_form.html'\n\n    def form_valid(self, form):\n        form.instance.author = self.request.user\n        return super().form_valid(form)\n\n    def test_func(self):\n        post = self.get_object()\n        if self.request.user == post.author:\n            return True\n        return False\n\n\nclass BPDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n    model = BlogPost\n    success_url = '/blogs/'\n    template_name = 'blog/blogpost_confirm_delete.html'\n    def test_func(self):\n        post = self.get_object()\n        if self.request.user == post.author:\n            return True\n        return False\n\n@login_required\ndef like_blogpost(request):\n    post = get_object_or_404(BlogPost, id=request.POST.get('blogpost_id'))\n    is_liked = False\n    if post.likes.filter(id=request.user.id).exists(): \n        post.likes.remove(request.user)\n        is_liked = False \n    else:\n        post.likes.add(request.user)\n        is_liked = True\n    return redirect(post.get_absolute_url())\n    \nclass SearchResultsView(ListView):\n    model = BlogPost\n    template_name = 'blog/search_results.html'\n\n    def get_queryset(self): # new\n        query = self.request.GET.get('q')\n        object_list = BlogPost.objects.filter(title__icontains=query)\n        \n        return object_list\n\nclass SearchByCategory(ListView):\n    model = BlogPost\n    template_name = 'blog/search_results.html'\n\n    def get_queryset(self): \n        query = self.request.GET.get('q')\n        object_list = BlogPost.objects.filter(category=query)\n        return object_list","sub_path":"Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"415917948","text":"'''\nCreated on 11 Dec 2012\n@author: kreczko\n\nThis script creates toy MC for the unfolding tests.\nIt:\n    1) reads the existing unfolding histograms\n    2) creates a new set by changing the number of events in each bin according\n    to a Poissonian\n    3) creates scale factors for each bin\n    4) reads the BLT unfolding ntuple (unbinned unfolding data)\n    5) creates a new set (truth, response, measured) based on 3) and 4)\nThis script uses around 300 MB RAM per instance for n=10000\n'''\nfrom optparse import OptionParser\nfrom tools.toy_mc import generate_toy_MC_from_distribution,\\\n    generate_toy_MC_from_2Ddistribution\nfrom tools.Unfolding import 
get_unfold_histogram_tuple\nfrom tools.file_utilities import make_folder_if_not_exists\nfrom rootpy.io import File\nfrom ROOT import TH1F\nfrom config import XSectionConfig\nfrom tools.ROOT_utils import set_root_defaults\n\ndef main():\n set_root_defaults()\n # prevent directory ownership of ROOT histograms (python does the garbage collection)\n TH1F.AddDirectory( False )\n parser = OptionParser()\n parser.add_option( \"-n\", \"--n_toy_mc\",\n dest = \"n_toy_mc\", default = 300,\n help = \"number of toy MC to create\", type = int )\n parser.add_option( \"-o\", \"--output\",\n dest = \"output_folder\", default = 'data/toy_mc/',\n help = \"output folder for toy MC\" )\n parser.add_option( \"-v\", \"--variable\", dest = \"variable\", default = 'MET',\n help = \"set the variable to analyse (MET, HT, ST, MT, WPT)\" )\n parser.add_option( \"-m\", \"--metType\", dest = \"metType\", default = 'type1',\n help = \"set MET type for analysis of MET, ST or MT\" )\n parser.add_option( \"-c\", \"--centre-of-mass-energy\", dest = \"CoM\", default = 8,\n help = \"set the centre of mass energy for analysis. Default = 8 [TeV]\", type = int )\n parser.add_option( '-V', '--verbose', dest = \"verbose\", action = \"store_true\",\n help = \"Print the event number, reco and gen variable value\" )\n\n ( options, _ ) = parser.parse_args()\n measurement_config = XSectionConfig( options.CoM )\n\n centre_of_mass = options.CoM\n ttbar_xsection = measurement_config.ttbar_xsection\n variable = options.variable\n met_type = measurement_config.translate_options[options.metType]\n n_toy_mc = options.n_toy_mc\n make_folder_if_not_exists( options.output_folder )\n \n # get histograms\n input_file_hists = File( measurement_config.unfolding_madgraph )\n # define output file\n out_file_template = '%s/toy_mc_%s_N_%d_%dTeV.root'\n out_file_name = out_file_template % (options.output_folder, variable, n_toy_mc, centre_of_mass)\n output = File( out_file_name, 'recreate' )\n \n for channel in ['electron', 'muon']:\n # first get the weights\n h_truth, h_measured, h_response, _ = get_unfold_histogram_tuple( input_file_hists,\n variable,\n channel,\n met_type,\n centre_of_mass,\n ttbar_xsection,\n load_fakes = False )\n # create directories\n directory = output.mkdir( channel )\n mkdir = directory.mkdir\n cd = directory.cd\n cd()\n # generate toy MC\n for i in range( 1, n_toy_mc + 1 ):\n mkdir( 'toy_%d' % i )\n cd( 'toy_%d' % i )\n # create histograms\n # add tuples (truth, measured, response) of histograms\n truth = generate_toy_MC_from_distribution(h_truth)\n measured = generate_toy_MC_from_distribution(h_measured)\n response = generate_toy_MC_from_2Ddistribution(h_response)\n \n truth.SetName('truth')\n measured.SetName('measured')\n response.SetName('response')\n \n truth.Write()\n measured.Write()\n response.Write()\n output.Write()\n output.Close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/unfolding_tests/create_toy_mc.py","file_name":"create_toy_mc.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"543041121","text":"import socket \n\ndef get_local_ip():\n \"\"\"\n heuristic for determining an ip address to connect to the grpc service\n :return: ip address in string form\n \"\"\"\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n except socket.error:\n return '127.0.0.1'\n\n return s.getsockname()[0]\n\nip=get_local_ip()\nport 
= 50052\nenvironment = \"windows\"\nconfiguration =\"release\"\n","sub_path":"ansys/dpf/core/ipconfig.py","file_name":"ipconfig.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"158043972","text":"\"Various supporting utility functions.\"\n\nimport random\nimport urllib3\n\nimport numpy as np\n\nimport params\n\n\ndef download_text(\n address='http://www.umich.edu/~umfandsf/other/ebooks/alice30.txt'):\n \"\"\"Download test text and store in file test-text.txt.\n\n Alice in Wonderland is downloaded by default.\n\n Returns:\n str: Text of Alice in Wonderland.\n\n \"\"\"\n http = urllib3.PoolManager()\n response = http.request('GET', address)\n text = response.data.decode('utf-8')\n with open('test-text.txt', 'w') as file:\n file.write(text)\n return text\n\n\ndef retrieve_text(filename):\n \"\"\"Retrieve the text string from file test-text.txt.\n\n Args:\n filename (str): Name of file, stored in project directory, to open.\n\n Returns:\n str: Text from file.\n\n \"\"\"\n with open(filename, 'r') as file:\n text = file.read()\n return text\n\n\ndef create_one_hots(text, dictionary):\n \"\"\"Create list of one hot Numpy arrays from text.\n\n Args:\n text (str): The text being used.\n dictionary (dict): A dictionary of\n characters to unique character number.\n\n Returns:\n tuple: tuple[0]: a list of training one-hot numpy arrays representing\n the training text; tuple[1]: the same but for the evaluation text.\n\n \"\"\"\n text_one_hots = []\n for char in list(text):\n one_hot = dictionary[char]\n text_one_hots.append(one_hot)\n\n split = int(params.EVAL_SPLIT * len(text_one_hots))\n train_one_hots = text_one_hots[split:]\n eval_one_hots = text_one_hots[:split]\n\n return train_one_hots, eval_one_hots\n\n\ndef output_one_hot(chars_size, text_value):\n \"\"\"Create one-hot numpy array from a character's number.\n\n Args:\n chars_size (int): Number of set of unique characters in our text.\n text_value (int): Character number.\n\n Returns:\n numpy.ndarray: One_hot of\n dimension chars_size representing text character.\n\n \"\"\"\n onehot = np.zeros([chars_size], dtype=float)\n onehot[text_value] = 1.0\n onehot = np.reshape(onehot, [1, -1])\n return onehot\n\n\ndef build_dataset(chars):\n \"\"\"Create dict and reverse dict of text chars <=> one_hot / char number.\n\n Args:\n chars (set): Set of characters (str) in the text.\n\n Returns:\n tuple: tuple[0] is dict of schema\n {: ...}\n tuple[1] is dict of schema\n {: ...}\n\n \"\"\"\n chars_size = len(chars)\n dictionary = {}\n reverse_dictionary = {}\n chars = sorted(chars)\n\n for char in chars:\n char_number = len(dictionary)\n one_hot = output_one_hot(chars_size, char_number)\n dictionary[char] = one_hot\n reverse_dictionary[char_number] = char\n\n return dictionary, reverse_dictionary\n\n\ndef create_training_io(text_one_hots, index, batch, chars_size):\n \"\"\"Create Numpy input and output arrays for a batch.\n\n Args:\n text_one_hots (List): List of one hot\n Numpy arrays representing the text's characters.\n index (int): The starting index in text_one_hots\n from which to create the input/output arrays.\n batch (int): Size of a batch.\n chars_size (int): The number of unique characters in the text.\n\n Returns:\n tuple: Input and output numpy arrays.\n\n \"\"\"\n input_x = np.zeros((batch, params.N_INPUT * chars_size), dtype='float32')\n output_y = np.zeros((batch, chars_size), dtype='float32')\n\n for item in range(0, batch):\n\n input_vectors 
= text_one_hots[index: index + params.N_INPUT]\n        input_vectors = np.concatenate(input_vectors, axis=1)\n        input_vectors = np.reshape(input_vectors, [-1, input_vectors.shape[1]])\n\n        output_hot = text_one_hots[index + params.N_INPUT]\n        output_hot = np.reshape(output_hot, [-1, chars_size])\n\n        input_x[item] = input_vectors[0]\n        output_y[item] = output_hot[0]\n\n        index += 1\n\n    return input_x, output_y\n\n\ndef one_hot_to_char(logits, reverse_dictionary):\n    \"\"\"Determine the highest probability character from the RNN logits output.\n\n    Args:\n        logits (numpy.ndarray): Logits\n        reverse_dictionary (dict): Dict of\n            character number (int) to character (str).\n\n    Returns:\n        str: Single character string.\n\n    \"\"\"\n    char_no = np.argmax(logits)\n    char = reverse_dictionary[char_no]\n    return char\n\n\ndef create_example_text(  # pylint: disable=too-many-arguments\n        sess, x, logits, chars, dictionary, reverse_dictionary):\n    \"\"\"Iteratively create text based on a random input to the trained RNN.\n\n    Args:\n        sess (Session): Tensorflow session.\n        x (Tensor): Tensorflow model placeholder input to RNN.\n        logits (Operation): Tensorflow logits operation.\n        chars (set): Set of characters in the training/evaluation text.\n        dictionary (dict): Dict of character (str)\n            to character one_hot numpy representation.\n        reverse_dictionary (dict): Dict of\n            character number (int) to character (str).\n\n    \"\"\"\n    chars_list = list(chars)\n    chars_size = len(chars)\n    test_text = [chars_list[random.randint(0, chars_size - 1)]\n                 for _ in range(params.N_INPUT)]\n    characters = ''\n    while len(characters) < params.TEST_TEXT_LENGTH:\n        test_input = []\n        # Create one hot inputs to RNN to get the next character out.\n        for character in test_text:\n            one_hot = dictionary[character]\n            test_input.append(one_hot)\n        test_input = np.concatenate(test_input, axis=1)\n        # Get the RNN output logits.\n        result = sess.run(logits, feed_dict={x: test_input})\n        # Determine which character has the highest logits value.\n        character = one_hot_to_char(result, reverse_dictionary)\n        # Add character to output text.\n        characters += character\n        # Prepare the input characters for the next RNN input iteration.\n        test_text.pop(0)\n        test_text.append(character)\n    print('\\n', characters, '\\n')\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"561138774","text":"from bs4 import BeautifulSoup\nimport json, urllib.request\nfrom datetime import datetime\nfrom time import sleep\nfrom getpass import getpass\nimport requests\nimport re\nfrom io import StringIO\nfrom googlesearch import search\nimport sys\n\nclass AppURLopener(urllib.request.FancyURLopener):\n    version = \"Mozilla/5.0\"\n\ndef store_json(courselist, username):\n    \n    courses =[]\n\n    for i in range(len(courselist)):\n        if courselist[i].isbn != None:\n            a_course = {'dept': courselist[i].dept, 'number': courselist[i].number, 'section': courselist[i].section, 'term': courselist[i].term, 'year': courselist[i].year, 'title': courselist[i].title, 'isbn': courselist[i].isbn, 'image': courselist[i].image, 'links': courselist[i].links}\n            courses.append(a_course)\n        else:\n            null_course = {'dept': courselist[i].dept, 'number': courselist[i].number, 'section': courselist[i].section, 'term': courselist[i].term, 'year': courselist[i].year, 'title': 'No Textbook', 'isbn': 'No ISBN', 'image': None, 'links': []}\n            courses.append(null_course)\n    \n\n    with 
open(f'json/books_{username}.json', 'w') as f:\n        json.dump(courses, f)\n    \n    f.close()\n\nclass course_info:\n    def __init__(self, dept, number, section, term, year):\n        self.dept = dept\n        self.number = number\n        self.section = section\n        self.term = term\n        self.year = year\n        self.links = []\n\n    def book_info(self, title, isbn, image):\n        self.title = title\n        self.isbn = isbn\n        self.image = image\n    \n    def append_links(self, results):\n        for result in results:\n            self.links.append(result)\n\n    def to_str(self):\n        return f'DEPARTMENT: {self.dept}\\nCOURSE#: {self.number}\\nSECTION: {self.section}\\nBOOK:{self.title}\\nISBN:{self.isbn}\\nIMAGE:{self.image}\\nLINKS:{self.links}\\n\\n'\n\nclass pullSchedule(object):\n\n    page_urls = {\n        'home': '/twbkwbis.P_WWWLogin',\n        'login': '/twbkwbis.P_ValLogin',\n        'select_term': '/bwskflib.P_SelDefTerm',\n        'save_term': 'bwcklibs.P_StoreTerm',\n        'semester_info': '/bwskfshd.P_CrseSchdDetl'\n    }\n\n    def __init__(self, url, username, password=None):\n        if(password == None):\n            password = getpass(f\"Enter the password for {username}: \")\n\n        self.base_url = url\n        self.session = requests.Session()\n\n        self.middle_url = None\n\n        for attempt in ['/pls/owa_prod', '/pls/prod']:\n            response = self.session.send(self.prepped_request(self.page_urls['home'], \"GET\", middle_url=attempt))\n\n            if(response.status_code == 200):\n                self.middle_url = attempt\n                break\n        \n        if (self.middle_url == None):\n            raise Exception(\"Couldn't determine URL! \")\n        \n        \n        response = self.session.send(self.prepped_request(self.page_urls['login'], \"POST\", data={'sid':username, 'PIN':password}))\n        \n        if (response.cookies.get('SESSID') == None):\n            raise Exception('INCORRECT LOGIN INFO')\n\n        page = self.session.get(self.base_url + self.middle_url + self.page_urls['select_term'], headers={'referer':self.base_url})\n        self.soup = BeautifulSoup(page.text, 'html5lib')\n\n        self.offset = datetime.strptime(response.headers['Date'], '%a, %d %b %Y %H:%M:%S %Z') - datetime.utcnow()\n\n        self.cache = None\n\n    def prepped_request(self, page_url, method, data={}, middle_url=None):\n        \n        if (middle_url == None):\n            middle_url = self.middle_url\n        \n        sleep(0.5)\n\n        prepped = requests.Request(method, self.base_url + middle_url + page_url, headers={'referer':self.base_url}, data=data)\n        print(\"Prepped Request:\" + self.base_url + middle_url + page_url)\n        return self.session.prepare_request(prepped)\n    \n\n    def get_classes(self, term, year):\n        self.term = term\n        self.year = year\n        semester = ''\n\n        if(term == 'A' or term == 'B'):\n            semester = 'Fall'\n        elif(term == 'C' or term =='D'):\n            semester = 'Spring'\n        elif(term == 'EI' or term == 'EII'):\n            semester = 'Summer'\n        else:\n            raise Exception(f\"Invalid Term {term}\")\n\n        options = self.soup.find_all('option')\n        \n        form_option = None\n\n        for elmt in options:\n            if f'{semester} {year}' in elmt.text:\n                form_option = elmt\n\n        term_id = form_option.get('value')\n\n        if(not self.cache):\n            prepped = self.prepped_request(self.page_urls['semester_info'], \"POST\", {'term_in': term_id})\n            response = self.session.send(prepped)\n            form = BeautifulSoup(response.text, 'html5lib')\n            self.cache = form\n        else:\n            form = self.cache\n\n        return form\n\n    def extract_class_info(self, form):\n        blacklist = ['Scheduled Meeting Times']\n        courses = []\n\n        for course in form.find_all('caption', class_='captiontext'):\n            if course.text not in blacklist:\n                content = course.text\n                start = content.find('- ')\n                courses.append(content[start+1:])\n\n        student_schedule = []\n\n        depts = []\n        nums = []\n        sects = []\n\n        for 
course in courses:\n dept = re.compile(r'\\s\\D\\D\\s')\n number = re.compile(r'\\d\\d\\d\\d')\n sect = re.compile(r'...\\Z')\n\n deptmatches = dept.finditer(course)\n numbermatches = number.finditer(course)\n sectmatches = sect.finditer(course)\n\n for match in deptmatches:\n depts.append(course[match.start()+1:match.end()-1])\n\n for match in numbermatches:\n nums.append(course[match.start():match.end()])\n\n for match in sectmatches:\n sects.append(course[match.start():match.end()])\n\n for i in range(len(courses)):\n if sects[i][0] == 'A' or sects[i][0] == 'B' or sects[i][0] == 'C' or sects[i][0] == 'D' or sects[i][0] == 'E':\n if sects[i][0] == self.term:\n student_schedule.append(course_info(depts[i], nums[i], sects[i], self.term, self.year))\n else:\n print(\"Invalid section format, likely an independent study, contact professor!\")\n\n return student_schedule\n\nclass pullBooks(object):\n\n page_urls = {\n 'find_books': '/shop/wpi/page/find-textbooks',\n 'results': '/shop/BNCBTBListView?catalogId=10001&langId=-1&storeId=32554',\n 'referer': '/shop/TextbookForwardControllerCmd'\n }\n\n\n\n def __init__(self, url, courselist):\n self.base_url = url\n self.courselist = courselist\n\n self.session = requests.Session()\n\n self.response = self.session.send(self.prepped_request(self.page_urls['find_books'], \"GET\"))\n\n self.cache = None\n\n def get_books(self):\n\n blacklist = ['PE']\n\n for course in self.courselist:\n if course.dept in blacklist:\n self.courselist.remove(course)\n\n search_results = []\n\n for i in range(len(self.courselist)):\n self.response = self.session.send(self.prepped_request(self.page_urls['find_books'], \"GET\"))\n self.cache = None\n term = ''\n if self.courselist[i].term == 'A' or self.courselist[i].term == 'B':\n term = 'Fall'\n elif self.courselist[i].term == 'C' or self.courselist[i].term == 'D':\n term = 'Spring'\n elif self.courselist[i].term == 'EI' or self.courselist[i].term == 'EII':\n term = 'Summer'\n\n term_string = f'{term} {self.courselist[i].year}({self.courselist[i].term}-term)'\n \n #ISOLATE TERM CODE\n\n inputpage = BeautifulSoup(self.response.text, 'html5lib')\n\n terms = inputpage.find_all('li', class_='bncbOptionItem termOption')\n\n term_code = None\n\n for term_item in terms:\n if term_string in term_item.text:\n term_code = int(term_item.get('data-optionvalue'))\n \n if term_code == None:\n raise Exception(\"Error obtaining term code\")\n\n opener = AppURLopener()\n #DEPT URL\n\n dept_url = f'https://wpi.bncollege.com/shop/TextBookProcessDropdownsCmd?catalogId=10001&langId=-1&storeId=32554&campusId=30747386&termId={term_code}&deptId=&courseId=§ionId=&dropdown=term&isOER=false'\n\n json_url = opener.open(dept_url)\n\n data = json.loads(json_url.read())\n\n dept_code = None\n\n for elmt in data:\n if self.courselist[i].dept in elmt['categoryName']:\n dept_code = elmt['categoryId']\n\n if dept_code == None:\n raise Exception(\"Error obtaining dept code\")\n\n #COURSE URL\n\n course_url = f'https://wpi.bncollege.com/shop/TextBookProcessDropdownsCmd?catalogId=10001&langId=-1&storeId=32554&campusId=30747386&termId={term_code}&deptId={dept_code}&courseId=§ionId=&dropdown=dept&isOER=false'\n\n json_url = opener.open(course_url)\n\n data = json.loads(json_url.read())\n\n course_code = None\n\n for elmt in data:\n if self.courselist[i].number in elmt['categoryName']:\n course_code = elmt['categoryId']\n\n if course_code == None:\n raise Exception(\"Error obtaining course code\")\n\n #SECTION URL\n\n section_url = 
f'https://wpi.bncollege.com/shop/TextBookProcessDropdownsCmd?catalogId=10001&langId=-1&storeId=32554&campusId=30747386&termId={term_code}&deptId={dept_code}&courseId={course_code}§ionId=&dropdown=course&isOER=false'\n\n json_url = opener.open(section_url)\n\n data = json.loads(json_url.read())\n\n section_code = None\n\n for elmt in data:\n if self.courselist[i].section in elmt['categoryName']:\n section_code = elmt['categoryId']\n\n if section_code == None:\n raise Exception('Error obtaining section code')\n\n #SEARCH TERM CODE MATCHING SECTION AND SAVE IN VARIABLE\n\n\n search_info = {'isOER': False, 'storeId': 32554, 'catalogId': 10001, 'langId': -1, 'clearAll': '',\n 'viewName': 'TBWizardView', 'secCatList': '', 'removeSectionId': '', 'mcEnabled': 'N',\n 'showCampus': False, 'selectTerm': 'Select Term', 'selectDepartment': 'Select Department',\n 'selectSection': 'Select Section', 'selectCourse': 'Select Course', 'campus1': 30747386,\n 'firstTermName_30747386': 'Spring 2021(C-term)', 'firstTermID_30747386': 100604605,\n 'section_1': section_code, 'section_2': '', 'section_3': '', 'section_4': '', 'numberOfCourseAlready': 4}\n self.response = self.session.send(self.prepped_request(self.page_urls['results'], \"POST\", data=search_info))\n\n search_results.append(BeautifulSoup(self.response.text, 'html5lib'))\n\n return search_results\n\n def prepped_request(self, page_url, method, data={}):\n \n sleep(0.5)\n\n prepped = requests.Request(method, self.base_url + page_url, headers={'referer':self.base_url + self.page_urls['referer'], 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}, data=data)\n print(\"Prepped Request:\" + self.base_url + page_url)\n return self.session.prepare_request(prepped)\n\n\n def update_classes(self, pages):\n\n for i in range(len(pages)):\n title = None\n ISBN = None\n Image = None\n\n if not pages[i].find('div', class_='noMaterial_assigned'):\n title = pages[i].find('a', class_='clr121').get('title')\n\n ISBN = pages[i].find('span', class_='compass-logo-tblist').get('data-isbn')\n\n Image = pages[i].find('img', class_='noImageDisReq').get('src')\n\n self.courselist[i].book_info(title, ISBN, Image)\n \n def find_online_vendors(self):\n sellers = ['amazon.com', 'barnesandnoble.com', 'cambridge.org']\n\n for course in self.courselist:\n search_results = None\n filtered_results = []\n if(course.title != None and course.isbn != None):\n search_results = search(f'{course.title} {str(course.isbn)}', tld=\"co.in\", num=10, stop=10, pause=2)\n\n for result in search_results:\n for seller in sellers:\n if seller in result:\n filtered_results.append(result)\n\n course.append_links(filtered_results)\n #NEED TO WEBSCRAPE THE LINKS FOR PRICES\n\ndef main():\n year = sys.argv[1]\n term = sys.argv[2]\n username = sys.argv[3]\n password = sys.argv[4]\n banner = pullSchedule('https://bannerweb.wpi.edu', username, password)\n classes = banner.extract_class_info(banner.get_classes(term, year))\n\n bn_finder = pullBooks('https://wpi.bncollege.com', classes)\n bn_finder.update_classes(bn_finder.get_books())\n bn_finder.find_online_vendors()\n \n print(\"\\n\\nCOURSE-LIST\\n\\n\")\n\n for course in classes:\n print(course.to_str())\n\n store_json(classes, 'pjomullan')\n\nif __name__==\"__main__\": \n 
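# Usage sketch: main() reads (year, term, username, password) from sys.argv,\n    # so a hypothetical invocation looks like:\n    #   python banner_scrape.py 2021 C gompei bannerpin\n    # (all four values above are placeholders, not real credentials)\n    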
main()\n","sub_path":"web_scraper/banner_scrape.py","file_name":"banner_scrape.py","file_ext":"py","file_size_in_byte":13471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"38908389","text":"import subprocess\nimport shlex\nimport sys\nimport os.path\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom entities.OS import OS\n\nclass SysOS(object):\n\n def getDistributorID(self):\n proc1 = subprocess.Popen(shlex.split('lsb_release --id'), stdout=subprocess.PIPE)\n out, err = proc1.communicate()\n distributor_id = out.split(\":\")\n return distributor_id[1].strip()\n\n def getRelease(self):\n proc1 = subprocess.Popen(shlex.split('lsb_release -r'), stdout=subprocess.PIPE)\n out, err = proc1.communicate()\n release = out.split(\":\")\n return release[1].strip()\n\n def getCodeName(self):\n proc1 = subprocess.Popen(shlex.split('lsb_release -c'), stdout=subprocess.PIPE)\n out, err = proc1.communicate()\n codename = out.split(\":\")\n return codename[1].strip()\n\n def getDescription(self):\n proc1 = subprocess.Popen(shlex.split('lsb_release -d'), stdout=subprocess.PIPE)\n out, err = proc1.communicate()\n description = out.split(\":\")\n return description[1].strip()\n\n def OSinfo(self):\n os = OS(self.getDistributorID(), self.getRelease(), self.getCodeName(), self.getDescription())\n return os\n\n","sub_path":"system/SysOS.py","file_name":"SysOS.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"166108559","text":"import numpy as np #we use numpy for array manipulations in counting nuclei in initially b&w image\r\nimport Image # we use Image to create, open, and save images\r\nimport matplotlib.pyplot as plt #used to display images\r\nfrom skimage.feature import blob_log #imports the Laplacian of Gaussian function for use in counting number of nuclei\r\nimport glob #used to consider things in an image as objects\r\nfrom skimage.io import imread #used to read an image\r\nimport cv2 #used to read images and convert colored images to grayscale\r\n# 0f1f896d9ae5a04752d3239c690402c022db4d72c0d2c087d73380896f72c466 was /\r\n# file used for test outputs, any jpg file will work\r\n\r\n\r\n#defines function for converting color image to grayscale\r\ndef rgb2gray(name):\r\n file_name = name\r\n original = cv2.imread(file_name)\r\n gray = cv2.cvtColor(original, cv2.COLOR_RGB2GRAY)\r\n\r\n plt.imshow(gray, cmap='Greys_r')\r\n plt.show()\r\n\r\n gray_img = Image.fromarray(gray)\r\n gray_img.save('gray_' + file_name, cmap='Greys_r')\r\n\r\n return 'gray_' + file_name\r\n\r\n\r\n#defines function for resizing an image while preserving aspect ratio\r\ndef resize_PAR(name, scale_percent = 80):\r\n file_name = name + '.jpg'\r\n original = cv2.imread(file_name)\r\n print('Original Dimensions: ', original.shape)\r\n length = int(original.shape[0] * scale_percent / 100)\r\n width = int(original.shape[1] * scale_percent / 100)\r\n dimensions = (length, width)\r\n resized = cv2.resize(original, dimensions, interpolation = cv2.INTER_AREA)\r\n print('Resized Dimensions: ', resized.shape)\r\n\r\n resized_img = Image.fromarray(resized)\r\n\r\n plt.imshow(resized_img)\r\n plt.show()\r\n\r\n resized_file_name = 'resize_PAR_' + file_name\r\n resized_img.save(resized_file_name, cmap='Greys_r')\r\n\r\n return resized_file_name\r\n\r\n\r\n#defines function for resizing the image to 512x512\r\ndef resize(name, new_length=512, new_width=512):\r\n file_name = 
name + '.jpg'\r\n    original = cv2.imread(file_name)\r\n    print('Original Dimensions: ', original.shape)\r\n\r\n    dimensions = (new_length, new_width)\r\n\r\n    plt.imshow(original)\r\n    plt.show()\r\n    resized_file_name = 'resize_' + str(new_length) + 'x' + str(new_width) + \"_\" + file_name\r\n\r\n    resized = cv2.resize(original, dimensions, interpolation=cv2.INTER_AREA)\r\n\r\n    print('Resized Dimensions: ', resized.shape)\r\n\r\n    #displays the resized image\r\n    plt.imshow(resized)\r\n    plt.show()\r\n\r\n    resized_img = Image.fromarray(resized)\r\n    resized_img.save(resized_file_name, cmap='Greys_r')\r\n\r\n    return resized_file_name #returns the new file name\r\n\r\n\r\n#defines a function for finding original height of the image\r\ndef find_orig_height(name):\r\n    file_name = name + '.jpg' #sets file name\r\n    original = cv2.imread(file_name) #reads the image\r\n    orig_height, orig_width, channels = original.shape #sets variables equal to the dimensions of the image\r\n    return orig_height #returns the original image height\r\n\r\n\r\n#defines a function for finding the new height of the image\r\ndef find_new_height(name):\r\n    #we will have the input be the resized image\r\n    resized = cv2.imread(name) #reads the resized image\r\n    new_height, new_width, channels = resized.shape #sets variables equal to the dimensions of the image\r\n    return new_height #returns the height of the resized image\r\n\r\n\r\n#defines function for counting the nuclei in an image that is in grayscale\r\ndef count_nuclei(name, new_height, original_height):\r\n    img = Image.open(name) #defines img as the nuclei image we want to examine\r\n\r\n    pix_val = list(img.getdata()) #puts the values of each pixel from the nuclei image into a list\r\n    #each pixel value is a single grayscale intensity\r\n\r\n    pix_val_size = len(pix_val) #sets pix_val_size equal to the length of the data set of 1-channel pixel values\r\n\r\n    #iterates for each row (pixel)\r\n    for i in range(pix_val_size):\r\n        j = 0 #j must be 0 for proper use in the below if statement\r\n\r\n        #if the pixel is not intense enough, sets the pixel to (0, 0, 0), or the color black\r\n        if pix_val[i] < 115:\r\n            pix_val[i] = 0\r\n\r\n        #otherwise, the pixel is set to 255, or the color white\r\n        else:\r\n            pix_val[i] = 255\r\n\r\n    true_img = Image.new(img.mode, img.size) #creates a new, blank image with same dimensions as the original one\r\n    true_img.putdata(pix_val) #fills the blank image with the new pixel values\r\n\r\n    #displays the new image, with each nucleus highlighted white\r\n    plt.imshow(true_img, cmap='Greys_r')\r\n    plt.show()\r\n\r\n    true_img.save('B&W.jpg') #saves the new image as \"B&W.jpg\"\r\n\r\n\r\n\r\n    ex_file = glob.glob('B&W.jpg')[0] #sets ex_file to be the \"B&W.jpg\" file, which was created above, with objects in it (the nuclei)\r\n    count_img = imread(ex_file, as_gray=True) #converts the image to grayscale\r\n\r\n    #calls the imported blob_log function, which uses surface formulas to find the number of objects in an image\r\n    blobs_log = blob_log(count_img, min_sigma=14*new_height/original_height, max_sigma = 45*new_height/(2*original_height), num_sigma= 10*new_height/original_height, threshold=.1)\r\n    #min_sigma is the minimum size of an object we want to actually count as an object\r\n    #max_sigma is the maximum size of an object we want to actually count as an object\r\n    #num_sigma is the number of intermediate values of standard deviations to consider between min and max sigma\r\n    #threshold is the lowest pixel intensity we want to consider when counting something as an object\r\n\r\n    
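# Aside: blob_log returns one (row, col, sigma) triple per detected blob, so\r\n    # counting rows counts nuclei. A tiny illustrative check (assumption - not\r\n    # part of this script's pipeline):\r\n    #   import numpy as np\r\n    #   from skimage.feature import blob_log\r\n    #   toy = np.zeros((64, 64)); toy[30:34, 30:34] = 1.0\r\n    #   print(len(blob_log(toy, max_sigma=6, threshold=0.1)))  # expect 1\r\n    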
numrows=len(blobs_log) # the length of blobs_log is the number of nuclei\r\n\r\n return print(\"Number of nuclei: \", numrows) #prints the number of nuclei in the image\r\n\r\n\r\n#defines function for counting nuclei in an image that is not in grayscale\r\ndef count_nuclei_bw(name, original_height, new_height):\r\n img = Image.open(name)\r\n\r\n pix_val = list(img.getdata())\r\n\r\n array = np.asarray(pix_val)\r\n\r\n row = np.size(array, 0)\r\n\r\n for i in range(row):\r\n j = 0 # j must be 0 for proper use in the below if statement\r\n\r\n # if the pixel is not intense enough, sets the pixel to (0, 0, 0), or the color black\r\n if array[i, j] < 20 and array[i, j + 1] < 20 and array[i, j + 2] < 20:\r\n array[i, j] = 0\r\n array[i, j + 1] = 0\r\n array[i, j + 2] = 0\r\n\r\n # otherwise, the pixel is set to (255, 0, 0), or the color red\r\n else:\r\n array[i, j] = 255\r\n array[i, j + 1] = 0\r\n array[i, j + 2] = 0\r\n\r\n im_list = tuple(map(tuple, array)) #converts the array of new pixel values back into a tuple so that the tuple can be used to generate a new image\r\n true_img = Image.new(img.mode, img.size)\r\n true_img.putdata(im_list)\r\n\r\n # displays the new images, with each nuclei highlighted red\r\n plt.imshow(true_img)\r\n plt.show()\r\n\r\n true_img.save('red.jpg') # saves the new image as \"red.jpg\"\r\n\r\n ex_file = glob.glob('red.jpg')[0]\r\n count_img = imread(ex_file, as_gray=True)\r\n\r\n blobs_log = blob_log(count_img, min_sigma=14*new_height/original_height, max_sigma=45*new_height/(2*original_height), num_sigma=10*new_height/original_height, threshold=.1)\r\n\r\n numrows = len(blobs_log)\r\n\r\n print(\"Number of nuclei: \", numrows)\r\n\r\n\r\n#the main function, which calls the above function to resize the image, convert it to grayscale if necessary,\r\n# and count the nuclei in it\r\ndef main():\r\n file_name = input(\"Enter file name:\") #asks user for the file name\r\n\r\n resized_dim = resize_PAR(file_name, 150) #calls resize_PAR function to obtain a resized image\r\n #resized_dim = resize(file_name) #calls resize function to obtain a resized image\r\n\r\n orig_height = find_orig_height(file_name) # calls find_orig_height function to obtain the image's original height\r\n new_height = find_new_height((resized_dim)) #calls find_new_height function to obtain the resized image's height\r\n\r\n #count_nuclei(rgb2gray(resized_dim), new_height, orig_height) #calls count_nuclei function to find the number of nuclei in the resized, grayscale image\r\n count_nuclei_bw(resized_dim, new_height, orig_height) #calls count_nuclei_bw function to find the number of nuclei in the resized, initially black-and-white image\r\n\r\n\r\nmain()","sub_path":"BWcount.py","file_name":"BWcount.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"245765893","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nModule: pypub.scrapers.taylorfrancis\n\n\nNOTE: THIS IS FOR A DEPRECATED VERSION OF T&F!! 
THE HTML TAGS NEED TO BE CHANGED.\n\n\n\nTasks/Examples:\n---------------\n1) ****** Get references given a doi value *******\nfrom pypub.scrapers import ________ as __\n\nrefs = __.get_references('0006899387903726',verbose=True)\n\nrefs = __.get_references('S1042368013000776',verbose=True)\n\ndf = refs[0].to_data_frame(refs)\n\n\nCurrently I am building something that allows extraction of references from\na URL.\n\n\"\"\"\n# Standard imports\nimport sys\nimport os\n\n# Third party imports\nimport requests\nfrom bs4 import BeautifulSoup\n\n# Local imports\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom pypub.utils import get_truncated_display_string as td\nfrom pypub.utils import findValue\nfrom pypub.pypub_errors import *\nfrom pypub.scrapers.base_objects import *\n\n_TF_URL = 'http://tandfonline.com/'\n\n\nclass TaylorFrancisAuthor(BaseAuthor):\n\n def __init__(self, li_tag):\n\n \"\"\"\n Parameters\n ----------\n li_tag\n\n Returns\n -------\n\n Improvements\n ------------\n 1) Allow retrieval of icon info:\n - corresponding author info\n - email author\n 2) Split name into parts\n\n \"\"\"\n super().__init__()\n\n # Get author name\n self.name = li_tag.contents[0].text\n\n self.affiliations = []\n self.email = None\n self.superscripts = []\n\n self.affmap = {'a':1, 'b':2, 'c':3, 'd':4, 'e':5, 'f':6, 'g':7, 'h':8}\n\n # TODO: THIS DOESN'T WORK. FIX IT.\n # The superscripts are siblings of the li_tag, not children!\n # Need to figure out how to get them and separate them by author.\n # Parse superscripts\n supers = li_tag.find_all('sup')\n for x in supers:\n if x.text != '*':\n self.superscripts.append(x.text)\n\n def populate_affiliations(self, aff_labels):\n self.affiliations = [aff_labels[self.affmap[x]] for x in self.superscripts]\n\n def __repr__(self):\n return u'' + \\\n 'name: %s\\n' % self.name + \\\n 'affiliations: %s\\n' % self.affiliations + \\\n 'email: %s\\n' % self.email\n\n\nclass TaylorFrancisEntry(BaseEntry):\n \"\"\"\n This could be a step above the reference since it would, for example,\n contain all authors on a paper.\n\n Attributes\n ----------\n doi : string\n The unique identifier\n\n See Also\n ----------\n TaylorFrancisRef\n\n Examples\n ----------\n from pypub.scrapers import taylorfrancis as tf\n url = ''\n tfe = __.TaylorFrancisEntry(url,verbose=True)\n\n Improvements\n ----------\n - Add citing articles\n\n \"\"\"\n def __init__(self, soup, verbose=False):\n super().__init__()\n\n # Get entry content information\n mainContent = soup.find('div', {'id': 'journal_content'})\n if mainContent is None:\n mainContent = soup.find('div', {'id': 'pb-page-content'})\n if mainContent is None:\n raise ParseException('Unable to find main content of page')\n\n # Metadata:\n # ---------\n titlebox = mainContent.find('div', {'class': 'description'})\n if titlebox is not None:\n self.title = titlebox.find('h1').text.title()\n else:\n self.title = None\n\n import pdb\n pdb.set_trace()\n\n # This box contains the publication name as well as Volume and Issue\n pubbox = mainContent.find('div', {'class': 'borderedmodule'})\n pubbox = pubbox.find('td')\n self.publication = findValue(pubbox, 'h2')\n if self.publication is not None:\n self.publication = self.publication.strip()\n\n # Parsing out the integer values of the volume and issue\n vol_issue = pubbox.find('h3')\n if vol_issue is None:\n raise ParseException('Unable to find volume and issue data')\n else:\n vol_issue = vol_issue.text\n issue_index = vol_issue.find('Issue')\n\n # If an issue 
number is listed, extract it\n if issue_index != -1:\n vol_text = vol_issue[0:issue_index]\n all_issue_text = vol_issue[issue_index:]\n issue_text = all_issue_text[0:all_issue_text.find(',')]\n issue_num_text = [x for x in issue_text if x.isdigit()]\n self.issue = ''.join(issue_num_text)\n else:\n vol_text = vol_issue\n self.issue = None\n\n vol_num_text = [x for x in vol_text if x.isdigit()]\n self.volume = ''.join(vol_num_text)\n\n\n\n # Two dates are given: original publication date and\n # online publication date. This returns the original journal pub date.\n datebox = mainContent.find('div', {'class' : 'articleDates'})\n if datebox is None:\n raise ParseException('Unable to find publishing dates')\n alldates = datebox.find_all('li')\n full_date_text = alldates[-1].text\n date_index = full_date_text.find('Published online: ')\n if date_index > -1:\n date = full_date_text[(date_index + 18):]\n else: date = ''\n\n self.date = date\n self.year = self.date[-4:]\n\n # Keywords\n # TaylorFrancis keeps keywords below the abstract, separate from header info\n abstract_section = mainContent.find('div', {'class' : 'abstract'})\n keybox = abstract_section.find('ul', {'class' : 'keywords'})\n if keybox is None:\n raise ParseException('Unable to find keywords')\n wordlist = keybox.find_all('li')\n self.keywords = [w.text[0:w.text.find(',')] for w in wordlist]\n\n\n metabox = mainContent.find('div', {'class' : 'doiMeta'})\n\n self.pages = findValue(mainContent, 'div', label_type='class', label_name='pageRange')\n\n\n # DOI Retrieval:\n # --------------\n # This might be more reliable than assuming we have the DOI in the title\n self.doi = findValue(metabox, 'dd')\n doi_startindex = self.doi.find('10.')\n self.doi = self.doi[doi_startindex:] # to get rid of whitespace at the beginning\n\n\n # Authors:\n # --------\n # Find list items within the ordered list with id 'authors'\n # Need to find only classless li's so that it doesn't also retrieve the child li's corresponding\n # to author affiliations at this stage.\n authorList = metabox.find_all('span', {'class' : 'hlFld-ContribAuthor'})\n self.authors = [TaylorFrancisAuthor(x) for x in authorList]\n\n # Find the list of affiliations from the tabbed module at the bottom of the page\n tabModule = mainContent.find('div', {'id' : 'tabModule'})\n aff_list = tabModule.find('ul', {'class' : 'affiliations'})\n affs = aff_list.find_all('li')\n affiliations = []\n for aff in affs:\n affiliations.append(aff.text[1:]) # Get rid of the leading superscript letter\n\n # Assign affiliations to authors\n for author in self.authors:\n author.populate_affiliations(affiliations)\n\n def __repr__(self):\n return u'' + \\\n ' title: %s\\n' % td(self.title) + \\\n ' authors: %s\\n' % self.authors + \\\n ' keywords: %s\\n' % self.keywords + \\\n ' publication: %s\\n' % self.publication + \\\n ' date: %s\\n' % self.date + \\\n ' volume: %s\\n' % self.volume + \\\n ' issue: %s\\n' % self.issue + \\\n ' pages: %s\\n' % self.pages + \\\n ' doi: %s\\n' % self.doi\n\n @classmethod\n def from_doi(doi):\n entry = TaylorFrancisEntry(_TF_URL + '/doi/abs/' + str(doi))\n return entry\n\n\n# TODO: Inherit from some abstract ref class\n# I think the abstract class should only require conversion to a common standard\nclass TaylorFrancisRef(BaseRef):\n \"\"\"\n This is the result class of calling get_references. 
It contains the\n bibliographic information about the reference, as well as additional meta\n information such as a DOI (if known).\n\n Attributes:\n -----------\n ref_id : int\n The index of the reference in the citing document. A value of 1\n indicates that the reference is the first reference in the citing\n document.\n title : string\n authors : string\n List of the authors. This list may be truncated if there are too many\n authors, e.g.: 'N. Zabihi, A. Mourtzinos, M.G. Maher, et al.'\n publication : string\n Abbreviated (typically?) form of the journal\n volume : string\n date : string\n This appears to always be the year\n doi : string\n Digital Object Identifier. May be None if not present. This is\n currently based on the presence of a link to fulltext via Crossref.\n pdf_link : string (default None)\n If not None, this link points to the pdf of the article.\n\n\n See Also:\n get_references\n\n \"\"\"\n def __init__(self, ref_tags, ref_id):\n\n \"\"\"\n\n Parameters:\n -----------\n ref_tags: bs4.element.Tag\n Html tags as soup of the reference. Information provided is that\n needed in order to form a citation for the given reference.\n ref_id: int\n The id of the reference as ordered in the citing entry. A value\n of 1 indicates that this object is the first reference in the bibliography.\n\n\n \"\"\"\n super().__init__()\n self.ref_tags = ref_tags\n\n # Reference Bibliography Section:\n #--------------------------------\n self.ref_id = ref_id + 1 # Input is 0 indexed\n\n self.volume = None\n self.pages = None\n\n all_text = ref_tags.find_all(text=True)\n self.citation = all_text[1]\n\n # 'all_text' is a list of the text segments within each citation.\n # If it is a short list, it means that the citation is likely a book,\n # and doesn't include page numbers, PMID, DOI, etc.\n if len(all_text) > 5:\n metadata = all_text[3]\n metadata = metadata[2:] # Get rid of leading '; '\n divider = metadata.find(':') # This divides volume number from page range\n self.volume = metadata[0:divider]\n self.pages = metadata[divider+1:metadata.find(';')]\n\n self.date = findValue(ref_tags, 'span')\n\n\n # Reference Link Section:\n #------------------------------\n\n self.crossref = None\n self.pubmed = None\n self.pubmed_id = None\n self.doi = None\n self.web_of_science = None\n\n # External links (i.e. 
PubMed, CrossRef) are kept in tags,\n # while the IDs are conveniently kept in tags\n links = ref_tags.find_all('a')\n ids = ref_tags.find_all('pub-id')\n\n for link in ids:\n id_type = link['pub-id-type']\n if id_type == 'pmid':\n self.pubmed_id = link.text\n elif id_type == 'doi':\n self.doi = link.text\n\n if links is not None:\n for link in links:\n href = link['href'][1:] # Get rid of leading '/'\n text = link.text.lower()\n\n if 'crossref' in text:\n self.crossref = _TF_URL + href\n elif 'pubmed' in text:\n self.pubmed = _TF_URL + href\n elif 'science' in text:\n self.web_of_science = _TF_URL + href\n\n def __repr__(self):\n return u'' + \\\n 'ref_id: %s\\n' % self.ref_id + \\\n 'citation: %s\\n' % self.citation + \\\n 'date: %s \\n' % self.date + \\\n 'crossref_link: %s\\n' % self.crossref + \\\n 'pubmed: %s\\n' % self.pubmed + \\\n 'pubmed_id: %s\\n' % self.pubmed_id + \\\n 'doi: %s\\n' % self.doi\n\n\ndef get_references(input, verbose=False):\n \"\"\"\n This function gets references for a Taylor and Francis URL that is of the\n form:\n \"\"\"\n\n # Step 1 - Make the request\n #--------------------------------------------------------------------------\n soup = _make_soup(input, 'references', verbose)\n\n # Step 2 - Get the references tags\n #--------------------------------------------------------------------------\n # The reference tags contain most of the information about references\n # They are however missing a lot of the linking information\n # e.g. link to the article, pdf download, etc\n\n reference_section = soup.find('ul', {'class' : 'references'})\n\n if reference_section is None:\n # Then we might be a guest. In other words, we might not have sufficient\n # privileges to access the data we want. Generally this is protected via\n # IP mask. When I'm working from home I need to VPN into work so\n # that I can access the data :/\n print(\"reference_section is None\")\n # TODO: check what this guest tag actually looks like\n # When we don't have proper access rights, this is present in the html\n temp = soup.find('li', {'id' : 'menuGuest'})\n if temp is None:\n #We might have no references ... (Doubtful)\n raise ParseException(\"References were not found ..., code error likely\")\n else:\n raise InsufficientCredentialsException(\"Insufficient access rights to get referencs, requires certain IP addresses (e.g. 
university based IP)\")\n\n    ref_tags = reference_section.find_all('li')\n\n    n_refs = len(ref_tags)\n\n    if n_refs == 0:\n        return None\n\n\n    # Step 3 - Create reference objects\n    #--------------------------------------------------------------------------\n    # The reference objects parse out information for each reference\n    # as well as external links.\n    if verbose:\n        print('Creating reference objects')\n    ref_objects = [TaylorFrancisRef(ref_tag, ref_id) for \\\n                   ref_tag, ref_id in \\\n                   zip(ref_tags, range(n_refs))]\n\n\n    #All done!\n    #---------\n    return ref_objects\n\n\ndef get_entry_info(input, verbose=False):\n    soup = _make_soup(input, 'entry', verbose)\n    return TaylorFrancisEntry(soup, verbose)\n\n\ndef get_pdf_link(input, verbose=False, soup=None):\n    if _is_url(input):\n        doi = _extract_doi(input)\n    elif _is_doi(input):\n        doi = input\n    else:\n        raise ValueError('Input not recognized as a valid DOI or Taylor and Francis URL.')\n\n    pdf_link = _TF_URL + 'doi/pdf/' + doi\n    return pdf_link\n\n\ndef _make_soup(input, type, verbose=False):\n    # Check if the input is a DOI or URL\n    if _is_url(input):\n        doi = _extract_doi(input)\n    elif _is_doi(input):\n        doi = input\n    else:\n        raise ValueError('Input not recognized as a valid DOI or Taylor and Francis URL.')\n\n    # Web page retrieval\n    #-------------------\n    soup = _connect(doi, type, verbose)\n    return soup\n\n\ndef _is_url(input):\n    if input.find('tandfonline') != -1:\n        return True\n    else:\n        return False\n\n\ndef _is_doi(input):\n    if input.find('10.') == 0:\n        return True\n    else:\n        return False\n\n\ndef _extract_doi(url):\n    # DOI is used in Taylor and Francis URLs after the middle word, which\n    # can be 'full', 'abs', or 'ref', i.e.\n    # http://www.tandfonline.com/doi/full/10.1080/2326263X.2015.1134958\n    # This finds the indices of the middle word and gets everything afterward\n    if 'full' in url:\n        articleindex = url.find('/full/')\n        doi = url[articleindex+6:]\n    elif 'ref' in url:\n        articleindex = url.find('/ref/')\n        doi = url[articleindex+5:]\n    elif 'abs' in url:\n        articleindex = url.find('/abs/')\n        doi = url[articleindex+5:]\n    elif 'figure' in url:\n        articleindex = url.find('/figure/')\n        doi = url[articleindex+8:]\n    else:\n        raise ParseException('DOI cannot be found in Taylor and Francis URL.')\n    return doi\n\n\ndef _connect(doi, type, verbose=None):\n    if type == 'entry':\n        prefix = 'abs/'\n    elif type == 'references':\n        prefix = 'ref/'\n    else:\n        prefix = 'full/'\n\n    # Construct valid Taylor and Francis URL from given DOI\n    url = _TF_URL + 'doi/' + prefix + doi\n\n    # Web page retrieval\n    # -------------------\n    s = requests.Session()\n\n    if verbose:\n        print('Requesting main page for doi: %s' % doi)\n\n    resp = s.get(url)\n    soup = BeautifulSoup(resp.text)\n\n    with open('tf_test.html', 'wb') as file:\n        file.write(resp.content)\n\n    return soup\n","sub_path":"pypub/scrapers/taylorfrancis.py","file_name":"taylorfrancis.py","file_ext":"py","file_size_in_byte":16478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"570217972","text":"import re\n\n\nclass qqspam:\n    def __init__(self):\n        self.tags = ['ads']\n\n    def detect(self, full_name):\n        '''\n        ★加Q群537592562下载 253款破解版看片APP★免会员★免推广★无限看\n        return:\n            tuple (True, 'QQ_Spam', [result])\n        '''\n        pattern = '(无限看|免推广|免会员|看片APP|加Q群|537592562|破解版|请加Q|1943675344)'\n        result = re.findall(pattern, full_name)\n        if result:\n            return (True, 'QQ_Spam', result)\n        else:\n            return (False, 'QQ_Spam', 
None)\n","sub_path":"plugin/spam_name/qqspam.py","file_name":"qqspam.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"30769214","text":"#!/bin/python3\n\n# Write a factorial function that takes a positive integer, N, as a parameter\n# and prints the result of N! (N factorial).\nimport os\n\n\n# Complete the factorial function below.\ndef factorial(n):\n if n <= 1:\n return n\n else:\n return n * factorial(n-1)\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n result = factorial(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"day9_recursion.py","file_name":"day9_recursion.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"616370723","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom os import path, makedirs\nimport ROOT\n\ndef get_events(filename, in_reverse):\n\tevent_array = []\n\tdata_array, time_array = read_data(\"data/data_\" + str(filename) + \".txt\", 2, in_reverse)\n\tfor event in range(len(time_array)):\n\t\tevent_array.append(data_array[(event * 1024):((event + 1) * 1024)])\n\t\n\treturn event_array, time_array\n\ndef get_event(filename, event_nr, in_reverse):\n\tn_ch = 2\n\ttime_next = False\n\tevent_found = False\n\tdata_array = []\n\tfor line in open(\"data/data_\" + str(filename) + \".txt\").readlines():\n\t\tif \"Event\" in line and event_found:\n\t\t\tbreak\n\t\t\n\t\tif (\"Event #\" + str(event_nr)) in line:\n\t\t\tevent_found = True\n\t\t\ttime_next = True\n\t\t\tcontinue\n\t\t\n\t\telif not event_found:\n\t\t\tcontinue\n\t\t\t\n\t\telif time_next:\n\t\t\ttime = float(line.split(\".\")[0])\n\t\t\ttime_next = False\n\t\t\tcontinue\n\t\t\t\n\t\telif \"t\" in line:\n\t\t\tcontinue\n\t\t\n\t\tline_array = []\n\t\tdata_line = line.split()\n\t\tif in_reverse:\n\t\t\tfor n in range(n_ch - 1, -1, -1): \n\t\t\t\tline_array.append(float(data_line[2 * n]))\n\t\t\t\tline_array.append(-1.*float(data_line[2 * n + 1]))\n\t\telse:\t\n\t\t\tfor n in range(n_ch): \n\t\t\t\tline_array.append(float(data_line[2 * n]))\n\t\t\t\tline_array.append(-1.*float(data_line[2 * n + 1]))\t\t\n\t\t\n\t\tdata_array.append(line_array)\n\t\n\treturn data_array, time\n\ndef get_maximum(event, ch = 0):\n\tindex \t= 0\n\tmaximum = 0.\n\t\n\tfor i in range(len(event)):\n\t\tif event[i][ch * 2 + 1] > maximum:\n\t\t\tindex \t= i\n\t\t\tmaximum = event[i][ch * 2 + 1]\n\n\treturn maximum, index\n\n\ndef get_baseline(event, ch = 0):\n\t\n\tmaximum, index_max = get_maximum(event, ch)\n\tminimum = 501.\n\t\n\tfor i in range(index_max, index_max - 100, -1):\n\t\tif event[i][ch * 2 + 1] < minimum:\n\t\t\tindex_end = i\n\t\t\tminimum = event[i][ch * 2 + 1]\n\t\t\n\tindex_begin \t= 0\n\t\t\n\tfit_func \t= ROOT.TF1(\"fit_func\", \"pol0\", event[index_begin][ch * 2], event[index_end][ch * 2])\n\tgraph \t\t= ROOT.TGraph(1024)\n\t\n\tfor i in range(1024):\n\t\tgraph.SetPoint(i, event[i][ch * 2], event[i][ch * 2 + 1])\n\t\n\tgraph.Fit(\"fit_func\", \"QR0C\")\n\n\treturn fit_func.Eval(event[index_end][0])\n\ndef get_minimum(event, ch = 0):\n\tminimum = 501.\n\t\n\tfor data in event:\n\t\tif data[ch * 2 + 1] < minimum:\n\t\t\tminimum = data[ch * 2 + 1]\n\t\n\treturn minimum\n\ndef get_amplitude_from_fit(event):\n\tmaximum, index_max = get_maximum(event, 1)\n\t#maximum_low , index_max_low = get_maximum(event, 0)\n\tmaximum_right = 
0.\n\tindex_max_right = 0.\n\tmaximum_low_right = 0.\n\tindex_max_low_right = 0.\n\t\n\tfor i in range(len(event) - 1, 0, -1):\n\t\tif event[i][3] > maximum_right:\n\t\t\tindex_max_right = i\n\t\t\tmaximum_right = event[i][3]\n\t\t\n\t\tif event[i][1] > maximum_low_right:\n\t\t\tindex_max_low_right = i\n\t\t\tmaximum_low_right = event[i][1]\n\t\n\tminimum = 501.\n\tminimum_right = 501.\n\tminimum_low = 501.\n\tminimum_low_right = 501.\n\tindex_min_right = 1023\n\tindex_min_low = 0\n\tindex_min_low_right = 0\n\t\n\t\n#\tfor i in range(index_max_low, index_max_low - 50, -1):\n#\t\tif event[i][1] < minimum_low:\n#\t\t\tindex_min_low = i\n#\t\t\tminimum_low = event[i][1]\n\t\n#\tfor i in range(index_max_low_right, index_max_low_right + 30):\n#\t\tif event[i][1] < minimum_low_right:\n#\t\t\tindex_min_low_right = i\n#\t\t\tminimum_low_right = event[i][1]\n\t\n#\tif maximum >= 499.:\n\tfor i in range(index_max, index_max - 50, -1):\n\t\tif event[i][3] < minimum:\n\t\t\tindex_min = i\n\t\t\tminimum = event[i][3]\n\t\n\tfor i in range(index_max_right, index_max_right + 150):\n\t\tif i >= 1023:\n\t\t\tindex_min_right = 1023\n\t\t\tminimum_right = event[1023][3]\n\t\t\tbreak\n\t\t\t\n\t\tif event[i][3] < minimum_right:\n\t\t\tindex_min_right = i\n\t\t\tminimum_right = event[i][3]\n\t\n#\telse:\n#\t\tindex_min = 0\n#\t\tindex_max = 0\n\n\tindex_min_low = index_min\n\tmaximum_low = 0.\n\tfor i in range(index_min_low, index_min_low + 50):\n\t\tif i >= 1023:\n\t\t\tindex_max_low = 1023\n\t\t\tmaximum_low = event[1023][1]\n\t\t\tbreak\n\t\t\t\n\t\tif event[i][1] > maximum_low:\n\t\t\tindex_max_low = i\n\t\t\tmaximum_low = event[i][1]\n\t\n\tminimum_low_right = 501.\n\tfor i in range(index_max_low, index_max_low + 70):\n\t\tif i >= 1023:\n\t\t\tindex_min_low_right = 1023\n\t\t\tminimum_low_right = event[1023][1]\n\t\t\tbreak\n\t\t\t\n\t\tif event[i][1] < minimum_low_right:\n\t\t\tindex_min_low_right = i\n\t\t\tminimum_low_right = event[i][1]\n\t\t\t\n\t\n\tn_sat_ch2=0\n\tn_sat_ch3=0\n\tfor i in range(1024):\n\t\tif event[i][1] >= 499.:\n\t\t\tn_sat_ch2 += 1\n\t\t\n\t\tif event[i][3] >= 499.:\n\t\t\tn_sat_ch3 += 1\n\t\t\t\n\tgraph_ch2\t\t= ROOT.TGraph(1024 - n_sat_ch2)\n\tgraph_ch3\t\t= ROOT.TGraph(1024 - n_sat_ch3)\n\tfit_func_ch2 \t= ROOT.TF1(\"fit_func_ch2\", \"landau\", event[index_min_low][0], event[index_min_low_right][0])\n\tfit_func_ch2_d\t= ROOT.TF1(\"fit_func_ch2_d\", \"landau\", event[0][0], event[1023][0])\n\tfit_func_ch3 \t= ROOT.TF1(\"fit_func_ch3\", \"landau\", event[index_min][2], event[index_min_right][2])\n\tfit_func_ch3_d \t= ROOT.TF1(\"fit_func_ch3_d\", \"landau\", event[0][2], event[1023][2])\n\t\n\tk=0\n\tfor i in range(1024 - n_sat_ch2):\n\t\tif event[i][1] >= 499.:\n\t\t\tk+=1\n\t\t\tcontinue\n\t\t\t\n\t\tgraph_ch2.SetPoint(i-k, event[i+k][0], event[i+k][1])\n\t\n\tk=0\n\tfor i in range(1024 - n_sat_ch3):\n\t\tif event[i][3] >= 499.:\n\t\t\tk+=1\n\t\t\tcontinue\n\t\t\t\n\t\tgraph_ch3.SetPoint(i-k, event[i+k][2], event[i+k][3])\n\t\n\tgraph_ch3.Fit(\"fit_func_ch3\", \"QR0\")\n\tgraph_ch2.Fit(\"fit_func_ch2\", \"QR0\")\n\t\n\tfit_func_ch2_d.SetParameter(0, fit_func_ch2.GetParameter(0))\n\tfit_func_ch2_d.SetParameter(1, fit_func_ch2.GetParameter(1))\n\tfit_func_ch2_d.SetParameter(2, fit_func_ch2.GetParameter(2))\n\tfit_func_ch3_d.SetParameter(0, fit_func_ch3.GetParameter(0))\n\tfit_func_ch3_d.SetParameter(1, fit_func_ch3.GetParameter(1))\n\tfit_func_ch3_d.SetParameter(2, fit_func_ch3.GetParameter(2))\n\t\n\t#if maximum_low < 499.:\n\t#\tlow_ampl = maximum_low\n\t#else:\n\tlow_ampl = 
fit_func_ch2_d.GetMaximum(event[0][0], event[1023][0])\n\t\n\t#if maximum < 499.:\n\t#\thigh_ampl = maximum\n\t#else:\n\thigh_ampl = fit_func_ch3_d.GetMaximum(event[0][0], event[1023][0])\n\t\n\treturn low_ampl, high_ampl\n\ndef scatter_plot(filename, in_reverse):\n\tevent_array, time_array\t\t= get_events(filename, in_reverse)\n\tamplitudech0_array\t\t\t= []\n\tamplitudech1_array\t\t\t= []\n\t\n\tamplitude_fit_low_array\t\t= []\n\tamplitude_fit_high_array\t= []\n\tfor event in event_array:\n\t\tmaximum_ch0, index_max_ch0 \t= get_maximum(event, 0)\n\t\tmaximum_ch1, index_max_ch1 \t= get_maximum(event, 1)\n\t\t\n\t\tbaseline_ch0 \t= get_baseline(event, 0)\n\t\tbaseline_ch1\t= get_baseline(event, 1)\n\t\tamplitudech0_array.append(maximum_ch0 - baseline_ch0)\n\t\tamplitudech1_array.append(maximum_ch1 - baseline_ch1)\n\t\tlow_ampl, high_ampl = get_amplitude_from_fit(event)\n\t\tif low_ampl >= 2000.:\n\t\t\tamplitude_fit_low_array.append(2000.)\n\t\t\n\t\telse:\n\t\t\tamplitude_fit_low_array.append(low_ampl)\n\t\t\t\n\t\tif high_ampl >= 2000.:\n\t\t\tamplitude_fit_high_array.append(2000.)\n\t\t\n\t\telse:\n\t\t\tamplitude_fit_high_array.append(high_ampl)\t\n\t\t\t\n\t\t\n\tcanvas\t= ROOT.TCanvas(\"canvas\")\n\thist\t= ROOT.TH2F(\"ampl_scatter\" + str(filename), \"Scatterplot;Low Gain Voltage [mV];High Gain Voltage [mV]\", 100, min(amplitudech0_array), max(amplitudech0_array), 100, min(amplitudech1_array), max(amplitudech1_array))\n\thist_fit= ROOT.TH2F(\"ampl_fit_scatter\" + str(filename), \"Fitted Amplitudes;Low Gain Voltage [mV];High Gain Voltage [mV]\", 100, min(amplitude_fit_low_array), max(amplitude_fit_low_array), 100, min(amplitude_fit_high_array), max(amplitude_fit_high_array))\n\t\n\tfor i in range(len(amplitudech0_array)):\n\t\thist.Fill(amplitudech0_array[i], amplitudech1_array[i])\n\t\thist_fit.Fill(amplitude_fit_low_array[i], amplitude_fit_high_array[i])\n\t\n\troot_file = ROOT.TFile(\"res/kanne/\" + str(filename) + \"/ampl_hist_scattter.root\", \"RECREATE\")\n\thist.Write()\n\thist.Draw(\"COLZ\")\n\tcanvas.SaveAs(ensureDir(\"res/kanne/\" + str(filename) + \"/ampl_hist_scatter.pdf\"))\n\troot_file.Close()\n\t\n\troot_file_fit = ROOT.TFile(\"res/kanne/\" + str(filename) + \"/ampl_hist_scattter_fit.root\", \"RECREATE\")\n\thist_fit.Write()\n\thist_fit.Draw(\"COLZ\")\n\tcanvas.SaveAs(ensureDir(\"res/kanne/\" + str(filename) + \"/ampl_hist_scatter_fit.pdf\"))\n\troot_file_fit.Close()\n\t\n\tfit_func_x = ROOT.TF1(\"fit_func_x\", \"landau\", 0., max(amplitude_fit_low_array))\n\tfit_func_y = ROOT.TF1(\"fit_func_y\", \"landau\", 0., max(amplitude_fit_high_array))\n\t\n\t\n\thist_fit_x = hist_fit.ProjectionX()\n\thist_fit_x.Draw()\n\thist_fit_x.Fit(\"fit_func_x\", \"QR\")\n\tcanvas.SaveAs(ensureDir(\"res/kanne/\" + str(filename) + \"/ampl_hist_scatter_fit_X.pdf\"))\n\thist_fit_y = hist_fit.ProjectionY()\n\thist_fit_y.Draw()\n\thist_fit_y.Fit(\"fit_func_y\", \"QR\")\n\tcanvas.SaveAs(ensureDir(\"res/kanne/\" + str(filename) + \"/ampl_hist_scatter_fit_Y.pdf\"))\n\t\n\t\ndef hist_amplitudes(filename, n_ch = 1, in_reverse = False):\n\tevent_array, time_array\t\t= get_events(filename, in_reverse)\n\ttotal_time\t\t\t\t\t= sum(time_array)\n\tcanvas\t\t\t\t\t\t= ROOT.TCanvas(\"canvas\")\n\t\n\tacceptance \t\t\t\t\t= 0.00179 # m^2 sr\n\ttotal_time *= acceptance\n\tfor ch in range(n_ch):\n\t\tamplitude_array\t\t\t= []\n\t\tamplitude0_array\t\t= []\n\t\t\n\t\tfor event in event_array:\n\t\t\tmaximum, index_max \t= get_maximum(event, ch)\n\t\t\n\t\t\tbaseline\t\t\t= 0.#get_baseline(event, 
ch)\n\t\t\tamplitude_array.append(maximum - baseline)\n\t\t\tif maximum < 499.:\n\t\t\t\tamplitude0_array.append(maximum - baseline)\n\t\t\n\t\thist\t\t\t\t= ROOT.TH1F(\"ampl_hist\" + str(filename) + \"ch\" + str(ch), \"Channel \" + str(ch) + \" flux;Voltage [mV];#phi #upoint #varepsilon_{total} [1/(s m^{2} sr)]\", 100, min(amplitude_array), max(amplitude_array))\n\t\thist_non_sat\t\t= ROOT.TH1F(\"ampl_hist\" + str(filename) + \"ch\" + str(ch) + \"non_sat\", \"Channel \" + str(ch) + \" flux, Non-saturated;Voltage [mV];#phi #upoint #varepsilon_{total} [1/(s m^{2} sr)]\", 100, min(amplitude0_array), 499.)\n\t\tfit_func \t\t\t= ROOT.TF1(\"fit_func\", \"landau\", min(amplitude_array), max(amplitude_array))\n\t\tif ch == 1:\n\t\t\tfit_func_non_sat\t= ROOT.TF1(\"fit_func_non_sat\", \"landau\", 100., 499.)\n\t\telse:\n\t\t\tfit_func_non_sat\t= ROOT.TF1(\"fit_func_non_sat\", \"landau\", 0., 499.)\n\t\t\t\n\t\tfor ampl in amplitude_array:\n\t\t\thist.Fill(ampl)\n\t\t\t#if ampl < 499.:\n\t\t\t#\thist_non_sat.Fill(ampl)\n\t\t\n\t\tfor ampl in amplitude0_array:\n\t\t\thist_non_sat.Fill(ampl)\n\t\t\n\t\thist.Scale(1./total_time)\t\n\t\troot_file = ROOT.TFile(\"res/kanne/\" + str(filename) + \"/ampl_hist_ch\" + str(ch) + \".root\", \"RECREATE\")\n\t\thist.Fit(\"fit_func\", \"QR\")\n\t\thist.Write()\n\t\thist.Draw()\n\t\thist.GetYaxis().SetTitleOffset(1.2)\n\t\tcanvas.SaveAs(ensureDir(\"res/kanne/\" + str(filename) + \"/ampl_hist_ch\" + str(ch) + \".pdf\"))\n\t\t\n\t\troot_file.Close()\n\t\t\n\t\thist_non_sat.Scale(1./total_time)\t\n\t\troot_file_non_sat = ROOT.TFile(\"res/kanne/\" + str(filename) + \"/ampl_hist_ch\" + str(ch) + \"_non_sat.root\", \"RECREATE\")\n\t\thist_non_sat.Fit(\"fit_func_non_sat\", \"QR\")\n\t\thist_non_sat.Write()\n\t\thist_non_sat.Draw()\n\t\thist_non_sat.GetYaxis().SetTitleOffset(1.2)\n\t\tcanvas.SaveAs(ensureDir(\"res/kanne/\" + str(filename) + \"/ampl_hist_ch\" + str(ch) + \"_non_sat.pdf\"))\n\t\t\n\t\troot_file_non_sat.Close()\n\t\t\n\t\ndef plot_events(filename, in_reverse):\n\tcanvas \t= ROOT.TCanvas(\"canvas\")\n\tevent_array, time_array = get_events(filename, in_reverse)\n\t\n\tj=0\n\tfor event in event_array:\n\t\t#event, time = get_event(filename, event_nr, in_reverse)\n\t\tmaximum, index_max = get_maximum(event, 1)\n\t\tmaximum_low , index_max_low = get_maximum(event, 0)\n\t\tmaximum_right = 0.\n\t\tindex_max_right = 0.\n\t\tfor i in range(len(event) - 1, 0, -1):\n\t\t\tif event[i][3] > maximum_right:\n\t\t\t\tindex_max_right = i\n\t\t\t\tmaximum_right = event[i][3]\n\t\t\n\t\tminimum = 501.\n\t\tminimum_right = 501.\n\t\tminimum_low = 501.\n\t\tindex_min_right = 1023\n\t\tindex_min_low = 0\n\t\t\n\t\tfor i in range(index_max_low, index_max_low - 50, -1):\n\t\t\tif event[i][1] < minimum_low:\n\t\t\t\tindex_min_low = i\n\t\t\t\tminimum_low = event[i][1]\n\t\t\n\t\tif maximum >= 499.:\n\t\t\tfor i in range(index_max, index_max - 50, -1):\n\t\t\t\tif event[i][3] < minimum:\n\t\t\t\t\tindex_min = i\n\t\t\t\t\tminimum = event[i][3]\n\t\t\t\n\t\t\tfor i in range(index_max_right, index_max_right + 150):\n\t\t\t\tif event[i][3] < minimum_right:\n\t\t\t\t\tindex_min_right = i\n\t\t\t\t\tminimum_right = event[i][3]\n\t\t\n\t\telse:\n\t\t\tindex_min = 0\n\t\t\tindex_max = 0\n\t\n\t\tn_sat_ch2=0\n\t\tn_sat_ch3=0\n\t\tfor i in range(1024):\n\t\t\tif event[i][1] >= 499.:\n\t\t\t\tn_sat_ch2 += 1\n\t\t\t\n\t\t\tif event[i][3] >= 499.:\n\t\t\t\tn_sat_ch3 += 1\n\t\t\t\t\n\t\tgraph_ch2\t\t= ROOT.TGraph(1024 - n_sat_ch2)\n\t\tgraph_ch3\t\t= ROOT.TGraph(1024 - n_sat_ch3)\n\t\tfit_func_ch2 
\t= ROOT.TF1(\"fit_func_ch2\", \"landau\", event[index_min_low][0], event[index_max_low][0])\n\t\tfit_func_ch2_d\t= ROOT.TF1(\"fit_func_ch2_d\", \"landau\", event[0][0], event[1023][0])\n\t\tfit_func_ch3 \t= ROOT.TF1(\"fit_func_ch3\", \"landau\", event[index_min][2], event[index_min_right][2])\n\t\tfit_func_ch3_d \t= ROOT.TF1(\"fit_func_ch3_d\", \"landau\", event[0][2], event[1023][2])\n\t\t\n\t\tfit_func_ch2_d.SetLineStyle(2)\n\t\tfit_func_ch3_d.SetLineStyle(2)\n\t\tfit_func_ch2_d.SetLineColor(8)\n\t\t\n\t\tgraph_ch3.SetTitle(\"Time since last event: \" + str(time_array[j]) + \";Time [ns];Voltage [mV]\")\n\t\t\n\t\tk=0\n\t\tfor i in range(1024 - n_sat_ch2):\n\t\t\tif event[i][1] >= 499.:\n\t\t\t\tk+=1\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\tgraph_ch2.SetPoint(i-k, event[i+k][0], event[i+k][1])\n\t\t\n\t\tk=0\n\t\tfor i in range(1024 - n_sat_ch3):\n\t\t\tif event[i][3] >= 499.:\n\t\t\t\tk+=1\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\tgraph_ch3.SetPoint(i-k, event[i+k][2], event[i+k][3])\n\t\t\n\t\tgraph_ch3.Fit(\"fit_func_ch3\", \"QR0\")\n\t\tgraph_ch2.Fit(\"fit_func_ch2\", \"QR0\")\n\t\tgraph_ch3.SetLineColor(4)\n\t\tgraph_ch3.Draw(\"AP\")\n\t\tgraph_ch2.Draw(\"SAME\")\n\t\t\n\t\tfit_func_ch2_d.SetParameter(0, fit_func_ch2.GetParameter(0))\n\t\tfit_func_ch2_d.SetParameter(1, fit_func_ch2.GetParameter(1))\n\t\tfit_func_ch2_d.SetParameter(2, fit_func_ch2.GetParameter(2))\n\t\tfit_func_ch3_d.SetParameter(0, fit_func_ch3.GetParameter(0))\n\t\tfit_func_ch3_d.SetParameter(1, fit_func_ch3.GetParameter(1))\n\t\tfit_func_ch3_d.SetParameter(2, fit_func_ch3.GetParameter(2))\n\t\t\n\t\tfit_func_ch2_d.Draw(\"SAME\")\n\t\tfit_func_ch3_d.Draw(\"SAME\")\n\t\t\n\t\tcanvas.SaveAs(ensureDir(\"res/kanne/\" + str(filename) + \"/event_\" + str(j) + \".pdf\"))\n\t\tj += 1\n\t\t\n\ndef read_data(filename, n_ch = 1, in_reverse = False):\n\tdata = open(str(filename)).readlines()\n\t\n\tn_events = len(data) / 1027 \n\tdata_array = []\n\ttime_array = []\n\t\n\ttime_next = False\n\ttime_prev = 0\n\ti = 0\n\tfor line in data:\n\t\tif (i / 1027) == n_events:\n\t\t\tbreak\n\t\n\t\ti += 1\n\t\t\n\t\tif \"Event\" in line:\n\t\t\ttime_next = True\n\t\t\tcontinue\n\t\t\t\n\t\telif time_next:\n\t\t\ttime_array.append(float(line.split(\".\")[0]) - time_prev)\n\t\t\ttime_prev = float(line.split(\".\")[0])\n\t\t\ttime_next = False\n\t\t\tcontinue\n\t\t\t\n\t\telif \"t\" in line:\n\t\t\tcontinue\n\t\t\n\t\tline_array = []\n\t\tdata_line = line.split()\n\t\t\n\t\tif in_reverse:\n\t\t\tfor n in range(n_ch - 1, -1, -1): \n\t\t\t\tline_array.append(float(data_line[2 * n]))\n\t\t\t\tline_array.append(-1.*float(data_line[2 * n + 1]))\n\t\telse:\t\n\t\t\tfor n in range(n_ch): \n\t\t\t\tline_array.append(float(data_line[2 * n]))\n\t\t\t\tline_array.append(-1.*float(data_line[2 * n + 1]))\n\t\t\t\n\t\tdata_array.append(line_array)\n\t\t\n\t\n\treturn data_array, time_array\n\t\t\ndef ensureDir(filename): # for example filename = \"result/file.root\"\n\td = path.dirname(filename)\n\tif not path.exists(d):\n\t\tmakedirs(d)\n\treturn filename\n\n\n","sub_path":"analysis_kanne.py","file_name":"analysis_kanne.py","file_ext":"py","file_size_in_byte":14551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"68909186","text":"# ---------------------------------------------------------------- \n# Independent Study 496, Student Stree Prediction\n#\n# file_name: dtw_.py\n# Functionality: Class, DTW_clusters: do clustering based on DTW distance\n# Author: Yunfei Luo\n# Start date: EST Mar.25th.2020\n# Last 
update: EST Apr.8th.2020\n# ----------------------------------------------------------------\n\nimport numpy as np\nimport src.experiments.clustering.density_based_clustering as dbc\n\nclass DTW_clusters:\n def __init__(self, eps, min_samples):\n self.eps = eps\n self.min_samples = min_samples\n self.random_state = 0\n self.dist_matrix = list() # 2D array of distance matrix\n self.groups = dict() # dictionary, map: pts_ind -> group\n self.pts = list()\n \n # helper functions\n def cluster_by_construct_graph(self):\n '''\n Construct graph where each node represent each data point;\n Add Edge between two nodes if their distance is below eps T;\n Collect cluster information by retrieve connected graphs.\n '''\n # helper function\n def dfs(ind, choosen, group_id):\n for i in range(len(choosen)):\n if choosen[i]:\n continue\n if self.dist_matrix[ind][i] <= self.eps:\n choosen[i] = True\n self.groups[i] = group_id\n dfs(i, choosen, group_id)\n\n # group the data points w.r.t eps\n choosen = [False for _ in range(len(self.pts))]\n group_id = -1\n while False in choosen:\n group_id += 1\n ind = choosen.index(False)\n choosen[ind] = True\n self.groups[ind] = group_id\n dfs(ind, choosen, group_id)\n \n def density_based_clustering(self):\n '''\n clusteri by DBSCAN or OPTICS (xi)\n '''\n group_assign = dict()\n #clusters = OPTICS(min_samples=2, max_eps=75, cluster_method='xi', metric='precomputed').fit(self.dist_matrix)\n clusters = dbc.density_based_clustering(eps=self.eps, min_samples=self.min_samples, cluster_method='xi', metric='precomputed').fit(self.dist_matrix)\n \n for i in range(len(self.pts)):\n self.groups[i] = clusters.labels_[i]\n try:\n group_assign[clusters.labels_[i]].append(self.pts[i])\n except:\n group_assign[clusters.labels_[i]] = [self.pts[i]]\n \n # # visualize\n # import matplotlib.pyplot as plt\n # for pt in group_assign[6]:\n # plt.plot([i[0] for i in pt], [i[1] for i in pt])\n # plt.show()\n\n def fit(self, data):\n # calculate distance matrix\n dist_matrix = list()\n for pt1 in data:\n row = list()\n for pt2 in data:\n row.append(self.dtw_dist(pt1, pt2))\n dist_matrix.append(row)\n self.dist_matrix = np.array(dist_matrix)\n self.pts = data\n\n # plt.imshow(self.dist_matrix, cmap='gray')\n # plt.show()\n \n self.density_based_clustering()\n\n return self\n \n def predict(self, pts):\n res = list()\n for pt in pts:\n group = -1\n min_dist = np.inf\n for i in range(len(self.pts)):\n curr_dist = self.dtw_dist(pt, self.pts[i])\n if curr_dist < min_dist:\n min_dist = curr_dist\n group = self.groups[i]\n res.append(group)\n return res\n\n # helper functions\n # Calculate distance between to points\n def dist(self, p1, p2):\n return np.linalg.norm(p1-p2, ord=2)\n\n # Calculate DTW distance between to series data\n def dtw_dist(self, ts1, ts2):\n DTW = dict()\n DTW[(0, 0)] = 0\n \n for i in range(len(ts1)):\n for j in range(len(ts2)):\n if i == 0 and j == 0:\n continue\n cost = self.dist(ts1[i], ts2[j])\n min_ = None\n if i - 1 >= 0 and j - 1 >= 0:\n min_ = min(DTW[(i-1, j)], DTW[(i, j-1)], DTW[(i-1, j-1)])\n elif i - 1 >= 0:\n min_ = DTW[(i-1, j)]\n elif j - 1 >= 0:\n min_ = DTW[(i, j-1)]\n DTW[(i, j)] = cost + min_\n \n return DTW[(len(ts1) - 1, len(ts2) - 1)]\n","sub_path":"src/experiments/clustering/dtw_.py","file_name":"dtw_.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"538281804","text":"from concurrent.futures import ThreadPoolExecutor, as_completed\nfrom aktash import 
AKDBG\nfrom threading import Thread\nfrom tqdm import tqdm\nimport bdb\nimport inspect\nimport sys\nimport traceback\n\ndef threader(worker_function, input_array, threads_number=10):\n\t_inq = []\n\t_outq = []\n\n\tdef _worker():\n\t\twhile len(_inq) > 0:\n\t\t\tinput_item = _inq.pop(0)\n\t\t\ttry:\n\t\t\t\tif inspect.isgeneratorfunction(worker_function):\n\t\t\t\t# use inspect to check whether the function is a generator\n\t\t\t\t\tfor output_item in worker_function(input_item, _inq):\n\t\t\t\t\t\t_outq.append(output_item)\n\t\t\t\telse:\n\t\t\t\t\t_outq.append(worker_function(input_item, _inq))\n\t\t\texcept Exception as e:\n\t\t\t\tif isinstance(e, (KeyboardInterrupt, bdb.BdbQuit)) or AKDBG:\n\t\t\t\t\traise e\n\t\t\t\texc_info = sys.exc_info()\n\t\t\t\tprint('exception in thread, input item:', input_item)\n\t\t\t\tprint('traceback: ')\n\t\t\t\ttraceback.print_exception(*exc_info)\n\t\t\t\tprint('continuing the loop')\n\t\t\t\tcontinue\n\n\tfor item in input_array:\n\t\t_inq.append(item)\n\n\tif threads_number == 1:\n\t\t# single thread - just run synchronously (e.g. for inline debugger)\n\t\t_worker()\n\n\telse:\n\t\tthreads = []\n\t\tfor i in range(threads_number):\n\t\t\tthread = Thread(target=_worker)\n\t\t\tthread.start()\n\t\t\tthreads.append(thread)\n\n\t\t[i.join() for i in threads]\n\n\treturn _outq[:]\n\n\ndef pooler(worker_function, input_array, threads=10, tqdm_=None, errors=None):\n\tif threads < 2:\n\t\tfor data in input_array:\n\t\t\tyield worker_function(*data)\n\t\treturn\n\n\tif tqdm_:\n\t\ttry:\n\t\t\tl = len(input_array)\n\t\texcept:\n\t\t\tl = None\n\t\tt = tqdm(total=l, desc=f'routing (with geometries) {threads} threads')\n\n\twith ThreadPoolExecutor(max_workers=threads) as e:\n\t\tfuture_map = {e.submit(worker_function, data): data for data in input_array}\n\t\tfor future in as_completed(future_map):\n\t\t\tif future.exception() is None:\n\t\t\t\tyield future.result(), future_map[future]\n\t\t\telif errors == 'ignore':\n\t\t\t\tyield None, future_map[future]\n\t\t\telse:\n\t\t\t\traise future.exception()\n\t\t\tif tqdm_:\n\t\t\t\tt.update()\n\n","sub_path":"aktash/threader.py","file_name":"threader.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"308062595","text":"class Node:\n\n    def __init__(self):\n        self.LeftP = int(0)\n        self.Name = ''\n        self.Mark = int(0)\n        self.RightP = int(0)\n\n    def getLeftP(self):\n        return self.LeftP\n\n    def setLeftP(self, ptr):\n        self.LeftP = ptr\n\n\n    def getName(self):\n        return self.Name\n\n    def setName(self, name):\n        self.Name = name\n\n    def getMark(self):\n        return self.Mark\n\n    def setMark(self, mark):\n        self.Mark = mark\n\n    def getRightP(self):\n        return self.RightP\n\n    def setRightP(self, ptr):\n        self.RightP = ptr\n\n#dummy node at index 0\n#starts from index 1\nResultTree = [None] + [Node() for i in range(30)]\n\n#link nodes by left pointer\nfor index in range(1, 30):\n    ResultTree[index].setLeftP(index+1)\n\n#Initialise Root and NextFree\nRoot = 0\nNextFree = 1\n\ndef validate_name(name):\n    if name.isalpha():\n        return True\n    else:\n        return False\n\ndef validate_mark(mark):\n    if str(mark).isnumeric() and int(mark) >=0 and int(mark) <= 100:\n        return True\n    else:\n        return False\n\ndef AddData(name, mark):\n    global ResultTree, Root, NextFree\n\n    if not validate_name(name) or not validate_mark(mark):\n        print('Error: Invalid name or mark')\n        return\n\n    if NextFree == 0:\n        print('Error: No free node available')\n\n    else:\n        temp = 
ResultTree[NextFree].getLeftP()\n ResultTree[NextFree].setName(name)\n ResultTree[NextFree].setMark(int(mark))\n ResultTree[NextFree].setLeftP(0)\n ResultTree[NextFree].setRightP(0)\n\n ##either add this or indent below\n Previous = 2\n LastMove = 'X'\n ###################################3\n \n if Root == 0:\n Root = NextFree\n else:\n Current = Root\n while Current != 0:\n Previous = Current\n if int(mark) < ResultTree[Current].getMark():\n ##move left\n LastMove = 'L'\n Current = ResultTree[Current].getLeftP()\n else:\n ##move right\n LastMove = 'R'\n Current = ResultTree[Current].getRightP()\n\n #either indent this or add previous and lastmove abv\n if LastMove == 'L': \n ResultTree[Previous].setLeftP(NextFree)\n else:\n ResultTree[Previous].setRightP(NextFree)\n ###################################################\n NextFree = temp\n\ndef DisplayData():\n global ResultTree, Root, NextFree\n\n print('Root: {0}'.format(Root))\n print('NextFree: {0}'.format(NextFree))\n print()\n print('{0:^10}{1:^10}{2:^20}{3:^10}{4:^10}'.format('Node', 'LeftP', 'Name', 'Mark', 'RightP'))\n\n for index in range(1,31):\n leftp = ResultTree[index].getLeftP()\n name = ResultTree[index].getName()\n mark = ResultTree[index].getMark()\n rightp = ResultTree[index].getRightP()\n print('{0:^10}{1:^10}{2:^20}{3:^10}{4:^10}'.format(index, leftp, name, mark, rightp))\n\n\ndef ConstructBT():\n infile = open('EXAM.txt','r')\n for line in infile:\n name, score = line[:-1].split(',')\n AddData(name, score)\n infile.close()\n\nConstructBT()\n\nDisplayData()\n\ndef GetLowest():\n global ResultTree\n\n if Root == 0:\n print('Error: Tree is empty')\n return\n \n Previous = 2\n Current = Root\n \n while Current != 0:\n Previous = Current\n Current = ResultTree[Current].getLeftP()\n\n lowest_score = ResultTree[Previous].getMark()\n name = ResultTree[Previous].getName()\n\n print('Student: {0}'.format(name))\n print('Lowest Mark: {0}'.format(lowest_score))\n\nGetLowest()\n\n\ndef SearchMoreEqual_func(ResultTree, Root):\n if Root != 0:\n SearchMoreEqual_func(ResultTree, ResultTree[Root].getLeftP())\n if ResultTree[Root].getMark() >= 70:\n print('Student: {0}'.format(ResultTree[Root].getName()))\n print('Mark: {0}'.format(ResultTree[Root].getMark()))\n print()\n SearchMoreEqual_func(ResultTree, ResultTree[Root].getRightP())\n\ndef SearchMoreEqual():\n global ResultTree, Root\n SearchMoreEqual_func(ResultTree, Root)\n\nSearchMoreEqual()\n \n \n \n \n \n \n \n \n \n\n \n","sub_path":"Revision Papers/05 PJC Mid Year 2015/task_3/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"525762052","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nRead a DICOM file and print protocol for publication.\n\nUsage: python dcm_protocol.py imagefile [-v]\n\n-v (optional): Verbose mode, prints all DICOM data elements\n\nWithout the -v option, a few of the most common dicom file\ndata elements are printed: some info about the patient and about\nthe image.\n\n\"\"\"\n# Copyright (c) 2008-2012 Darcy Mason\n# This file is part of pydicom, released under an MIT license.\n# See the file license.txt included with this distribution, also\n# available at http://pydicom.googlecode.com\n\nfrom __future__ import print_function\n\nimport sys\nimport dicom\n\n# check command line arguments make sense\nif not 1 < len(sys.argv) < 4:\n print(__doc__)\n sys.exit()\n\n# read the file\nfilename = sys.argv[1]\ndcm = 
dicom.read_file(filename)\n\n# Verbose mode:\nif len(sys.argv) == 3:\n    if sys.argv[2] == \"-v\": # user asked for all info\n        print(dcm)\n    else: # unknown command argument\n        print(__doc__)\n        sys.exit()\n\n# Normal mode:\nprint(\"Filename..........:\", filename)\npat_name = dcm.PatientName\ndisplay_name = pat_name.family_name + \", \" + pat_name.given_name\nprint(\"Patient name......:\", display_name, \"@\"+ dcm.InstitutionName)\n#print(\"Institution Name.:\", dcm.InstitutionName)\ntry:\n    print(\"Patient id,age,sex:\", \n          \",\".join([dcm.PatientID, dcm.PatientAge, dcm.PatientSex]))\nexcept:\n    try:\n        print(\"Patient id,age,sex:\", \n              \",\".join([dcm.PatientID, \"N/A\", dcm.PatientSex]))\n    except:\n        pass\nprint(\"Modality..........:\", dcm.PatientID + \"_\" + dcm.StudyDate + \",\" +\n      dcm.Modality.strip()+ \n      \",\" + dcm.ManufacturerModelName + \",\" + dcm.Manufacturer)\nprint(\"Study Date (Time).:\", dcm.StudyDate, \"(\"+dcm.StudyTime+\")\")\n#print(\"Model Name.......:\", dcm.ManufacturerModelName)\n#print(display_name, dcm.PatientID, dcm.Modality, dcm.StudyDate, dcm.ManufacturerModelName)\n\nif dcm.Modality == \"MR\":\n    output = (\n        \"%sT %s (%s): %s %s%s, TR = %s ms, TE = %s ms, %sflip angle = %s°, \"\n        \"FOV = %s x %s mm, slice thickness = %s mm, contiguous %s mm \"\n        \"sections, %s x %s matrix, NEX = %s, voxel size = %s x %s x %s mm\"\n        #\"\\n\\n(contiguous mm sections could be reconstructed, should be \"\n        #\"determined by the differences between Image Position 0020,0032)\\n\"\n        )\n    print(\"Protocol..........:\", output % (dcm.MagneticFieldStrength,\n        dcm.ManufacturerModelName,\n        dcm.Manufacturer,\n        dcm.MRAcquisitionType,\n        dcm.SeriesDescription,\n        \" (\"+ dcm.SequenceName + \")\" if hasattr(dcm, \"SequenceName\") \n            else \"\", \n        dcm.RepetitionTime, \n        dcm.EchoTime, \n        \"TI = \"+ dcm.InversionTime + \" ms, \" if hasattr(dcm,\n            \"InversionTime\") else \"\", \n        dcm.FlipAngle,\n        round(dcm.Columns * float(dcm.PixelSpacing[1]), 0), #FOV\n        round(dcm.Rows * float(dcm.PixelSpacing[0]), 0), #FOV\n        round(dcm.SliceThickness, 1),\n        round(dcm.SliceThickness, 1),\n        #dcm.SpacingBetweenSlices, \n        dcm.Rows,\n        dcm.Columns, \n        dcm.NumberOfAverages,\n        round(float(dcm.PixelSpacing[0]), 2),\n        round(float(dcm.PixelSpacing[1]), 2),\n        round(dcm.SliceThickness, 1)))\nif dcm.Modality == \"CT\":\n    output = (\n        \"\\n%sT %s: %s%s, TR = %s ms, TE = %s ms, flip angle = %s°, \"\n        \"FOV = %s x %s mm, slice thickness = %s mm, contiguous %s mm \"\n        \"sections, %s x %s matrix, NEX = %s, voxel size = %s x %s x %s mm\"\n        \"\\n\\n(contiguous mm sections might have been reconstructed, should\"\n        \"be determined by the differences btwn Image Position (0020,0032)\"\n        \"\\n\"\n        )\n    print(\"Protocol..........:\", output % (dcm.MagneticFieldStrength,\n        dcm.ManufacturerModelName,\n        dcm.SeriesDescription,\n        \" (\"+ dcm.SequenceName + \")\" if hasattr(dcm, \"SequenceName\") \n            else \"\", \n        dcm.RepetitionTime, \n        dcm.EchoTime, \n        dcm.FlipAngle,\n        round(dcm.Columns * float(dcm.PixelSpacing[1]), 0), #FOV\n        round(dcm.Rows * float(dcm.PixelSpacing[0]), 0), #FOV\n        round(dcm.SliceThickness, 1),\n        round(dcm.SliceThickness, 1),\n        #dcm.SpacingBetweenSlices, \n        dcm.Rows,\n        dcm.Columns, \n        dcm.NumberOfAverages,\n        round(float(dcm.PixelSpacing[0]), 2),\n        round(float(dcm.PixelSpacing[1]), 2),\n        round(dcm.SliceThickness, 1)))\n\nprint()\n","sub_path":"imaging/dcm_protocol.py","file_name":"dcm_protocol.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
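Note on the dcm_protocol.py record above: the nested try/except around the patient line swallows every error just to tolerate a missing PatientAge. A minimal, hedged sketch of an alternative follows, assuming the modern pydicom package (>= 1.0, where the module is pydicom and the reader is pydicom.dcmread; the legacy dicom.read_file call used above belongs to pre-1.0 pydicom). The helper name patient_line and the file name image.dcm are illustrative, not from the original file.

import pydicom

def patient_line(ds):
    # getattr with a default handles missing DICOM attributes without raising,
    # so no blanket try/except is needed.
    pid = getattr(ds, 'PatientID', 'N/A')
    age = getattr(ds, 'PatientAge', 'N/A')
    sex = getattr(ds, 'PatientSex', 'N/A')
    return ','.join(str(v) for v in (pid, age, sex))

# Usage sketch (hypothetical file name):
# ds = pydicom.dcmread('image.dcm')
# print('Patient id,age,sex:', patient_line(ds))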
+{"seq_id":"359734253","text":"# truncatable primes\nfrom math import sqrt\n\ndef isprime(n):\n\tif n == 1: return False\n\tif n == 2 or n == 3 or n == 5: return True\n\tif n%3 == 0 or n%5 ==0: return False\n\tfor i in range(2,int(sqrt(n))+1):\n\t\tif n%i==0: return False\n\treturn True\n\ndef all_nos(n):\n\tif isprime(n) == False: return False\n\tlim = len(str(n)) - 1\n\tans = [n]\n\tfor x in range(lim):\n\t\trem = n % 10**(x+1)\n\t\tif isprime(rem) == False: return False\n\t\tnumber = n // (10**(x+1))\n\t\tif isprime(number) == False: return False\n\treturn True\n\nno = 9\ntruncs = []\ntotal = howmany = 0\nwhile True:\n\tif all_nos(no): \n\t\ttotal += no\n\t\thowmany += 1\n\tif (howmany == 11): break\n\tno+=2\nprint (total)\n\n\n","sub_path":"euler/py/037.py","file_name":"037.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"454704322","text":"\ndef collatz(n):\n while n != 1:\n if n % 2 == 0:\n n = n // 2\n else:\n n = n * 3 + 1\n print(n)\n\nprint('Enter no')\ntry:\n collatz(int(input()))\nexcept ValueError:\n print('Expecting only integer values!')\n\n\n","sub_path":"collatzesequence.py","file_name":"collatzesequence.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"64455655","text":"import bookmarks\nimport messages\nfrom time import sleep\nfrom updater import Updater\nfrom DumbTools import DumbKeyboard\nfrom DumbTools import DumbPrefs\nfrom AuthTools import CheckAdmin\n\nTITLE = 'PrimeWire'\nPREFIX = '/video/lmwtkiss'\n\nICON = 'icon-default.png'\nART = 'art-default.jpg'\nMOVIE_ICON = 'icon-movie.png'\nTV_ICON = 'icon-tv.png'\nBOOKMARK_ADD_ICON = 'icon-add-bookmark.png'\nBOOKMARK_REMOVE_ICON = 'icon-remove-bookmark.png'\n\nBM = bookmarks.Bookmark(PREFIX, TITLE, BOOKMARK_ADD_ICON, BOOKMARK_REMOVE_ICON)\nMC = messages.NewMessageContainer(PREFIX, TITLE)\n\n####################################################################################################\ndef Start():\n\n ObjectContainer.title1 = TITLE\n\n DirectoryObject.thumb = R(ICON)\n DirectoryObject.art = R(ART)\n\n InputDirectoryObject.art = R(ART)\n\n VideoClipObject.art = R(ART)\n\n Log.Debug('*' * 80)\n Log.Debug('* Platform.OS = %s' %Platform.OS)\n Log.Debug('* Platform.OSVersion = %s' %Platform.OSVersion)\n Log.Debug('* Platform.ServerVersion = %s' %Platform.ServerVersion)\n Log.Debug('*' * 80)\n\n HTTP.CacheTime = CACHE_1HOUR\n HTTP.Headers['User-Agent'] = (\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/41.0.2272.101 Safari/537.36'\n )\n\n ValidatePrefs()\n\n####################################################################################################\n@handler(PREFIX, TITLE, thumb=ICON, art=ART)\ndef MainMenu():\n\n Log.Debug('*' * 80)\n Log.Debug('* Client.Product = %s' %Client.Product)\n Log.Debug('* Client.Platform = %s' %Client.Platform)\n Log.Debug('* Client.Version = %s' %Client.Version)\n\n admin = CheckAdmin()\n\n oc = ObjectContainer(no_cache=admin)\n\n if admin:\n Updater(PREFIX + '/updater', oc)\n\n oc.add(DirectoryObject(\n key=Callback(Section, title='Movies', type='movies'), title='Movies', thumb=R(MOVIE_ICON)\n ))\n oc.add(DirectoryObject(\n key=Callback(Section, title='TV Shows', type='tv'), title='TV Shows', thumb=R(TV_ICON)\n ))\n if not Prefs['no_bm']:\n oc.add(DirectoryObject(\n key=Callback(BookmarksMain), title='My Bookmarks', 
thumb=R('icon-bookmarks.png')\n ))\n\n if Client.Product in DumbPrefs.clients:\n DumbPrefs(PREFIX, oc, title='Preferences', thumb=R('icon-prefs.png'))\n elif admin:\n oc.add(PrefsObject(title='Preferences'))\n\n if Client.Product in DumbKeyboard.clients:\n DumbKeyboard(PREFIX, oc, Search, dktitle='Search', dkthumb=R('icon-search.png'))\n else:\n oc.add(InputDirectoryObject(\n key=Callback(Search), title='Search', prompt='Search', thumb=R('icon-search.png')\n ))\n\n return oc\n\n####################################################################################################\n@route(PREFIX + '/validateprefs')\ndef ValidatePrefs():\n \"\"\"\n Need to check urls\n if no good then block channel from running\n \"\"\"\n\n if (Prefs['pw_site_url'] != Dict['pw_site_url']) and not Prefs['custom_url']:\n Dict['pw_site_url'] = Prefs['pw_site_url']\n elif Prefs['custom_url']:\n Dict['pw_site_url'] = Prefs['pw_site_url_custom']\n Dict.Save()\n Log.Debug('*' * 80)\n\n if not Prefs['no_bm']:\n try:\n test = HTTP.Request(Dict['pw_site_url'] + '/watch-2741621-Brooklyn-Nine-Nine', cacheTime=0).headers\n Log.Debug('* \\\"%s\\\" is a valid url' %Dict['pw_site_url'])\n Log.Debug('* \\\"%s\\\" headers = %s' %(Dict['pw_site_url'], test))\n Dict['domain_test'] = 'Pass'\n except:\n Log.Debug('* \\\"%s\\\" is not a valid domain for this channel.' %Dict['pw_site_url'])\n Log.Debug('* Please pick a different URL')\n Dict['domain_test'] = 'Fail'\n else:\n try:\n test = HTTP.Request(Dict['pw_site_url'], cacheTime=0).headers\n Log.Debug('* \\\"%s\\\" headers = %s' %(Dict['pw_site_url'], test))\n Dict['domain_test'] = 'Pass'\n except:\n Log.Debug('* \\\"%s\\\" is not a valid domain for this channel.' %Dict['pw_site_url'])\n Log.Debug('* Please pick a different URL')\n Dict['domain_test'] = 'Fail'\n\n Log.Debug('*' * 80)\n Dict.Save()\n\n####################################################################################################\ndef DomainTest():\n \"\"\"Setup MessageContainer if Dict[\\'domain_test\\'] failed\"\"\"\n\n if Dict['domain_test'] == 'Fail':\n return MC.message_container('Error', error_message())\n else:\n return False\n\n####################################################################################################\ndef error_message():\n return '%s is NOT a Valid Site URL for this channel. Please pick a different Site URL.' 
%Dict['pw_site_url']\n\n####################################################################################################\ndef bm_prefs_html(url):\n if not Prefs['no_bm']:\n html = HTML.ElementFromURL(url)\n return (False, html)\n else:\n try:\n html = HTML.ElementFromURL(url)\n return (False, html)\n except:\n HTTP.ClearCache()\n Log.Error(error_message())\n return (True, MC.message_container('Error', error_message()))\n\n####################################################################################################\n@route(PREFIX + '/bookmarksmain')\ndef BookmarksMain():\n \"\"\"\n Setup Bookmark Main Menu.\n Seperate by TV or Movies\n \"\"\"\n\n bm = Dict['Bookmarks']\n\n if DomainTest() != False:\n return DomainTest()\n elif not bm:\n return MC.message_container('Bookmarks', 'Bookmarks list Empty')\n\n oc = ObjectContainer(title2='My Bookmarks', no_cache=True)\n\n for key in sorted(bm.keys()):\n if len(bm[key]) == 0:\n del Dict['Bookmarks'][key]\n Dict.Save()\n else:\n if 'TV' in key:\n thumb=R(TV_ICON)\n else:\n thumb=R(MOVIE_ICON)\n\n oc.add(DirectoryObject(\n key=Callback(BookmarksSub, category=key),\n title=key, summary='Display %s Bookmarks' %key, thumb=thumb\n ))\n\n if len(oc) > 0:\n return oc\n else:\n return MC.message_container('Bookmarks', 'Bookmark list Empty')\n\n####################################################################################################\n@route(PREFIX + '/bookmarkssub')\ndef BookmarksSub(category):\n \"\"\"List Bookmarks Alphabetically\"\"\"\n\n bm = Dict['Bookmarks']\n\n if DomainTest() != False:\n return DomainTest()\n elif not category in bm.keys():\n return MC.message_container('Error',\n '%s Bookmarks list is dirty, or no %s Bookmark list exist.' %(category, category))\n\n oc = ObjectContainer(title2='My Bookmarks | %s' %category, no_cache=True)\n\n for bookmark in sorted(bm[category], key=lambda k: k['title']):\n title = bookmark['title']\n thumb = bookmark['thumb']\n url = bookmark['url']\n category = bookmark['category']\n item_id = bookmark['id']\n\n oc.add(DirectoryObject(\n key=Callback(MediaSubPage, title=title, category=category, thumb=thumb, item_url=url, item_id=item_id),\n title=title, thumb=thumb\n ))\n\n if len(oc) > 0:\n oc.add(DirectoryObject(\n key=Callback(UpdateBMCovers, category=category), title='Update Bookmark Covers',\n summary='Some Cover URL\\'s change over time, Use this to update covers to current URL',\n thumb=R('icon-refresh.png')\n ))\n return oc\n else:\n return MC.message_container('Bookmarks', '%s Bookmarks list Empty' %category)\n\n####################################################################################################\n@route(PREFIX + '/section')\ndef Section(title, type='movies'):\n\n if DomainTest() != False:\n return DomainTest()\n\n if type == 'tv':\n rel_url = 'index.php?tv=&sort=%s'\n else:\n rel_url = 'index.php?sort=%s'\n\n oc = ObjectContainer(title2=title)\n\n oc.add(DirectoryObject(key=Callback(Media, title='Popular', rel_url=rel_url % ('views')), title='Popular'))\n oc.add(DirectoryObject(key=Callback(Media, title='Featured', rel_url=rel_url % ('featured')), title='Featured'))\n oc.add(DirectoryObject(key=Callback(Media, title='Highly Rated', rel_url=rel_url % ('ratings')), title='Highly Rated'))\n oc.add(DirectoryObject(key=Callback(Media, title='Recently Added', rel_url=rel_url % ('date')), title='Recently Added'))\n oc.add(DirectoryObject(key=Callback(Media, title='Latest Releases', rel_url=rel_url % ('release')), title='Latest Releases'))\n\n return 
oc\n\n####################################################################################################\n@route(PREFIX + '/media', page=int, search=bool)\ndef Media(title, rel_url, page=1, search=False):\n\n if DomainTest() != False:\n return DomainTest()\n\n url = Dict['pw_site_url'] + '/%s&page=%i' %(rel_url, page)\n\n if not Prefs['no_bm']:\n if Dict['pw_site_url'] != Dict['pw_site_url_old']:\n Dict['pw_site_url_old'] = Dict['pw_site_url']\n Dict.Save()\n HTTP.ClearCache()\n html = HTML.ElementFromURL(url)\n else:\n try:\n html = HTML.ElementFromURL(url)\n except:\n HTTP.ClearCache()\n Log.Error(error_message())\n return MC.message_container('Error', error_message())\n\n oc = ObjectContainer(title2=title, no_cache=True)\n\n for item in html.xpath('//div[@class=\"index_container\"]//a[contains(@href, \"/watch-\")]'):\n\n item_url = item.xpath('./@href')[0]\n item_title = item.xpath('./h2/text()')[0]\n item_thumb = item.xpath('./img/@src')[0]\n item_id = item_thumb.split('/')[-1].split('_')[0]\n\n if item_thumb.startswith('//'):\n item_thumb = 'http:%s' % (item_thumb)\n elif item_thumb.startswith('/'):\n item_thumb = 'http://%s%s' % (url.split('/')[2], item_thumb)\n\n oc.add(DirectoryObject(\n key = Callback(MediaSubPage, item_url=item_url, title=item_title, thumb=item_thumb, item_id=item_id),\n title = item_title,\n thumb = item_thumb\n ))\n\n next_check = html.xpath('//div[@class=\"pagination\"]/a[last()]/@href')\n\n if len(next_check) > 0:\n\n next_check = next_check[0].split('page=')[-1].split('&')[0]\n\n if int(next_check) > page:\n\n oc.add(NextPageObject(\n key = Callback(Media, title=title, rel_url=rel_url, page=page+1),\n title = 'More...'\n ))\n\n if len(oc) > 0:\n return oc\n elif search:\n return MC.message_container('Search',\n 'No Search results for \\\"%s\\\"' %title)\n else:\n return MC.message_container('Error',\n 'No media for \\\"%s\\\"' %title)\n\n####################################################################################################\n@route(PREFIX + '/media/subpage')\ndef MediaSubPage(title, thumb, item_url, item_id, category=None):\n \"\"\"\n Split into MediaSeason (TV) or MediaVersion (Movie)\n Include Bookmark option here\n \"\"\"\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title2=title, no_cache=True)\n\n if not item_url.startswith('http'):\n url = Dict['pw_site_url'] + item_url\n else:\n url = item_url\n\n if not category:\n t, html = bm_prefs_html(url)\n if t:\n return html\n\n category = 'TV Shows' if html.xpath('//div[@class=\"tv_container\"]') else 'Movies'\n\n if category == 'TV Shows':\n oc.add(DirectoryObject(\n key = Callback(MediaSeasons, url=url, title=title, thumb=thumb),\n title = title,\n thumb = thumb\n ))\n else:\n oc.add(DirectoryObject(\n key = Callback(MediaVersions, url=url, title=title, thumb=thumb),\n title = title,\n thumb = thumb\n ))\n\n BM.add_remove_bookmark(title, thumb, item_url, item_id, category, oc)\n\n return oc\n\n####################################################################################################\n@route(PREFIX + '/media/seasons')\ndef MediaSeasons(url, title, thumb):\n\n if DomainTest() != False:\n return DomainTest()\n\n t, html = bm_prefs_html(url)\n if t:\n return html\n\n oc = ObjectContainer(title2=title)\n\n for season in html.xpath('//div[@class=\"tv_container\"]//a[@data-id]/@data-id'):\n\n oc.add(DirectoryObject(\n key = Callback(MediaEpisodes, url=url, title='Season %s' % (season), thumb=thumb),\n title = 'Season %s' % (season),\n thumb = thumb\n ))\n\n 
return oc\n\n####################################################################################################\n@route(PREFIX + '/media/episodes')\ndef MediaEpisodes(url, title, thumb):\n\n if DomainTest() != False:\n return DomainTest()\n\n t, html = bm_prefs_html(url)\n if t:\n return html\n\n oc = ObjectContainer(title2=title)\n\n for item in html.xpath('//div[@data-id=\"%s\"]//a[contains(@href, \"/tv-\")]' % (title.split(' ')[-1])):\n\n item_title = '%s %s' % (item.xpath('.//text()')[0].strip(), item.xpath('.//text()')[1].strip().replace('’', \"'\"))\n\n if '0 links' in item_title.lower():\n continue\n\n item_url = item.xpath('./@href')[0]\n\n oc.add(DirectoryObject(\n key = Callback(MediaVersions, url=item_url, title=item_title, thumb=thumb),\n title = item_title,\n thumb = thumb\n ))\n\n return oc\n\n####################################################################################################\n@route(PREFIX + '/media/versions')\ndef MediaVersions(url, title, thumb):\n\n if DomainTest() != False:\n return DomainTest()\n elif not url.startswith('http'):\n url = Dict['pw_site_url'] + url\n\n t, html = bm_prefs_html(url)\n if t:\n return html\n\n summary = html.xpath('//meta[@name=\"description\"]/@content')[0].split(' online - ', 1)[-1].split('. Download ')[0]\n\n oc = ObjectContainer(title2=title)\n\n for ext_url in html.xpath('//a[contains(@href, \"/goto.php?\")]/@href'):\n\n url = ext_url.split('url=')[-1].split('&')[0]\n url = String.Base64Decode(url)\n\n if url.split('/')[2].replace('www.', '') in ['youtube.com']:\n continue\n\n # Trick to use the bundled Vidzi URL Service\n if 'vidzi.tv' in url:\n url = url.replace('http://', 'vidzi://')\n\n if URLService.ServiceIdentifierForURL(url) is not None:\n\n host = url.split('/')[2].replace('www.', '')\n\n oc.add(DirectoryObject(\n key = Callback(MediaPlayback, url=url, title=title),\n title = '%s - %s' % (host, title),\n summary = summary,\n thumb = thumb\n ))\n\n if len(oc) < 1:\n return MC.message_container('No Sources', 'No compatible sources found')\n else:\n return oc\n\n####################################################################################################\n@route(PREFIX + '/media/playback')\ndef MediaPlayback(url, title):\n\n if DomainTest() != False:\n return DomainTest()\n\n Log.Debug('*' * 80)\n Log.Debug('* Client.Product = %s' %Client.Product)\n Log.Debug('* Client.Platform = %s' %Client.Platform)\n Log.Debug('* MediaPlayback Title = %s' %title)\n Log.Debug('* MediaPlayback URL = %s' %url)\n Log.Debug('*' * 80)\n\n oc = ObjectContainer(title2=title)\n oc.add(URLService.MetadataObjectForURL(url))\n\n return oc\n\n####################################################################################################\n@route(PREFIX + '/media/search')\ndef Search(query=''):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title2='Search for \\\"%s\\\"' %query)\n\n c_list = [('Movies', 'index.php?search_keywords=%s'), ('TV Shows', 'index.php?tv=&search_keywords=%s')]\n\n for c, url in c_list:\n rel_url = url %(String.Quote(query, usePlus=True).lower())\n if 'TV' in c:\n thumb=R(TV_ICON)\n else:\n thumb=R(MOVIE_ICON)\n\n oc.add(DirectoryObject(\n key=Callback(Media, title=query, rel_url=rel_url, search=True),\n title=c, thumb=thumb\n ))\n\n return oc\n\n####################################################################################################\n@route(PREFIX + '/bookmarks/update/covers')\ndef UpdateBMCovers(category):\n\n bm = Dict['Bookmarks']\n bookmark_list = []\n for 
bookmark in sorted(bm[category], key=lambda k: k['title']):\n title = bookmark['title']\n thumb = bookmark['thumb']\n url = bookmark['url']\n category = bookmark['category']\n item_id = bookmark['id']\n\n bookmark_list.append(\n {'id': item_id, 'title': title, 'url': url, 'thumb': thumb, 'category': category}\n )\n\n Thread.Create(update_bm_thumb, bookmark_list=bookmark_list)\n\n return MC.message_container('Update Bookmark Covers',\n '\\\"%s\\\" Bookmark covers will be updated' %category)\n\n####################################################################################################\ndef update_bm_thumb(bookmark_list=list):\n\n for nbm in bookmark_list:\n category = nbm['category']\n item_id = nbm['id']\n item_url = nbm['url']\n\n if not item_url.startswith('http'):\n url = Dict['pw_site_url'] + item_url\n else:\n url = item_url\n\n html = HTML.ElementFromURL(url)\n Log.Debug('*' * 80)\n Log.Debug('* Updating \\\"%s\\\" Bookmark Cover' %nbm['title'])\n thumb = html.xpath('//meta[@property=\"og:image\"]/@content')[0]\n if not thumb.startswith('http'):\n thumb = 'http:' + thumb\n\n Log.Debug('* thumb = %s' %thumb)\n nbm.update({'thumb': thumb})\n\n # delete bm first so we can re-append it with new values\n bm_c = Dict['Bookmarks'][category]\n for i in xrange(len(bm_c)):\n if bm_c[i]['id'] == item_id:\n bm_c.pop(i)\n Dict.Save()\n break\n\n # now append updatd bookmark to correct category\n temp = {}\n temp.setdefault(category, Dict['Bookmarks'][category]).append(nbm)\n Dict['Bookmarks'][category] = temp[category]\n Dict.Save()\n\n timer = int(Util.RandomInt(2,5) + Util.Random())\n sleep(timer) # sleep (0-30) seconds inbetween cover updates\n\n return\n","sub_path":"Contents/Code/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":18705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"164287424","text":"#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n\n'''\nSyntax and Output tests.\n\nTODO\n- #finally\n- #filter\n- #errorCatcher\n- #echo\n- #silent\n'''\n\n\n##################################################\n# DEPENDENCIES ##\n\nimport os\nimport os.path\nimport sys\nimport unittest\nimport warnings\n\nfrom Cheetah.NameMapper import NotFound\nfrom Cheetah.Template import Template\nfrom Cheetah.Parser import ParseError\nfrom Cheetah.Compiler import DEFAULT_COMPILER_SETTINGS\nfrom Cheetah.compat import PY2\n\n\nclass Unspecified(object):\n pass\n\n\ndef testdecorator(func):\n return func\n\n\nclass DummyClass:\n _called = False\n\n def __str__(self):\n return 'object'\n\n def meth(self, arg=\"arff\"):\n return str(arg)\n\n def meth1(self, arg=\"doo\"):\n return arg\n\n def meth2(self, arg1=\"a1\", arg2=\"a2\"):\n return str(arg1) + str(arg2)\n\n def methWithPercentSignDefaultArg(self, arg1=\"110%\"):\n return str(arg1)\n\n def callIt(self, arg=1234):\n self._called = True\n self._callArg = arg\n\n\ndef dummyFunc(arg=\"Scooby\"):\n return arg\n\n\ndefaultTestNameSpace = {\n 'aStr': 'blarg',\n 'anInt': 1,\n 'aFloat': 1.5,\n 'aList': ['item0', 'item1', 'item2'],\n 'aDict': {'one': 'item1',\n 'two': 'item2',\n 'nestedDict': {1: 'nestedItem1',\n 'two': 'nestedItem2'\n },\n 'nestedFunc': dummyFunc,\n },\n 'aFunc': dummyFunc,\n 'anObj': DummyClass(),\n 'aMeth': DummyClass().meth1,\n 'aStrToBeIncluded': \"$aStr $anInt\",\n 'none': None,\n 'emptyString': '',\n 'numOne': 1,\n 'numTwo': 2,\n 'zero': 0,\n 'tenDigits': 1234567890,\n 'webSafeTest': 'abc <=> &',\n 'strip1': ' \\t strippable whitespace \\t\\t \\n',\n 
'strip2': ' \\t strippable whitespace \\t\\t ',\n 'strip3': ' \\t strippable whitespace \\t\\t\\n1 2 3\\n',\n\n 'blockToBeParsed': \"\"\"$numOne $numTwo\"\"\",\n 'includeBlock2': \"\"\"$numOne $numTwo $aSetVar\"\"\",\n\n 'includeFileName': 'parseTest.txt',\n 'listOfLambdas': [lambda x: x, lambda x: x, lambda x: x],\n 'list': [\n {'index': 0, 'numOne': 1, 'numTwo': 2},\n {'index': 1, 'numOne': 1, 'numTwo': 2},\n ],\n 'nameList': [('john', 'doe'), ('jane', 'smith')],\n 'letterList': ['a', 'b', 'c'],\n '_': lambda x: 'Translated: ' + x,\n 'unicodeData': u'aoeu12345\\u1234',\n}\n\n\n##################################################\n# TEST BASE CLASSES\n\nclass OutputTest(unittest.TestCase):\n report = '''\nTemplate output mismatch:\n\n Input Template =\n%(template)s%(end)s\n\n Expected Output =\n%(expected)s%(end)s\n\n Actual Output =\n%(actual)s%(end)s'''\n\n convertEOLs = True\n _EOLreplacement = None\n _debugEOLReplacement = False\n\n DEBUGLEV = 0\n _searchList = [defaultTestNameSpace]\n\n _useNewStyleCompilation = True\n # _useNewStyleCompilation = False\n\n _extraCompileKwArgs = None\n\n def searchList(self):\n return self._searchList\n\n def verify(self, input, expectedOutput,\n inputEncoding=None,\n outputEncoding=None,\n convertEOLs=Unspecified):\n if self._EOLreplacement:\n if convertEOLs is Unspecified:\n convertEOLs = self.convertEOLs\n if convertEOLs:\n input = input.replace('\\n', self._EOLreplacement)\n expectedOutput = expectedOutput.replace(\n '\\n', self._EOLreplacement)\n\n self._input = input\n if self._useNewStyleCompilation:\n extraKwArgs = self._extraCompileKwArgs or {}\n\n templateClass = Template.compile(\n source=input,\n compilerSettings=self._getCompilerSettings(),\n keepRefToGeneratedCode=True,\n **extraKwArgs\n )\n moduleCode = templateClass._CHEETAH_generatedModuleCode\n searchList = self.searchList() or self._searchList\n self.template = templateObj = templateClass(searchList=searchList)\n else:\n self.template = templateObj = Template(\n input,\n searchList=self.searchList(),\n compilerSettings=self._getCompilerSettings(),\n )\n moduleCode = templateObj._CHEETAH_generatedModuleCode\n if self.DEBUGLEV >= 1:\n print(\"Module: %s\" % templateObj.__module__)\n print(\n moduleCode.encode('ascii', 'backslashreplace').decode('ascii'))\n try:\n # rather than __str__, because of unicode\n output = templateObj.respond()\n assert output == expectedOutput, \\\n self._outputMismatchReport(output, expectedOutput)\n finally:\n templateObj.shutdown()\n\n def _getCompilerSettings(self):\n return {}\n\n def _outputMismatchReport(self, output, expectedOutput):\n if self._debugEOLReplacement and self._EOLreplacement:\n EOLrepl = self._EOLreplacement\n marker = '*EOL*'\n return self.report % {\n 'template': self._input.replace(EOLrepl, marker),\n 'expected': expectedOutput.replace(EOLrepl, marker),\n 'actual': output.replace(EOLrepl, marker),\n 'end': '(end)',\n }\n else:\n return self.report % {'template': self._input,\n 'expected': expectedOutput,\n 'actual': output,\n 'end': '(end)'}\n\n def genClassCode(self):\n if hasattr(self, 'template'):\n return self.template.generatedClassCode()\n\n def genModuleCode(self):\n if hasattr(self, 'template'):\n return self.template.generatedModuleCode()\n\n##################################################\n# TEST CASE CLASSES\n\n\nclass EmptyTemplate(OutputTest):\n convertEOLs = False\n\n def test1(self):\n \"\"\"an empty string for the template\"\"\"\n\n warnings.filterwarnings('error',\n 'You supplied an empty string for the source!',\n 
UserWarning)\n try:\n self.verify(\"\", \"\")\n except UserWarning:\n pass\n else:\n self.fail(\"Should warn about empty source strings.\")\n\n try:\n self.verify(\"#implements foo\", \"\")\n except NotImplementedError:\n pass\n else:\n self.fail(\n \"This should barf about respond() not being implemented.\")\n\n self.verify(\"#implements respond\", \"\")\n\n self.verify(\"#implements respond(foo=1234)\", \"\")\n\n\nclass Backslashes(OutputTest):\n convertEOLs = False\n\n def setUp(self):\n fp = open('backslashes.txt', 'w')\n fp.write(\n r'\\ #LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\"' + '\\n\\n\\n\\n\\n\\n\\n')\n fp.flush()\n fp.close()\n\n def tearDown(self):\n if os.path.exists('backslashes.txt'):\n os.remove('backslashes.txt')\n\n def test1(self):\n \"\"\" a single \\\\ using rawstrings\"\"\"\n self.verify(r\"\\ \",\n r\"\\ \")\n\n def test2(self):\n \"\"\" a single \\\\ using rawstrings and lots of lines\"\"\"\n self.verify(r\"\\ \" + \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\",\n r\"\\ \" + \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n\n def test3(self):\n \"\"\" a single \\\\ without using rawstrings\"\"\"\n self.verify(\"\\ \\ \",\n \"\\ \\ \")\n\n def test4(self):\n \"\"\" single line from an apache conf file\"\"\"\n self.verify(r'#LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\"',\n r'#LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\"')\n\n def test5(self):\n \"\"\" single line from an apache conf file with many NEWLINES\n\n The NEWLINES are used to make sure that MethodCompiler.commitStrConst()\n is handling long and short strings in the same fashion. It uses\n triple-quotes for strings with lots of \\\\n in them and repr(theStr) for\n shorter strings with only a few newlines.\"\"\"\n\n self.verify(\n r'#LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\"' + '\\n\\n\\n\\n\\n\\n\\n',\n r'#LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\"' + '\\n\\n\\n\\n\\n\\n\\n')\n\n def test6(self):\n \"\"\" test backslash handling in an included file\"\"\"\n self.verify(\n r'#include \"backslashes.txt\"',\n r'\\ #LogFormat \"%h %l %u %t \\\"%r\\\" %>s %b\"' + '\\n\\n\\n\\n\\n\\n\\n')\n\n def test7(self):\n \"\"\" a single \\\\ without using rawstrings plus many NEWLINES\"\"\"\n self.verify(\"\\ \\ \" + \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\",\n \"\\ \\ \" + \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n\n def test8(self):\n \"\"\"\n Single line from an apache conf file\n with single quotes and many NEWLINES\n \"\"\"\n\n self.verify(\n r\"\"\"#LogFormat '%h %l %u %t \\\"%r\\\" %>s %b'\"\"\" + '\\n\\n\\n\\n\\n\\n\\n',\n r\"\"\"#LogFormat '%h %l %u %t \\\"%r\\\" %>s %b'\"\"\" + '\\n\\n\\n\\n\\n\\n\\n')\n\n\nclass NonTokens(OutputTest):\n def test1(self):\n \"\"\"dollar signs not in Cheetah $vars\"\"\"\n self.verify(\"$ $$ $5 $. $ test\",\n \"$ $$ $5 $. 
$ test\")\n\n def test2(self):\n \"\"\"hash not in #directives\"\"\"\n self.verify(\"# \\# #5 \",\n \"# # #5 \")\n\n def test3(self):\n \"\"\"escapted comments\"\"\"\n self.verify(\" \\##escaped comment \",\n \" ##escaped comment \")\n\n def test4(self):\n \"\"\"escapted multi-line comments\"\"\"\n self.verify(\" \\#*escaped comment \\n*# \",\n \" #*escaped comment \\n*# \")\n\n def test5(self):\n \"\"\"1 dollar sign\"\"\"\n self.verify(\"$\",\n \"$\")\n\n def _X_test6(self):\n \"\"\"1 dollar sign followed by hash\"\"\"\n self.verify(\"\\n$#\\n\",\n \"\\n$#\\n\")\n\n def test6(self):\n \"\"\"1 dollar sign followed by EOL Slurp Token\"\"\"\n if DEFAULT_COMPILER_SETTINGS['EOLSlurpToken']:\n self.verify(\"\\n$%s\\n\" % DEFAULT_COMPILER_SETTINGS['EOLSlurpToken'],\n \"\\n$\")\n else:\n self.verify(\"\\n$#\\n\",\n \"\\n$#\\n\")\n\n\nclass Comments_SingleLine(OutputTest):\n def test1(self):\n \"\"\"## followed by WS\"\"\"\n self.verify(\"## \",\n \"\")\n\n def test2(self):\n \"\"\"## followed by NEWLINE\"\"\"\n self.verify(\"##\\n\",\n \"\")\n\n def test3(self):\n \"\"\"## followed by text then NEWLINE\"\"\"\n self.verify(\"## oeuao aoe uaoe \\n\",\n \"\")\n\n def test4(self):\n \"\"\"## gobbles leading WS\"\"\"\n self.verify(\" ## oeuao aoe uaoe \\n\",\n \"\")\n\n def test5(self):\n \"\"\"## followed by text then NEWLINE, + leading WS\"\"\"\n self.verify(\" ## oeuao aoe uaoe \\n\",\n \"\")\n\n def test6(self):\n \"\"\"## followed by EOF\"\"\"\n self.verify(\"##\",\n \"\")\n\n def test7(self):\n \"\"\"## followed by EOF with leading WS\"\"\"\n self.verify(\" ##\",\n \"\")\n\n def test8(self):\n \"\"\"## gobble line\n with text on previous and following lines\"\"\"\n self.verify(\"line1\\n ## aoeu 1234 \\nline2\",\n \"line1\\nline2\")\n\n def test9(self):\n \"\"\"## don't gobble line\n with text on previous and following lines\"\"\"\n self.verify(\"line1\\n 12 ## aoeu 1234 \\nline2\",\n \"line1\\n 12 \\nline2\")\n\n def test10(self):\n \"\"\"## containing $placeholders\n \"\"\"\n self.verify(\"##$a$b $c($d)\",\n \"\")\n\n def test11(self):\n \"\"\"## containing #for directive\n \"\"\"\n self.verify(\"##for $i in range(15)\",\n \"\")\n\n\nclass Comments_MultiLine_NoGobble(OutputTest):\n \"\"\"Multiline comments used to not gobble whitespace\n\n They do now, but this can be turned off with a compilerSetting.\n \"\"\"\n def _getCompilerSettings(self):\n return {'gobbleWhitespaceAroundMultiLineComments': False}\n\n def test1(self):\n \"\"\"#* *# followed by WS\n Shouldn't gobble WS\n \"\"\"\n self.verify(\"#* blarg *# \",\n \" \")\n\n def test2(self):\n \"\"\"#* *# preceded and followed by WS\n Shouldn't gobble WS\n \"\"\"\n self.verify(\" #* blarg *# \",\n \" \")\n\n def test3(self):\n \"\"\"#* *# followed by WS, with NEWLINE\n Shouldn't gobble WS\n \"\"\"\n self.verify(\"#* \\nblarg\\n *# \",\n \" \")\n\n def test4(self):\n \"\"\"#* *# preceded and followed by WS, with NEWLINE\n Shouldn't gobble WS\n \"\"\"\n self.verify(\" #* \\nblarg\\n *# \",\n \" \")\n\n\nclass Comments_MultiLine(OutputTest):\n \"\"\"\n Note: Multiline comments don't gobble whitespace!\n \"\"\"\n\n def test1(self):\n \"\"\"#* *# followed by WS\n Should gobble WS\n \"\"\"\n self.verify(\"#* blarg *# \",\n \"\")\n\n def test2(self):\n \"\"\"#* *# preceded and followed by WS\n Should gobble WS\n \"\"\"\n self.verify(\" #* blarg *# \",\n \"\")\n\n def test3(self):\n \"\"\"#* *# followed by WS, with NEWLINE\n Shouldn't gobble WS\n \"\"\"\n self.verify(\"#* \\nblarg\\n *# \",\n \"\")\n\n def test4(self):\n \"\"\"#* *# preceded and 
followed by WS, with NEWLINE\n Shouldn't gobble WS\n \"\"\"\n self.verify(\" #* \\nblarg\\n *# \",\n \" \")\n\n\nclass Comments_MultiLine(OutputTest):\n \"\"\"\n Note: Multiline comments gobble surrounding whitespace by default;\n see Comments_MultiLine_NoGobble for the compilerSetting that disables this.\n \"\"\"\n\n def test1(self):\n \"\"\"#* *# followed by WS\n Should gobble WS\n \"\"\"\n self.verify(\"#* blarg *# \",\n \"\")\n\n def test2(self):\n \"\"\"#* *# preceded and followed by WS\n Should gobble WS\n \"\"\"\n self.verify(\" #* blarg *# \",\n \"\")\n\n def test3(self):\n \"\"\"#* *# followed by WS, with NEWLINE\n Should gobble WS\n \"\"\"\n self.verify(\"#* \\nblarg\\n *# \",\n \"\")\n\n def test4(self):\n \"\"\"#* *# preceded and followed by WS, with NEWLINE\n Should gobble WS\n \"\"\"\n self.verify(\" #* \\nblarg\\n *# \",\n \"\")\n\n def test5(self):\n \"\"\"#* *# containing nothing\n \"\"\"\n self.verify(\"#**#\",\n \"\")\n\n def test6(self):\n \"\"\"#* *# containing only NEWLINES\n \"\"\"\n self.verify(\" #*\\n\\n\\n\\n\\n\\n\\n\\n*# \",\n \"\")\n\n def test7(self):\n \"\"\"#* *# containing $placeholders\n \"\"\"\n self.verify(\"#* $var $var(1234*$c) *#\",\n \"\")\n\n def test8(self):\n \"\"\"#* *# containing #for directive\n \"\"\"\n self.verify(\"#* #for $i in range(15) *#\",\n \"\")\n\n def test9(self):\n \"\"\" text around #* *# containing #for directive\n \"\"\"\n self.verify(\"foo\\nfoo bar #* #for $i in range(15) *# foo\\n\",\n \"foo\\nfoo bar foo\\n\")\n\n def test10(self):\n \"\"\" text around #* *# containing #for directive and trailing whitespace\n which should be gobbled\n \"\"\"\n self.verify(\"foo\\nfoo bar #* #for $i in range(15) *# \\ntest\",\n \"foo\\nfoo bar \\ntest\")\n\n def test11(self):\n \"\"\"\n Text around #* *# containing #for directive and newlines:\n trailing whitespace which should be gobbled.\n \"\"\"\n self.verify(\"foo\\nfoo bar #* \\n\\n#for $i in range(15) \\n\\n*# \\ntest\",\n \"foo\\nfoo bar \\ntest\")\n\n\nclass Placeholders(OutputTest):\n def test1(self):\n \"\"\"1 placeholder\"\"\"\n self.verify(\"$aStr\", \"blarg\")\n\n def test2(self):\n \"\"\"2 placeholders\"\"\"\n self.verify(\"$aStr $anInt\", \"blarg 1\")\n\n def test3(self):\n \"\"\"2 placeholders, back-to-back\"\"\"\n self.verify(\"$aStr$anInt\", \"blarg1\")\n\n def test4(self):\n \"\"\"1 placeholder enclosed in ()\"\"\"\n self.verify(\"$(aStr)\", \"blarg\")\n\n def test5(self):\n \"\"\"1 placeholder enclosed in {}\"\"\"\n self.verify(\"${aStr}\", \"blarg\")\n\n def test6(self):\n \"\"\"1 placeholder enclosed in []\"\"\"\n self.verify(\"$[aStr]\", \"blarg\")\n\n def test7(self):\n \"\"\"1 placeholder enclosed in () + WS\n\n Test to make sure that $(.. matches\n \"\"\"\n self.verify(\"$( aStr )\", \"blarg\")\n\n def test8(self):\n \"\"\"1 placeholder enclosed in {} + WS\"\"\"\n self.verify(\"${ aStr }\", \"blarg\")\n\n def test9(self):\n \"\"\"1 placeholder enclosed in [] + WS\"\"\"\n self.verify(\"$[ aStr ]\", \"blarg\")\n\n def test10(self):\n \"\"\"1 placeholder enclosed in () + WS + * cache\n\n Test to make sure that $*(.. 
matches\n \"\"\"\n self.verify(\"$*( aStr )\", \"blarg\")\n\n def test11(self):\n \"\"\"1 placeholder enclosed in {} + WS + *cache\"\"\"\n self.verify(\"$*{ aStr }\", \"blarg\")\n\n def test12(self):\n \"\"\"1 placeholder enclosed in [] + WS + *cache\"\"\"\n self.verify(\"$*[ aStr ]\", \"blarg\")\n\n def test13(self):\n \"\"\"1 placeholder enclosed in {} + WS + **cache\"\"\"\n self.verify(\"$*5*{ aStr }\", \"blarg\")\n\n def test14(self):\n \"\"\"1 placeholder enclosed in [] + WS + **cache\"\"\"\n self.verify(\"$*5*[ aStr ]\", \"blarg\")\n\n def test15(self):\n \"\"\"1 placeholder enclosed in {} + WS + **cache\"\"\"\n self.verify(\"$*0.5d*{ aStr }\", \"blarg\")\n\n def test16(self):\n \"\"\"1 placeholder enclosed in [] + WS + **cache\"\"\"\n self.verify(\"$*.5*[ aStr ]\", \"blarg\")\n\n def test17(self):\n \"\"\"1 placeholder + **cache\"\"\"\n self.verify(\"$*5*aStr\", \"blarg\")\n\n def test18(self):\n \"\"\"1 placeholder **cache\"\"\"\n self.verify(\"$*0.5h*aStr\", \"blarg\")\n\n def test19(self):\n \"\"\"1 placeholder surrounded by single quotes and multiple newlines\"\"\"\n self.verify(\"\"\"'\\n\\n\\n\\n'$aStr'\\n\\n\\n\\n'\"\"\",\n \"\"\"'\\n\\n\\n\\n'blarg'\\n\\n\\n\\n'\"\"\")\n\n def test20(self):\n \"\"\"silent mode $!placeholders \"\"\"\n self.verify(\n \"$!aStr$!nonExistant$!*nonExistant$!{nonExistant}\", \"blarg\")\n\n try:\n self.verify(\"$!aStr$nonExistant\", \"blarg\")\n except NotFound:\n pass\n else:\n self.fail('should raise NotFound exception')\n\n def test21(self):\n \"\"\"Make sure that $*caching is actually working\"\"\"\n namesStr = 'You Me Them Everyone'\n names = namesStr.split()\n\n tmpl = Template.compile('#for name in $names: $name ', baseclass=dict)\n assert str(tmpl({'names': names})).strip() == namesStr\n\n tmpl = tmpl.subclass('#for name in $names: $*name ')\n assert str(tmpl({'names': names})) == 'You ' * len(names)\n\n tmpl = tmpl.subclass('#for name in $names: $*1*name ')\n assert str(tmpl({'names': names})) == 'You ' * len(names)\n\n tmpl = tmpl.subclass('#for name in $names: $*1*(name) ')\n assert str(tmpl({'names': names})) == 'You ' * len(names)\n\n tmpl = tmpl.subclass('#for name in $names: $*1*(name) ')\n assert str(tmpl(names=names)) == 'You ' * len(names)\n\n\nclass Placeholders_Vals(OutputTest):\n convertEOLs = False\n\n def test1(self):\n \"\"\"string\"\"\"\n self.verify(\"$aStr\", \"blarg\")\n\n def test2(self):\n \"\"\"string - with whitespace\"\"\"\n self.verify(\" $aStr \", \" blarg \")\n\n def test3(self):\n \"\"\"empty string - with whitespace\"\"\"\n self.verify(\"$emptyString\", \"\")\n\n def test4(self):\n \"\"\"int\"\"\"\n self.verify(\"$anInt\", \"1\")\n\n def test5(self):\n \"\"\"float\"\"\"\n self.verify(\"$aFloat\", \"1.5\")\n\n def test6(self):\n \"\"\"list\"\"\"\n self.verify(\"$aList\", \"['item0', 'item1', 'item2']\")\n\n def test7(self):\n \"\"\"None\n\n The default output filter is ReplaceNone.\n \"\"\"\n self.verify(\"$none\", \"\")\n\n def test8(self):\n \"\"\"True, False\n \"\"\"\n self.verify(\"$True $False\", \"%s %s\" % (repr(True), repr(False)))\n\n def test9(self):\n \"\"\"$_\n \"\"\"\n self.verify(\"$_('foo')\", \"Translated: foo\")\n\n\nclass PlaceholderStrings(OutputTest):\n def test1(self):\n \"\"\"some c'text $placeholder text' strings\"\"\"\n self.verify(\"$str(c'$aStr')\", \"blarg\")\n\n def test2(self):\n \"\"\"some c'text $placeholder text' strings\"\"\"\n self.verify(\"$str(c'$aStr.upper')\", \"BLARG\")\n\n def test3(self):\n \"\"\"some c'text $placeholder text' strings\"\"\"\n self.verify(\n 
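# c'...' literals let $placeholders interpolate inside argument strings;\n # the inner c-string below evaluates to 'A', so 'BLARG'.replace('A', '')\n # leaves 'BLRG'\n 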
\"$str(c'$(aStr.upper.replace(c\\\"A$str()\\\",\\\"\\\"))')\", \"BLRG\")\n\n def test4(self):\n \"\"\"some c'text $placeholder text' strings\"\"\"\n self.verify(\"#echo $str(c'$(aStr.upper)')\", \"BLARG\")\n\n def test5(self):\n \"\"\"some c'text $placeholder text' strings\"\"\"\n self.verify(\"#if 1 then $str(c'$(aStr.upper)') else 0\", \"BLARG\")\n\n def test6(self):\n \"\"\"some c'text $placeholder text' strings\"\"\"\n self.verify(\n \"#if 1\\n$str(c'$(aStr.upper)')#slurp\\n#else\\n0#end if\", \"BLARG\")\n\n def test7(self):\n \"\"\"some c'text $placeholder text' strings\"\"\"\n self.verify(\"#def foo(arg=c'$(\\\"BLARG\\\")')\\n\"\n \"$arg#slurp\\n\"\n \"#end def\\n\"\n \"$foo()$foo(c'$anInt')#slurp\",\n\n \"BLARG1\")\n\n\nclass UnicodeStrings(OutputTest):\n def test1(self):\n \"\"\"unicode data in placeholder\n \"\"\"\n # self.verify(u\"$unicodeData\", defaultTestNameSpace['unicodeData'],\n # outputEncoding='utf8')\n self.verify(u\"$unicodeData\", defaultTestNameSpace['unicodeData'])\n\n def test2(self):\n \"\"\"unicode data in body\n \"\"\"\n self.verify(u\"aoeu12345\\u1234\", u\"aoeu12345\\u1234\")\n # self.verify(u\"#encoding utf8#aoeu12345\\u1234\", u\"aoeu12345\\u1234\")\n\n\nclass EncodingDirective(OutputTest):\n def test1(self):\n \"\"\"basic #encoding \"\"\"\n self.verify(\"#encoding utf-8\\n1234\",\n \"1234\")\n\n def test2(self):\n \"\"\"basic #encoding \"\"\"\n self.verify(\"#encoding ascii\\n1234\",\n \"1234\")\n\n def test3(self):\n \"\"\"basic #encoding \"\"\"\n source = b\"#encoding utf-8\\n\\xe1\\x88\\xb4\"\n if not PY2:\n source = source.decode('utf-8')\n self.verify(source,\n u'\\u1234', outputEncoding='utf8')\n\n def test4(self):\n \"\"\"basic #encoding \"\"\"\n source = b\"#encoding latin-1\\n\\xe1\\x88\\xb4\"\n if not PY2:\n source = source.decode('latin-1')\n self.verify(source,\n u\"\\xe1\\x88\\xb4\")\n\n def test5(self):\n \"\"\"basic #encoding \"\"\"\n self.verify(\"#encoding latin-1\\nAndr\\202\",\n u'Andr\\202')\n\n def test6(self):\n '''Using #encoding on the second line'''\n source = b\"\"\"### Comments on the first line\n#encoding utf-8\\n\\xe1\\x88\\xb4\"\"\"\n if not PY2:\n source = source.decode('utf-8')\n self.verify(source,\n u'\\u1234', outputEncoding='utf8')\n\n\nclass UnicodeDirective(OutputTest):\n def test1(self):\n \"\"\"basic #unicode \"\"\"\n self.verify(\"#unicode utf-8\\n1234\",\n u\"1234\")\n\n self.verify(\"#unicode ascii\\n1234\",\n u\"1234\")\n\n self.verify(\"#unicode latin-1\\n1234\",\n u\"1234\")\n\n self.verify(\"#unicode latin-1\\n1234?\",\n u\"1234?\")\n self.verify(\"#unicode: latin-1\\n1234?\",\n u\"1234?\")\n self.verify(\"# unicode : latin-1\\n1234?\",\n u\"1234?\")\n\n self.verify(u\"#unicode latin-1\\n1234?\",\n u\"1234?\")\n\n self.verify(\"#encoding latin-1\\n1234?\",\n u\"1234?\")\n\n\nclass Placeholders_Esc(OutputTest):\n convertEOLs = False\n\n def test1(self):\n \"\"\"1 escaped placeholder\"\"\"\n self.verify(\"\\$var\",\n \"$var\")\n\n def test2(self):\n \"\"\"2 escaped placeholders\"\"\"\n self.verify(\"\\$var \\$_\",\n \"$var $_\")\n\n def test3(self):\n \"\"\"2 escaped placeholders - back to back\"\"\"\n self.verify(\"\\$var\\$_\",\n \"$var$_\")\n\n def test4(self):\n \"\"\"2 escaped placeholders - nested\"\"\"\n self.verify(\"\\$var(\\$_)\",\n \"$var($_)\")\n\n def test5(self):\n \"\"\"2 escaped placeholders - nested and enclosed\"\"\"\n self.verify(\"\\$(var(\\$_)\",\n \"$(var($_)\")\n\n\nclass Placeholders_Calls(OutputTest):\n def test1(self):\n \"\"\"func placeholder - no ()\"\"\"\n self.verify(\"$aFunc\",\n 
\"Scooby\")\n\n def test2(self):\n \"\"\"func placeholder - with ()\"\"\"\n self.verify(\"$aFunc()\",\n \"Scooby\")\n\n def test3(self):\n r\"\"\"func placeholder - with (\\n\\n)\"\"\"\n self.verify(\"$aFunc(\\n\\n)\",\n \"Scooby\", convertEOLs=False)\n\n def test4(self):\n r\"\"\"func placeholder - with (\\n\\n) and $() enclosure\"\"\"\n self.verify(\"$(aFunc(\\n\\n))\",\n \"Scooby\", convertEOLs=False)\n\n def test5(self):\n r\"\"\"func placeholder - with (\\n\\n) and ${} enclosure\"\"\"\n self.verify(\"${aFunc(\\n\\n)}\",\n \"Scooby\", convertEOLs=False)\n\n def test6(self):\n \"\"\"func placeholder - with (int)\"\"\"\n self.verify(\"$aFunc(1234)\",\n \"1234\")\n\n def test7(self):\n r\"\"\"func placeholder - with (\\nint\\n)\"\"\"\n self.verify(\"$aFunc(\\n1234\\n)\",\n \"1234\", convertEOLs=False)\n\n def test8(self):\n \"\"\"func placeholder - with (string)\"\"\"\n self.verify(\"$aFunc('aoeu')\",\n \"aoeu\")\n\n def test9(self):\n \"\"\"func placeholder - with ('''string''')\"\"\"\n self.verify(\"$aFunc('''aoeu''')\",\n \"aoeu\")\n\n def test10(self):\n r\"\"\"func placeholder - with ('''\\nstring\\n''')\"\"\"\n self.verify(\"$aFunc('''\\naoeu\\n''')\",\n \"\\naoeu\\n\", convertEOLs=False)\n\n def test11(self):\n r\"\"\"func placeholder - with ('''\\nstring'\\n''')\"\"\"\n self.verify(\"$aFunc('''\\naoeu'\\n''')\",\n \"\\naoeu'\\n\", convertEOLs=False)\n\n def test12(self):\n r'''func placeholder - with (\"\"\"\\nstring\\n\"\"\")'''\n self.verify('$aFunc(\"\"\"\\naoeu\\n\"\"\")',\n \"\\naoeu\\n\", convertEOLs=False)\n\n def test13(self):\n \"\"\"func placeholder - with (string*int)\"\"\"\n self.verify(\"$aFunc('aoeu'*2)\",\n \"aoeuaoeu\")\n\n def test14(self):\n \"\"\"func placeholder - with (int*int)\"\"\"\n self.verify(\"$aFunc(2*2)\",\n \"4\")\n\n def test15(self):\n \"\"\"func placeholder - with (int*float)\"\"\"\n self.verify(\"$aFunc(2*2.0)\",\n \"4.0\")\n\n def test16(self):\n r\"\"\"func placeholder - with (int\\n*\\nfloat)\"\"\"\n self.verify(\"$aFunc(2\\n*\\n2.0)\",\n \"4.0\", convertEOLs=False)\n\n def test17(self):\n \"\"\"func placeholder - with ($arg=float)\"\"\"\n self.verify(\"$aFunc($arg=4.0)\",\n \"4.0\")\n\n def test18(self):\n \"\"\"func placeholder - with (arg=float)\"\"\"\n self.verify(\"$aFunc(arg=4.0)\",\n \"4.0\")\n\n def test19(self):\n \"\"\"deeply nested argstring, no enclosure\"\"\"\n self.verify(\"$aFunc($arg=$aMeth($arg=$aFunc(1)))\",\n \"1\")\n\n def test20(self):\n \"\"\"deeply nested argstring, no enclosure + with WS\"\"\"\n self.verify(\"$aFunc( $arg = $aMeth( $arg = $aFunc( 1 ) ) )\",\n \"1\")\n\n def test21(self):\n \"\"\"deeply nested argstring, () enclosure + with WS\"\"\"\n self.verify(\"$(aFunc( $arg = $aMeth( $arg = $aFunc( 1 ) ) ) )\",\n \"1\")\n\n def test22(self):\n \"\"\"deeply nested argstring, {} enclosure + with WS\"\"\"\n self.verify(\"${aFunc( $arg = $aMeth( $arg = $aFunc( 1 ) ) ) }\",\n \"1\")\n\n def test23(self):\n \"\"\"deeply nested argstring, [] enclosure + with WS\"\"\"\n self.verify(\"$[aFunc( $arg = $aMeth( $arg = $aFunc( 1 ) ) ) ]\",\n \"1\")\n\n def test24(self):\n \"\"\"deeply nested argstring, () enclosure + *cache\"\"\"\n self.verify(\"$*(aFunc( $arg = $aMeth( $arg = $aFunc( 1 ) ) ) )\",\n \"1\")\n\n def test25(self):\n \"\"\"deeply nested argstring, () enclosure + *15*cache\"\"\"\n self.verify(\"$*15*(aFunc( $arg = $aMeth( $arg = $aFunc( 1 ) ) ) )\",\n \"1\")\n\n def test26(self):\n \"\"\"a function call with the Python None kw.\"\"\"\n self.verify(\"$aFunc(None)\",\n \"\")\n\n\nclass NameMapper(OutputTest):\n def 
test1(self):\n \"\"\"autocalling\"\"\"\n self.verify(\"$aFunc! $aFunc().\",\n \"Scooby! Scooby.\")\n\n def test2(self):\n \"\"\"nested autocalling\"\"\"\n self.verify(\"$aFunc($aFunc).\",\n \"Scooby.\")\n\n def test3(self):\n \"\"\"list subscription\"\"\"\n self.verify(\"$aList[0]\",\n \"item0\")\n\n def test4(self):\n \"\"\"list slicing\"\"\"\n self.verify(\"$aList[:2]\",\n \"['item0', 'item1']\")\n\n def test5(self):\n \"\"\"list slicing and subscription combined\"\"\"\n self.verify(\"$aList[:2][0]\",\n \"item0\")\n\n def test6(self):\n \"\"\"dictionary access - NameMapper style\"\"\"\n self.verify(\"$aDict.one\",\n \"item1\")\n\n def test7(self):\n \"\"\"dictionary access - Python style\"\"\"\n self.verify(\"$aDict['one']\",\n \"item1\")\n\n def test8(self):\n \"\"\"dictionary access combined with autocalled string method\"\"\"\n self.verify(\"$aDict.one.upper\",\n \"ITEM1\")\n\n def test9(self):\n \"\"\"dictionary access combined with string method\"\"\"\n self.verify(\"$aDict.one.upper()\",\n \"ITEM1\")\n\n def test10(self):\n \"\"\"nested dictionary access - NameMapper style\"\"\"\n self.verify(\"$aDict.nestedDict.two\",\n \"nestedItem2\")\n\n def test11(self):\n \"\"\"nested dictionary access - Python style\"\"\"\n self.verify(\"$aDict['nestedDict']['two']\",\n \"nestedItem2\")\n\n def test12(self):\n \"\"\"nested dictionary access - alternating style\"\"\"\n self.verify(\"$aDict['nestedDict'].two\",\n \"nestedItem2\")\n\n def test13(self):\n \"\"\"nested dictionary access using method - alternating style\"\"\"\n self.verify(\"$aDict.get('nestedDict').two\",\n \"nestedItem2\")\n\n def test14(self):\n \"\"\"nested dictionary access - NameMapper style - followed by method\"\"\"\n self.verify(\"$aDict.nestedDict.two.upper\",\n \"NESTEDITEM2\")\n\n def test15(self):\n \"\"\"nested dictionary access - alternating style - followed by method\"\"\"\n self.verify(\"$aDict['nestedDict'].two.upper\",\n \"NESTEDITEM2\")\n\n def test16(self):\n \"\"\"\n Nested dictionary access - NameMapper style - followed by method,\n then slice.\n \"\"\"\n self.verify(\"$aDict.nestedDict.two.upper[:4]\",\n \"NEST\")\n\n def test17(self):\n \"\"\"nested dictionary access - Python style using a soft-coded key\"\"\"\n self.verify(\"$aDict[$anObj.meth('nestedDict')].two\",\n \"nestedItem2\")\n\n def test18(self):\n \"\"\"object method access\"\"\"\n self.verify(\"$anObj.meth1\",\n \"doo\")\n\n def test19(self):\n \"\"\"object method access, followed by complex slice\"\"\"\n self.verify(\"$anObj.meth1[0: ((4//4*2)*2)//$anObj.meth1(2) ]\",\n \"do\")\n\n def test20(self):\n \"\"\"object method access, followed by a very complex slice\n If it can pass this one, it's safe to say it works!!\"\"\"\n self.verify(\n \"$( anObj.meth1[0:\\n (\\n(4//4*2)*2)//$anObj.meth1(2)\\n ] )\",\n \"do\")\n\n def test21(self):\n \"\"\"object method access with % in the default arg for the meth.\n\n This tests a bug that Jeff Johnson found and submitted a patch to SF\n for.\"\"\"\n\n self.verify(\"$anObj.methWithPercentSignDefaultArg\",\n \"110%\")\n\n\n# class NameMapperDict(OutputTest):\n#\n# _searchList = [{\"update\": \"Yabba dabba doo!\"}]\n#\n# def test1(self):\n# if NameMapper_C_VERSION:\n# return # This feature is not in the C version yet.\n# self.verify(\"$update\", \"Yabba dabba doo!\")\n#\n\nclass CacheDirective(OutputTest):\n\n def test1(self):\n r\"\"\"simple #cache \"\"\"\n self.verify(\"#cache:$anInt\",\n \"1\")\n\n def test2(self):\n r\"\"\"simple #cache + WS\"\"\"\n self.verify(\" #cache \\n$anInt#end cache\",\n 
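# the first render fills the cache, so the cached region still produces\n # output here; the whitespace around the directive lines is gobbled\n 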
\"1\")\n\n def test3(self):\n r\"\"\"simple #cache ... #end cache\"\"\"\n self.verify(\"\"\"#cache id='cache1', timer=150m\n$anInt\n#end cache\n$aStr\"\"\",\n \"1\\nblarg\")\n\n def test4(self):\n r\"\"\"2 #cache ... #end cache blocks\"\"\"\n self.verify(\"\"\"#slurp\n#def foo\n#cache ID='cache1', timer=150m\n$anInt\n#end cache\n#cache id='cache2', timer=15s\n #for $i in range(5)\n$i#slurp\n #end for\n#end cache\n$aStr#slurp\n#end def\n$foo$foo$foo$foo$foo\"\"\",\n \"1\\n01234blarg\"*5) # noqa: E226,E501 missing whitespace around operator\n\n def test5(self):\n r\"\"\"nested #cache blocks\"\"\"\n self.verify(\"\"\"#slurp\n#def foo\n#cache ID='cache1', timer=150m\n$anInt\n#cache id='cache2', timer=15s\n #for $i in range(5)\n$i#slurp\n #end for\n$*(6)#slurp\n#end cache\n#end cache\n$aStr#slurp\n#end def\n$foo$foo$foo$foo$foo\"\"\",\n \"1\\n012346blarg\"*5) # noqa: E226,E501 missing whitespace around operator\n\n def test6(self):\n r\"\"\"Make sure that partial directives don't match\"\"\"\n self.verify(\"#cache_foo\",\n \"#cache_foo\")\n self.verify(\"#cached\",\n \"#cached\")\n\n\nclass CallDirective(OutputTest):\n\n def test1(self):\n r\"\"\"simple #call \"\"\"\n self.verify(\"#call int\\n$anInt#end call\",\n \"1\")\n # single line version\n self.verify(\"#call int: $anInt\",\n \"1\")\n self.verify(\"#call int: 10\\n$aStr\",\n \"10\\nblarg\")\n\n def test2(self):\n r\"\"\"simple #call + WS\"\"\"\n self.verify(\"#call int\\n$anInt #end call\",\n \"1\")\n\n def test3(self):\n r\"\"\"a longer #call\"\"\"\n self.verify('''\\\n#def meth(arg)\n$arg.upper()#slurp\n#end def\n#call $meth\n$(1234+1) foo#slurp\n#end call''',\n \"1235 FOO\")\n\n def test4(self):\n r\"\"\"#call with keyword #args\"\"\"\n self.verify('''\\\n#def meth(arg1, arg2)\n$arg1.upper() - $arg2.lower()#slurp\n#end def\n#call self.meth\n#arg arg1\n$(1234+1) foo#slurp\n#arg arg2\nUPPER#slurp\n#end call''',\n \"1235 FOO - upper\")\n\n def test5(self):\n r\"\"\"#call with single-line keyword #args \"\"\"\n self.verify('''\\\n#def meth(arg1, arg2)\n$arg1.upper() - $arg2.lower()#slurp\n#end def\n#call self.meth\n#arg arg1:$(1234+1) foo#slurp\n#arg arg2:UPPER#slurp\n#end call''',\n \"1235 FOO - upper\")\n\n def test6(self):\n \"\"\"#call with python kwargs and cheetah output for the 1s positional\n arg\"\"\"\n\n self.verify('''\\\n#def meth(arg1, arg2)\n$arg1.upper() - $arg2.lower()#slurp\n#end def\n#call self.meth arg2=\"UPPER\"\n$(1234+1) foo#slurp\n#end call''',\n \"1235 FOO - upper\")\n\n def test7(self):\n \"\"\"#call with python kwargs and #args\"\"\"\n self.verify('''\\\n#def meth(arg1, arg2, arg3)\n$arg1.upper() - $arg2.lower() - $arg3#slurp\n#end def\n#call self.meth arg2=\"UPPER\", arg3=999\n#arg arg1:$(1234+1) foo#slurp\n#end call''',\n \"1235 FOO - upper - 999\")\n\n def test8(self):\n \"\"\"#call with python kwargs and #args, and using a function to get the\n function that will be called\"\"\"\n self.verify('''\\\n#def meth(arg1, arg2, arg3)\n$arg1.upper() - $arg2.lower() - $arg3#slurp\n#end def\n#call getattr(self, \"meth\") arg2=\"UPPER\", arg3=999\n#arg arg1:$(1234+1) foo#slurp\n#end call''',\n \"1235 FOO - upper - 999\")\n\n def test9(self):\n \"\"\"nested #call directives\"\"\"\n self.verify('''\\\n#def meth(arg1)\n$arg1#slurp\n#end def\n#def meth2(x,y)\n$x$y#slurp\n#end def\n##\n#call self.meth\n1#slurp\n#call self.meth\n2#slurp\n#call self.meth\n3#slurp\n#end call 3\n#set two = 2\n#call self.meth2 y=c\"$(10//$two)\"\n#arg x\n4#slurp\n#end call 4\n#end call 2\n#end call 1''',\n \"12345\")\n\n\nclass 
I18nDirective(OutputTest):\n def test1(self):\n r\"\"\"simple #call \"\"\"\n self.verify(\"#i18n \\n$anInt#end i18n\",\n \"1\")\n\n # single line version\n self.verify(\"#i18n: $anInt\",\n \"1\")\n self.verify(\"#i18n: 10\\n$aStr\",\n \"10\\nblarg\")\n\n\nclass CaptureDirective(OutputTest):\n def test1(self):\n r\"\"\"simple #capture\"\"\"\n self.verify('''\\\n#capture cap1\n$(1234+1) foo#slurp\n#end capture\n$cap1#slurp\n''',\n \"1235 foo\")\n\n def test2(self):\n r\"\"\"slightly more complex #capture\"\"\"\n self.verify('''\\\n#def meth(arg)\n$arg.upper()#slurp\n#end def\n#capture cap1\n$(1234+1) $anInt $meth(\"foo\")#slurp\n#end capture\n$cap1#slurp\n''',\n \"1235 1 FOO\")\n\n\nclass SlurpDirective(OutputTest):\n def test1(self):\n r\"\"\"#slurp with 1 \\n \"\"\"\n self.verify(\"#slurp\\n\",\n \"\")\n\n def test2(self):\n r\"\"\"#slurp with 1 \\n, leading whitespace\n Should gobble\"\"\"\n self.verify(\" #slurp\\n\",\n \"\")\n\n def test3(self):\n r\"\"\"#slurp with 1 \\n, leading content\n Shouldn't gobble\"\"\"\n self.verify(\" 1234 #slurp\\n\",\n \" 1234 \")\n\n def test4(self):\n r\"\"\"#slurp with WS then \\n, leading content\n Shouldn't gobble\"\"\"\n self.verify(\" 1234 #slurp \\n\",\n \" 1234 \")\n\n def test5(self):\n r\"\"\"#slurp with garbage chars then \\n, leading content\n Should eat the garbage\"\"\"\n self.verify(\" 1234 #slurp garbage \\n\",\n \" 1234 \")\n\n\nclass EOLSlurpToken(OutputTest):\n _EOLSlurpToken = DEFAULT_COMPILER_SETTINGS['EOLSlurpToken']\n\n def test1(self):\n r\"\"\"#slurp with 1 \\n \"\"\"\n self.verify(\"%s\\n\" % self._EOLSlurpToken,\n \"\")\n\n def test2(self):\n r\"\"\"#slurp with 1 \\n, leading whitespace\n Should gobble\"\"\"\n self.verify(\" %s\\n\" % self._EOLSlurpToken,\n \"\")\n\n def test3(self):\n r\"\"\"#slurp with 1 \\n, leading content\n Shouldn't gobble\"\"\"\n self.verify(\" 1234 %s\\n\" % self._EOLSlurpToken,\n \" 1234 \")\n\n def test4(self):\n r\"\"\"#slurp with WS then \\n, leading content\n Shouldn't gobble\"\"\"\n self.verify(\" 1234 %s \\n\" % self._EOLSlurpToken,\n \" 1234 \")\n\n def test5(self):\n r\"\"\"#slurp with garbage chars then \\n, leading content\n Should NOT eat the garbage\"\"\"\n self.verify(\" 1234 %s garbage \\n\" % self._EOLSlurpToken,\n \" 1234 %s garbage \\n\" % self._EOLSlurpToken)\n\n\nif not DEFAULT_COMPILER_SETTINGS['EOLSlurpToken']:\n del EOLSlurpToken\n\n\nclass RawDirective(OutputTest):\n def test1(self):\n \"\"\"#raw till EOF\"\"\"\n self.verify(\"#raw\\n$aFunc().\\n\\n\",\n \"$aFunc().\\n\\n\")\n\n def test2(self):\n \"\"\"#raw till #end raw\"\"\"\n self.verify(\"#raw\\n$aFunc().\\n#end raw\\n$anInt\",\n \"$aFunc().\\n1\")\n\n def test3(self):\n \"\"\"#raw till #end raw gobble WS\"\"\"\n self.verify(\" #raw \\n$aFunc().\\n #end raw \\n$anInt\",\n \"$aFunc().\\n1\")\n\n def test4(self):\n \"\"\"#raw till #end raw using explicit directive closure\n Shouldn't gobble\"\"\"\n self.verify(\" #raw #\\n$aFunc().\\n #end raw #\\n$anInt\",\n \" \\n$aFunc().\\n\\n1\")\n\n def test5(self):\n \"\"\"single-line short form #raw: \"\"\"\n self.verify(\"#raw: $aFunc().\\n\\n\",\n \"$aFunc().\\n\\n\")\n\n self.verify(\"#raw: $aFunc().\\n$anInt\",\n \"$aFunc().\\n1\")\n\n def test6(self):\n \"\"\" Escape characters in a #raw block \"\"\"\n # noqa\n self.verify(\"\"\"\\\n#raw: This escape should be preserved: \\\\$unexpanded So should this one: \\\\#blah The string \"\\\\012\" should not disappear.\"\"\", # noqa\n r\"\"\"This escape should be preserved: \\$unexpanded So should this one: \\#blah The string \"\\012\" 
should not disappear.\"\"\") # noqa\n\n\nclass BreakpointDirective(OutputTest):\n def test1(self):\n \"\"\"#breakpoint part way through source code\"\"\"\n self.verify(\"$aFunc(2).\\n#breakpoint\\n$anInt\",\n \"2.\\n\")\n\n def test2(self):\n \"\"\"#breakpoint at BOF\"\"\"\n self.verify(\"#breakpoint\\n$anInt\",\n \"\")\n\n def test3(self):\n \"\"\"#breakpoint at EOF\"\"\"\n self.verify(\"$anInt\\n#breakpoint\",\n \"1\\n\")\n\n\nclass StopDirective(OutputTest):\n def test1(self):\n \"\"\"#stop part way through source code\"\"\"\n self.verify(\"$aFunc(2).\\n#stop\\n$anInt\",\n \"2.\\n\")\n\n def test2(self):\n \"\"\"#stop at BOF\"\"\"\n self.verify(\"#stop\\n$anInt\",\n \"\")\n\n def test3(self):\n \"\"\"#stop at EOF\"\"\"\n self.verify(\"$anInt\\n#stop\",\n \"1\\n\")\n\n def test4(self):\n \"\"\"#stop in pos test block\"\"\"\n self.verify(\"\"\"\\\n$anInt\n#if 1\ninside the if block\n#stop\n#end if\nblarg\"\"\",\n \"1\\ninside the if block\\n\")\n\n def test5(self):\n \"\"\"#stop in neg test block\"\"\"\n self.verify(\"\"\"$anInt\n#if 0\ninside the if block\n#stop\n#end if\nblarg\"\"\",\n \"1\\nblarg\")\n\n\nclass ReturnDirective(OutputTest):\n\n def test1(self):\n \"\"\"#return'ing an int \"\"\"\n self.verify(\"\"\"1\n$str($test-6)\n3\n#def test\n#if 1\n#return (3 *2) \\\n + 2\n#else\naoeuoaeu\n#end if\n#end def\n\"\"\",\n \"1\\n2\\n3\\n\")\n\n def test2(self):\n \"\"\"#return'ing a string \"\"\"\n self.verify(\"\"\"1\n$str($test[1])\n3\n#def test\n#if 1\n#return '123'\n#else\naoeuoaeu\n#end if\n#end def\n\"\"\",\n \"1\\n2\\n3\\n\")\n\n def test3(self):\n \"\"\"\n #return'ing a string AND streaming other output via the transaction\n \"\"\"\n self.verify(\"\"\"1\n$str($test(trans=trans)[1])\n3\n#def test\n1.5\n#if 1\n#return '123'\n#else\naoeuoaeu\n#end if\n#end def\n\"\"\",\n \"1\\n1.5\\n2\\n3\\n\")\n\n\nclass YieldDirective(OutputTest):\n convertEOLs = False\n\n def test1(self):\n \"\"\"simple #yield \"\"\"\n\n src1 = \"\"\"#for i in range(10)\\n#yield i\\n#end for\"\"\"\n src2 = \"\"\"#for i in range(10)\\n$i#slurp\\n#yield\\n#end for\"\"\"\n src3 = (\"#def iterator\\n\"\n \"#for i in range(10)\\n#yield i\\n#end for\\n\"\n \"#end def\\n\"\n \"#for i in $iterator\\n$i#end for\"\n )\n\n for src in (src1, src2, src3):\n klass = Template.compile(src, keepRefToGeneratedCode=True)\n # print klass._CHEETAH_generatedModuleCode\n iter = klass().respond()\n output = [str(i) for i in iter]\n assert ''.join(output) == '0123456789'\n # print ''.join(output)\n\n # @@TR: need to expand this to cover error conditions etc.\n\n\nclass ForDirective(OutputTest):\n\n def test1(self):\n \"\"\"#for loop with one local var\"\"\"\n self.verify(\"#for $i in range(5)\\n$i\\n#end for\",\n \"0\\n1\\n2\\n3\\n4\\n\")\n\n self.verify(\"#for $i in range(5):\\n$i\\n#end for\",\n \"0\\n1\\n2\\n3\\n4\\n\")\n\n self.verify(\"#for $i in range(5): ##comment\\n$i\\n#end for\",\n \"0\\n1\\n2\\n3\\n4\\n\")\n\n self.verify(\"#for $i in range(5) ##comment\\n$i\\n#end for\",\n \"0\\n1\\n2\\n3\\n4\\n\")\n\n def test2(self):\n \"\"\"#for loop with WS in loop\"\"\"\n self.verify(\"#for $i in range(5)\\n$i \\n#end for\",\n \"0 \\n1 \\n2 \\n3 \\n4 \\n\")\n\n def test3(self):\n \"\"\"#for loop gobble WS\"\"\"\n self.verify(\" #for $i in range(5) \\n$i \\n #end for \",\n \"0 \\n1 \\n2 \\n3 \\n4 \\n\")\n\n def test4(self):\n \"\"\"#for loop over list\"\"\"\n self.verify(\"#for $i, $j in [(0,1),(2,3)]\\n$i,$j\\n#end for\",\n \"0,1\\n2,3\\n\")\n\n def test5(self):\n \"\"\"#for loop over list, with #slurp\"\"\"\n self.verify(\"#for $i, $j 
in [(0,1),(2,3)]\\n$i,$j#slurp\\n#end for\",\n \"0,12,3\")\n\n def test6(self):\n \"\"\"#for loop with explicit closures\"\"\"\n self.verify(\"#for $i in range(5)#$i#end for#\",\n \"01234\")\n\n def test7(self):\n \"\"\"#for loop with explicit closures and WS\"\"\"\n self.verify(\" #for $i in range(5)#$i#end for# \",\n \" 01234 \")\n\n def test8(self):\n \"\"\"#for loop using another $var\"\"\"\n self.verify(\" #for $i in range($aFunc(5))#$i#end for# \",\n \" 01234 \")\n\n def test9(self):\n \"\"\"test methods in for loops\"\"\"\n self.verify(\"#for $func in $listOfLambdas\\n$func($anInt)\\n#end for\",\n \"1\\n1\\n1\\n\")\n\n def test10(self):\n \"\"\"#for loop over list, using methods of the items\"\"\"\n self.verify(\n \"#for i, j in [('aa','bb'),('cc','dd')]\\n\"\n \"$i.upper,$j.upper\\n#end for\",\n \"AA,BB\\nCC,DD\\n\")\n self.verify(\n \"#for $i, $j in [('aa','bb'),('cc','dd')]\\n\"\n \"$i.upper,$j.upper\\n#end for\",\n \"AA,BB\\nCC,DD\\n\")\n\n def test11(self):\n \"\"\"#for loop over list, using ($i,$j) style target list\"\"\"\n self.verify(\n \"#for (i, j) in [('aa','bb'),('cc','dd')]\\n\"\n \"$i.upper,$j.upper\\n#end for\",\n \"AA,BB\\nCC,DD\\n\")\n self.verify(\n \"#for ($i, $j) in [('aa','bb'),('cc','dd')]\\n\"\n \"$i.upper,$j.upper\\n#end for\",\n \"AA,BB\\nCC,DD\\n\")\n\n def test12(self):\n \"\"\"#for loop over list, using i, (j,k) style target list\"\"\"\n self.verify(\n \"#for i, (j, k) in enumerate([('aa','bb'),('cc','dd')])\\n\"\n \"$j.upper,$k.upper\\n#end for\",\n \"AA,BB\\nCC,DD\\n\")\n self.verify(\n \"#for $i, ($j, $k) in enumerate([('aa','bb'),('cc','dd')])\\n\"\n \"$j.upper,$k.upper\\n#end for\",\n \"AA,BB\\nCC,DD\\n\")\n\n def test13(self):\n \"\"\"single line #for\"\"\"\n self.verify(\"#for $i in range($aFunc(5)): $i\",\n \"01234\")\n\n def test14(self):\n \"\"\"single line #for with 1 extra leading space\"\"\"\n self.verify(\"#for $i in range($aFunc(5)): $i\",\n \" 0 1 2 3 4\")\n\n def test15(self):\n \"\"\"2 times single line #for\"\"\"\n self.verify(\"#for $i in range($aFunc(5)): $i#slurp\\n\"*2, # noqa: E226,E501 missing whitespace around operator\n \"01234\"*2) # noqa: E226 missing whitespace around operator\n\n def test16(self):\n \"\"\"false single line #for \"\"\"\n self.verify(\"#for $i in range(5): \\n$i\\n#end for\",\n \"0\\n1\\n2\\n3\\n4\\n\")\n\n\nclass RepeatDirective(OutputTest):\n\n def test1(self):\n \"\"\"basic #repeat\"\"\"\n self.verify(\"#repeat 3\\n1\\n#end repeat\",\n \"1\\n1\\n1\\n\")\n self.verify(\"#repeat 3: \\n1\\n#end repeat\",\n \"1\\n1\\n1\\n\")\n\n self.verify(\"#repeat 3 ##comment\\n1\\n#end repeat\",\n \"1\\n1\\n1\\n\")\n\n self.verify(\"#repeat 3: ##comment\\n1\\n#end repeat\",\n \"1\\n1\\n1\\n\")\n\n def test2(self):\n \"\"\"#repeat with numeric expression\"\"\"\n self.verify(\"#repeat 3*3//3\\n1\\n#end repeat\",\n \"1\\n1\\n1\\n\")\n\n def test3(self):\n \"\"\"#repeat with placeholder\"\"\"\n self.verify(\"#repeat $numTwo\\n1\\n#end repeat\",\n \"1\\n1\\n\")\n\n def test4(self):\n \"\"\"#repeat with placeholder * num\"\"\"\n self.verify(\"#repeat $numTwo*1\\n1\\n#end repeat\",\n \"1\\n1\\n\")\n\n def test5(self):\n \"\"\"#repeat with placeholder and WS\"\"\"\n self.verify(\" #repeat $numTwo \\n1\\n #end repeat \",\n \"1\\n1\\n\")\n\n def test6(self):\n \"\"\"single-line #repeat\"\"\"\n self.verify(\"#repeat $numTwo: 1\",\n \"11\")\n self.verify(\"#repeat $numTwo: 1\\n\"*2, # noqa: E226,E501 missing whitespace around operator\n \"1\\n1\\n\"*2) # noqa: E226,E501 missing whitespace around operator\n\n # false single-line\n 
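# a colon followed by only whitespace is not the single-line form; the\n # block below still needs its #end repeat (cf. the false single-line #for)\n 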
self.verify(\"#repeat 3: \\n1\\n#end repeat\",\n \"1\\n1\\n1\\n\")\n\n\nclass AttrDirective(OutputTest):\n\n def test1(self):\n \"\"\"#attr with int\"\"\"\n self.verify(\"#attr $test = 1234\\n$test\",\n \"1234\")\n\n def test2(self):\n \"\"\"#attr with string\"\"\"\n self.verify(\"#attr $test = 'blarg'\\n$test\",\n \"blarg\")\n\n def test3(self):\n \"\"\"#attr with expression\"\"\"\n self.verify(\"#attr $test = 'blarg'.upper()*2\\n$test\",\n \"BLARGBLARG\")\n\n def test4(self):\n \"\"\"#attr with string + WS\n Should gobble\"\"\"\n self.verify(\" #attr $test = 'blarg' \\n$test\",\n \"blarg\")\n\n def test5(self):\n \"\"\"#attr with string + WS + leading text\n Shouldn't gobble\"\"\"\n self.verify(\" -- #attr $test = 'blarg' \\n$test\",\n \" -- \\nblarg\")\n\n\nclass DefDirective(OutputTest):\n\n def test1(self):\n \"\"\"#def without argstring\"\"\"\n self.verify(\"#def testMeth\\n1234\\n#end def\\n$testMeth\",\n \"1234\\n\")\n\n self.verify(\"#def testMeth ## comment\\n1234\\n#end def\\n$testMeth\",\n \"1234\\n\")\n\n self.verify(\"#def testMeth: ## comment\\n1234\\n#end def\\n$testMeth\",\n \"1234\\n\")\n\n def test2(self):\n \"\"\"#def without argstring, gobble WS\"\"\"\n self.verify(\" #def testMeth \\n1234\\n #end def \\n$testMeth\",\n \"1234\\n\")\n\n def test3(self):\n \"\"\"#def with argstring, gobble WS\"\"\"\n self.verify(\n \" #def testMeth($a=999) \\n1234-$a\\n #end def\\n$testMeth\",\n \"1234-999\\n\")\n\n def test4(self):\n \"\"\"#def with argstring, gobble WS, string used in call\"\"\"\n self.verify(\n \" #def testMeth($a=999) \\n\"\n \"1234-$a\\n #end def\\n$testMeth('ABC')\",\n \"1234-ABC\\n\")\n\n def test5(self):\n \"\"\"#def with argstring, gobble WS, list used in call\"\"\"\n self.verify(\n \" #def testMeth($a=999) \\n\"\n \"1234-$a\\n #end def\\n$testMeth([1,2,3])\",\n \"1234-[1, 2, 3]\\n\")\n\n def test6(self):\n \"\"\"#def with 2 args, gobble WS, list used in call\"\"\"\n self.verify(\n \" #def testMeth($a, $b='default') \\n\"\n \"1234-$a$b\\n #end def\\n$testMeth([1,2,3])\",\n \"1234-[1, 2, 3]default\\n\")\n\n def test7(self):\n \"\"\"#def with *args, gobble WS\"\"\"\n self.verify(\n \" #def testMeth($*args) \\n1234-$args\\n #end def\\n$testMeth\",\n \"1234-()\\n\")\n\n def test8(self):\n \"\"\"#def with **KWs, gobble WS\"\"\"\n self.verify(\n \" #def testMeth($**KWs) \\n1234-$KWs\\n #end def\\n$testMeth\",\n \"1234-{}\\n\")\n\n def test9(self):\n \"\"\"#def with *args + **KWs, gobble WS\"\"\"\n self.verify(\n \" #def testMeth($*args, $**KWs) \\n\"\n \"1234-$args-$KWs\\n #end def\\n$testMeth\",\n \"1234-()-{}\\n\")\n\n def test10(self):\n \"\"\"#def with *args + **KWs, gobble WS\"\"\"\n self.verify(\n \" #def testMeth($*args, $**KWs) \\n\"\n \"1234-$args-$KWs.a\\n #end def\\n$testMeth(1,2, a=1)\",\n \"1234-(1, 2)-1\\n\")\n\n def test11(self):\n \"\"\"single line #def with extra WS\"\"\"\n self.verify(\n \"#def testMeth: aoeuaoeu\\n- $testMeth -\",\n \"- aoeuaoeu -\")\n\n def test12(self):\n \"\"\"single line #def with extra WS and nested $placeholders\"\"\"\n self.verify(\n \"#def testMeth: $anInt $aFunc(1234)\\n- $testMeth -\",\n \"- 1 1234 -\")\n\n def test13(self):\n \"\"\"single line #def escaped $placeholders\"\"\"\n self.verify(\n \"#def testMeth: \\$aFunc(\\$anInt)\\n- $testMeth -\",\n \"- $aFunc($anInt) -\")\n\n def test14(self):\n \"\"\"single line #def 1 escaped $placeholders\"\"\"\n self.verify(\n \"#def testMeth: \\$aFunc($anInt)\\n- $testMeth -\",\n \"- $aFunc(1) -\")\n\n def test15(self):\n \"\"\"single line #def 1 escaped $placeholders + more 
WS\"\"\"\n self.verify(\n \"#def testMeth : \\$aFunc($anInt)\\n- $testMeth -\",\n \"- $aFunc(1) -\")\n\n def test16(self):\n \"\"\"multiline #def with $ on methodName\"\"\"\n self.verify(\"#def $testMeth\\n1234\\n#end def\\n$testMeth\",\n \"1234\\n\")\n\n def test17(self):\n \"\"\"single line #def with $ on methodName\"\"\"\n self.verify(\"#def $testMeth:1234\\n$testMeth\",\n \"1234\")\n\n def test18(self):\n \"\"\"single line #def with an argument\"\"\"\n self.verify(\"#def $testMeth($arg=1234):$arg\\n$testMeth\",\n \"1234\")\n\n def test19(self):\n \"\"\"#def that extends over two lines with arguments\"\"\"\n self.verify(\"#def $testMeth($arg=1234,\\n\"\n + \" $arg2=5678)\\n\"\n + \"$arg $arg2\\n\"\n + \"#end def\\n\"\n + \"$testMeth\",\n \"1234 5678\\n\")\n\n\nclass DecoratorDirective(OutputTest):\n def test1(self):\n \"\"\"single line #def with decorator\"\"\"\n\n self.verify(\"#@ blah\", \"#@ blah\")\n self.verify(\"#@23 blah\", \"#@23 blah\")\n self.verify(\"#@@TR: comment\", \"#@@TR: comment\")\n\n self.verify(\n \"#from Cheetah.Tests.SyntaxAndOutput import testdecorator\\n\"\n + \"#@testdecorator\"\n + \"\\n#def $testMeth():1234\\n$testMeth\",\n \"1234\")\n\n self.verify(\n \"#from Cheetah.Tests.SyntaxAndOutput import testdecorator\\n\"\n + \"#@testdecorator\"\n + \"\\n#block $testMeth():1234\",\n \"1234\")\n\n try:\n self.verify(\n \"#from Cheetah.Tests.SyntaxAndOutput import testdecorator\\n\"\n + \"#@testdecorator\\n sdf\"\n + \"\\n#def $testMeth():1234\\n$testMeth\",\n\n \"1234\")\n except ParseError:\n pass\n else:\n self.fail('should raise a ParseError')\n\n def test2(self):\n \"\"\"#def with multiple decorators\"\"\"\n self.verify(\n \"#from Cheetah.Tests.SyntaxAndOutput import testdecorator\\n\"\n + \"#@testdecorator\\n\"\n + \"#@testdecorator\\n\"\n + \"#def testMeth\\n\"\n + \"1234\\n\"\n \"#end def\\n\"\n \"$testMeth\",\n \"1234\\n\")\n\n\nclass BlockDirective(OutputTest):\n\n def test1(self):\n \"\"\"#block without argstring\"\"\"\n self.verify(\"#block testBlock\\n1234\\n#end block\",\n \"1234\\n\")\n\n self.verify(\"#block testBlock ##comment\\n1234\\n#end block\",\n \"1234\\n\")\n\n def test2(self):\n \"\"\"#block without argstring, gobble WS\"\"\"\n self.verify(\" #block testBlock \\n1234\\n #end block \",\n \"1234\\n\")\n\n def test3(self):\n \"\"\"#block with argstring, gobble WS\n\n Because blocks can be reused in multiple parts of the template\n arguments (!!with defaults!!) 
can be given.\n \"\"\"\n self.verify(\" #block testBlock($a=999) \\n1234-$a\\n #end block \",\n \"1234-999\\n\")\n\n def test4(self):\n \"\"\"#block with 2 args, gobble WS\"\"\"\n self.verify(\n \" #block testBlock($a=999, $b=444) \\n1234-$a$b\\n #end block \",\n \"1234-999444\\n\")\n\n def test5(self):\n \"\"\"#block with 2 nested blocks\n\n Blocks can be nested to any depth and the name of the block is optional\n for the #end block part: #end block OR #end block [name] \"\"\"\n\n self.verify(\"\"\"#block testBlock\nthis is a test block\n#block outerNest\nouter\n#block innerNest\ninner\n#end block innerNest\n#end block outerNest\n---\n#end block testBlock\n\"\"\",\n \"this is a test block\\nouter\\ninner\\n---\\n\")\n\n def test6(self):\n \"\"\"single line #block \"\"\"\n self.verify(\n \"#block testMeth: This is my block\",\n \"This is my block\")\n\n def test7(self):\n \"\"\"single line #block with WS\"\"\"\n self.verify(\n \"#block testMeth: This is my block\",\n \"This is my block\")\n\n def test8(self):\n \"\"\"single line #block 1 escaped $placeholders\"\"\"\n self.verify(\n \"#block testMeth: \\$aFunc($anInt)\",\n \"$aFunc(1)\")\n\n def test9(self):\n \"\"\"single line #block 1 escaped $placeholders + WS\"\"\"\n self.verify(\n \"#block testMeth: \\$aFunc( $anInt )\",\n \"$aFunc( 1 )\")\n\n def test10(self):\n \"\"\"single line #block 1 escaped $placeholders + more WS\"\"\"\n self.verify(\n \"#block testMeth : \\$aFunc( $anInt )\",\n \"$aFunc( 1 )\")\n\n def test11(self):\n \"\"\"multiline #block $ on argstring\"\"\"\n self.verify(\"#block $testBlock\\n1234\\n#end block\",\n \"1234\\n\")\n\n def test12(self):\n \"\"\"single line #block with $ on methodName \"\"\"\n self.verify(\n \"#block $testMeth: This is my block\",\n \"This is my block\")\n\n def test13(self):\n \"\"\"single line #block with an arg \"\"\"\n self.verify(\n \"#block $testMeth($arg='This is my block'): $arg\",\n \"This is my block\")\n\n def test14(self):\n \"\"\"single line #block with None for content\"\"\"\n self.verify(\n \"\"\"#block $testMeth: $None\\ntest $testMeth-\"\"\",\n \"test -\")\n\n def test15(self):\n \"\"\"single line #block with nothing for content\"\"\"\n self.verify(\n \"\"\"#block $testMeth: \\nfoo\\n#end block\\ntest $testMeth-\"\"\",\n \"foo\\ntest foo\\n-\")\n\n\nclass IncludeDirective(OutputTest):\n\n def setUp(self):\n fp = open('parseTest.txt', 'w')\n fp.write(\"$numOne $numTwo\")\n fp.flush()\n fp.close()\n\n def tearDown(self):\n if os.path.exists('parseTest.txt'):\n os.remove('parseTest.txt')\n\n def test1(self):\n \"\"\"#include raw of source $emptyString\"\"\"\n self.verify(\"#include raw source=$emptyString\",\n \"\")\n\n def test2(self):\n \"\"\"#include raw of source $blockToBeParsed\"\"\"\n self.verify(\"#include raw source=$blockToBeParsed\",\n \"$numOne $numTwo\")\n\n def test3(self):\n \"\"\"#include raw of 'parseTest.txt'\"\"\"\n self.verify(\"#include raw 'parseTest.txt'\",\n \"$numOne $numTwo\")\n\n def test4(self):\n \"\"\"#include raw of $includeFileName\"\"\"\n self.verify(\"#include raw $includeFileName\",\n \"$numOne $numTwo\")\n\n def test5(self):\n \"\"\"#include raw of $includeFileName, with WS\"\"\"\n self.verify(\" #include raw $includeFileName \",\n \"$numOne $numTwo\")\n\n def test6(self):\n \"\"\"#include raw of source= , with WS\"\"\"\n self.verify(\" #include raw source='This is my $Source '*2 \",\n \"This is my $Source This is my $Source \")\n\n def test7(self):\n \"\"\"#include of $blockToBeParsed\"\"\"\n self.verify(\"#include source=$blockToBeParsed\",\n 
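# source= includes compile the given string as a nested template, so the\n # block's '$numOne $numTwo' expands against the same searchList\n 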
\"1 2\")\n\n def test8(self):\n \"\"\"#include of $blockToBeParsed, with WS\"\"\"\n self.verify(\" #include source=$blockToBeParsed \",\n \"1 2\")\n\n def test9(self):\n \"\"\"#include of 'parseTest.txt', with WS\"\"\"\n self.verify(\" #include source=$blockToBeParsed \",\n \"1 2\")\n\n def test10(self):\n \"\"\"#include of \"parseTest.txt\", with WS\"\"\"\n self.verify(\" #include source=$blockToBeParsed \",\n \"1 2\")\n\n def test11(self):\n \"\"\"#include of 'parseTest.txt', with WS and surrounding text\"\"\"\n self.verify(\"aoeu\\n #include source=$blockToBeParsed \\naoeu\",\n \"aoeu\\n1 2aoeu\")\n\n def test12(self):\n \"\"\"#include of 'parseTest.txt', with WS and explicit closure\"\"\"\n self.verify(\" #include source=$blockToBeParsed# \",\n \" 1 2 \")\n\n\nclass SilentDirective(OutputTest):\n\n def _X_test1(self):\n \"\"\"simple #silent\"\"\"\n self.verify(\"#silent $aFunc\",\n \"\")\n\n def _X_test2(self):\n \"\"\"simple #silent\"\"\"\n self.verify(\"#silent $anObj.callIt\\n$anObj.callArg\",\n \"1234\")\n\n self.verify(\"#silent $anObj.callIt ##comment\\n$anObj.callArg\",\n \"1234\")\n\n def _X_test3(self):\n \"\"\"simple #silent\"\"\"\n self.verify(\"#silent $anObj.callIt(99)\\n$anObj.callArg\",\n \"99\")\n\n def test4(self):\n \"\"\"#silent 1234\n \"\"\"\n self.verify(\"#silent 1234\",\n \"\")\n\n\nclass SetDirective(OutputTest):\n\n def test1(self):\n \"\"\"simple #set\"\"\"\n self.verify(\"#set $testVar = 'blarg'\\n$testVar\",\n \"blarg\")\n self.verify(\"#set testVar = 'blarg'\\n$testVar\",\n \"blarg\")\n\n self.verify(\"#set testVar = 'blarg'##comment\\n$testVar\",\n \"blarg\")\n\n def test2(self):\n \"\"\"simple #set with no WS between operands\"\"\"\n self.verify(\"#set $testVar='blarg'\",\n \"\")\n\n def test3(self):\n \"\"\"#set + use of var\"\"\"\n self.verify(\"#set $testVar = 'blarg'\\n$testVar\",\n \"blarg\")\n\n def test4(self):\n \"\"\"#set + use in an #include\"\"\"\n self.verify(\n \"#set global $aSetVar = 1234\\n#include source=$includeBlock2\",\n \"1 2 1234\")\n\n def test5(self):\n \"\"\"#set with a dictionary\"\"\"\n self.verify(\"\"\"\\\n#set $testDict = {'one':'one1','two':'two2','three':'three3'}\n$testDict.one\n$testDict.two\"\"\",\n \"one1\\ntwo2\")\n\n def test6(self):\n \"\"\"#set with string, then used in #if block\"\"\"\n\n self.verify(\"\"\"#set $test='a string'\\n#if $test#blarg#end if\"\"\",\n \"blarg\")\n\n def test7(self):\n \"\"\"simple #set, gobble WS\"\"\"\n self.verify(\" #set $testVar = 'blarg' \",\n \"\")\n\n def test8(self):\n \"\"\"simple #set, don't gobble WS\"\"\"\n self.verify(\" #set $testVar = 'blarg'#---\",\n \" ---\")\n\n def test9(self):\n \"\"\"simple #set with a list\"\"\"\n self.verify(\" #set $testVar = [1, 2, 3] \\n$testVar\",\n \"[1, 2, 3]\")\n\n def test10(self):\n \"\"\"simple #set global with a list\"\"\"\n self.verify(\" #set global $testVar = [1, 2, 3] \\n$testVar\",\n \"[1, 2, 3]\")\n\n def test11(self):\n \"\"\"simple #set global with a list and *cache\n\n Caching only works with global #set vars. 
Local vars are not accessible\n to the cache namespace.\n \"\"\"\n\n self.verify(\" #set global $testVar = [1, 2, 3] \\n$*testVar\",\n \"[1, 2, 3]\")\n\n def test12(self):\n \"\"\"simple #set global with a list and **cache\"\"\"\n self.verify(\" #set global $testVar = [1, 2, 3] \\n$*5*testVar\",\n \"[1, 2, 3]\")\n\n def test13(self):\n \"\"\"simple #set with a list and **cache\"\"\"\n self.verify(\" #set global $testVar = [1, 2, 3] \\n$*.5*testVar\",\n \"[1, 2, 3]\")\n\n def test14(self):\n \"\"\"simple #set without NameMapper on\"\"\"\n self.verify(\n \"\"\"#compiler useNameMapper = 0\\n#set $testVar = 1 \\n$testVar\"\"\",\n \"1\")\n\n def test15(self):\n \"\"\"simple #set without $\"\"\"\n self.verify(\"\"\"#set testVar = 1 \\n$testVar\"\"\",\n \"1\")\n\n def test16(self):\n \"\"\"simple #set global without $\"\"\"\n self.verify(\"\"\"#set global testVar = 1 \\n$testVar\"\"\",\n \"1\")\n\n def test17(self):\n \"\"\"simple #set module without $\"\"\"\n self.verify(\"\"\"#set module __foo__ = 'bar'\\n$__foo__\"\"\",\n \"bar\")\n\n def test18(self):\n \"\"\"#set with i,j=list style assignment\"\"\"\n self.verify(\"\"\"#set i,j = [1,2]\\n$i$j\"\"\",\n \"12\")\n self.verify(\"\"\"#set $i,$j = [1,2]\\n$i$j\"\"\",\n \"12\")\n\n def test19(self):\n \"\"\"#set with (i,j)=list style assignment\"\"\"\n self.verify(\"\"\"#set (i,j) = [1,2]\\n$i$j\"\"\",\n \"12\")\n self.verify(\"\"\"#set ($i,$j) = [1,2]\\n$i$j\"\"\",\n \"12\")\n\n def test20(self):\n \"\"\"#set with i, (j,k)=list style assignment\"\"\"\n self.verify(\"\"\"#set i, (j,k) = [1,(2,3)]\\n$i$j$k\"\"\",\n \"123\")\n self.verify(\"\"\"#set $i, ($j,$k) = [1,(2,3)]\\n$i$j$k\"\"\",\n \"123\")\n\n\nclass IfDirective(OutputTest):\n\n def test1(self):\n \"\"\"simple #if block\"\"\"\n self.verify(\"#if 1\\n$aStr\\n#end if\\n\",\n \"blarg\\n\")\n\n self.verify(\"#if 1:\\n$aStr\\n#end if\\n\",\n \"blarg\\n\")\n\n self.verify(\"#if 1: \\n$aStr\\n#end if\\n\",\n \"blarg\\n\")\n\n self.verify(\"#if 1: ##comment \\n$aStr\\n#end if\\n\",\n \"blarg\\n\")\n\n self.verify(\"#if 1 ##comment \\n$aStr\\n#end if\\n\",\n \"blarg\\n\")\n\n self.verify(\"#if 1##for i in range(10)#$i#end for##end if\",\n '0123456789')\n\n self.verify(\"#if 1: #for i in range(10)#$i#end for\",\n '0123456789')\n\n self.verify(\"#if 1: #for i in range(10):$i\",\n '0123456789')\n\n def test2(self):\n \"\"\"simple #if block, with WS\"\"\"\n self.verify(\" #if 1\\n$aStr\\n #end if \\n\",\n \"blarg\\n\")\n\n def test3(self):\n \"\"\"simple #if block, with WS and explicit closures\"\"\"\n self.verify(\" #if 1#\\n$aStr\\n #end if #--\\n\",\n \" \\nblarg\\n --\\n\")\n\n def test4(self):\n \"\"\"#if block using $numOne\"\"\"\n self.verify(\"#if $numOne\\n$aStr\\n#end if\\n\",\n \"blarg\\n\")\n\n def test5(self):\n \"\"\"#if block using $zero\"\"\"\n self.verify(\"#if $zero\\n$aStr\\n#end if\\n\",\n \"\")\n\n def test6(self):\n \"\"\"#if block using $emptyString\"\"\"\n self.verify(\"#if $emptyString\\n$aStr\\n#end if\\n\",\n \"\")\n\n def test7(self):\n \"\"\"#if ... #else ... block using a $emptyString\"\"\"\n self.verify(\n \"#if $emptyString\\n$anInt\\n#else\\n$anInt - $anInt\\n#end if\",\n \"1 - 1\\n\")\n\n def test8(self):\n \"\"\"#if ... #elif ... #else ... 
block using a $emptyString\"\"\"\n self.verify(\n \"#if $emptyString\\n$c\\n\"\n \"#elif $numOne\\n$numOne\\n#else\\n$c - $c\\n#end if\",\n \"1\\n\")\n\n def test9(self):\n \"\"\"#if 'not' test, with #slurp\"\"\"\n self.verify(\"#if not $emptyString\\n$aStr#slurp\\n#end if\\n\",\n \"blarg\")\n\n def test10(self):\n \"\"\"#if block using $*emptyString\n\n This should barf\n \"\"\"\n try:\n self.verify(\"#if $*emptyString\\n$aStr\\n#end if\\n\",\n \"\")\n except ParseError:\n pass\n else:\n self.fail('This should barf')\n\n def test11(self):\n \"\"\"\n #if block using invalid top-level $(placeholder) syntax - should barf\n \"\"\"\n\n for badSyntax in (\"#if $*5*emptyString\\n$aStr\\n#end if\\n\",\n \"#if ${emptyString}\\n$aStr\\n#end if\\n\",\n \"#if $(emptyString)\\n$aStr\\n#end if\\n\",\n \"#if $[emptyString]\\n$aStr\\n#end if\\n\",\n \"#if $!emptyString\\n$aStr\\n#end if\\n\",\n ):\n try:\n self.verify(badSyntax, \"\")\n except ParseError:\n pass\n else:\n self.fail('This should barf')\n\n def test12(self):\n \"\"\"#if ... #else if ... #else ... block using a $emptyString\n Same as test 8 but using else if instead of elif\"\"\"\n self.verify(\n \"#if $emptyString\\n$c\\n#else \"\n \"if $numOne\\n$numOne\\n#else\\n$c - $c\\n#end if\",\n \"1\\n\")\n\n def test13(self):\n \"\"\"#if# ... #else # ... block using a $emptyString with \"\"\"\n self.verify(\"#if $emptyString# $anInt#else#$anInt - $anInt#end if\",\n \"1 - 1\")\n\n def test14(self):\n \"\"\"single-line #if: simple\"\"\"\n self.verify(\"#if $emptyString then 'true' else 'false'\",\n \"false\")\n\n def test15(self):\n \"\"\"single-line #if: more complex\"\"\"\n self.verify(\"#if $anInt then 'true' else 'false'\",\n \"true\")\n\n def test16(self):\n \"\"\"single-line #if: with the words 'else' and 'then' in the output \"\"\"\n self.verify(\n \"#if ($anInt and not $emptyString==''' else ''') \"\n \"then $str('then') else 'else'\",\n \"then\")\n\n def test17(self):\n \"\"\"single-line #if: \"\"\"\n self.verify(\"#if 1: foo\\n#if 0: bar\\n#if 1: foo\",\n \"foo\\nfoo\")\n\n self.verify(\"#if 1: foo\\n#if 0: bar\\n#if 1: foo\",\n \"foo\\nfoo\")\n\n def test18(self):\n \"\"\"single-line #if: \\n#else: \"\"\"\n self.verify(\"#if 1: foo\\n#elif 0: bar\",\n \"foo\\n\")\n\n self.verify(\"#if 1: foo\\n#elif 0: bar\\n#else: blarg\\n\",\n \"foo\\n\")\n\n self.verify(\"#if 0: foo\\n#elif 0: bar\\n#else: blarg\\n\",\n \"blarg\\n\")\n\n\nclass UnlessDirective(OutputTest):\n\n def test1(self):\n \"\"\"#unless 1\"\"\"\n self.verify(\"#unless 1\\n 1234 \\n#end unless\",\n \"\")\n\n self.verify(\"#unless 1:\\n 1234 \\n#end unless\",\n \"\")\n\n self.verify(\"#unless 1: ##comment\\n 1234 \\n#end unless\",\n \"\")\n\n self.verify(\"#unless 1 ##comment\\n 1234 \\n#end unless\",\n \"\")\n\n def test2(self):\n \"\"\"#unless 0\"\"\"\n self.verify(\"#unless 0\\n 1234 \\n#end unless\",\n \" 1234 \\n\")\n\n def test3(self):\n \"\"\"#unless $none\"\"\"\n self.verify(\"#unless $none\\n 1234 \\n#end unless\",\n \" 1234 \\n\")\n\n def test4(self):\n \"\"\"#unless $numTwo\"\"\"\n self.verify(\"#unless $numTwo\\n 1234 \\n#end unless\",\n \"\")\n\n def test5(self):\n \"\"\"#unless $numTwo with WS\"\"\"\n self.verify(\" #unless $numTwo \\n 1234 \\n #end unless \",\n \"\")\n\n def test6(self):\n \"\"\"single-line #unless\"\"\"\n self.verify(\"#unless 1: 1234\", \"\")\n self.verify(\"#unless 0: 1234\", \"1234\")\n self.verify(\"#unless 0: 1234\\n\"*2, \"1234\\n\"*2) # noqa: E226,E501 missing whitespace around operator\n\n\nclass PSP(OutputTest):\n def searchList(self):\n 
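# returning None makes verify() fall back to the class-level\n # _searchList ('self.searchList() or self._searchList' above)\n 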
return None\n\n def test1(self):\n \"\"\"simple <%= [int] %>\"\"\"\n self.verify(\"<%= 1234 %>\", \"1234\")\n\n def test2(self):\n \"\"\"simple <%= [string] %>\"\"\"\n self.verify(\"<%= 'blarg' %>\", \"blarg\")\n\n def test3(self):\n \"\"\"simple <%= None %>\"\"\"\n self.verify(\"<%= None %>\", \"\")\n\n def test4(self):\n \"\"\"simple <%= [string] %> + $anInt\"\"\"\n self.verify(\"<%= 'blarg' %>$anInt\", \"blarg1\")\n\n def test5(self):\n \"\"\"simple <%= [EXPR] %> + $anInt\"\"\"\n self.verify(\"<%= ('blarg'*2).upper() %>$anInt\", \"BLARGBLARG1\")\n\n def test6(self):\n \"\"\"for loop in <%%>\"\"\"\n self.verify(\"<% for i in range(5):%>1<%end%>\", \"11111\")\n\n def test7(self):\n \"\"\"for loop in <%%> and using <%=i%>\"\"\"\n self.verify(\"<% for i in range(5):%><%=i%><%end%>\", \"01234\")\n\n def test8(self):\n \"\"\"for loop in <% $%> and using <%=i%>\"\"\"\n self.verify(\"\"\"<% for i in range(5):\n i=i*2$%><%=i%><%end%>\"\"\", \"02468\")\n\n def test9(self):\n \"\"\"for loop in <% $%> and using <%=i%> plus extra text\"\"\"\n self.verify(\"\"\"<% for i in range(5):\n i=i*2$%><%=i%>-<%end%>\"\"\", \"0-2-4-6-8-\")\n\n def test10(self):\n \"\"\" Using getVar and write within a PSP \"\"\"\n self._searchList = [{'me': 1}]\n template = '''This is my template\n<%\nme = self.getVar('me')\nif isinstance(me, int):\n write('Bork')\nelse:\n write('Nork')\n%>'''\n self.verify(template, 'This is my template\\nBork')\n\n\nclass WhileDirective(OutputTest):\n def test1(self):\n \"\"\"simple #while with a counter\"\"\"\n self.verify(\n \"#set $i = 0\\n#while $i < 5\\n$i#slurp\\n#set $i += 1\\n#end while\",\n \"01234\")\n\n\nclass ContinueDirective(OutputTest):\n def test1(self):\n \"\"\"#continue with a #while\"\"\"\n self.verify(\"\"\"#set $i = 0\n#while $i < 5\n#if $i == 3\n #set $i += 1\n #continue\n#end if\n$i#slurp\n#set $i += 1\n#end while\"\"\",\n \"0124\")\n\n def test2(self):\n \"\"\"#continue with a #for\"\"\"\n self.verify(\"\"\"#for $i in range(5)\n#if $i == 3\n #continue\n#end if\n$i#slurp\n#end for\"\"\",\n \"0124\")\n\n\nclass BreakDirective(OutputTest):\n def test1(self):\n \"\"\"#break with a #while\"\"\"\n self.verify(\"\"\"\\\n#set $i = 0\n#while $i < 5\n#if $i == 3\n #break\n#end if\n$i#slurp\n#set $i += 1\n#end while\"\"\",\n \"012\")\n\n def test2(self):\n \"\"\"#break with a #for\"\"\"\n self.verify(\"\"\"\\\n#for $i in range(5)\n#if $i == 3\n #break\n#end if\n$i#slurp\n#end for\"\"\",\n \"012\")\n\n\nclass TryDirective(OutputTest):\n\n def test1(self):\n \"\"\"simple #try\n \"\"\"\n self.verify(\"#try\\n1234\\n#except\\nblarg\\n#end try\",\n \"1234\\n\")\n\n def test2(self):\n \"\"\"#try / #except with #raise\n \"\"\"\n self.verify(\"#try\\n#raise ValueError\\n#except\\nblarg\\n#end try\",\n \"blarg\\n\")\n\n def test3(self):\n \"\"\"#try / #except with #raise + WS\n\n Should gobble\n \"\"\"\n self.verify(\n \" #try \\n #raise ValueError \\n #except \\nblarg\\n #end try\",\n \"blarg\\n\")\n\n def test4(self):\n \"\"\"#try / #except with #raise + WS and leading text\n\n Shouldn't gobble\n \"\"\"\n self.verify(\n \"--#try \\n #raise ValueError \\n #except \\nblarg\\n #end try#--\",\n \"--\\nblarg\\n --\")\n\n def test5(self):\n \"\"\"nested #try / #except with #raise\n \"\"\"\n self.verify(\"\"\"\\\n#try\n #raise ValueError\n#except\n #try\n #raise ValueError\n #except\nblarg\n #end try\n#end try\"\"\", # noqa\n \"blarg\\n\")\n\n\nclass PassDirective(OutputTest):\n def test1(self):\n \"\"\"#pass in a #try / #except block\n \"\"\"\n self.verify(\"#try\\n#raise 
ValueError\\n#except\\n#pass\\n#end try\",\n \"\")\n\n def test2(self):\n \"\"\"#pass in a #try / #except block + WS\n \"\"\"\n self.verify(\n \" #try \\n #raise ValueError \\n \"\n \"#except \\n #pass \\n #end try\",\n \"\")\n\n\nclass AssertDirective(OutputTest):\n def test1(self):\n \"\"\"simple #assert\n \"\"\"\n self.verify(\"#set $x = 1234\\n#assert $x == 1234\",\n \"\")\n\n def test2(self):\n \"\"\"simple #assert that fails\n \"\"\"\n def test(self=self):\n self.verify(\"#set $x = 1234\\n#assert $x == 999\",\n \"\"),\n self.assertRaises(AssertionError, test)\n\n def test3(self):\n \"\"\"simple #assert with WS\n \"\"\"\n self.verify(\"#set $x = 1234\\n #assert $x == 1234 \",\n \"\")\n\n\nclass RaiseDirective(OutputTest):\n def test1(self):\n \"\"\"simple #raise ValueError\n\n Should raise ValueError\n \"\"\"\n def test(self=self):\n self.verify(\"#raise ValueError\",\n \"\"),\n self.assertRaises(ValueError, test)\n\n def test2(self):\n \"\"\"#raise ValueError in #if block\n\n Should raise ValueError\n \"\"\"\n def test(self=self):\n self.verify(\"#if 1\\n#raise ValueError\\n#end if\\n\",\n \"\")\n self.assertRaises(ValueError, test)\n\n def test3(self):\n \"\"\"#raise ValueError in #if block\n\n Shouldn't raise ValueError\n \"\"\"\n self.verify(\"#if 0\\n#raise ValueError\\n#else\\nblarg#end if\\n\",\n \"blarg\\n\")\n\n\nclass ImportDirective(OutputTest):\n def test1(self):\n \"\"\"#import math\n \"\"\"\n self.verify(\"#import math\",\n \"\")\n\n def test2(self):\n \"\"\"#import math + WS\n\n Should gobble\n \"\"\"\n self.verify(\" #import math \",\n \"\")\n\n def test3(self):\n \"\"\"#import math + WS + leading text\n\n Shouldn't gobble\n \"\"\"\n self.verify(\" -- #import math \",\n \" -- \")\n\n def test4(self):\n \"\"\"#from math import syn\n \"\"\"\n self.verify(\"#from math import cos\",\n \"\")\n\n def test5(self):\n \"\"\"#from math import cos + WS\n Should gobble\n \"\"\"\n self.verify(\" #from math import cos \",\n \"\")\n\n def test6(self):\n \"\"\"#from math import cos + WS + leading text\n Shouldn't gobble\n \"\"\"\n self.verify(\" -- #from math import cos \",\n \" -- \")\n\n def test7(self):\n \"\"\"#from math import cos -- use it\n \"\"\"\n self.verify(\"#from math import cos\\n$cos(0)\",\n \"1.0\")\n\n def test8(self):\n \"\"\"#from math import cos,tan,sin -- and use them\n \"\"\"\n self.verify(\"#from math import cos, tan, sin\\n$cos(0)-$tan(0)-$sin(0)\",\n \"1.0-0.0-0.0\")\n\n def test9(self):\n \"\"\"#import os.path -- use it\n \"\"\"\n\n self.verify(\"#import os.path\\n$os.path.exists('.')\",\n repr(True))\n\n def test10(self):\n \"\"\"#import os.path -- use it with NameMapper turned off\n \"\"\"\n self.verify(\"\"\"##\n#compiler-settings\nuseNameMapper=False\n#end compiler-settings\n#import os.path\n$os.path.exists('.')\"\"\",\n repr(True))\n\n def test11(self):\n \"\"\"#from math import *\n \"\"\"\n\n self.verify(\"#from math import *\\n$pow(1,2) $log10(10)\",\n \"1.0 1.0\")\n\n\nclass CompilerDirective(OutputTest):\n def test1(self):\n \"\"\"overriding the commentStartToken\n \"\"\"\n self.verify(\"\"\"$anInt##comment\n#compiler commentStartToken = '//'\n$anInt//comment\n\"\"\",\n \"1\\n1\\n\")\n\n def test2(self):\n \"\"\"overriding and resetting the commentStartToken\n \"\"\"\n self.verify(\"\"\"$anInt##comment\n#compiler commentStartToken = '//'\n$anInt//comment\n#compiler reset\n$anInt//comment\n\"\"\",\n \"1\\n1\\n1//comment\\n\")\n\n\nclass CompilerSettingsDirective(OutputTest):\n\n def test1(self):\n \"\"\"overriding the cheetahVarStartToken\n \"\"\"\n 
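# between the two settings blocks, @ is the placeholder prefix; reset restores $\n        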
self.verify(\"\"\"$anInt\n#compiler-settings\ncheetahVarStartToken = @\n#end compiler-settings\n@anInt\n#compiler-settings reset\n$anInt\n\"\"\",\n \"1\\n1\\n1\\n\")\n\n def test2(self):\n \"\"\"overriding the directiveStartToken\n \"\"\"\n self.verify(\"\"\"#set $x = 1234\n$x\n#compiler-settings\ndirectiveStartToken = @\n#end compiler-settings\n@set $x = 1234\n$x\n\"\"\",\n \"1234\\n1234\\n\")\n\n def test3(self):\n \"\"\"overriding the commentStartToken\n \"\"\"\n self.verify(\"\"\"$anInt##comment\n#compiler-settings\ncommentStartToken = //\n#end compiler-settings\n$anInt//comment\n\"\"\",\n \"1\\n1\\n\")\n\n\nif sys.platform.startswith('java'):\n del CompilerDirective\n del CompilerSettingsDirective\n\n\nclass ExtendsDirective(OutputTest):\n\n def test1(self):\n \"\"\"#extends Cheetah.Templates._SkeletonPage\"\"\"\n self.verify(\"\"\"\\\n#from Cheetah.Templates._SkeletonPage import _SkeletonPage\n#extends _SkeletonPage\n#implements respond\n$spacer()\n\"\"\",\n '\"\"\\n')\n\n self.verify(\"\"\"\\\n#from Cheetah.Templates._SkeletonPage import _SkeletonPage\n#extends _SkeletonPage\n#implements respond(foo=1234)\n$spacer()$foo\n\"\"\",\n '\"\"'\n '1234\\n')\n\n def test2(self):\n \"\"\"#extends Cheetah.Templates.SkeletonPage without #import\"\"\"\n self.verify(\"\"\"#extends Cheetah.Templates.SkeletonPage\n#implements respond\n$spacer()\n\"\"\",\n '\"\"\\n')\n\n def test3(self):\n \"\"\"\n #extends Cheetah.Templates.SkeletonPage.SkeletonPage without #import\n \"\"\"\n self.verify(\"\"\"#extends Cheetah.Templates.SkeletonPage.SkeletonPage\n#implements respond\n$spacer()\n\"\"\",\n '\"\"\\n')\n\n def test4(self):\n \"\"\"#extends with globals and searchList test\"\"\"\n self.verify(\"\"\"#extends Cheetah.Templates.SkeletonPage\n#set global g=\"Hello\"\n#implements respond\n$g $numOne\n\"\"\",\n 'Hello 1\\n')\n\n\nclass SuperDirective(OutputTest):\n def test1(self):\n tmpl1 = Template.compile('''$foo $bar(99)\n #def foo: this is base foo\n #def bar(arg): super-$arg''')\n\n tmpl2 = tmpl1.subclass('''\n #implements dummy\n #def foo\n #super\n This is child foo\n #super(trans=trans)\n $bar(1234)\n #end def\n #def bar(arg): #super($arg)\n ''')\n expected = ('this is base foo '\n 'This is child foo\\nthis is base foo '\n 'super-1234\\n super-99')\n assert str(tmpl2()).strip() == expected\n\n\nclass ImportantExampleCases(OutputTest):\n def test1(self):\n \"\"\"how to make a comma-delimited list\"\"\"\n self.verify(\"\"\"#set $sep = ''\n#for $letter in $letterList\n$sep$letter#slurp\n#set $sep = ', '\n#end for\n\"\"\",\n \"a, b, c\")\n\n\nclass FilterDirective(OutputTest):\n convertEOLs = False\n\n def _getCompilerSettings(self):\n return {'useFilterArgsInPlaceholders': True}\n\n def test1(self):\n \"\"\"#filter Filter\n \"\"\"\n self.verify(\"#filter Filter\\n$none#end filter\",\n \"\")\n\n self.verify(\"#filter Filter: $none\",\n \"\")\n\n def test2(self):\n \"\"\"#filter ReplaceNone with WS\n \"\"\"\n self.verify(\"#filter Filter \\n$none#end filter\",\n \"\")\n\n def test3(self):\n \"\"\"#filter MaxLen -- maxlen of 5\"\"\"\n\n self.verify(\"#filter MaxLen \\n${tenDigits, $maxlen=5}#end filter\",\n \"12345\")\n\n def test4(self):\n \"\"\"#filter MaxLen -- no maxlen\n \"\"\"\n self.verify(\"#filter MaxLen \\n${tenDigits}#end filter\",\n \"1234567890\")\n\n def test5(self):\n \"\"\"#filter WebSafe -- basic usage\n \"\"\"\n self.verify(\"#filter WebSafe \\n$webSafeTest#end filter\",\n \"abc <=> &\")\n\n def test6(self):\n \"\"\"#filter WebSafe -- also space\n \"\"\"\n self.verify(\"#filter WebSafe 
\\n${webSafeTest, $also=' '}#end filter\",\n \"abc <=> &\")\n\n def test7(self):\n \"\"\"#filter WebSafe -- also space, without $ on the args\n \"\"\"\n self.verify(\"#filter WebSafe \\n${webSafeTest, also=' '}#end filter\",\n \"abc <=> &\")\n\n def test8(self):\n \"\"\"#filter Strip -- trailing newline\n \"\"\"\n self.verify(\"#filter Strip\\n$strip1#end filter\",\n \"strippable whitespace\\n\")\n\n def test9(self):\n \"\"\"#filter Strip -- no trailing newine\n \"\"\"\n self.verify(\"#filter Strip\\n$strip2#end filter\",\n \"strippable whitespace\")\n\n def test10(self):\n \"\"\"#filter Strip -- multi-line\n \"\"\"\n self.verify(\"#filter Strip\\n$strip3#end filter\",\n \"strippable whitespace\\n1 2 3\\n\")\n\n def test11(self):\n \"\"\"#filter StripSqueeze -- canonicalize all whitespace to ' '\n \"\"\"\n self.verify(\"#filter StripSqueeze\\n$strip3#end filter\",\n \"strippable whitespace 1 2 3\")\n\n\nclass EchoDirective(OutputTest):\n def test1(self):\n \"\"\"#echo 1234\n \"\"\"\n self.verify(\"#echo 1234\",\n \"1234\")\n\n\nclass ErrorCatcherDirective(OutputTest):\n pass\n\n\nclass VarExists(OutputTest): # Template.varExists()\n\n def test1(self):\n \"\"\"$varExists('$anInt')\n \"\"\"\n self.verify(\"$varExists('$anInt')\",\n repr(True))\n\n def test2(self):\n \"\"\"$varExists('anInt')\n \"\"\"\n self.verify(\"$varExists('anInt')\",\n repr(True))\n\n def test3(self):\n \"\"\"$varExists('$anInt')\n \"\"\"\n self.verify(\"$varExists('$bogus')\",\n repr(False))\n\n def test4(self):\n \"\"\"$varExists('$anInt') combined with #if false\n \"\"\"\n self.verify(\"#if $varExists('$bogus')\\n1234\\n#else\\n999\\n#end if\",\n \"999\\n\")\n\n def test5(self):\n \"\"\"$varExists('$anInt') combined with #if true\n \"\"\"\n self.verify(\"#if $varExists('$anInt')\\n1234\\n#else\\n999#end if\",\n \"1234\\n\")\n\n\nclass GetVar(OutputTest): # Template.getVar()\n def test1(self):\n \"\"\"$getVar('$anInt')\n \"\"\"\n self.verify(\"$getVar('$anInt')\",\n \"1\")\n\n def test2(self):\n \"\"\"$getVar('anInt')\n \"\"\"\n self.verify(\"$getVar('anInt')\",\n \"1\")\n\n def test3(self):\n \"\"\"$self.getVar('anInt')\n \"\"\"\n self.verify(\"$self.getVar('anInt')\",\n \"1\")\n\n def test4(self):\n \"\"\"$getVar('bogus', 1234)\n \"\"\"\n self.verify(\"$getVar('bogus', 1234)\",\n \"1234\")\n\n def test5(self):\n \"\"\"$getVar('$bogus', 1234)\n \"\"\"\n self.verify(\"$getVar('$bogus', 1234)\",\n \"1234\")\n\n def test6(self):\n \"\"\"$getVar('$') raises ValueError or NotFound(LookupError)\n \"\"\"\n with self.assertRaises((ValueError, LookupError)):\n self.verify(\"$getVar('$')\", \"never get here\")\n\n\nclass MiscComplexSyntax(OutputTest):\n def test1(self):\n \"\"\"Complex use of {},[] and () in a #set expression\n ----\n #set $c = {'A':0}[{}.get('a', {'a' : 'A'}['a'])]\n $c\n \"\"\"\n self.verify(\"#set $c = {'A':0}[{}.get('a', {'a' : 'A'}['a'])]\\n$c\",\n \"0\")\n\n\nclass CGI(OutputTest):\n \"\"\"CGI scripts with(out) the CGI environment and with(out) GET variables.\n \"\"\"\n convertEOLs = False\n\n def _beginCGI(self):\n os.environ['REQUEST_METHOD'] = \"GET\"\n\n def _endCGI(self):\n try:\n del os.environ['REQUEST_METHOD']\n except KeyError:\n pass\n _guaranteeNoCGI = _endCGI\n\n def test1(self):\n \"\"\"A regular template.\"\"\"\n self._guaranteeNoCGI()\n source = \"#extends Cheetah.Tools.CGITemplate\\n\" + \\\n \"#implements respond\\n\" + \\\n \"$cgiHeaders#slurp\\n\" + \\\n \"Hello, world!\"\n self.verify(source, \"Hello, world!\")\n\n def test2(self):\n \"\"\"A CGI script.\"\"\"\n self._beginCGI()\n 
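# with REQUEST_METHOD set, $cgiHeaders expands to the Content-type header\n        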
source = \"#extends Cheetah.Tools.CGITemplate\\n\" + \\\n \"#implements respond\\n\" + \\\n \"$cgiHeaders#slurp\\n\" + \\\n \"Hello, world!\"\n self.verify(source, \"Content-type: text/html\\n\\nHello, world!\")\n self._endCGI()\n\n def test3(self):\n \"\"\"A (pseudo) Webware servlet.\n\n This uses the Python syntax escape to set\n self._CHEETAH__isControlledByWebKit.\n We could instead do\n '#silent self._CHEETAH__isControlledByWebKit = True',\n taking advantage of the fact that it will compile unchanged as long\n as there's no '$' in the statement. (It won't compile with an '$'\n because that would convert to a function call, and you can't assign\n to a function call.) Because this isn't really being called from\n Webware, we'd better not use any Webware services! Likewise, we'd\n better not call $cgiImport() because it would be misled.\n \"\"\"\n self._beginCGI()\n source = \"#extends Cheetah.Tools.CGITemplate\\n\" + \\\n \"#implements respond\\n\" + \\\n \"<% self._CHEETAH__isControlledByWebKit = True %>#slurp\\n\" + \\\n \"$cgiHeaders#slurp\\n\" + \\\n \"Hello, world!\"\n self.verify(source, \"Hello, world!\")\n self._endCGI()\n\n def test4(self):\n \"\"\"A CGI script with a GET variable.\"\"\"\n self._beginCGI()\n os.environ['QUERY_STRING'] = \"cgiWhat=world\"\n source = \"#extends Cheetah.Tools.CGITemplate\\n\" + \\\n \"#implements respond\\n\" + \\\n \"$cgiHeaders#slurp\\n\" + \\\n \"#silent $webInput(['cgiWhat'])##slurp\\n\" + \\\n \"Hello, $cgiWhat!\"\n self.verify(source,\n \"Content-type: text/html\\n\\nHello, world!\")\n del os.environ['QUERY_STRING']\n self._endCGI()\n\n\nclass WhitespaceAfterDirectiveTokens(OutputTest):\n def _getCompilerSettings(self):\n return {'allowWhitespaceAfterDirectiveStartToken': True}\n\n def test1(self):\n self.verify(\"# for i in range(10): $i\",\n \"0123456789\")\n self.verify(\"# for i in range(10)\\n$i# end for\",\n \"0123456789\")\n self.verify(\"# for i in range(10)#$i#end for\",\n \"0123456789\")\n\n\nclass DefmacroDirective(OutputTest):\n def _getCompilerSettings(self):\n def aMacro(src):\n return '$aStr'\n\n return {'macroDirectives': {'aMacro': aMacro}}\n\n def test1(self):\n self.verify(\"\"\"\\\n#defmacro inc: #set @src +=1\n#set i = 1\n#inc: $i\n$i\"\"\",\n \"2\")\n\n self.verify(\"\"\"\\\n#defmacro test\n#for i in range(10): @src\n#end defmacro\n#test: $i-foo#slurp\n#for i in range(3): $i\"\"\",\n \"0-foo1-foo2-foo3-foo4-foo5-foo6-foo7-foo8-foo9-foo012\")\n\n self.verify(\"\"\"\\\n#defmacro test\n#for i in range(10): @src\n#end defmacro\n#test: $i-foo\n#for i in range(3): $i\"\"\",\n \"0-foo\\n1-foo\\n2-foo\\n3-foo\\n4-foo\\n5-foo\\n\"\n \"6-foo\\n7-foo\\n8-foo\\n9-foo\\n012\")\n\n self.verify(\"\"\"\\\n#defmacro test: #for i in range(10): @src\n#test: $i-foo#slurp\n-#for i in range(3): $i\"\"\",\n \"0-foo1-foo2-foo3-foo4-foo5-foo6-foo7-foo8-foo9-foo-012\")\n\n self.verify(\"\"\"\\\n#defmacro test##for i in range(10): @src#end defmacro##slurp\n#test: $i-foo#slurp\n-#for i in range(3): $i\"\"\",\n \"0-foo1-foo2-foo3-foo4-foo5-foo6-foo7-foo8-foo9-foo-012\")\n\n self.verify(\"\"\"\\\n#defmacro testFoo: nothing\n#defmacro test(foo=1234): #for i in range(10): @src\n#test foo=234: $i-foo#slurp\n-#for i in range(3): $i\"\"\",\n \"0-foo1-foo2-foo3-foo4-foo5-foo6-foo7-foo8-foo9-foo-012\")\n\n self.verify(\"\"\"\\\n#defmacro testFoo: nothing\n#defmacro test(foo=1234): #for i in range(10): @src@foo\n#test foo='-foo'#$i#end test#-#for i in range(3): $i\"\"\",\n \"0-foo1-foo2-foo3-foo4-foo5-foo6-foo7-foo8-foo9-foo-012\")\n\n 
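# @src is the captured source string, so macros may call str methods such as @src.strip()\n        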
self.verify(\"\"\"\\\n#defmacro testFoo: nothing\n#defmacro test(foo=1234): #for i in range(10): @src.strip()@foo\n#test foo='-foo': $i\n-#for i in range(3): $i\"\"\",\n \"0-foo1-foo2-foo3-foo4-foo5-foo6-foo7-foo8-foo9-foo-012\")\n\n def test2(self):\n self.verify(\"#aMacro: foo\",\n \"blarg\")\n self.verify(\"#defmacro nested: @macros.aMacro(@src)\\n#nested: foo\",\n \"blarg\")\n\n\nclass Indenter(OutputTest):\n convertEOLs = False\n\n source = \"\"\"\npublic class X\n{\n #for $method in $methods\n $getMethod($method)\n\n #end for\n}\n//end of class\n\n#def getMethod($method)\n #indent ++\n public $getType($method) ${method.Name}($getParams($method.Params));\n #indent --\n#end def\n\n#def getParams($params)\n #indent off\n\n #for $counter in $range($len($params))\n #if $counter == len($params) - 1\n $params[$counter]#slurp\n #else:\n $params[$counter],\n #end if\n #end for\n #indent on\n#end def\n\n#def getType($method)\n #indent push\n #indent=0\n #if $method.Type == \"VT_VOID\"\n void#slurp\n #elif $method.Type == \"VT_INT\"\n int#slurp\n #elif $method.Type == \"VT_VARIANT\"\n Object#slurp\n #end if\n #indent pop\n#end def\n\"\"\"\n\n control = \"\"\"\npublic class X\n{\n public void Foo(\n _input,\n _output);\n\n\n public int Bar(\n _str1,\n str2,\n _str3);\n\n\n public Object Add(\n value1,\n value);\n\n\n}\n//end of class\n\n\n\n\"\"\"\n\n def _getCompilerSettings(self):\n return {'useFilterArgsInPlaceholders': True}\n\n def searchList(self): # Inside Indenter class.\n class Method:\n def __init__(self, _name, _type, *_params):\n self.Name = _name\n self.Type = _type\n self.Params = _params\n methods = [Method(\"Foo\", \"VT_VOID\", \"_input\", \"_output\"),\n Method(\"Bar\", \"VT_INT\", \"_str1\", \"str2\", \"_str3\"),\n Method(\"Add\", \"VT_VARIANT\", \"value1\", \"value\")]\n return [{\"methods\": methods}]\n\n def test1(self): # Inside Indenter class.\n self.verify(self.source, self.control)\n\n\n##################################################\n# CREATE CONVERTED EOL VERSIONS OF THE TEST CASES\n\nif OutputTest._useNewStyleCompilation:\n extraCompileKwArgsForDiffBaseclass = {'baseclass': dict}\nelse:\n extraCompileKwArgsForDiffBaseclass = {'baseclass': object}\n\n\ndef install_eols():\n klasses = [v for v in globals().values()\n if isinstance(v, type) and issubclass(v, unittest.TestCase)]\n for klass in klasses:\n name = klass.__name__\n if hasattr(klass, 'convertEOLs') and klass.convertEOLs:\n win32Src = r\"class %(name)s_Win32EOL(%(name)s): \" \\\n r\"_EOLreplacement = '\\r\\n'\" % locals()\n macSrc = r\"class %(name)s_MacEOL(%(name)s): \" \\\n r\"_EOLreplacement = '\\r'\" % locals()\n exec(win32Src, globals())\n exec(macSrc, globals())\n\n src = r\"class %(name)s_DiffBaseClass(%(name)s): \" % locals()\n src += \" _extraCompileKwArgs = extraCompileKwArgsForDiffBaseclass\"\n exec(src, globals())\n\n del name\n del klass\n\n##################################################\n# if run from the command line ##\n\n\nif __name__ == '__main__':\n install_eols()\n unittest.main()\n","sub_path":"Cheetah/Tests/SyntaxAndOutput.py","file_name":"SyntaxAndOutput.py","file_ext":"py","file_size_in_byte":94072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"609436362","text":"import datetime as dt\nimport pandas as pd\nfrom sqlalchemy import and_\nfrom sqlalchemy.orm import sessionmaker\nfrom utils.database import io, config as cfg\nfrom utils.algorithm import etl\nfrom utils.database.models.base_public import FundTypeMappingSource, 
FundTypeMapping\n\n_engine_wt = cfg.load_engine()[\"2Gbp\"]\n_db_session = sessionmaker(bind=_engine_wt)\n_session = _db_session()\nUPDATE_TIME = etl.update_time[\"all\"]\n\n_entities_map = [\n (FundTypeMappingSource.fund_id, FundTypeMapping.fund_id),\n (FundTypeMappingSource.fund_name, FundTypeMapping.fund_name),\n (FundTypeMappingSource.data_source, FundTypeMapping.data_source),\n (FundTypeMappingSource.typestandard_code, FundTypeMapping.typestandard_code),\n (FundTypeMappingSource.typestandard_name, FundTypeMapping.typestandard_name),\n (FundTypeMappingSource.type_code, FundTypeMapping.type_code),\n (FundTypeMappingSource.type_name, FundTypeMapping.type_name),\n (FundTypeMappingSource.stype_code, FundTypeMapping.stype_code),\n (FundTypeMappingSource.stype_name, FundTypeMapping.stype_name),\n]\n\n\n_input_entities = [x[0] for x in _entities_map]\n_map_entities = [x[1] for x in _entities_map]\n_derivative_entities = []\n_output_entities = [*_map_entities, *_derivative_entities]\n\n\ndef fetch_multisource_fund_type(update_time):\n \"\"\"\n Fetch records of DOrgAssetScale table where record update time >= `update_time`\n Args:\n update_time: record update time\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n query_oh = _session.query(FundTypeMappingSource).filter(\n FundTypeMappingSource.update_time >= update_time\n ).with_entities(\n *_input_entities\n )\n df = pd.DataFrame(query_oh.all())\n df.columns = [x.name for x in _map_entities]\n df.index = df[[FundTypeMappingSource.fund_id.name, FundTypeMappingSource.typestandard_code.name]]\n return df\n\n\ndef transform():\n \"\"\"\n\n Args:\n style:\n\n Returns:\n\n \"\"\"\n # General process\n df = fetch_multisource_fund_type(UPDATE_TIME)\n\n # Process of different sources\n df_000001 = df.ix[df[FundTypeMappingSource.data_source.name] == \"000001\"]\n df_020001 = df.ix[df[FundTypeMappingSource.data_source.name] == \"020001\"]\n df_020002 = df.ix[df[FundTypeMappingSource.data_source.name] == \"020002\"]\n df_020003 = df.ix[df[FundTypeMappingSource.data_source.name] == \"020003\"]\n\n result = df_000001.join(\n df_020001, how=\"outer\", rsuffix=\"_020001\"\n ).join(\n df_020002, how=\"outer\", rsuffix=\"_020002\"\n ).join(\n df_020003, how=\"outer\", rsuffix=\"_020003\"\n )[df_020001.columns].fillna(df_020002).fillna(df_020003)\n return result\n\n\ndef main():\n io.to_sql(FundTypeMapping.__tablename__, _engine_wt, transform())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"SCRIPT/MUTUAL/etl/fund_type_mapping.py","file_name":"fund_type_mapping.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"128916848","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver \nimport time\nimport pandas as pd\nimport numpy as np\n\n\nsource = []\ndf = pd.DataFrame([])\nwith open('rumah.txt','r') as f:\n\tlines = f.readlines()\nfor i in lines:\n\tsource.append(i.rstrip())\nsource = set(source)\nsource = list(source)\nfeature_dict={}\nj=0\nfor i in source:\n\ttry:\n\t\turl = i\n\t\tdriver = webdriver.Safari()\n\t\tdriver.get(url)\n\t\ttime.sleep(2)\n\t\tsoup = BeautifulSoup(driver.page_source,'lxml')\n\t\t#Feature\n\t\t#1\n\t\ttry:\n\t\t\tfeature_dict['price'] = driver.find_element_by_class_name('_2xKfz').text.split(' ')[1]\n\t\texcept:\n\t\t\tfeature_dict['price'] = np.nan\n\t\t#2\n\t\ttry:\n\t\t\tfeature_dict['lokasi'] = driver.find_element_by_class_name('_2FRXm').text.split(',')[0]\n\t\texcept:\n\t\t\tfeature_dict['lokasi'] = 
np.nan\n\t\t#3\n\t\ttry:\n\t\t\tfeature_dict['lt'] = soup.find('span',{'class':'_2vNpt','data-aut-id':'value_p_sqr_land'}).string\n\t\texcept:\n\t\t\tfeature_dict['lt'] = np.nan\n\t\t#4\n\t\ttry:\n\t\t\tfeature_dict['lb'] = soup.find('span',{'class':'_2vNpt','data-aut-id':'value_p_sqr_building'}).string\n\t\texcept:\n\t\t\tfeature_dict['lb'] = np.nan\n\t\t#5\n\t\ttry:\n\t\t\tfeature_dict['lantai'] = soup.find('span',{'class':'_2vNpt','data-aut-id':'value_p_floor'}).string\n\t\texcept:\n\t\t\tfeature_dict['lantai'] = np.nan\n\t\t#6\n\t\ttry:\n\t\t\tfeature_dict['tipe'] = soup.find('span',{'class':'_2vNpt','data-aut-id':'value_type'}).string\n\t\texcept:\n\t\t\tfeature_dict['tipe'] = np.nan\n\t\t#7\n\t\ttry:\n\t\t\tfeature_dict['kamar'] = soup.find('span',{'class':'_2vNpt','data-aut-id':'value_p_bedroom'}).string\n\t\texcept:\n\t\t\tfeature_dict['kamar'] = np.nan\n\t\t#8\n\t\ttry:\n\t\t\tfeature_dict['km'] = soup.find('span',{'class':'_2vNpt','data-aut-id':'value_p_bathroom'}).string\n\t\texcept:\n\t\t\tfeature_dict['km'] = np.nan\n\t\t#9\n\t\ttry:\n\t\t\tfeature_dict[\"sertifikat\"] = soup.find('span',{'class':'_2vNpt','data-aut-id':'value_p_certificate'}).string\n\t\texcept:\n\t\t\tfeature_dict['sertifikat'] = np.nan\n\t\t#10\n\t\ttry:\n\t\t\tfasilitas = soup.find('div',{'class':'_3FF8P'})\n\t\t\tfeature_dict['fasilitas_total'] = [i.string for i in fasilitas.find_all('span')]\n\t\texcept:\n\t\t\tfeature_dict['fasilitas_total'] = np.nan\n\t\t#11\n\t\ttry:\n\t\t\tdesc = soup.find('div',{'data-aut-id':'itemDescriptionContent'})\n\t\t\tfeature_dict['desc'] = [i.string for i in desc.find_all('p')]\n\t\texcept:\n\t\t\tfeature_dict['desc'] = np.nan\n\t\tdf = df.append(feature_dict,ignore_index=True)\n\texcept:\n\t\tpass\n\tdriver.close()\n\tj+=1\n\tif j%20==0:\n\t\tprint(f'{j} rows have been collected')\nprint('complete')\ndf.to_csv('jakarta_houseprice.csv')","sub_path":"data/olx/olx_data_collector.py","file_name":"olx_data_collector.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"324950168","text":"from django.db import models\nfrom django.utils import timezone\n\n\nclass PartRequest(models.Model):\n\trequest_owner = models.ForeignKey('auth.User', null=True)\n\trequest_description = models.TextField(max_length=200)\n\trequest_date = models.DateTimeField(default=timezone.now)\n\trequest_expire = models.DateTimeField(blank=True, null=True)\n\tpart_pic = models.ImageField(upload_to = 'photos/%Y/%m/%d', null=True)\n\tdef __str__(self):\n\t\treturn self.request_description\n\nclass PartOffer(models.Model):\n\toffer_owner = models.ForeignKey('auth.User', null=True)\n\tpart_request = models.ForeignKey(PartRequest, on_delete=models.CASCADE)\n\tpart_name = models.TextField(max_length=200)\n\tpart_cost = models.IntegerField(null=True)\n\tpart_pic = models.ImageField(upload_to = 'photos/%Y/%m/%d', null=True)\n\toffer_date = models.DateTimeField(default=timezone.now)\n\tdelivery_time = models.IntegerField(null=True)\n\tdelivery_cost = models.IntegerField(null=True)\n\tdef __str__(self):\n\t\treturn self.part_name","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"115636105","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('chemical_inventory', '0008_chemical_stripped_formula'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='SupportingDocument',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('name', models.CharField(max_length=50)),\n ('file', models.FileField(upload_to='')),\n ('comment', models.TextField(blank=True)),\n ('date_added', models.DateTimeField(auto_now=True)),\n ('container', models.ForeignKey(to='chemical_inventory.Container')),\n ('owner', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, blank=True)),\n ],\n ),\n migrations.RemoveField(\n model_name='safetydatasheet',\n name='chemical',\n ),\n migrations.RemoveField(\n model_name='safetydatasheet',\n name='supplier',\n ),\n migrations.DeleteModel(\n name='SafetyDataSheet',\n ),\n ]\n","sub_path":"chemical_inventory/migrations/0009_add_supporting_document.py","file_name":"0009_add_supporting_document.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"157879580","text":"class TortaRedondas:\n def __init__(self,saborIngresado):\n #Definiendo atributos\n self.forma = 'Redonda'\n self.sabor = saborIngresado\n #Accion al crear objeto\n print ('Soy una torta nueva')\n def mostrarAtributos (self):\n print (f'Soy de forma {self.forma} y de sabor {self.sabor}')\n\n#Creo torta\ntortaChocolate = TortaRedondas('Chocolate')\ntortaVainilla = TortaRedondas('Vainilla')\n#Se muestran los atributos\nprint (tortaChocolate.sabor)\nprint (tortaVainilla.sabor)\nprint (tortaChocolate.forma)\nprint (tortaVainilla.forma)\n\n#\ntortaChocolate.mostrarAtributos()\ntortaVainilla.mostrarAtributos()","sub_path":"Clases/clasesyobjetos/primeraparte.py","file_name":"primeraparte.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"51668365","text":"\"\"\"Everything related to building logic for the spores goes here\"\"\"\nfrom sc2.constants import SPORECRAWLER\n\n\nclass BuildSpores:\n \"\"\"Ok for now\"\"\"\n def __init__(self, ai):\n self.ai = ai\n self.selected_base = None\n self.enemy_flying_dmg_units = False\n\n async def should_handle(self, iteration):\n \"\"\"Requirements to run handle\"\"\"\n if not self.ai.pools.ready:\n return False\n\n if self.ai.known_enemy_units.flying:\n if [au for au in self.ai.known_enemy_units.flying if au.can_attack_ground]:\n self.enemy_flying_dmg_units = True\n\n base = self.ai.townhalls\n spores = self.ai.spores\n\n if (not len(spores) < len(base.ready)) or self.ai.close_enemies_to_base:\n return False\n\n self.selected_base = base.ready.random\n return (\n (self.enemy_flying_dmg_units or self.ai.time >= 420)\n and not self.ai.already_pending(SPORECRAWLER)\n and not spores.closer_than(15, self.selected_base.position)\n and self.ai.can_afford(SPORECRAWLER)\n )\n\n async def handle(self, iteration):\n \"\"\"Build the spore right on the middle of the base\"\"\"\n for base in self.ai.townhalls:\n spore_position = (\n (self.ai.state.mineral_field | self.ai.state.vespene_geyser)\n .closer_than(10, base)\n .center.towards(base, 1)\n )\n if not self.ai.spores.closer_than(15, spore_position):\n await self.ai.build(SPORECRAWLER, spore_position)\n return 
True\n","sub_path":"actions/build/spores.py","file_name":"spores.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"565923176","text":"# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\n\nfrom tracker.centroidtracker import CentroidTracker\nfrom tracker.trackableobject import TrackableObject\nimport cv2\n\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'model_data/yolo.h5',\n \"anchors_path\": 'model_data/yolo_anchors.txt',\n \"classes_path\": 'model_data/coco_classes.txt',\n \"score\" : 0.3,\n \"iou\" : 0.45,\n \"model_image_size\" : (416, 416),\n \"gpu_num\" : 1,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to 
default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape,\n score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = timer()\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n\n # print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n out_boxes2 = []\n out_scores2 = []\n out_classes2 = []\n for i, c in enumerate(out_classes):\n if c == 2:\n out_boxes2.append(out_boxes[i])\n out_scores2.append(out_scores[i])\n out_classes2.append(out_classes[i])\n\n # font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n # size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n if len(out_classes2) != 0:\n for i, c in reversed(list(enumerate(out_classes2))):\n predicted_class = self.class_names[c]\n box = out_boxes2[i]\n score = out_scores2[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n # label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n out_boxes2[i] = [top, left, bottom, right]\n \n # print(label, (left, top), (right, bottom))\n\n # My kingdom for a good redistributable image drawing library.\n # for i in range(thickness):\n # draw.rectangle(\n # [left + i, top + i, right - i, bottom - i],\n # outline=(127, 255, 0))\n\n end = timer()\n # print(end - start)\n print('--------------------')\n return image, out_boxes2, out_scores2\n\n def close_session(self):\n self.sess.close()\n\ndef count_line(width, height ,x):\n # parking00.mp4, parking01.movのライン\n y = int(((height - (height / 3.4)) / width) * x) + int(height / 3.4)\n # parking07.movのライン\n # y = int(((height / 5) / width) * x) + int(height / 2)\n # parking06.movのライン\n # y = int(-1 * ((height + (height / 3)) / width) * x) + int(height + (height / 3))\n return y\n\ndef get_color(image, objects):\n color_list = {}\n for (object_ID, centroid) in objects.items():\n img = image[int(centroid[1]) : int(centroid[1]) + 40, int(centroid[0]) -20 : int(centroid[0]) + 20]\n r = np.floor(img.T[2].flatten().mean()).astype('int32')\n g = np.floor(img.T[1].flatten().mean()).astype('int32')\n b = np.floor(img.T[0].flatten().mean()).astype('int32')\n # print((r, g, b))\n hsv = cv2.cvtColor(np.array([[[b, g, r]]], dtype=np.uint8), 
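# mean colour as a 1x1 BGR image, converted to HSV so the thresholds below can bucket it\n            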
cv2.COLOR_BGR2HSV)[0][0]\n\n if hsv[1] > 50:\n if hsv[2] > 60:\n if hsv[0] < 15 or hsv[0] >= 160:\n color_list[object_ID] = ('red', hsv)\n elif hsv[0] < 40:\n color_list[object_ID] = ('orange, yellow', hsv)\n elif hsv[0] < 80:\n color_list[object_ID] = ('green', hsv)\n elif hsv[0] < 130:\n color_list[object_ID] = ('blue', hsv)\n elif hsv[0] < 160:\n color_list[object_ID] = ('purple, pink', hsv)\n else:\n color_list[object_ID] = ('??', hsv)\n else:\n color_list[object_ID] = ('??', hsv)\n else:\n if hsv[2] > 180:\n color_list[object_ID] = ('white', hsv)\n elif hsv[2] > 120:\n color_list[object_ID] = ('gray', hsv)\n elif hsv[2] <= 120:\n color_list[object_ID] = ('black', hsv)\n else:\n color_list[object_ID] = ('??', hsv)\n return color_list \n\ndef track_objects(image, objects, count1, count2, trackableObjects, color_list):\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n\n for (objectID, centroid) in objects.items():\n to = trackableObjects.get(objectID, None)\n if to is None:\n to = TrackableObject(objectID, centroid)\n else:\n # x = [c[0] for c in to.centroids]\n # y = [c[1] for c in to.centroids]\n\n # direction_x = centroid[0] - np.mean(x)\n # direction_y = centroid[1] - np.mean(y)\n\n # to.centroids.append(centroid)\n if not to.counted:\n first_y = count_line(image.size[0], image.size[1], centroid[0])\n now_y = count_line(image.size[0], image.size[1], to.centroids[0][0])\n\n if centroid[1] < first_y:\n if to.centroids[0][1] > now_y:\n count1 += 1\n to.counted = True\n elif centroid[1] > first_y:\n if to.centroids[0][1] < now_y:\n count2 += 1\n to.counted = True\n\n trackableObjects[objectID] = to\n\n text = \"ID {} {}\".format(objectID, color_list[objectID][0])\n text2 = \" {}\".format(color_list[objectID][1])\n print(text + text2)\n draw = ImageDraw.Draw(image)\n draw.ellipse(\n [centroid[0] - 5, centroid[1] -5, centroid[0] + 5, centroid[1] + 5],\n fill=(127, 255, 0)\n )\n draw.text((centroid[0] -10, centroid[1] -25), text, fill=(127, 255, 0), font=font)\n del draw\n return image, count1, count2\n\ndef max_min_area(mask, boxes, scores, max_area, min_area):\n for (i, (top, left, bottom, right)) in enumerate(boxes):\n if scores[i] >= 0.20:\n top = top // 3\n left = left // 3\n bottom = bottom // 3\n right = right // 3\n mask1 = mask[top : bottom, left : right]\n\n max_lim = mask1.shape[1] * mask1.shape[0]\n min_lim = (mask.shape[1] * mask.shape[0]) * 0.005\n\n contours, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for _, cnt in enumerate(contours):\n area = cv2.contourArea(cnt)\n if max_area < area and max_lim > area:\n max_area = area\n if min_area > area and min_lim < area:\n min_area = area\n return max_area, min_area\n\ndef get_area(mask, boxes, scores):\n del_list = []\n flag = False\n for (i, box) in enumerate(boxes):\n if scores[i] >= 0.20:\n top = box[0] // 3\n left = box[1] // 3\n bottom = box[2] // 3\n right = box[3] // 3\n mask1 = mask[top : bottom, left : right]\n\n max_lim = mask1.shape[1] * mask1.shape[0]\n min_lim = (mask.shape[1] * mask.shape[0]) * 0.005\n\n contours, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) == 0:\n del_list.append(box)\n else:\n for _, cnt in enumerate(contours):\n area = cv2.contourArea(cnt)\n if area >= max_lim or area < min_lim:\n flag = True\n else:\n flag = False\n break\n if flag:\n del_list.append(box)\n else:\n del_list.append(box)\n for n in del_list:\n boxes = 
[box for box in boxes if box != n]\n return boxes\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n if video_path.isdigit():\n video_path = int(video_path)\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = cv2.VideoWriter_fourcc(*\"mp4v\")\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n # print(\"input video size: {}\".format(video_size))\n\n isOutput = True if output_path != \"\" else False\n if isOutput:\n # print(\"!!! TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n\n # fgbg = cv2.createBackgroundSubtractorKNN()\n fgbg = cv2.bgsegm.createBackgroundSubtractorGSOC()\n ct = CentroidTracker(maxDisappeared=40, maxDistance=90)\n trackableObjects = {}\n del_ID = 0\n count_a = 0\n count_b = 0\n flag = False\n max_area = 0\n min_area = video_size[0] * video_size[1]\n area_time = 0\n accum_time = 0\n curr_fps = 0\n max_fps = 0\n fps = \"FPS: ??\"\n n = 0\n sum_fps = 0\n\n prev_time = timer()\n while True:\n _, frame = vid.read()\n if type(frame) == type(None): break\n\n resize_img = cv2.resize(frame, (frame.shape[1] // 3, frame.shape[0] // 3))\n mask = fgbg.apply(resize_img)\n # mask1 = mask\n # thresh = cv2.threshold(mask, 3, 255, cv2.THRESH_BINARY)[1]\n cv2.namedWindow('maskwindow', cv2.WINDOW_NORMAL)\n cv2.imshow('maskwindow', mask)\n\n if flag:\n out_image = frame\n contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for _, cnt in enumerate(contours):\n area = cv2.contourArea(cnt)\n if area > min_area and area < (max_area):\n flag = False\n break\n else:\n image = Image.fromarray(frame)\n image, out_boxes, out_scores = yolo.detect_image(image)\n out_boxes = get_area(mask, out_boxes, out_scores)\n # if len(out_boxes) != 0:\n # for i, box in enumerate(out_boxes):\n # cv2.rectangle(frame, (box[1] // 3, box[0] // 3), (box[3] // 3, box[2] // 3), (127, 255, 0), thickness=1)\n objects = ct.update(out_boxes)\n color_list = get_color(frame, objects)\n image, count_b, count_a = track_objects(image, objects, count_b, count_a, trackableObjects, color_list)\n out_image = np.asarray(image)\n if len(out_boxes) != 0:\n for i, box in enumerate(out_boxes):\n cv2.rectangle(out_image, (box[1], box[0]), (box[3], box[2]), (127, 255, 0), thickness=2)\n\n if len(objects) != 0:\n objects_ID = list(objects.keys())\n trackable_ID = list(trackableObjects.keys())\n non_IDs = list(set(trackable_ID) - set(objects_ID))\n for non_ID in non_IDs:\n del trackableObjects[non_ID]\n if area_time < 150:\n max_area, min_area = max_min_area(mask, out_boxes, out_scores, max_area, min_area)\n area_time += 1\n elif len(objects) == 0 and area_time >= 150:\n flag = True\n\n cv2.line(out_image, (0, count_line(out_image.shape[1], out_image.shape[0], 0)), (out_image.shape[1], count_line(out_image.shape[1], out_image.shape[0], out_image.shape[1])), color=(127, 255, 0), thickness=3)\n\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n if max_fps < curr_fps:\n max_fps = curr_fps\n sum_fps += curr_fps\n n += 1\n curr_fps = 0\n print(fps)\n cv2.putText(out_image, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, 
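# overlay the measured FPS in the frame's top-left corner\n                    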
color=(127, 255, 0), thickness=2)\n\n info = [\n (\"count B\", count_b),\n (\"coutn A\", count_a),\n (\"min area\", min_area),\n (\"max area\", max_area)\n ]\n for (i, (k, v)) in enumerate(info):\n textInfo = \"{}: {}\".format(k, v)\n cv2.putText(out_image, text=textInfo, org=(10, out_image.shape[0] - ((30 * i) + 20)), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, color=(127, 255, 0), thickness=1)\n\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\", out_image)\n # cv2.namedWindow('maskwindow', cv2.WINDOW_NORMAL)\n # cv2.imshow('maskwindow', mask1)\n\n if isOutput:\n out.write(out_image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n print(\"Max area: {}, Min area: {}\".format(max_area, min_area))\n print(\"Max FPS: {}FPS\".format(max_fps))\n print(\"Ave FPS: {:.2f}FPS\".format(sum_fps / n))\n yolo.close_session()\n","sub_path":"yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":17922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"157510525","text":"class Solution:\n def reverseBetween(self, head, m, n):\n \"\"\"\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n \"\"\"\n dummy = ListNode(0)\n dummy.next, prev = head, dummy\n for _ in range(1, m):\n prev=prev.next\n pivot = prev.next\n for _ in range(m, n):\n node = pivot.next\n pivot.next = node.next\n node.next = prev.next\n prev.next = node\n return dummy.next\n\"\"\"\nFirst I find the node right before the first node in the reverse range. \nI call it prev. And I call the first node in the reverse range pivot. \nThen this pivot node goes through the reverse range. \nEvery next node it encounters is moved behind prev, i.e., to the start of the reverse range.\n\nprev pivot\n | |\n[A] --> [B] --> [C] --> [D] --> [E] --> [F] --> ...\nIn front of the pivot we have node C, which gets moved to after prev, and pivot moves on:\n\nprev pivot\n | |\n[A] --> [C] --> [B] --> [D] --> [E] --> [F] --> ...\nSo far the range from B to C has been reversed. If that was the goal, we can stop now. \nOtherwise... Now in front of the pivot we have node D, which gets moved to after prev, and pivot moves on:\n\nprev pivot\n | |\n[A] --> [D] --> [C] --> [B] --> [E] --> [F] --> ...\n\"\"\"","sub_path":"python/reverseBtw.py","file_name":"reverseBtw.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"22612993","text":"from Alunos import Alunos\naprovados=[]\nreprovados=[]\nmedia_turma=[]\nr='s'\nwhile r==\"s\":\n aluno=Alunos(8,8,10,4)\n aluno.dados()\n aluno.verifica()\n aluno.media()\n if(aluno.media() == True ):\n aprovados.append(aluno.nome)\n else:\n reprovados.append(aluno.nome)\n media_turma.append(aluno.nota)\n r=input('deseja inserir outro aluno? 
[s] [n] ')\nsoma=sum(media_turma)\nitens=len(media_turma)\nmedia=soma/itens\n\nprint(aprovados )\nprint(reprovados)\nprint(\"a media da turma foi :{} pontos de um total de {} pontos\".format(media,aluno.nota_max))\n","sub_path":"classes/exec14.py","file_name":"exec14.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"215375913","text":"from model import VGG16Model, ResNet152Model\nfrom preprocess import Pre\n\nimport chainer\nfrom chainer.datasets import split_dataset_random\nfrom chainer import iterators\nimport chainer.links as L\nimport chainer.functions as F\nimport numpy as np\nfrom chainer import training\nfrom chainer import serializers\nchainer.config.use_cudnn = 'auto'\nfrom chainer import datasets\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom chainer.training.triggers import MinValueTrigger\nfrom chainer.datasets import tuple_dataset\n\n\"\"\" path to directory including training images \"\"\"\ntrain_path = \"/home1/sompo/tanaka/color/amethyst/amethyst_single_color_pile_normal\"\ntrain_path_d = \"/home1/sompo/tanaka/color/amethyst/amethyst_single_color_pile_dark\"\ntrain_path_b = \"/home1/sompo/tanaka/color/amethyst/amethyst_single_color_pile_bright\"\n\n\"\"\" path to directory including test images \"\"\"\ntest_path = \"/home1/sompo/tanaka/color/octopus_single_color_pile_on\"\n#testには50000枚の画像\n\ntr = Pre(train_path, 30000, False, True)\n#tr_d = Pre(train_path_d, 1000, False, True)\n#tr_b = Pre(train_path_b, 1000, False, True)\n\n\n\"\"\" this is for training and validation \"\"\"\nte = Pre(test_path, 3000, True, True)\n\n\n\n\"\"\"\ntrain data : amethyst_single_color_pile_normal, dark, bright\nreference data : octopus_single_color_pile_on (9color * 5 images)\n-----\nvalidation data : amethyst 300, octopus 45\n-----\ntest data : octopus_single_color_pile_off (50000 images)\npreprocess : random clop, horizontal flip\n\"\"\"\n\nori_train = tr.set\n#ori_train_d = tr_d.set\n#ori_train_b = tr_b.set\n\n#octopusの一部をtrainに使う\ntrain_intest = te.add\n#for validation\ntest = te.rest\n\nprint(\"the number of reference data : \", len(train_intest))\n\nnew_data, new_label = [], []\nval, val_label = [], []\ni = 0\nfor data in ori_train:\n d, l = data\n if(i < (len(ori_train) - 100)):\n new_data.append(d)\n new_label.append(l)\n else:\n val.append(d)\n val_label.append(l)\n i += 1\n\"\"\"\ni = 0\nfor data in ori_train_b:\n d, l = data\n if(i < 29900):\n new_data.append(d)\n new_label.append(l)\n else:\n val.append(d)\n val_label.append(l)\n i += 1\ni = 0\nfor data in ori_train_d:\n d, l = data\n if(i < 29900):\n new_data.append(d)\n new_label.append(l)\n else:\n val.append(d)\n val_label.append(l)\n i += 1\n\"\"\"\nfor data in train_intest:\n d, l = data\n new_data.append(d)\n new_label.append(l)\n\nfor data in test:\n d, l = data\n val.append(d)\n val_label.append(l)\ntrain = tuple_dataset.TupleDataset(new_data, new_label)\ntest = tuple_dataset.TupleDataset(val, val_label)\n\n\nprint(\"making dataset done. 
The length of data is as follows.\")\nprint(len(train),len(test))\n\nbatchsize = 40\n\ntrain_iter = iterators.SerialIterator(train, batchsize, shuffle=True)\n#valid_iter = iterators.SerialIterator(valid, batchsize, repeat=False, shuffle=False)\ntest_iter = iterators.SerialIterator(\n test, batchsize, repeat=False, shuffle=False)\n\n\n\"\"\" fine-tuning \"\"\"\n#vgg16 = L.VGG16Layers()\n\n# 学習済みレイヤーの学習率を固定する\n#model = L.ResNet50Layers()\n#model = L.Classifier(VGG16Model(out_size=9))\nmodel = L.Classifier(ResNet152Model(out_size=9))\n\n\n\na=0.0001\noptimizer = chainer.optimizers.Adam(alpha=a)\noptimizer.setup(model)\n#model.predictor.base.disable_update()\n\n\nprint(\"learning rate を段々と減らしていく\")\nfor func_name in model.predictor.base._children:\n for param in model.predictor.base[func_name].params():\n param.update_rule.hyperparam.alpha *= 0.1\n\ngpu_id = 0\nif gpu_id >= 0:\n print(\"use GPU : \" + str(gpu_id))\n chainer.cuda.get_device(gpu_id).use()\n model.to_gpu(gpu_id)\n\n\n\n\nupdater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)\n\nmax_epoch = 300\n# TrainerにUpdaterを渡す\ntrainer = training.Trainer(updater, (max_epoch, 'epoch'), out='result')\n\nfrom chainer.training import extensions\ntrainer.extend(extensions.LogReport())\ntrainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id), name='val')\ntrainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'val/main/loss', 'val/main/accuracy', 'elapsed_time']))\ntrainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss'], x_key='epoch', file_name='loss.png'))\ntrainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'], x_key='epoch', file_name='accuracy.png'))\ntrainer.extend(extensions.dump_graph('main/loss'))\ntrainer.extend(extensions.ProgressBar(update_interval=1))\n\ndef save_best_model(t):\n print(\"saving model..\")\n serializers.save_npz(\"Resnet152.model\", model)\n\ntrainer.extend(save_best_model,\n trigger=MinValueTrigger('val/main/loss',\n trigger=(1, 'epoch')))\n\nprint(\"start training...\")\ntrainer.run()\n","sub_path":"kirin/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"303068916","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\n\nnltk.download('stopwords')\nstop_words = set(stopwords.words('english'))\nkeys = [\"science\", \"sea\", \"fire\"]\n\nfor text in sys.stdin:\n text = text.lower()\n text = re.sub('[\\n]', ' ', text)\n text = re.sub('[^a-z0-9 .?!]', '', text)\n lines = re.split('[.?!]', text)\n for line in lines:\n words = []\n for word in line.strip().split():\n if word not in stop_words:\n words.append(word)\n\n if len(words) >= 3:\n for i in range(len(words)-2):\n for k in keys:\n if k in words[i]:\n print(line + '\\t' + '$_' + words[i+1] + '_' + words[i+2] + '\\t' + '1')\n elif k in words[i+1]:\n print(line + '\\t' + words[i] + '_$_' + words[i+2] + '\\t' + '1')\n elif k in words[i+2]:\n print(line + '\\t' + words[i] + '_' + words[i+1] + '_$' + '\\t' + '1')\n\n","sub_path":"Project_2/mapper_2.py","file_name":"mapper_2.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"420016178","text":"import sys\nimport itertools\nimport copy\nfrom typing import List, Union\n\nfrom .data import Role\nfrom cassiopeia import Champion\n\nfrom .pull_data import 
get_data\n\n\ndef get_roles(champion_roles, composition: List[Union[Champion, str, int]], top=None, jungle=None, middle=None, adc=None, support=None, verbose=False):\n \"\"\" Returns a dictionary with keys Top, Jungle, Middle, ADC, Support and values as names of the input champions. \"\"\"\n if isinstance(composition[0], Champion):\n region = composition[0].region\n else:\n region = 'NA'\n if isinstance(composition[0], str):\n composition = [Champion(name=name, region='NA').id for name in composition]\n elif isinstance(composition[0], Champion):\n composition = [champion.id for champion in composition]\n if isinstance(top, str):\n top = Champion(name=top, region='NA').id\n elif isinstance(top, Champion):\n top = top.id\n if isinstance(jungle, str):\n jungle = Champion(name=jungle, region='NA').id\n elif isinstance(jungle, Champion):\n jungle = jungle.id\n if isinstance(middle, str):\n middle = Champion(name=middle, region='NA').id\n elif isinstance(middle, Champion):\n middle = middle.id\n if isinstance(adc, str):\n adc = Champion(name=adc, region='NA').id\n elif isinstance(adc, Champion):\n adc = adc.id\n if isinstance(support, str):\n support = Champion(name=support, region='NA').id\n elif isinstance(support, Champion):\n support = support.id\n\n second_best_metric = -float('inf')\n second_best_roles = None\n second_best_play_percents = None\n if None not in [top, jungle, middle, adc, support]:\n best_roles = {\n Role.top: top,\n Role.jungle: jungle,\n Role.middle: middle,\n Role.adc: adc,\n Role.support: support\n }\n best_play_percents = {\n top: champion_roles[top][Role.top],\n jungle: champion_roles[jungle][Role.jungle],\n middle: champion_roles[middle][Role.middle],\n adc: champion_roles[adc][Role.adc],\n support: champion_roles[support][Role.support],\n }\n best_metric = sum(v for v in best_play_percents.values())/5\n else:\n best_roles = {\n Role.top: composition[0],\n Role.jungle: composition[1],\n Role.middle: composition[2],\n Role.adc: composition[3],\n Role.support: composition[4]\n }\n best_play_percents = {\n composition[0]: champion_roles[composition[0]][Role.top],\n composition[1]: champion_roles[composition[1]][Role.jungle],\n composition[2]: champion_roles[composition[2]][Role.middle],\n composition[3]: champion_roles[composition[3]][Role.adc],\n composition[4]: champion_roles[composition[4]][Role.support],\n }\n best_metric = sum(v for v in best_play_percents.values())/5\n second_best_roles = {\n Role.top: composition[0],\n Role.jungle: composition[1],\n Role.middle: composition[2],\n Role.adc: composition[3],\n Role.support: composition[4]\n }\n second_best_play_percents = {\n composition[0]: champion_roles[composition[0]][Role.top],\n composition[1]: champion_roles[composition[1]][Role.jungle],\n composition[2]: champion_roles[composition[2]][Role.middle],\n composition[3]: champion_roles[composition[3]][Role.adc],\n composition[4]: champion_roles[composition[4]][Role.support],\n }\n second_best_metric = sum(v for v in best_play_percents.values())/5\n for champs in itertools.permutations(composition, 5):\n roles = {\n Role.top: champion_roles[champs[0]][Role.top],\n Role.jungle: champion_roles[champs[1]][Role.jungle],\n Role.middle: champion_roles[champs[2]][Role.middle],\n Role.adc: champion_roles[champs[3]][Role.adc],\n Role.support: champion_roles[champs[4]][Role.support],\n }\n if top is not None and champs[0] != top:\n continue\n if jungle is not None and champs[1] != jungle:\n continue\n if middle is not None and champs[2] != middle:\n continue\n if adc is not 
None and champs[3] != adc:\n continue\n if support is not None and champs[4] != support:\n continue\n\n metric = sum(v for v in roles.values())/5\n if metric > best_metric:\n second_best_metric = best_metric\n second_best_roles = best_roles\n best_metric = metric\n best_roles = {\n Role.top: champs[0],\n Role.jungle: champs[1],\n Role.middle: champs[2],\n Role.adc: champs[3],\n Role.support: champs[4]\n }\n best_play_percents = {\n champs[0]: champion_roles[champs[0]][Role.top],\n champs[1]: champion_roles[champs[1]][Role.jungle],\n champs[2]: champion_roles[champs[2]][Role.middle],\n champs[3]: champion_roles[champs[3]][Role.adc],\n champs[4]: champion_roles[champs[4]][Role.support],\n }\n if best_metric > metric > second_best_metric:\n second_best_metric = metric\n second_best_roles = {\n Role.top: champs[0],\n Role.jungle: champs[1],\n Role.middle: champs[2],\n Role.adc: champs[3],\n Role.support: champs[4]\n }\n second_best_play_percents = {\n champs[0]: champion_roles[champs[0]][Role.top],\n champs[1]: champion_roles[champs[1]][Role.jungle],\n champs[2]: champion_roles[champs[2]][Role.middle],\n champs[3]: champion_roles[champs[3]][Role.adc],\n champs[4]: champion_roles[champs[4]][Role.support],\n }\n\n if second_best_roles == best_roles:\n second_best_roles = None\n second_best_play_percents = None\n second_best_metric = -float('inf')\n count_bad_assignments = 0\n for value in best_play_percents.values():\n if value < 0:\n count_bad_assignments += 1\n\n count_secondary_bad_assignments = 0\n if second_best_play_percents:\n found_acceptable_alternative = True\n for value in second_best_play_percents.values():\n if value < 0:\n count_secondary_bad_assignments += 1\n #if count_secondary_bad_assignments > count_bad_assignments:\n # found_acceptable_alternative = False\n else:\n found_acceptable_alternative = False\n\n confidence = (best_metric - second_best_metric)/best_metric\n\n if found_acceptable_alternative:\n string = []\n for role in [Role.top, Role.jungle, Role.middle, Role.adc, Role.support]:\n if best_roles[role] != second_best_roles[role]:\n string.append(\"{}: {}\".format(role, second_best_roles[role]))\n alternative = ', '.join(string)\n else:\n alternative = None\n\n if verbose:\n # These commented lines below are useful for debugging\n #print(\"Best roles: {}\".format(best_metric))\n #if second_best_metric > -float('inf'):\n # print(\"Second best roles: {}\".format(second_best_metric))\n # for role, champ in second_best_roles.items():\n # print(\" {}: {} == {}\".format(role, champ, champion_roles[champ][role]))\n # for role, champion in best_roles.items():\n # print(champion, champion_roles[champion])\n # print('')\n\n for role in [Role.top, Role.jungle, Role.middle, Role.adc, Role.support]:\n print(\"{}: {} ({}%)\".format(role, best_roles[role].name, round(100.*champion_roles[best_roles[role]][role],2)))\n print(\"Probability: {}%\".format(round(100.*best_metric, 1)))\n if not found_acceptable_alternative:\n print(\"Confidence: {}%\".format(round(100.*confidence, 1)))\n else:\n print(\"Confidence: {}% (Alternative is {})\".format(round(100.*confidence, 1), [champion.name for champion in alternative]))\n print('')\n best_roles = {role: Champion(id=id_, region=region) for role, id_ in best_roles.items()}\n if second_best_roles is not None:\n second_best_roles = {role: Champion(id=id_, region=region) for role, id_ in second_best_roles.items()}\n return best_roles, best_metric, confidence, second_best_roles\n\n\ndef iterative_get_roles(champion_roles, composition: 
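# greedy variant of get_roles: each pass pins the single most confident assignment\n                             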
List[Union[Champion, str, int]], top=None, jungle=None, middle=None, adc=None, support=None, verbose=False):\n fixed = {}\n if top is not None:\n fixed[Role.top] = top\n if jungle is not None:\n fixed[Role.jungle] = jungle\n if middle is not None:\n fixed[Role.middle] = middle\n if adc is not None:\n fixed[Role.adc] = adc\n if support is not None:\n fixed[Role.support] = support\n\n _champion_roles = copy.deepcopy(champion_roles)\n second_best_roles = None\n second_best_prob = -float('inf')\n while len(fixed) < 4:\n # Modify data\n for role in fixed:\n for champion, play_rates in _champion_roles.items():\n if champion in fixed.values():\n continue\n play_rate = play_rates[role]\n play_rates[role] = -1.0\n if play_rate > 0:\n roles_left = sum([1 for v in play_rates.values() if v > 0])\n if roles_left > 0:\n to_distribute = play_rate / roles_left\n else:\n to_distribute = 0.\n for r in play_rates:\n if play_rates[r] < 0:\n continue\n play_rates[r] += to_distribute\n _fixed = {role.name.lower(): champion for role, champion in fixed.items()}\n roles, prob, confidence, sbr = get_roles(_champion_roles, composition, **_fixed, verbose=False)\n if sbr is not None:\n _roles, _prob, _confidence, _ = get_roles(_champion_roles, composition, **{k.name.lower(): v for k, v in sbr.items()}, verbose=False)\n\n # I'm pretty sure `prob` can only increase with iterations of this loop\n if prob > _prob > second_best_prob:\n second_best_prob = _prob\n second_best_roles = sbr\n\n best = sorted([(role, champion) for role, champion in roles.items() if role not in fixed],\n key=lambda t: _champion_roles[t[1].id][t[0]], reverse=True)[0]\n fixed[best[0]] = best[1]\n\n if verbose:\n for role in [Role.top, Role.jungle, Role.middle, Role.adc, Role.support]:\n print(\"{}: {} ({}%)\".format(role, roles[role].name, round(100.*champion_roles[roles[role].id][role], 2)))\n print(\"Probability: {}%\".format(round(100. * prob, 1)))\n confidence = (prob - second_best_prob)/prob\n if not second_best_roles:\n print(\"Confidence: {}%\".format(round(100. * confidence, 1)))\n else:\n string = []\n for role in [Role.top, Role.jungle, Role.middle, Role.adc, Role.support]:\n if roles[role] != second_best_roles[role]:\n string.append(\"{}: {}\".format(role, second_best_roles[role].name))\n alternative = ', '.join(string)\n print(\"Confidence: {}% (Alternative is {})\".format(round(100. 
* confidence, 1), alternative))\n            print()\n    return roles, prob, confidence, second_best_roles\n","sub_path":"venv/lib64/python3.6/site-packages/roleidentification/get_roles.py","file_name":"get_roles.py","file_ext":"py","file_size_in_byte":11846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"432552714","text":"\nfunc.splash2()\nread('../fix_input_trees/t*')\nrNum = 0\nmyCalc='fastReducedRF'\n\n#os.chdir('/home/heather/BRYO/SPA_gubbins')\n# Set up an STMcmc, with default sample interval and (by default) no checkpoints\nstm = STMcmc(var.trees, stRFCalc=myCalc, modelName='SPA', nChains=4, runNum=rNum)\nstm.prob.nni = 1.0\n#stm.prob.spr = 1.0\n#stm.prob.polytomy = 0.5\nstm.prob.spaQ_uniform = 0.2\nstm.tunings.chainTemp = 2.5\nprint(stm.tunings)\n\n# do a pre-run, then remove the output files and reset gen num\nstm.run(2000)\nos.system(\"rm mcmc_likes_%i\" % rNum)\nos.system(\"rm mcmc_prams_%i\" % rNum)\nos.system(\"rm mcmc_trees_%i.nex\" % rNum)\nstm.gen = -1\n\nstm.autoTune(carryOn=True)\n\n# Now the real thing\nnGens = 10000000\nstm.sampleInterval = nGens/10000\nstm.checkPointInterval = nGens/4\nstm.run(nGens)\n","sub_path":"BRYOZOA_DATASET/code/E_SPA.py","file_name":"E_SPA.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"182381586","text":"# Definition for a binary tree node\r\n# class TreeNode:\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.left = None\r\n#         self.right = None\r\n\r\n\r\nclass Solution:\r\n    # @param root, a tree node\r\n    # @return a list of integers\r\n    def inorderTraversal(self, root):\r\n        r = []\r\n        if root is None:\r\n            return r\r\n        if root.left:\r\n            left = self.inorderTraversal(root.left)\r\n            r.extend(left)\r\n        r.append(root.val)\r\n        if root.right:\r\n            right = self.inorderTraversal(root.right)\r\n            r.extend(right)\r\n        return r\r\n\r\n# Tester\r\n# s = TreeNode(1)\r\n# l = TreeNode(2)\r\n# r = TreeNode(3)\r\n# t = TreeNode(None)\r\n# s.right = l\r\n# l.left = r\r\n# p = Solution()\r\n# print p.inorderTraversal(s)\r\n","sub_path":"binary_tree_inorder_traversal.py","file_name":"binary_tree_inorder_traversal.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"40976183","text":"import socket\nimport threading\n\nHOST = 'localhost'\nPORT = 62300\n\n\ndef process_connection(sock, all_connections):\n    while True:\n        print(sock)\n        print(addr)\n        data = sock.recv(1024)\n        print(data)\n        decoded_data = data.decode('utf-8')\n        print(decoded_data)\n        # sock.sendall(data)\n        # another_sock.sendall(data)\n        for conn in all_connections:\n            conn.sendall(data)\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:\n    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    server.bind((HOST, PORT))\n    server.listen()\n\n    connections = []\n    while True:\n        connection, addr = server.accept()\n        connections.append(connection)\n        threading.Thread(target=process_connection, args=(connection, connections)).start()\n\n# another_connection, addr = server.accept()\n\n# threading.Thread(target=process_connection, args=(another_connection, connection)).start()\n# while True:\n#     print(connection)\n#     print(addr)\n#     data = connection.recv(1024)\n#     print(data)\n#     decoded_data = data.decode('utf-8')\n#     print(decoded_data)\n# 
connection.sendall(data)","sub_path":"network/server_side.py","file_name":"server_side.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"442735041","text":"from __future__ import print_function, division\n\nimport unittest\nfrom math import sqrt\nimport torch\nfrom torch.autograd import Variable\nimport densenet\n\nclass TestDenseNet(unittest.TestCase):\n    def test_denselayer(self):\n        layer = densenet._DenseLayer(in_features=1, growth_rate=1)\n\n        # explicitly set batchnorm gamma = 1 and beta = 0\n        layer.norm.weight.data[0] = 1\n        layer.norm.bias.data[0] = 0\n\n        # explicitly set convolutional kernel to just identity\n        kernel = torch.Tensor([\n            [0, 0, 0],\n            [0, 1, 0],\n            [0, 0, 0],\n        ])\n        layer.conv.weight.data[0,0,:,:] = kernel\n\n        # input feature map\n        image = [\n            [1, 0, 1, 0],\n            [0, -1, 1, 0],\n            [1, 0, 2, 1],\n            [1, -1, 1, 1],\n        ]\n        x = Variable(torch.Tensor([[image]]))\n        y = layer(x)\n\n        # construct what we expect output to be\n        a = torch.Tensor(image)\n        # first batch norm\n        epsilon = 1e-5\n        b = (a - a.mean()) / sqrt(a.var()*15/16 + epsilon)\n        # then relu\n        b = torch.max(b, torch.Tensor([0]))\n        # (identity conv2d does nothing)\n\n        # first output map is just original input\n        self.assertTrue(torch.equal(y[0,0].data, x.data[0,0]))\n        # second output map is the batchnorm-relu-conv2d\n        self.assertTrue(torch.equal(y[0,1].data, b))\n\n    def test_densenet(self):\n        model = densenet.DilatedDenseNet(image_channels=1, num_init_features=1,\n                growth_rate=1, layers=3, dilated=True)\n\n        # input feature map\n        image = [\n            [1, 0, 1, 0],\n            [0, -1, 1, 0],\n            [1, 0, 2, 1],\n            [1, -1, 1, 1],\n        ]\n        x = Variable(torch.Tensor([[image]]))\n\n        layer = model.features.denselayer02\n        layer.norm.weight.data[0] = 1\n        layer.norm.bias.data[0] = 0\n        kernel = torch.Tensor([\n            [0, 0, 0],\n            [0, 1, 0],\n            [0, 0, 0],\n        ])\n        layer.conv.weight.data[0,0,:,:] = kernel\n        y = model(x)\n\n        print(model)\n        print(y)\n","sub_path":"test_densenet.py","file_name":"test_densenet.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"526237796","text":"from random import randint\nfrom tkinter import Canvas\nimport turtle\n\nclass Point:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n    def falls_in_rectangle(self, rectangle):\n        if rectangle.point1.x < self.x < rectangle.point2.x \\\n            and rectangle.point1.y < self.y < rectangle.point2.y:\n            return True\n        else:\n            return False\n\nclass Rectangle:\n    def __init__(self, point1, point2):\n        self.point1 = point1\n        self.point2 = point2\n    def area(self):\n        return (self.point2.x - self.point1.x) * \\\n            (self.point2.y - self.point1.y)\n\nclass GuiRect(Rectangle):\n    def draw(self, canvas):\n        canvas.penup()\n        canvas.goto(self.point1.x, self.point1.y)\n        canvas.pendown()\n        canvas.forward(self.point2.x-self.point1.x)\n        canvas.left(90)\n        canvas.forward(self.point2.y-self.point1.y)\n        canvas.left(90)\n        canvas.forward(self.point2.x-self.point1.x)\n        canvas.left(90)\n        canvas.forward(self.point2.y-self.point1.y)\n\nclass GuiPoint(Point):\n    def draw(self,canvas,size=10,color='red'):\n        canvas.penup()\n        canvas.goto(self.x,self.y)\n        canvas.pendown()\n        canvas.dot(size,color)\n    \nrectangle = GuiRect(Point(randint(0, 400), randint(0, 400)), Point(randint(0, 400), randint(0, 400)))\n\n\n# Print rectangle coordinates\nprint(\"Rectangle Coordinates: \",\n      rectangle.point1.x, \",\",\n      rectangle.point1.y, \"and\",\n      
rectangle.point2.x, \",\",\n rectangle.point2.y)\n\n# Get point and area from user\nuser_point = GuiPoint(float(input(\"Guess x: \")), float(input(\"Guess y: \")))\nuser_area = float(input(\"Guess rectangle area: \"))\n\n# Print out the game result\nprint(\"Your area was off by: \", rectangle.area() - user_area)\nprint(\"Your point was inside rectangle: \", user_point.falls_in_rectangle(rectangle))\n\n\nmyturtle = turtle.Turtle()\nrectangle.draw(canvas=turtle)\nuser_point.draw(canvas=turtle)\nturtle.done()","sub_path":"034 main.py","file_name":"034 main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"462159704","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n#------------------ AnKoA -----------------------#\n# Made with love by grm34 (FRIPOUILLEJACK) #\n# ........fripouillejack@gmail.com ....... #\n# Greetz: thibs, Rockweb, c0da, Hydrog3n, Speedy76 #\n#--------------------------------------------------#\n\nimport os\nimport sys\nimport readline\nimport optparse\nimport json\nimport urllib2\nimport subprocess\nfrom json import loads\nfrom urllib2 import (Request, urlopen, URLError, HTTPError, unquote)\nfrom subprocess import (CalledProcessError, check_output)\nfrom pprint import pprint\nsys.path.append(\"app/\")\nfrom style import (banner, next, color)\nfrom bitrate import (calcul, calc)\nfrom settings import option\n\n(folder, thumb, tag, team, announce, tmdb_api_key, tag_thumb) = option()\n\n(BLUE, RED, YELLOW, GREEN, END) = color()\n\n#___ Run ___#\ndef ffmpeg():\n if (encode_type == \"2\"): #--> CRF\n return (\n \"cd \"+thumb+\" && ffmpeg -i \"+source+\" -metadata title='\"+\\\n title+\".\"+year+\"' -metadata proudly.presented.by='\"+team+\\\n \"' -map 0:\"+idvideo+interlace+fps+\" -metadata:s:v:0 title= \"\\\n \"-metadata:s:v:0 language= -f \"+string+reso+\" -c:v:0 \"+codec+\\\n \" -crf \"+crf+\" -level \"+level+param+audio_config+sub_config+\\\n \" -passlogfile \"+title+\".log \"+title+\".\"+year+stag+mark+sub_remux\n )\n\n else: #--> 2PASS\n return (\n \"cd \"+thumb+\" && ffmpeg -i \"+source+\" -pass 1 -map 0:\"+\\\n idvideo+interlace2+fps+\" -f \"+string+reso+\" -c:v:0 \"+\\\n codec+\" -b:v:0 \"+bit+\"k -level \"+level+pass1+\" -an -sn \"\\\n \"-passlogfile \"+title+\".log \"+title+\".\"+year+stag+mark+\\\n \" && ffmpeg -y -i \"+source+\" -pass 2 -metadata title='\"+title+\\\n \".\"+year+\"' -metadata proudly.presented.by='\"+team+\"' -map 0:\"+\\\n idvideo+interlace+fps+\" -metadata:s:v:0 title= -metadata:s:v:0 \"\\\n \"language= -f \"+string+reso+\" -c:v:0 \"+codec+\" -b:v:0 \"+bit+\"k \"\\\n \"-level \"+level+param+audio_config+sub_config+\" -passlogfile \"+\\\n title+\".log \"+title+\".\"+year+stag+mark+sub_remux\n )\n\ndef data(): #--> Tools\n if (len(nfoimdb) == 7 and nfoimdb.isdigit()):\n prezz = \"&& ./genprez.py \"+audiolang+\" \"+prezquality+\" \"+titlesub+\\\n \" \"+prezsize+\" \"+nfoimdb+\" && mv \"+thumb+name+\\\n \"*.txt \"+thumb+title+\".\"+year+stag+mark[:-3]+\"txt \"\\\n \"&& ./imgur.py \"+thumb+title+\".\"+year+\"*.png add \"\n zipp = \"cd \"+thumb+\" && zip -r \"+title+\".zip -m \"+title+\\\n \".\"+year+stag+\"*.torrent \"+title+\".\"+year+stag+\\\n \"*.nfo \"+title+\".\"+year+stag+\"*.txt \"+title+\\\n \"*.log \"+title+\".\"+year+stag+\"*.png\"\n else:\n prezz = \"&& ./imgur.py \"+thumb+title+\".\"+year+\"*.png \"\n zipp = \"cd \"+thumb+\" && zip -r \"+title+\".zip -m \"+title+\\\n \".\"+year+stag+\"*.torrent \"+title+\".\"+year+stag+\"*.nfo \"+\\\n 
title+\"*.log \"+title+\".\"+year+stag+\"*.png\"\n\n return (\n \"./thumbnails.py \"+thumb+title+\".\"+year+stag+mark+\" 5 2 \"+prezz+\\\n \"&& ./nfogen.sh \"+thumb+title+\".\"+year+stag+mark+\" \"+nfosource+\\\n \" \"+titlesub+\" \"+subforced+\" http://www.imdb.com/title/tt\"+nfoimdb+\\\n \" && rm -f \"+thumb+title+\"*.mbtree && cd \"+thumb+\" && mktorrent -a \"+\\\n announce+\" -p -t 8 -l \"+pieces+\" \"+title+\".\"+year+stag+mark+\" \"+zipp\n )\n\ndef main():\n\n #___ Auto completion ___#\n def completer(text,state):\n return (\n [entry for entry in os.listdir(\n folder+os.path.dirname(readline.get_line_buffer()))\n if entry.startswith(text)][state]\n )\n\n #___ Source Infos ___#\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(completer)\n prefix = raw_input(GREEN+\"RELEASE SOURCE > \\n\"+END)\n readline.parse_and_bind(\"tab: \")\n source = folder+prefix\n title = raw_input(GREEN+\"RELEASE TITLE \"+YELLOW+\\\n \"(ex: Hudson.Hawk)\"+GREEN+\" : \"+END)\n year = raw_input(GREEN+\"RELEASE PRODUCTION YEAR : \"+END)\n special = raw_input(GREEN+\"SPECIAL TAG \"+YELLOW+\\\n \"(ex: EXTENDED.CUT)\"+GREEN+\" : \"+END)\n scan = [\n \"HandBrakeCLI -t 0 --scan -i \"+source,\\\n \"mediainfo -f --Inform='General;%Duration/String3%' \"+source,\\\n \"mediainfo -f --Inform='General;%FileSize/String4%' \"+source,\\\n \"mediainfo -f --Inform='Video;%BitRate/String%' \"+source,\\\n \"mediainfo -f --Inform='Video;%FrameRate/String%' \"+source,\\\n \"mediainfo -f --Inform='Video;%Width/String%' \"+source,\\\n \"mediainfo -f --Inform='Video;%Height/String%' \"+source,\\\n \"mediainfo -f --Inform='Video;%DisplayAspectRatio/\"\\\n \"String%' \"+source,\\\n \"mediainfo -f --Inform='Audio;%CodecID% - ' \"+source,\\\n \"mediainfo -f --Inform='Audio;%Language/String% - ' \"+source,\\\n \"mediainfo -f --Inform='Audio;%BitRate/String% - ' \"+source,\\\n \"mediainfo -f --Inform='Audio;%SamplingRate/String% - ' \"+source,\\\n \"mediainfo -f --Inform='Audio;%Channel(s)/String% - ' \"+source,\\\n \"mediainfo -f --Inform='Text;%CodecID% - ' \"+source,\\\n \"mediainfo -f --Inform='Text;%Language/String% - ' \"+source\n ]\n type = raw_input(GREEN+\"SCAN INFOS SOURCE > \\n\"+YELLOW+\\\n \"HANDBRAKE \"+GREEN+\"[1]\"+YELLOW+\" - MEDIAINFO \"+GREEN+\\\n \"[2] : \"+END)\n try:\n if (type == \"1\"):\n subprocess.check_output(scan[0], shell=True)\n else:\n subprocess.check_output(scan[1], shell=True)\n for x in range(1, 15):\n os.system(scan[x])\n x = x + 1\n except (OSError, CalledProcessError):\n print (GREEN+\"\\n -> \"+BLUE+\"ERROR : \"+RED+\"Bad source selection\"\\\n \", please try again !\\n\"+END)\n sys.exit()\n\n #___ Video Params ___#\n codec_type = raw_input(GREEN+\"VIDEO CODEC > \\n\"+YELLOW+\"x264 \"+GREEN+\\\n \"[1]\"+YELLOW+\" - x265 \"+GREEN+\"[2] : \"+END)\n if (codec_type == \"2\"):\n codec = \"libx265\"\n else:\n codec = \"libx264\"\n\n encode_type = raw_input(GREEN+\"ENCODING MODE > \\n\"+YELLOW+\\\n \"DUALPASS \"+GREEN+\"[1]\"+YELLOW+\\\n \" - CRF \"+GREEN+\"[2] : \"+END)\n if (encode_type == \"2\"):\n bit = \"\"\n crf = raw_input(GREEN+\"CRF LEVEL \"+YELLOW+\"(ex: 19)\"+GREEN+\" : \"+END)\n else:\n crf = \"\"\n calculator = raw_input(GREEN+\"BITRATE CALCULATOR \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (calculator == \"y\"):\n next = \"y\"\n while (next != \"n\"):\n HH, MM, SS, audiobit, rls_size, calsize = calcul()\n run_calc = calc(HH, MM, SS, audiobit, rls_size, calsize)\n os.system(run_calc)\n next = raw_input(GREEN+\"TRY AGAIN \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n bit = 
raw_input(GREEN+\"VIDEO BITRATE Kbps : \"+END)\n else:\n bit = raw_input(GREEN+\"VIDEO BITRATE Kbps : \"+END)\n\n format = raw_input(GREEN+\"RELEASE FORMAT > \\n\"+YELLOW+\"HDTV \"+GREEN+\\\n \"[1]\"+YELLOW+\" - PDTV \"+GREEN+\"[2]\"+YELLOW+\\\n \" - BDRip \"+GREEN+\"[3]\\n\"+YELLOW+\"DVDRip \"+GREEN+\\\n \"[4]\"+YELLOW+\" - BRRip \"+GREEN+\"[5]\"+YELLOW+\\\n \" - 720p \"+GREEN+\"[6] : \"+END)\n if (format == \"2\"):\n hr = raw_input(GREEN+\"PDTV HIGH RESOLUTION \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (hr == \"y\"):\n format = \"0\"+format\n\n rlstype = raw_input(GREEN+\"RELEASE CONTAINER > \\n\"+YELLOW+\\\n \"MPEG4 \"+GREEN+\"[1]\"+YELLOW+\" - MATROSKA \"+GREEN+\\\n \"[2] : \"+END)\n if (rlstype == \"1\"):\n string = \"mp4\"\n else:\n string = \"matroska\"\n\n scan2 = raw_input(GREEN+\"FFMPEG SCAN TRACKS \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n ffmpeg = \"ffmpeg -i \"+source\n if (scan2 == \"y\"):\n os.system(ffmpeg)\n\n idvideo = raw_input(GREEN+\"VIDEO TRACK FFMPEG ID \"+YELLOW+\\\n \"(ex: 0)\"+GREEN+\" : \"+END)\n modif_fps = raw_input(GREEN+\"CHANGE VIDEO FRAMERATE \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (modif_fps == \"y\"):\n set_fps = raw_input(GREEN+\"VIDEO FRAMERATE \"+YELLOW+\\\n \"(ex: 23.98)\"+GREEN+\" : \"+END)\n fps = \"-r \"+set_fps+\" \"\n else:\n fps = \"\"\n\n deinterlace = raw_input(GREEN+\"DEINTERLACE VIDEO \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (deinterlace == \"y\"):\n interlace = \" -filter:v yadif=deint=0 \"\n if (encode_type == \"2\"):\n interlace2 = \"\"\n else:\n interlace2 = \" -filter:v yadif=deint=1 \"\n else:\n interlace = \"\"\n interlace2 = \"\"\n\n #___ Audio Infos ___#\n audiotype = raw_input(GREEN+\"RELEASE AUDIO TYPE > \\n\"+YELLOW+\\\n \"FRENCH \"+GREEN+\"[1]\"+YELLOW+\" - ENGLiSH \"+GREEN+\\\n \"[2]\\n\"+YELLOW+\"OTHER \"+GREEN+\"[3]\"+YELLOW+\" - \"\\\n \"MULTi \"+GREEN+\"[4]\"+YELLOW+\" - NONE \"+GREEN+\\\n \"[5] : \"+END)\n if (audiotype == \"1\" or audiotype == \"2\" or audiotype == \"3\"):\n audionum = raw_input(GREEN+\"AUDIO TRACK FFMPEG ID \"+YELLOW+\\\n \"(ex: 1)\"+GREEN+\" : \"+END)\n if (audiotype == \"3\"):\n audiolang = raw_input(GREEN+\"AUDIO TRACK TITLE \"+YELLOW+\\\n \"(ex: Espagnol)\"+GREEN+\" : \"+END)\n audiocodec = raw_input(GREEN+\"AUDIO TRACK CODEC > \\n\"+YELLOW+\\\n \"MP3 \"+GREEN+\"[1]\"+YELLOW+\" - AC3 \"+GREEN+\\\n \"[2]\"+YELLOW+\" - DTS/COPY \"+GREEN+\"[3] : \"+END)\n if (audiocodec == \"2\"):\n abitrate = raw_input(GREEN+\"AUDIO TRACK BITRATE Kbps \"+YELLOW+\\\n \"(ex: 448)\"+GREEN+\" : \"+END)\n surround = raw_input(GREEN+\"AUDIO TRACK CHANNELS \"+YELLOW+\\\n \"(ex: 2)\"+GREEN+\" : \"+END)\n elif (audiotype == \"4\"):\n audionum = raw_input(GREEN+\"AUDIO TRACK 01 FFMPEG ID \"+YELLOW+\\\n \"(ex: 1)\"+GREEN+\" : \"+END)\n audiolang = raw_input(GREEN+\"AUDIO TRACK 01 TITLE \"+YELLOW+\\\n \"(ex: English)\"+GREEN+\" : \"+END)\n audiocodec = raw_input(GREEN+\"AUDIO TRACK 01 CODEC > \\n\"+YELLOW+\\\n \"MP3 \"+GREEN+\"[1]\"+YELLOW+\" - AC3 \"+GREEN+\\\n \"[2]\"+YELLOW+\" - DTS/COPY \"+GREEN+\"[3] : \"+END)\n if (audiocodec == \"2\"):\n abitrate = raw_input(GREEN+\"AUDIO TRACK 01 BITRATE Kbps \"+YELLOW+\\\n \"(ex: 448)\"+GREEN+\" : \"+END)\n surround = raw_input(GREEN+\"AUDIO TRACK 01 CHANNELS \"+YELLOW+\\\n \"(ex: 2)\"+GREEN+\" : \"+END)\n audionum2 = raw_input(GREEN+\"AUDIO TRACK 02 FFMPEG ID \"+YELLOW+\\\n \"(ex: 0)\"+GREEN+\" : \"+END)\n audiolang2 = raw_input(GREEN+\"AUDIO TRACK 02 TITLE \"+YELLOW+\\\n \"(ex: English)\"+GREEN+\" : \"+END)\n audiocodec2 = raw_input(GREEN+\"AUDIO TRACK 02 CODEC 
> \\n\"+YELLOW+\\\n \"MP3 \"+GREEN+\"[1]\"+YELLOW+\" - AC3 \"+GREEN+\\\n \"[2]\"+YELLOW+\" - DTS/COPY \"+GREEN+\\\n \"[3] : \"+END)\n if (audiocodec2 == \"2\"):\n abitrate2 = raw_input(GREEN+\"AUDIO TRACK 02 BITRATE \"\\\n \"Kbps \"+YELLOW+\"(ex: 448)\"+GREEN+\" : \"+END)\n surround2 = raw_input(GREEN+\"AUDIO TRACK 02 CHANNELS\"\\\n \" \"+YELLOW+\"(ex: 2)\"+GREEN+\" : \"+END)\n else:\n audiocodec = \"\"\n if (audiotype == \"1\" or audiotype == \"2\"\n or audiotype == \"3\" or audiotype == \"4\"):\n audiox_ = raw_input(GREEN+\"CHANGE SAMPLING RATE \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (audiox_ == \"y\"):\n if (audiotype == \"4\"):\n ar1_ = raw_input(GREEN+\"AUDIO TRACK 01 SAMPLING \"\\\n \"RATE \"+YELLOW+\"(ex: 48)\"+GREEN+\" : \"+END)\n if not (ar1_):\n ar1 = \" -ar:a:0 48k\"\n else:\n ar1 = \" -ar:a:0 \"+ar1_+\"k\"\n ar2 = raw_input(GREEN+\"AUDIO TRACK 02 SAMPLING RATE \"+YELLOW+\\\n \"(ex: 48)\"+GREEN+\" : \"+END)\n if not (ar2_):\n ar2 = \" -ar:a:1 48k\"\n else:\n ar2 = \" -ar:a:1 \"+ar2_+\"k\"\n audiox = ar1\n audiox2 = ar2\n else:\n ar_ = raw_input(GREEN+\"AUDIO TRACK 01 SAMPLING RATE \"+YELLOW+\\\n \"(ex: 48)\"+GREEN+\" : \"+END)\n if not (ar_):\n ar = \" -ar:a:0 48k\"\n else:\n ar = \" -ar:a:0 \"+ar_+\"k\"\n audiox = ar\n audiox2 = \"\"\n else:\n audiox = \" -ar:a:0 48k\"\n audiox2 = \" -ar:a:1 48k\"\n\n #___ Audio Params ___#\n if (audiocodec == \"1\"):\n config = \"-c:a:0 mp3 -b:a:0 128k -ac:a:0 2\"+audiox\n elif (audiocodec == \"2\"):\n config = \"-c:a:0 ac3 -b:a:0 \"+abitrate+\"k -ac:a:0 \"+surround+audiox\n else:\n config = \"-c:a:0 copy\"\n\n if (audiotype == \"4\"):\n if (audiocodec2 == \"1\"):\n config2 = \"-c:a:1 mp3 -b:a:1 128k -ac:a:1 2\"+audiox2\n elif (audiocodec2 == \"2\"):\n config2 = \"-c:a:1 ac3 -b:a:1 \"+abitrate2+\\\n \"k -ac:a:1 \"+surround2+audiox2\n else:\n config2 = \"-c:a:1 copy\"\n\n if (audiotype == \"1\"):\n lang = \"FRENCH\"\n audiolang = \"FRENCH\"\n elif (audiotype == \"2\"):\n lang = \"VOSTFR\"\n audiolang = \"ENGLiSH\"\n elif (audiotype == \"3\"):\n lang = \"VOSTFR\"\n elif (audiotype == \"4\"):\n lang = \"MULTi\"\n else:\n lang = \"NOAUDIO\"\n audiolang = \"NOAUDIO\"\n\n if (audiotype == \"4\"):\n audio_config = \" -map 0:\"+audionum+\" -metadata:s:a:0 title='\"+\\\n audiolang+\"' -metadata:s:a:0 language= \"+config+\\\n \" -map 0:\"+audionum2+\" -metadata:s:a:1 title='\"+\\\n audiolang2+\"' -metadata:s:a:1 language= \"+config2\n\n elif (audiotype == \"1\" or audiotype == \"2\" or audiotype == \"3\"):\n audio_config = \" -map 0:\"+audionum+\" -metadata:s:a:0 title='\"+\\\n audiolang+\"' -metadata:s:a:0 language= \"+config\n else:\n audio_config = \"\"\n\n #___ Release Tag ___#\n if (special == \"\"):\n stag = \"\"\n else:\n stag = \".\"+special\n\n if (format == \"1\"):\n form = \"HDTV\"\n elif (format == \"2\"):\n form = \"PDTV\"\n elif (format == \"02\"):\n form = \"HR.PDTV\"\n elif (format == \"3\"):\n form = \"BDRip\"\n elif (format == \"4\"):\n form = \"DVDRip\"\n elif (format == \"6\"):\n form = \"720p.BluRay\"\n else:\n form = \"BRRip\"\n\n if (rlstype == \"1\"):\n extend = \".mp4\"\n else:\n extend = \".mkv\"\n if (codec_type == \"2\"):\n x = \"x265\"\n else:\n x = \"x264\"\n\n if (audiocodec == \"1\"):\n mark = \".\"+lang+\".\"+form+\".\"+x+\"-\"+tag+extend\n prezquality = form+\" \"+x\n elif (audiocodec == \"3\"):\n mark = \".\"+lang+\".\"+form+\".DTS\"+\".\"+x+\"-\"+tag+extend\n prezquality = form+\" DTS.\"+x\n else:\n mark = \".\"+lang+\".\"+form+\".AC3\"+\".\"+x+\"-\"+tag+extend\n prezquality = form+\" AC3.\"+x\n\n #___ 
Mkvmerge ___#\n def remux_ext():\n if (subtype == \"3\"):\n if (audiotype == \"4\"): #--> FILE - Audio MULTi / SRT MULTi\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+\\\n title+extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+\\\n stag+mark+\" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no --default-track 2:no \"\\\n \"--forced-track 2:no \"+thumb+title+extend+\\\n \" --default-track '0:yes' --forced-track '0:no'\"\\\n \" --language '0:und' \"+sync+\"--track-name '0:\"+titlesub+\\\n \"'\"+charset+\" \"+idsub+\" --default-track '0:no' \"+forced+\\\n \"--language '0:und' \"+sync2+\"--track-name '0:\"+titlesub2+\\\n \"'\"+charset2+\" \"+idsub2+\" && rm -f \"+thumb+title+extend\n )\n\n else: #--> FILE - Audio FR-VO / SRT MULTi\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+\\\n title+extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+\\\n stag+mark+\" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no \"+thumb+title+extend+\\\n \" --default-track '0:yes' --forced-track '0:no' \"\\\n \"--language '0:und' \"+sync+\"--track-name '0:\"+titlesub+\\\n \"'\"+charset+\" \"+idsub+\" --default-track '0:no' \"+forced+\\\n \"--language '0:und' \"+sync2+\"--track-name '0:\"+titlesub2+\\\n \"'\"+charset2+\" \"+idsub2+\" && rm -f \"+thumb+title+extend\n )\n\n else:\n if (audiotype == \"4\"): #--> FILE - Audio MULTi / SRT FR-VO\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+title+\\\n extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+stag+mark+\\\n \" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no --default-track 2:no --forced-track\"\\\n \" 2:no \"+thumb+title+extend+\" --default-track '0:yes' \"+\\\n forced+\"--language '0:und' \"+sync+\"--track-name '0:\"+\\\n titlesub+\"'\"+charset+\" \"+idsub+\" && rm -f \"+thumb+\\\n title+extend\n )\n\n else: #--> FILE - Audio FR-VO / SRT FR-VO\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+title+\\\n extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+stag+mark+\\\n \" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no \"+thumb+title+extend+\" \"\\\n \"--default-track '0:yes' \"+forced+\"--language '0:und' \"+\\\n sync+\"--track-name '0:\"+titlesub+\"'\"+charset+\" \"+idsub+\\\n \" && rm -f \"+thumb+title+extend\n )\n\n def remux_int():\n if (subtype == \"3\"):\n if (audiotype == \"4\"): #--> SOURCE - Audio MULTi / SRT MULTi\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+title+\\\n extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+stag+mark+\\\n \" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no --default-track 2:no \"\\\n \"--forced-track 2:no --default-track 3:yes \"\\\n \"--forced-track 3:no --default-track 4:no \"+\\\n forced+thumb+title+extend+\" && rm -f \"+thumb+title+extend\n )\n\n else: #--> SOURCE - Audio FR/VO / SRT MULTi\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+title+\\\n extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+stag+mark+\\\n \" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no --default-track 2:yes \"\\\n \"--forced-track 2:no --default-track 3:no 
\"+forced+\\\n thumb+title+extend+\" && rm -f \"+thumb+title+extend\n )\n else:\n if (audiotype == \"4\"): #--> SOURCE - Audio MULTi / SRT FR/VO\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+title+\\\n extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+stag+mark+\\\n \" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no --default-track 2:no \"\\\n \"--forced-track 2:no --default-track 3:yes \"+forced+\\\n thumb+title+extend+\" && rm -f \"+thumb+title+extend\n )\n\n else: #--> SOURCE - Audio FR/VO / SRT FR/VO\n return (\n \" && mv \"+thumb+title+\".\"+year+stag+mark+\" \"+thumb+title+\\\n extend+\" && mkvmerge -o \"+thumb+title+\".\"+year+stag+mark+\\\n \" --compression -1:none --default-track 0:yes \"\\\n \"--forced-track 0:no --default-track 1:yes \"\\\n \"--forced-track 1:no --default-track 2:yes \"+forced+\\\n thumb+title+extend+\" && rm -f \"+thumb+title+extend\n )\n\n #___ Subtitles Params ___#\n def infos_subs_in():\n if (subtype == \"3\"):\n if (subsource == \"4\"):\n idsub = raw_input(GREEN+\"SUBTITLES TRACK 01 ISO ID \"+YELLOW+\\\n \"(ex: 1)\"+GREEN+\" : \"+END)\n idsub2 = raw_input(GREEN+\"SUBTITLES TRACK 02 ISO ID \"+YELLOW+\\\n \"(ex: 2)\"+GREEN+\" : \"+END)\n else:\n idsub = raw_input(GREEN+\"SUBTITLES TRACK 01 FFMPEG ID \"+\\\n YELLOW+\"(ex: 1)\"+GREEN+\" : \"+END)\n idsub2 = raw_input(GREEN+\"SUBTITLES TRACK 02 FFMPEG ID \"+\\\n YELLOW+\"(ex: 2)\"+GREEN+\" : \"+END)\n titlesub = raw_input(GREEN+\"SUBTITLES TRACK 01 TITLE \"+YELLOW+\\\n \"(ex: Full.French)\"+GREEN+\" : \"+END)\n titlesub2 = raw_input(GREEN+\"SUBTITLES TRACK 02 TITLE \"+YELLOW+\\\n \"(ex: French.Forced)\"+GREEN+\" : \"+END)\n\n else:\n if (subsource == \"4\"):\n idsub = raw_input(GREEN+\"SUBTITLES TRACK ISO ID \"+YELLOW+\\\n \"(ex: 1)\"+GREEN+\" : \"+END)\n else:\n idsub = raw_input(GREEN+\"SUBTITLES TRACK FFMPEG ID \"+YELLOW+\\\n \"(ex: 1)\"+GREEN+\" : \"+END)\n if (subtype == \"1\"):\n titlesub = \"FULL.FRENCH\"\n elif (subtype == \"2\"):\n titlesub = \"FRENCH.FORCED\"\n idsub2 = \"\"\n titlesub2 = \"\"\n\n infos_subs_in = (idsub, titlesub, idsub2, titlesub2)\n return (infos_subs_in)\n\n def infos_subs_out():\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(completer)\n\n if (subtype == \"3\"):\n ub = raw_input(GREEN+\"SUBTITLES TRACK 01 SOURCE > \\n\"+END)\n ub2 = raw_input(GREEN+\"SUBTITLES TRACK 02 SOURCE > \\n\"+END)\n readline.parse_and_bind(\"tab: \")\n idsub = folder+ub\n idsub2 = folder+ub2\n if (subsource == \"3\"):\n titlesub = raw_input(GREEN+\"SUBTITLES TRACK 01 TITLE \"+\\\n YELLOW+\"(ex: Full.French)\"+GREEN+\\\n \" : \"+END)\n titlesub2 = raw_input(GREEN+\"SUBTITLES TRACK 02 TITLE \"+\\\n YELLOW+\"(ex: French.Forced)\"+GREEN+\\\n \" : \"+END)\n else:\n ub = raw_input(GREEN+\"SUBTITLES TRACK SOURCE > \\n\"+END)\n readline.parse_and_bind(\"tab: \")\n idsub = folder+ub\n if (subtype == \"1\"):\n titlesub = \"FULL.FRENCH\"\n elif (subtype == \"2\"):\n titlesub = \"FRENCH.FORCED\"\n idsub2 = \"\"\n titlesub2 = \"\"\n\n if (subtype == \"3\"):\n idcharset = raw_input(GREEN+\"SUBTITLES 01 CHARSET ANSI \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n idcharset2 = raw_input(GREEN+\"SUBTITLES 02 CHARSET ANSI \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n else:\n idcharset = raw_input(GREEN+\"SUBTITLES CHARSET ANSI \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n\n if (idcharset == \"y\"):\n charset = \" --sub-charset '0:cp1252'\"\n else:\n charset = \"\"\n\n if (subtype == \"3\"):\n if 
idcharset2 == \"y\":\n charset2 = \" --sub-charset '0:cp1252'\"\n else:\n charset2 = \"\"\n\n subsync = raw_input(GREEN+\"SUBTITLES DELAY \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (subsync == \"y\"):\n if (subtype == \"3\"):\n subdelay1 = raw_input(GREEN+\"SUBTITLES 01 DELAY \"+YELLOW+\\\n \"(ex: -200)\"+GREEN+\" : \"+END)\n subdelay2 = raw_input(GREEN+\"SUBTITLES 02 DELAY \"+YELLOW+\\\n \"(ex: -200)\"+GREEN+\" : \"+END)\n sync = \"--sync 0:\"+subdelay1+\" \"\n sync2 = \"--sync 0:\"+subdelay2+\" \"\n else:\n subdelay = raw_input(GREEN+\"SUBTITLES DELAY \"+YELLOW+\\\n \"(ex: -200)\"+GREEN+\" : \"+END)\n sync = \"--sync 0:\"+subdelay+\" \"\n sync2 = \"\"\n else:\n sync = \"\"\n sync2 = \"\"\n\n infos_subs_out = (idsub, titlesub, idsub2, titlesub2,\n charset, charset2, sync, sync2)\n return (infos_subs_out)\n\n #___ Subtitles Extract ___#\n def iso_extract():\n if (subtype == \"3\"): #--> EXTRACT ISO MULTi Subs\n return (\n \"sudo mount -o loop -t iso9660 \"+source+\" /media/ && cd \"+\\\n thumb+\" && mencoder -dvd-device /media/ dvd://1 -vobsubout \"+\\\n title+\"1 -vobsuboutindex 0 -sid \"+idsub+\\\n \" -o /dev/null -nosound \"\\\n \"-ovc frameno && mencoder -dvd-device /media/ dvd://1\"\\\n \" -vobsubout \"+title+\"2 -vobsuboutindex 0 -sid \"+idsub2+\\\n \" -o /dev/null -nosound -ovc frameno && sudo umount -f\"\\\n \" /media*\"\n )\n\n else: #--> EXTRACT ISO FR/VO Subs\n return (\n \"sudo mount -o loop -t iso9660 \"+source+\" /media/ && cd \"+\\\n thumb+\" && mencoder -dvd-device /media/ dvd://1 -vobsubout \"+\\\n title+\" -vobsuboutindex 0 -sid \"+idsub+\\\n \" -o /dev/null -nosound \"\\\n \"-ovc frameno && sudo umount -f /media*\"\n )\n\n def m2ts_extract():\n if (subtype == \"3\"): #--> EXTRACT M2TS MULTi Subs\n return (\n \"cd \"+thumb+\" && ffmpeg -i \"+source+\" -vn -an -map 0:\"+idsub+\\\n \" -scodec copy \"+title+\"1.mkv && ffmpeg -i \"+source+\\\n \" -vn -an -map 0:\"+idsub2+\" -scodec copy \"+title+\\\n \"2.mkv && mkvextract tracks \"+title+\"1.mkv 0:\"\\\n +title+\"1.pgs && mkvextract tracks \"+title+\"2.mkv 0:\"+title+\\\n \"2.pgs && mv \"+title+\"1.pgs \"+title+\"1.sup && mv \"+title+\\\n \"2.pgs \"+title+\"2.sup && rm -f \"+title+\"1.mkv && \"\\\n \"rm -f \"+title+\"2.mkv\"\n )\n\n else: #--> EXTRACT M2TS FR/VO Subs\n return (\n \"cd \"+thumb+\" && ffmpeg -i \"+source+\" -vn -an -map 0:\"+idsub+\\\n \" -scodec copy \"+title+\".mkv && mkvextract tracks \"+title+\\\n \".mkv 0:\"+title+\".pgs && mv \"+title+\".pgs \"+title+\\\n \".sup && rm -f \"+title+\".mkv\"\n )\n\n def mkv_format():\n if (subtype == \"3\"):\n ext = raw_input(GREEN+\"SUBTITLES 01 FORMAT > \\n\"+YELLOW+\"PGS \"+\\\n GREEN+\"[1]\"+YELLOW+\" - VOBSUB \"+GREEN+\"[2]\"+\\\n YELLOW+\" - ASS \"+GREEN+\"[3]\"+YELLOW+\" - SRT \"+\\\n GREEN+\"[4] : \"+END)\n ext2 = raw_input(GREEN+\"SUBTITLES 02 FORMAT > \\n\"+YELLOW+\\\n \"PGS \"+GREEN+\"[1]\"+YELLOW+\" - VOBSUB \"+GREEN+\\\n \"[2]\"+YELLOW+\" - ASS \"+GREEN+\"[3]\"+YELLOW+\\\n \" - SRT \"+GREEN+\"[4] : \"+END)\n else:\n ext = raw_input(GREEN+\"SUBTITLES FORMAT > \\n\"+YELLOW+\\\n \"PGS \"+GREEN+\"[1]\"+YELLOW+\" - VOBSUB \"+GREEN+\\\n \"[2]\"+YELLOW+\" - ASS \"+GREEN+\"[3]\"+YELLOW+\\\n \" - SRT \"+GREEN+\"[4] : \"+END)\n ext2 = \"\"\n\n if (ext == \"1\"):\n ext = \".pgs\"\n elif (ext == \"2\"):\n ext = \".vobsub\"\n elif (ext == \"3\"):\n ext = \".ass\"\n else:\n ext = \".srt\"\n if (ext2 == \"1\"):\n ext2 = \".pgs\"\n elif (ext2 == \"2\"):\n ext2 = \".vobsub\"\n elif (ext2 == \"3\"):\n ext2 = \".ass\"\n elif (ext2 == \"4\"):\n ext2 = \".srt\"\n else:\n ext2 
== \"\"\n\n subext = (ext, ext2)\n return (subext)\n\n def mkv_extract():\n if (subtype == \"3\"):\n if (ext == \"1\"):\n if (ext2 == \"1\"): #--> MULTi PGS from MKV\n return (\n \"cd \"+thumb+\" && mkvextract tracks \"+\\\n source+\" \"+idsub+\":\"+title+\\\n \"1\"+ext+\" && mkvextract tracks \"+source+\" \"+idsub2+\\\n \":\"+title+\"2\"+ext2+\" && mv \"+title+\"1\"+ext+\\\n \" \"+title+\"1.sup && mv \"+title+\"2\"+ext2+\\\n \" \"+title+\"2.sup\"\n )\n\n else: #--> MULTi SRT/ASS/VOBSUB from MKV\n return (\n \"cd \"+thumb+\" && mkvextract tracks \"+\\\n source+\" \"+idsub+\":\"+title+\"1\"+ext+\\\n \" && mkvextract tracks \"+source+\" \"+idsub2+\\\n \":\"+title+\"2\"+ext2\n )\n else:\n if (ext == \"1\"): #--> FR/VO PGS from MKV\n return (\n \"cd \"+thumb+\" && mkvextract tracks \"+\\\n source+\" \"+idsub+\":\"+title+ext+\\\n \" && mv \"+title+\"1\"+ext+\" \"+title+\"1.sup\"\n )\n\n else: #--> FR/VO SRT/ASS/VOBSUB from MKV\n return (\n \"cd \"+thumb+\" && mkvextract tracks \"+\\\n source+\" \"+idsub+\":\"+title+ext)\n\n def internal_subs():\n if (subtype == \"3\"): #--> CONFIG MULTI Subs from SOURCE\n sub_config = \" -map 0:\"+idsub+\" -metadata:s:s:0 title='\"+\\\n titlesub+\"' -metadata:s:s:0 language= -c:s:0 srt \"\\\n \"-map 0:\"+idsub2+\" -metadata:s:s:1 title='\"+\\\n titlesub2+\"' -metadata:s:s:1 language= -c:s:1 srt\"\n\n else: #--> CONFIG FR/VO Subs from SOURCE\n sub_config = \" -map 0:\"+idsub+\" -metadata:s:s:0 title='\"+\\\n titlesub+\"' -metadata:s:s:0 language= -c:s:0 srt\"\n return (sub_config)\n\n subsource = raw_input(GREEN+\"SUBTITLES FROM > \\n\"+YELLOW+\"SOURCE \"+GREEN+\\\n \"[1]\"+YELLOW+\" - NONE \"+GREEN+\"[2]\"+YELLOW+\\\n \" - FILE \"+GREEN+\"[3]\\n\"+YELLOW+\"ISO/IMG \"+GREEN+\\\n \"[4]\"+YELLOW+\" - MKV \"+GREEN+\"[5]\"+YELLOW+\\\n \" - M2TS \"+GREEN+\"[6] : \"+END)\n\n if (subsource == \"1\" or subsource == \"3\" or subsource == \"4\"\n or subsource == \"5\" or subsource == \"6\"):\n subtype = raw_input(GREEN+\"SUBTITLES TYPE > \\n\"+YELLOW+\"FR \"+GREEN+\\\n \"[1]\"+YELLOW+\" - FORCED \"+GREEN+\"[2]\"+YELLOW+\\\n \" - MULTi \"+GREEN+\"[3] : \"+END)\n if (subsource == \"1\"):\n if (audiotype == \"4\"):\n if (subtype == \"1\"):\n forced = \"--forced-track 3:no \"\n elif (subtype == \"2\"):\n forced = \"--forced-track 3:yes \"\n else:\n stforced = raw_input(GREEN+\"USE FORCED TRACK \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (stforced == \"y\"):\n forced = \"--forced-track 4:yes \"\n else:\n forced = \"--forced-track 4:no \"\n else:\n if (subtype == \"1\"):\n forced = \"--forced-track 2:no \"\n elif (subtype == \"2\"):\n forced = \"--forced-track 2:yes \"\n else:\n stforced = raw_input(GREEN+\"USE FORCED TRACK \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (stforced == \"y\"):\n forced = \"--forced-track 3:yes \"\n else:\n forced = \"--forced-track 3:no \"\n elif (subsource == \"3\" or subsource == \"4\"\n or subsource == \"5\" or subsource == \"6\"):\n if (subtype == \"1\"):\n forced = \"--forced-track '0:no' \"\n elif (subtype == \"2\"):\n forced = \"--forced-track '0:yes' \"\n else:\n stforced = raw_input(GREEN+\"USE FORCED TRACK \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (stforced == \"y\"):\n forced = \"--forced-track '0:yes' \"\n else:\n forced = \"--forced-track '0:no' \"\n if (subtype == \"3\"):\n if (stforced == \"y\"):\n subforced = \"YES\"\n else:\n subforced = \"N/A\"\n elif (subtype == \"2\"):\n subforced = \"YES\"\n else:\n subforced = \"N/A\"\n\n #___ Subtitles Process ___#\n def subextract_message():\n print (\n RED+\"\\n ->\"+GREEN+\" 
EXTRACTION DONE, CHECK RESULT FOLDER \"\\\n \"& RUN OCR IF NEEDED !\"+RED+\"\\n ->\"+GREEN+\\\n \" WARNING > PUT FINAL SRT IN SOURCE \"\\\n \"FOLDER FOR NEXT STEP !\\n\"+END\n )\n if (subsource == \"1\"): #--> SOURCE\n (idsub, titlesub, idsub2, titlesub2) = infos_subs_in()\n sub_config = internal_subs()\n sub_remux = remux_int()\n\n elif (subsource == \"4\"): #--> ISO\n (idsub, titlesub, idsub2, titlesub2) = infos_subs_in()\n extract_iso = iso_extract()\n os.system(extract_iso)\n subextract_message()\n\n (\n idsub, titlesub, idsub2, titlesub2,\n charset, charset2, sync, sync2\n ) = infos_subs_out()\n\n sub_config = \"\"\n sub_remux = remux_ext()\n\n elif (subsource == \"5\"): #--> MKV\n (idsub, titlesub, idsub2, titlesub2) = infos_subs_in()\n (ext, ext2) = mkv_format()\n extract_mkv = mkv_extract()\n os.system(extract_mkv)\n subextract_message()\n\n (\n idsub, titlesub, idsub2, titlesub2,\n charset, charset2, sync, sync2\n ) = infos_subs_out()\n\n sub_config = \"\"\n sub_remux = remux_ext()\n\n elif (subsource == \"6\"): #--> M2TS\n (idsub, titlesub, idsub2, titlesub2) = infos_subs_in()\n extract_m2ts = m2ts_extract()\n os.system(extract_m2ts)\n subextract_message()\n\n (\n idsub, titlesub, idsub2, titlesub2,\n charset, charset2, sync, sync2\n ) = infos_subs_out()\n\n sub_config = \"\"\n sub_remux = remux_ext()\n\n else: #--> FILE\n (\n idsub, titlesub, idsub2, titlesub2,\n charset, charset2, sync, sync2\n ) = infos_subs_out()\n\n sub_config = \"\"\n sub_remux = remux_ext()\n\n else:\n sub_config = \"\"\n sub_remux = \"\"\n titlesub = \"N/A\"\n subforced = \"N/A\"\n\n #___ Aspect Ratio ___#\n def custom():\n W = raw_input(GREEN+\"RESOLUTION WIDTH : \"+END)\n H = raw_input(GREEN+\"RESOLUTION HEIGHT : \"+END)\n reso = \" -s \"+W+\"x\"+H+crop\n return (reso)\n\n def DVD():\n ask_sar = raw_input(GREEN+\"USE SAMPLE ASPECT RATIO \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (ask_sar == \"y\"):\n sar = raw_input(GREEN+\"SOURCE ASPECT RATIO > \\n\"+YELLOW+\\\n \"PAL 16:9 \"+GREEN+\"[1]\"+YELLOW+\" - PAL 4:3 \"+\\\n GREEN+\"[2]\\n\"+YELLOW+\"NTSC 16:9 \"+GREEN+\\\n \"[3]\"+YELLOW+\" - NTSC 4:3 \"+GREEN+\"[4] : \"+END)\n if (sar == \"1\"):\n reso = \" -sar 64:45\"+crop\n elif (sar == \"2\"):\n reso = \" -sar 16:15\"+crop\n elif (sar == \"3\"):\n reso = \" -sar 32:27\"+crop\n elif (sar == \"4\"):\n reso = \" -sar 8:9\"+crop\n else:\n reso = custom()\n else:\n reso = custom()\n return (reso)\n\n def BLURAY():\n perso = raw_input(GREEN+\"CUSTOM RESOLUTION \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (perso == \"y\"):\n reso = custom()\n else:\n ratio = raw_input(GREEN+\"RELEASE ASPECT RATIO > \\n\"+YELLOW+\\\n \"1.33 - 1.66 - 1.78 - 1.85 - 2.35 - 2.40\"+\\\n GREEN+\" : \"+END)\n if (ratio == \"2.40\"):\n reso = \" -s 720x300\"+crop\n elif (ratio == \"2.35\"):\n reso = \" -s 720x306\"+crop\n elif (ratio == \"1.85\"):\n reso = \" -s 720x390\"+crop\n elif (ratio == \"1.78\"):\n reso = \" -s 720x404\"+crop\n elif (ratio == \"1.66\"):\n reso = \" -s 720x432\"+crop\n elif (ratio == \"1.33\"):\n reso = \" -s 720x540\"+crop\n else:\n reso = custom()\n return (reso)\n\n scan = raw_input(GREEN+\"SCAN AUTOCROP SOURCE \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (scan == \"y\"):\n os.system(\"HandBrakeCLI -t 0 --scan -i\"+source)\n\n ask_screen = raw_input(GREEN+\"SCREENSHOT VERIFICATION \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (ask_screen == \"y\"):\n os.system(\"./thumbnails.py \"+source+\" 5 2\")\n\n man_crop = raw_input(GREEN+\"MANUAL SOURCE CROP \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n 
if (man_crop == \"y\"):\n w_crop = raw_input(GREEN+\"SOURCE CROP WIDTH \"+YELLOW+\\\n \"(ex: 1920)\"+GREEN+\" : \"+END)\n h_crop = raw_input(GREEN+\"SOURCE CROP HEIGHT \"+YELLOW+\\\n \"(ex: 800)\"+GREEN+\" : \"+END)\n x_crop = raw_input(GREEN+\"PIXELS CROP LEFT/RIGHT \"+YELLOW+\\\n \"(ex: 0)\"+GREEN+\" : \"+END)\n y_crop = raw_input(GREEN+\"PIXELS CROP TOP/BOTTOM \"+YELLOW+\\\n \"(ex: 140)\"+GREEN+\" : \"+END)\n crop = \" -filter:v crop=\"+w_crop+\":\"+h_crop+\\\n \":\"+x_crop+\":\"+y_crop+\"\"\n else:\n crop = \"\"\n if (format == \"4\"):\n reso = DVD()\n elif (format == \"6\"):\n reso = custom()\n else:\n reso = BLURAY()\n\n #___ x264/x265 Params ___#\n level = raw_input(GREEN+\"VIDEO FORMAT PROFILE \"+YELLOW+\\\n \"(ex: 3.1)\"+GREEN+\" : \"+END)\n preset = raw_input(GREEN+\"CUSTOM PRESET X264/X265 > \\n\"+YELLOW+\\\n \"FAST \"+GREEN+\"[1]\"+YELLOW+\" - SLOW \"+GREEN+\\\n \"[2]\"+YELLOW+\" - SLOWER \"+GREEN+\"[3]\\n\"+YELLOW+\\\n \"VERYSLOW \"+GREEN+\"[4]\"+YELLOW+\" - PLACEBO \"+GREEN+\\\n \"[5]\"+YELLOW+\" - NONE \"+GREEN+\"[6] : \"+END)\n\n if (preset == \"1\"):\n prest = \" -preset fast\"\n elif (preset == \"2\"):\n preset = \" -preset slow\"\n elif (preset == \"3\"):\n preset = \" -preset slower\"\n elif (preset == \"4\"):\n preset = \" -preset veryslow\"\n elif (preset == \"5\"):\n preset = \" -preset placebo\"\n else:\n preset = \"\"\n\n tuned = raw_input(GREEN+\"X264/X265 TUNE > \\n\"+YELLOW+\"FILM \"+GREEN+\\\n \"[1]\"+YELLOW+\" - ANIMATION \"+GREEN+\"[2]\"+YELLOW+\\\n \" - GRAIN \"+GREEN+\"[3]\\n\"+YELLOW+\"STILLIMAGE \"+GREEN+\\\n \"[4]\"+YELLOW+\" - PSNR \"+GREEN+\"[5]\"+YELLOW+\\\n \" - SSIM \"+GREEN+\"[6]\\n\"+YELLOW+\"FASTDECODE \"+GREEN+\\\n \"[7]\"+YELLOW+\" - \"+GREEN+\"[8]\"+YELLOW+\\\n \" - NONE \"+GREEN+\"[9] : \"+END)\n\n if (tuned == \"1\"):\n tune = \" -tune film\"\n elif (tuned == \"2\"):\n tune = \" -tune animation\"\n elif (tuned == \"3\"):\n tune = \" -tune grain\"\n elif (tuned == \"4\"):\n tune = \" -tune stillimage\"\n elif (tuned == \"5\"):\n tune = \" -tune psnr\"\n elif (tuned == \"6\"):\n tune = \" -tune ssim\"\n elif (tuned == \"7\"):\n tune = \" -tune fastdecode\"\n elif (tuned == \"8\"):\n tune = \" -tune zerolatency\"\n else:\n tune = \"\"\n\n #___ x264 Expert Mode ___#\n x264 = raw_input(GREEN+\"X264/X265 EXPERT MODE \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (x264 == \"y\"):\n threads_ = raw_input(GREEN+\"PROCESSOR THREADS \"+YELLOW+\\\n \"(ex: 8)\"+GREEN+\" : \"+END)\n if not (threads_):\n threads = \" -threads 0\"\n else:\n threads = \" -threads \"+threads_\n\n thread_type_ = raw_input(GREEN+\"THREAD TYPE > \\n\"+YELLOW+\\\n \"SLICE \"+GREEN+\"[1]\"+YELLOW+\\\n \" - FRAME \"+GREEN+\"[2] : \"+END)\n if (thread_type_ == \"1\"):\n thread_type = \" -thread_type slice\"\n elif (thread_type_ == \"2\"):\n thread_type = \" -thread_type frame\"\n else:\n thread_type = \"\"\n if (encode_type == \"2\"):\n fastfirstpass = \"\"\n else:\n fastfirstpass_ = raw_input(GREEN+\"FAST FIRST PASS \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (fastfirstpass_ == \"y\"):\n fastfirstpass = \" -fastfirstpass 1\"\n elif (fastfirstpass_ == \"n\"):\n fastfirstpass = \" -fastfirstpass 0\"\n else:\n fastfirstpass = \"\"\n\n refs_ = raw_input(GREEN+\"REFERENCE FRAMES \"+YELLOW+\\\n \"(ex: 8)\"+GREEN+\" : \"+END)\n if not (refs_):\n refs = \"\"\n else:\n refs = \" -refs \"+refs_\n\n mixed_ = raw_input(GREEN+\"MIXED REFERENCES \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (mixed_ == \"n\"):\n mixed = \" -mixed-refs 0\"\n elif (mixed_ == \"y\"):\n mixed = \" 
-mixed-refs 1\"\n else:\n mixed = \"\"\n\n bf_ = raw_input(GREEN+\"MAXIMUM B-FRAMES \"+YELLOW+\\\n \"(ex: 16)\"+GREEN+\" : \"+END)\n if not (bf_):\n bf = \"\"\n else:\n bf = \" -bf \"+bf_\n\n pyramid_ = raw_input(GREEN+\"PYRAMIDAL METHOD > \\n\"+YELLOW+\\\n \"NONE \"+GREEN+\"[1]\"+YELLOW+\" - NORMAL \"+GREEN+\\\n \"[2]\"+YELLOW+\" - STRICT \"+GREEN+\"[3] : \"+END)\n if (pyramid_ == \"1\"):\n pyramid = \" -b-pyramid none\"\n elif (pyramid_ == \"2\"):\n pyramid = \" -b-pyramid normal\"\n elif (pyramid_ == \"3\"):\n pyramid = \" -b-pyramid strict\"\n else:\n pyramid = \"\"\n\n weightb_ = raw_input(GREEN+\"WEIGHTED B-FRAMES \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (weightb_ == \"n\"):\n weightb = \" -weightb 0\"\n elif (weightb_ == \"y\"):\n weightb = \" -weightb 1\"\n else:\n weightb = \"\"\n\n weightp_ = raw_input(GREEN+\"WEIGHTED P-FRAMES > \\n\"+YELLOW+\\\n \"NONE \"+GREEN+\"[1]\"+YELLOW+\" - SIMPLE \"+GREEN+\\\n \"[2]\"+YELLOW+\" - SMART \"+GREEN+\"[3] : \"+END)\n if (weightp_ == \"1\"):\n weightp = \" -weightp none\"\n elif (weightp_ == \"2\"):\n weightp = \" -weightp simple\"\n elif (weightp_ == \"3\"):\n weightp = \" -weightp smart\"\n else:\n weightp = \"\"\n\n dct_ = raw_input(GREEN+\"ENABLE 8x8 TRANSFORM \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (dct_ == \"n\"):\n dct = \" -8x8dct 0\"\n elif (dct_ == \"y\"):\n dct = \" -8x8dct 1\"\n else:\n dct = \"\"\n\n cabac_ = raw_input(GREEN+\"ENABLE CABAC \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (cabac_ == \"n\"):\n cabac = \" -coder vlc\"\n elif (cabac_ == \"y\"):\n cabac = \" -coder ac\"\n else:\n cabac = \"\"\n\n b_strategy_ = raw_input(GREEN+\"ADAPTIVE B-FRAMES > \\n\"+YELLOW+\\\n \"VERYFAST \"+GREEN+\"[1]\"+YELLOW+\\\n \" - FAST \"+GREEN+\"[2]\"+YELLOW+\\\n \" - SLOWER \"+GREEN+\"[3] : \"+END)\n if (b_strategy_ == \"1\"):\n b_strategy = \" -b_strategy 0\"\n elif (b_strategy_ == \"2\"):\n b_strategy = \" -b_strategy 1\"\n elif (b_strategy_ == \"3\"):\n b_strategy = \" -b_strategy 2\"\n else:\n b_strategy = \"\"\n\n direct_ = raw_input(GREEN+\"ADAPTIVE DIRECT MODE > \\n\"+YELLOW+\\\n \"NONE \"+GREEN+\"[1]\"+YELLOW+\" - SPATIAL \"+GREEN+\\\n \"[2]\\n\"+YELLOW+\"TEMPORAL \"+GREEN+\"[3]\"+YELLOW+\\\n \" - AUTO \"+GREEN+\"[4] : \"+END)\n if (direct_ == \"1\"):\n direct = \" -direct-pred none\"\n elif (direct_ == \"2\"):\n direct = \" -direct-pred spatial\"\n elif (direct_ == \"3\"):\n direct = \" -direct-pred temporal\"\n elif (direct_ == \"4\"):\n direct = \" -direct-pred auto\"\n else:\n direct = \"\"\n\n me_method_ = raw_input(GREEN+\"MOTION ESTIMATION METHOD > \\n\"+YELLOW+\\\n \"DIA \"+GREEN+\"[1]\"+YELLOW+\" - HEX \"+GREEN+\\\n \"[2]\\n\"+YELLOW+\"UMH \"+GREEN+\"[3]\"+YELLOW+\\\n \" - ESA \"+GREEN+\"[4]\"+YELLOW+\\\n \" - TESA \"+GREEN+\"[5] : \"+END)\n if (me_method_ == \"1\"):\n me_method = \" -me_method dia\"\n elif (me_method_ == \"2\"):\n me_method = \" -me_method hex\"\n elif (me_method_ == \"3\"):\n me_method = \" -me_method umh\"\n elif (me_method_ == \"4\"):\n me_method = \" -me_method esa\"\n elif (me_method_ == \"5\"):\n me_method = \" -me_method tesa\"\n else:\n me_method = \"\"\n\n subq_ = raw_input(GREEN+\"SUBPIXEL MOTION ESTIMATION \"+YELLOW+\\\n \"(ex: 11)\"+GREEN+\" : \"+END)\n if not (subq_):\n subq = \"\"\n else:\n subq = \" -subq \"+subq_\n\n me_range_ = raw_input(GREEN+\"MOTION ESTIMATION RANGE \"+YELLOW+\\\n \"(ex: 16)\"+GREEN+\" : \"+END)\n if not (me_range_):\n me_range = \"\"\n else:\n me_range = \" -me_range \"+me_range_\n\n partitions_ = raw_input(GREEN+\"PARTITIONS TYPE > 
\\n\"+YELLOW+\\\n \"ALL \"+GREEN+\"[1]\"+YELLOW+\" - p8x8 \"+GREEN+\\\n \"[2]\"+YELLOW+\" - p4x4 \"+GREEN+\\\n \"[3]\\n\"+YELLOW+\"NONE \"+GREEN+\"[4]\"+YELLOW+\\\n \" - b8x8 \"+GREEN+\"[5]\"+YELLOW+\\\n \" - i8x8 \"+GREEN+\"[6]\"+YELLOW+\\\n \" - i4x4 \"+GREEN+\"[7] : \"+END)\n if (partitions_ == \"1\"):\n partitions = \" -partitions all\"\n elif (partitions_ == \"2\"):\n partitions = \" -partitions p8x8\"\n elif (partitions_ == \"3\"):\n partitions = \" -partitions p4x4\"\n elif (partitions_ == \"4\"):\n partitions = \" -partitions none\"\n elif (partitions_ == \"5\"):\n partitions = \" -partitions b8x8\"\n elif (partitions_ == \"6\"):\n partitions = \" -partitions i8x8\"\n elif (partitions_ == \"7\"):\n partitions = \" -partitions i4x4\"\n else:\n partitions = \"\"\n\n trellis_ = raw_input(GREEN+\"TRELLIS MODE > \\n\"+YELLOW+\\\n \"OFF \"+GREEN+\"[1]\"+YELLOW+\\\n \" - DEFAULT \"+GREEN+\"[2]\"+YELLOW+\\\n \" - ALL \"+GREEN+\"[3] : \"+END)\n if (trellis_ == \"1\"):\n trellis = \" -trellis 0\"\n elif (trellis_ == \"2\"):\n trellis = \" -trellis 1\"\n elif (trellis_ == \"3\"):\n trellis = \" -trellis 2\"\n else:\n trellis = \"\"\n\n aq_ = raw_input(GREEN+\"ADAPTIVE QUANTIZATION \"+YELLOW+\\\n \"(ex: 1.5)\"+GREEN+\" : \"+END)\n if not (aq_):\n aq = \"\"\n else:\n aq = \" -aq-strength \"+aq_\n\n psy_ = raw_input(GREEN+\"PSYCHOVISUAL OPTIMIZATION \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (psy_) == \"n\":\n psy = \" -psy 0\"\n elif (psy_) == \"y\":\n psy = \" -psy 1\"\n else:\n psy = \"\"\n\n psyrd1 = raw_input(GREEN+\"RATE DISTORTION [psy-rd] \"+YELLOW+\\\n \"(ex: 1.00)\"+GREEN+\" : \"+END)\n if not (psyrd1):\n psyrd = \"\"\n else:\n psyrd2 = raw_input(GREEN+\"PSYCHOVISUAL TRELLIS [psy-rd] \"+\\\n YELLOW+\"(ex: 0.15)\"+GREEN+\" : \"+END)\n if not (psyrd2):\n psyrd = \"\"\n else:\n psyrd = \" -psy-rd \"+psyrd1+\":\"+psyrd2\n\n deblock_ = raw_input(GREEN+\"DEBLOCKING \"+YELLOW+\\\n \"(ex: -1:-1)\"+GREEN+\" : \"+END)\n if not (deblock_):\n deblock = \"\"\n else:\n deblock = \" -deblock \"+deblock_\n\n lookahead_ = raw_input(GREEN+\"FRAMES LOOKAHEAD \"+YELLOW+\\\n \"(ex: 60)\"+GREEN+\" : \"+END)\n if not (lookahead_):\n lookahead = \"\"\n else:\n lookahead = \" -rc-lookahead \"+lookahead_\n\n bluray_ = raw_input(GREEN+\"BLURAY COMPATIBILITY \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (bluray_ == \"y\"):\n bluray = \" -bluray-compat 1\"\n elif (bluray_ == \"n\"):\n bluray = \" -bluray-compat 0\"\n else:\n bluray = \"\"\n\n fastpskip_ = raw_input(GREEN+\"FAST SKIP on P-FRAMES \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (fastpskip_ == \"y\"):\n fastpskip = \" -fast-pskip 1\"\n elif (fastpskip_ == \"n\"):\n fastpskip = \" -fast-pskip 0\"\n else:\n fastpskip = \"\"\n\n g_ = raw_input(GREEN+\"KEYFRAME INTERVAL \"+YELLOW+\\\n \"(ex: 250)\"+GREEN+\" : \"+END)\n if not (g_):\n g = \"\"\n else:\n g = \" -g \"+g_\n\n keyint_min_ = raw_input(GREEN+\"MINIMAL KEY INTERVAL \"+YELLOW+\\\n \"(ex: 25)\"+GREEN+\" : \"+END)\n if not (keyint_min_):\n keyint_min = \"\"\n else:\n keyint_min = \" -keyint_min \"+keyint_min_\n\n scenecut_ = raw_input(GREEN+\"SCENECUT DETECTION \"+YELLOW+\\\n \"(ex: 40)\"+GREEN+\" : \"+END)\n if not (scenecut_):\n scenecut = \"\"\n else:\n scenecut = \" -sc_threshold \"+scenecut_\n\n cmp_ = raw_input(GREEN+\"CHROMA MOTION ESTIMATION \"+YELLOW+\\\n \"(y/n)\"+GREEN+\" : \"+END)\n if (cmp_ == \"n\"):\n cmp = \" -cmp sad\"\n elif (cmp_ == \"y\"):\n cmp = \" -cmp chroma\"\n else:\n cmp = \"\"\n\n param = preset+tune+threads+thread_type+fastfirstpass+refs+mixed+\\\n 
bf+pyramid+weightb+weightp+dct+cabac+b_strategy+direct+\\\n                me_method+subq+me_range+partitions+trellis+aq+psy+psyrd+\\\n                deblock+lookahead+bluray+fastpskip+g+keyint_min+scenecut+cmp\n\n        pass1 = preset+tune+threads+thread_type+fastfirstpass\n\n    else:\n        param = preset+tune+\" -threads 0\"\n        pass1 = preset+tune+\" -threads 0\"\n\n    #___ Prez / Torrent ___#\n    nfosource = raw_input(GREEN+\"RELEASE SOURCE \"+YELLOW+\\\n                          \"(ex: 1080p.HDZ)\"+GREEN+\" : \"+END)\n    nfoimdb = raw_input(GREEN+\"RELEASE IMDB ID \"+YELLOW+\\\n                        \"(ex: 6686697)\"+GREEN+\" : \"+END)\n\n    if (len(nfoimdb) == 7 and nfoimdb.isdigit()):\n        searchIMDB = \"http://deanclatworthy.com/imdb/?id=tt\"+nfoimdb\n        try:\n            data1 = loads(urlopen(searchIMDB).read())\n        except (HTTPError, ValueError):\n            data1 = \"\"\n            pass\n\n        searchTMDB = \"http://api.themoviedb.org/3/movie/tt\"+nfoimdb+\\\n                     \"?api_key=\"+tmdb_api_key+\"&language=fr\"\n        dataTMDB = urllib2.Request(\\\n                   searchTMDB, headers={\"Accept\" : \"application/json\"})\n        try:\n            data2 = loads(urllib2.urlopen(dataTMDB).read())\n        except (HTTPError, ValueError):\n            data2 = \"\"\n            pass\n\n        searchOMDB = \"http://www.omdbapi.com/?i=tt\"+nfoimdb\n        try:\n            data3 = loads(urlopen(searchOMDB).read())\n        except (HTTPError, ValueError):\n            data3 = \"\"\n            pass\n\n        tit1 = \"title\"\n        tit2 = \"original_title\"\n        tit3 = \"Title\"\n\n        if (tit1 in data1):\n            dir = \"%s\" % data1['title']\n            name = dir.replace(' ', '.').replace('/', '')\\\n                      .replace('(', '').replace(')', '')\\\n                      .replace('\"', '').replace(':', '')\\\n                      .replace(\"'\", \"\").replace(\"[\", \"\")\\\n                      .replace(\"]\", \"\").replace(\";\", \"\")\\\n                      .replace(\",\", \"\")\n        else:\n            if (tit2 in data2):\n                dir = \"%s\" % data2['original_title']\n                name = dir.replace(' ', '.').replace('/', '')\\\n                          .replace('(', '').replace(')', '')\\\n                          .replace('\"', '').replace(':', '')\\\n                          .replace(\"'\", \"\").replace(\"[\", \"\")\\\n                          .replace(\"]\", \"\").replace(\";\", \"\")\\\n                          .replace(\",\", \"\")\n            else:\n                if (tit3 in data3):\n                    dir = \"%s\" % data3['Title']\n                    name = dir.replace(' ', '.').replace('/', '')\\\n                              .replace('(', '').replace(')', '')\\\n                              .replace('\"', '').replace(':', '')\\\n                              .replace(\"'\", \"\").replace(\"[\", \"\")\\\n                              .replace(\"]\", \"\").replace(\";\", \"\")\\\n                              .replace(\",\", \"\")\n                else:\n                    name = \"\"\n                    nfoimdb = \"\"\n    else:\n        name = \"\"\n\n    tsize = raw_input(GREEN+\"RELEASE SIZE > \\n\"+YELLOW+\\\n                      \"SD - 350 - 550 - 700 - 1.37 - 2.05 - 2.74 - 4.37 \"\\\n                      \"- 6.56 - HD\"+GREEN+\" : \"+END)\n\n    tsize = tsize.lower()\n    if (tsize == \"350\"):\n        pieces = \"18\"\n        prezsize = \"350Mo\"\n    elif (tsize == \"550\"):\n        pieces = \"18\"\n        prezsize = \"550Mo\"\n    elif (tsize == \"700\"):\n        pieces = \"19\"\n        prezsize = \"700Mo\"\n    elif (tsize == \"1.37\"):\n        pieces = \"20\"\n        prezsize = \"1.37Go\"\n    elif (tsize == \"2.05\"):\n        pieces = \"20\"\n        prezsize = \"2.05Go\"\n    elif (tsize == \"2.74\"):\n        pieces = \"21\"\n        prezsize = \"2.74Go\"\n    elif (tsize == \"4.37\"):\n        pieces = \"22\"\n        prezsize = \"4.37Go\"\n    elif (tsize == \"6.56\"):\n        pieces = \"22\"\n        prezsize = \"6.56Go\"\n    elif (tsize == \"hd\"):\n        pieces = \"22\"\n        prezsize = \"..Go\"\n    else:\n        pieces = \"20\"\n        prezsize = \"..Go\"\n\n    pprint = raw_input(GREEN+\"PRINT FFMPEG FINAL COMMAND \"+YELLOW+\\\n                       \"(y/n)\"+GREEN+\" : \"+END)\n\n    #___ Return Global Values ___#\n    info_main = (\n        source, thumb, team, announce, title, year, stag, string, codec,\n        encode_type, crf, bit, level, idvideo, fps, interlace, interlace2,\n        audiolang, audio_config, sub_config, sub_remux, reso, param, pass1,\n        mark, nfoimdb, 
nfosource, titlesub, subforced, prezquality,\n        prezsize, pieces, name, pprint\n    )\n\n    return (info_main)\n\n#---> PROCESS <---#\n\nbanner()\n\n(\n    source, thumb, team, announce, title, year, stag, string, codec,\n    encode_type, crf, bit, level, idvideo, fps, interlace, interlace2,\n    audiolang, audio_config, sub_config, sub_remux, reso, param, pass1,\n    mark, nfoimdb, nfosource, titlesub, subforced, prezquality, prezsize,\n    pieces, name, pprint\n) = main()\n\nrun_ffmpeg = [\n    ffmpeg(),\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"\n]\n\nrun_data = [\n    data(),\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"\n]\n\nn = 1\n\nif (pprint == \"y\"):\n    print (ffmpeg())\n\nagain = raw_input(GREEN+\"NEXT ENCODE \"+YELLOW+\"(y/n)\"+GREEN+\" : \"+END)\nwhile (again != \"n\"):\n    next()\n\n    (\n        source, thumb, team, announce, title, year, stag, string, codec,\n        encode_type, crf, bit, level, idvideo, fps, interlace, interlace2,\n        audiolang, audio_config, sub_config, sub_remux, reso, param, pass1,\n        mark, nfoimdb, nfosource, titlesub, subforced, prezquality, prezsize,\n        pieces, name, pprint\n    ) = main()\n\n    run_ffmpeg[n] = ffmpeg()\n    run_data[n] = data()\n    n = n + 1\n\n    if (n != 20):\n        again = raw_input(GREEN+\"NEXT ENCODE \"+YELLOW+\\\n                          \"(y/n)\"+GREEN+\" : \"+END)\n    else:\n        break\n\nfor i in range (n):\n    os.system(run_ffmpeg[i])\n    os.system(run_data[i])\n    i = i + 1\n\nprint (RED+\"\\n ->\"+GREEN+\" ALL JOBS DONE, CONGRATULATIONS !\"+END)\nprint (RED+\" ->\"+GREEN+\" NFO, THUMBNAILS, (PREZ) & TORRENT CREATED !\\n\"+END)\n\nsys.exit()\n\nif (__name__ == \"__main__\"):\n    main()\n","sub_path":"ankoa.py","file_name":"ankoa.py","file_ext":"py","file_size_in_byte":58518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"55739946","text":"# coding=utf-8\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport six\nfrom filecmp import dircmp, _cmp\n\n# VER1 = 'dds_1.13_v1'\n# VER1 = 'dds_1.12'\n# VER2 = 'dds_1.13_v2'\nimport time\n\nVER1 = 'dds_1.12'\nVER2 = 'dds_1.12.0.2.37865'\nDETECT_CHANGED = False  # Takes a long time to do, only use if insane\nMEASURE_TIME = True\nNO_PRINT = True\n\n\ndef try_print(*args, **kwargs):\n    if not NO_PRINT:\n        print(*args, **kwargs)\n\n\ndef diff_files(dcmp, sub=False):\n    jobs = {'left': 'removed', 'right': 'added'}\n\n    files = {'added': [], 'removed': [], 'changed': []}\n\n    for job, job_name in six.iteritems(jobs):\n        for file in getattr(dcmp, job + '_only'):\n            file_path = os.path.join(getattr(dcmp, job), file)\n            try_print(\"File {} {} in {}\".format(file, job_name, VER2))\n            files[job_name].append(file_path)\n            try_print(file_path)\n            if os.path.isfile(file_path):\n                shutil.copy(file_path, os.path.join('diff', job_name, file))\n            elif os.path.isdir(file_path):\n                try:\n                    shutil.copytree(file_path, os.path.join('diff', job_name, file))\n                except (shutil.Error, OSError):\n                    try_print(\"Error moving dir, this sometimes happens\")\n    if DETECT_CHANGED:\n        for common_file in dcmp.common_files:\n            if _cmp(os.path.join(dcmp.left, common_file), os.path.join(dcmp.right, common_file), sh=True) == 1:\n                try_print(common_file, \"different\")\n                files['changed'].append(common_file)\n                if os.path.isfile(os.path.join(dcmp.right, common_file)):\n                    shutil.copy(os.path.join(dcmp.right, common_file), os.path.join('diff', 'changed', common_file))\n\n    for sub_dcmp in dcmp.subdirs.values():\n        ret = diff_files(sub_dcmp, sub=True)\n        for ret1, ret2 in six.iteritems(ret):\n            
files[ret1].extend(ret2)\n\n if sub:\n return files\n\n if DETECT_CHANGED:\n jobs['changed'] = 'changed' # hmm\n for job, job_name in six.iteritems(jobs):\n with open('diff/{}.txt'.format(job_name), 'w') as txt_file:\n for file in files[job_name]:\n ext = file.split('\\\\')[1]\n name = file.split('\\\\')[-1].split('.dds')[0]\n print(\"{}.{}\".format(name, ext), file=txt_file)\n\n\ndef main():\n if os.path.exists('diff'):\n shutil.rmtree('diff')\n if os.path.exists('diff'):\n os.rmdir('diff')\n os.makedirs('diff/added')\n os.makedirs('diff/removed')\n if DETECT_CHANGED:\n os.makedirs('diff/changed')\n with open('diff/info.txt', 'w') as info_txt:\n print('Comparing {} and {}\\n'.format(VER1, VER2), file=info_txt)\n print('DETECT_CHANGED={}'.format(DETECT_CHANGED), file=info_txt)\n dcmp = dircmp(VER1, VER2)\n # print(dcmp.diff_files)\n # dcmp.report_full_closure()\n if MEASURE_TIME:\n start_time = time.time()\n diff_files(dcmp)\n if MEASURE_TIME:\n print(\"Time taken: {}\".format(time.time()-start_time))\n with open('diff/info.txt', 'a') as info_txt:\n print(\"Time taken: {}\".format(time.time()-start_time), file=info_txt)\n\nif __name__ == '__main__':\n main()\n","sub_path":"trials & tests/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"213287984","text":"from arguments import get_args\nimport math\nimport os\nimport gym\nimport gym.spaces\nfrom gym import wrappers\nimport torch\nimport numpy as np\n\nfrom normalized_actions import NormalizedActions\n\nif __name__ == '__main__':\n \n args = get_args()\n\n # initialize environment\n env_name = args.env_name\n env = gym.make(env_name)\n\n # choose agent according to action space\n if type(env.action_space) != gym.spaces.discrete.Discrete:\n from reinforce_continuous import REINFORCE\n env = NormalizedActions(gym.make(env_name))\n else:\n from reinforce_discrete import REINFORCE\n\n if args.display:\n env = wrappers.Monitor(env, '/tmp/{}-experiment'.format(env_name), force=True)\n\n env.seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n \n agent = REINFORCE(args.hidden_size, env.observation_space.shape[0], env.action_space)\n\n dir = 'ckpt_' + env_name\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n for i_episode in range(args.num_episodes):\n env_reset = np.expand_dims(env.reset(), 0)\n state = torch.Tensor(env_reset)\n # print(state)\n entropies = []\n log_probs = []\n rewards = []\n for t in range(args.num_steps):\n\n action, log_prob, entropy = agent.select_action(state)\n action = action.cpu()\n\n next_state, reward, done, _ = env.step(action.numpy()[0])\n\n if args.render:\n env.render()\n\n entropies.append(entropy)\n log_probs.append(log_prob)\n rewards.append(reward)\n state = torch.Tensor([next_state])\n\n if done:\n break\n\n agent.update_parameters(rewards, log_probs, entropies, args.gamma)\n\n if i_episode % args.ckpt_freq == 0:\n torch.save(agent.model.state_dict(), os.path.join(dir, 'reinforce-' + str(i_episode) + '.pkl'))\n\n print('Episode: {}, reward: {}'.format(i_episode, np.sum(rewards)))\n\n\n env.close()\n","sub_path":"Policy Optimization/REINFORCE/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"375880649","text":"from data_loader import data_loader\nimport random\nimport numpy as np\n\n\nclass LearningMachine():\n def __init__(self):\n 
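# weights start random; w3 is drawn near -100 and acts as the bias term\n        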
self.w1 = random.gauss(0, 1)\n        self.w2 = random.gauss(0, 1)\n        self.w3 = random.gauss(0, 1)*10-100\n\n    def train(self, train_x, train_y):\n\n        auc_old = self.evaluate(train_x, train_y)\n\n        old_w1 = self.w1\n        old_w2 = self.w2\n        old_w3 = self.w3\n\n        self.w1 = random.gauss(0, 1)\n        self.w2 = random.gauss(0, 1)\n        self.w3 = random.gauss(0, 1)*10-100\n\n        auc_new = self.evaluate(train_x, train_y)\n\n        if auc_old > auc_new:\n            self.w1 = old_w1\n            self.w2 = old_w2\n            self.w3 = old_w3\n            print(\"auc:\", auc_old)\n        else:\n            print(\"auc:\", auc_new)\n\n    def predict(self, x):\n        height = x[0]\n        weight = x[1]\n        score = height * self.w1 + weight * self.w2 + self.w3\n\n        if score > 0:\n            return True\n        else:\n            return False\n\n    def evaluate(self, test_x, test_y):\n        correct_counter = 0\n        total_case = test_x.shape[0]\n\n        for idx in range(total_case):\n            x_pred = self.predict(test_x[idx, :])\n            if (x_pred is True) and test_y[idx] == 1.:\n                # print(\"correct\")\n                correct_counter += 1\n            elif (x_pred is False) and test_y[idx] == 0.:\n                # print(\"correct\")\n                correct_counter += 1\n            else:\n                # print(\"not correct\")\n                pass\n\n        auc = correct_counter / total_case\n        return auc\n\n\n(train_x, train_y), (test_x, test_y) = data_loader()\n\nm = LearningMachine()\n\nfor x in range(100):\n    m.train(train_x, train_y)\n\ntest_auc = m.evaluate(test_x, test_y)\n\nprint(\"test_auc:\", test_auc)\n\n\nfrom plot_perceptron import plot_data, plot_line, plt\nplot_data(train_x,train_y)\nplot_line(m.w1, m.w2, m.w3)\nplt.show()\n","sub_path":"course2/example/model_perceptron.py","file_name":"model_perceptron.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"121058414","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.tokenize import word_tokenize\nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn import model_selection\nfrom projects.imdb.src.config import INPUT_FILE,MODEL_OUTPUT\n\nif __name__ == \"__main__\":\n    df = pd.read_csv(f\"{INPUT_FILE}\")\n\n    df[\"kfold\"] = 1\n\n    df = df.sample(frac=1).reset_index(drop=True)\n\n    y = df.sentiment.values\n\n    kf = model_selection.StratifiedKFold(n_splits=5)\n\n    for f, (t_, v_) in enumerate(kf.split(X=df, y=y)):\n        df.loc[v_, 'kfold'] = f\n    \n    for fold_ in range(5):\n        train_df = df[df.kfold != fold_].reset_index(drop=True)\n        test_df = df[df.kfold == fold_].reset_index(drop=True)\n\n        tfv = TfidfVectorizer(tokenizer=word_tokenize, token_pattern=None, ngram_range=(1, 3))\n        tfv.fit(train_df.review)\n\n        xtrain = tfv.transform(train_df.review)\n        xtest = tfv.transform(test_df.review)\n\n        model = linear_model.LogisticRegression()\n\n        model.fit(xtrain, train_df.sentiment)\n\n        preds = model.predict(xtest)\n\n        accuracy = metrics.accuracy_score(test_df.sentiment, preds)\n\n        print(f\"Fold {fold_}\")\n        print(f\"Accuracy = {accuracy}\")\n        print(\"\")","sub_path":"projects/imdb/src/tfv_logres_trigram.py","file_name":"tfv_logres_trigram.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"593298059","text":"\"\"\"\n    Defines a set of classes that wrap data mapped to the database.\n    Each class corresponds to one database table: the class name matches the\n    table name (with its first letter capitalized), and the attributes\n    defined on each class mirror the table's columns.\n\"\"\"\n\n\nclass Login:\n    def __init__(self, id, username, userpwd, usertel, usersex):\n        \"\"\"\n        User login table\n        :param id: user ID\n        :param username: user name\n        :param userpwd: user password\n        :param usertel: user contact number\n        :param usersex: user gender\n        \"\"\"\n        self.id = id\n        self.username = username\n        self.userpwd = userpwd\n        self.usertel = usertel\n        self.usersex = usersex\n\n    def __str__(self):\n        return \"{'id': %d, 'username': %s, 'userpwd': %s, 'usertel': %s, 'usersex': %s}\" % (\n            self.id, self.username, self.userpwd, self.usertel, self.usersex)\n\n\nclass User:\n    def __init__(self, id, name, province, city, address, zip, date):\n        \"\"\"\n        User table\n        :param id: user ID\n        :param name: user name\n        :param province: province where the user lives\n        :param city: city where the user lives\n        :param address: user's street address\n        :param zip: postal code of the user's address\n        :param date: user's date of birth\n        \"\"\"\n        self.id = id\n        self.name = name\n        self.province = province\n        self.city = city\n        self.address = address\n        self.zip = zip\n        self.date = date\n\n    def __str__(self):\n        return \"{'id': %d, 'name': %s, 'province': %s, 'city': %s, 'address': %s, 'zip': %s, 'date': %s}\" % (\n            self.id, self.name, self.province, self.city, self.address, self.zip, self.date)\n","sub_path":"python/cgi/com/aowin/modal/modal.py","file_name":"modal.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"215390426","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2004-2010 Tiny SPRL ().\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp import netsvc\nfrom openerp.tools.translate import _\n\nclass purchase_order(osv.osv):\n _inherit = \"purchase.order\"\n\n def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):\n return {\n 'name': order_line.name or '',\n 'product_id': order_line.product_id.id,\n 'product_qty': order_line.product_qty,\n 'product_uos_qty': order_line.product_qty,\n 'product_uom': order_line.product_uom.id,\n 'product_uos': order_line.product_uom.id,\n 'date': self.date_to_datetime(cr, uid, order_line.date_planned, context),\n 'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),\n 'location_id': order.partner_id.property_stock_supplier.id,\n 'location_dest_id': order.location_id.id,\n 'picking_id': picking_id,\n 'partner_id': order.dest_address_id.id or order.partner_id.id,\n 'move_dest_id': order_line.move_dest_id.id,\n 'state': 'draft',\n 'type':'in',\n 'purchase_line_id': order_line.id,\n 'company_id': order.company_id.id,\n 'price_unit': order_line.price_unit\n }\npurchase_order()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"bias_sale_mrp_forecast/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"423703805","text":"import numpy as np\nimport pyqtgraph as pg\nfrom qtpy.QtGui import QBrush\n\nimport ibeatles.step1.utilities as utilities\nfrom neutronbraggedge.experiment_handler.experiment import Experiment\nfrom ..utilities.colors import pen_color, roi_group_color\nfrom ..utilities.gui_handler import GuiHandler\nfrom ..utilities.pyqrgraph import Pyqtgrah as PyqtgraphUtilities\nfrom ..binning.binning_handler import BinningHandler\nfrom ..fitting.fitting_handler import FittingHandler\n\n\nclass CustomAxis(pg.AxisItem):\n\n def __init__(self, gui_parent, *args, **kwargs):\n pg.AxisItem.__init__(self, *args, **kwargs)\n self.parent = gui_parent\n\n def tickStrings(self, values, scale, spacing):\n strings = []\n\n _distance_source_detector = float(str(self.parent.ui.distance_source_detector.text()))\n _detector_offset_micros = float(str(self.parent.ui.detector_offset.text()))\n\n tof_s = [float(time) * 1e-6 for time in values]\n\n _exp = Experiment(tof=tof_s,\n distance_source_detector_m=_distance_source_detector,\n detector_offset_micros=_detector_offset_micros)\n lambda_array = _exp.lambda_array\n\n for _lambda in lambda_array:\n strings.append(\"{:.4f}\".format(_lambda * 1e10))\n\n return strings\n\n\nclass Step1Plot(object):\n data = []\n\n plot_ui = {'sample': None,\n 'ob': None,\n 'normalized': None,\n 'binning': None}\n\n def __init__(self, parent=None, data_type='sample', data=[]):\n self.parent = parent\n self.data_type = data_type\n if data == []:\n data = self.parent.data_metadata[data_type]['data']\n self.data = data\n\n self.plot_ui['sample'] = self.parent.ui.bragg_edge_plot\n self.plot_ui['ob'] = self.parent.ui.ob_bragg_edge_plot\n self.plot_ui['normalized'] = self.parent.ui.normalized_bragg_edge_plot\n\n def all_plots(self):\n self.display_image()\n self.display_bragg_edge()\n\n def display_image(self):\n\n _data = self.data\n _state = None\n self.parent.live_data = _data\n\n if _data == []:\n self.clear_plots(data_type=self.data_type)\n else:\n\n _data = np.array(_data)\n if self.data_type == 'sample':\n o_pyqt 
= PyqtgraphUtilities(parent=self.parent,\n image_view=self.parent.ui.image_view,\n data_type=self.data_type)\n _state = o_pyqt.get_state()\n o_pyqt.save_histogram_level()\n\n self.parent.ui.area.setVisible(True)\n self.parent.ui.image_view.setImage(_data)\n self.add_origin_label(self.parent.ui.image_view)\n\n o_pyqt.set_state(_state)\n o_pyqt.reload_histogram_level()\n\n elif self.data_type == 'ob':\n o_pyqt = PyqtgraphUtilities(parent=self.parent,\n image_view=self.parent.ui.ob_image_view,\n data_type=self.data_type)\n _state = o_pyqt.get_state()\n o_pyqt.save_histogram_level()\n self.parent.ui.ob_area.setVisible(True)\n self.parent.ui.ob_image_view.setImage(_data)\n self.add_origin_label(self.parent.ui.ob_image_view)\n o_pyqt.set_state(_state)\n o_pyqt.reload_histogram_level()\n\n elif self.data_type == 'normalized':\n o_pyqt = PyqtgraphUtilities(parent=self.parent,\n image_view=self.parent.ui.normalized_image_view,\n data_type=self.data_type)\n _state = o_pyqt.get_state()\n o_pyqt.save_histogram_level()\n self.parent.ui.normalized_area.setVisible(True)\n self.parent.ui.normalized_image_view.setImage(_data)\n self.add_origin_label(self.parent.ui.normalized_image_view)\n self.parent.data_metadata['normalized']['data_live_selection'] = _data\n o_pyqt.set_state(_state)\n o_pyqt.reload_histogram_level()\n\n # make sure that if we have the fitting window open, we have also at least the binning\n if not (self.parent.fitting_ui is None) and \\\n (self.parent.binning_ui is None):\n self.parent.menu_view_binning_clicked()\n\n if not (self.parent.binning_ui is None):\n o_binning = BinningHandler(parent=self.parent)\n o_binning.display_image(data=_data)\n self.parent.binning_ui.ui.groupBox.setEnabled(True)\n self.parent.binning_ui.ui.groupBox_2.setEnabled(True)\n self.parent.binning_ui.ui.left_widget.setVisible(True)\n if not (self.parent.fitting_ui is None):\n o_fitting = FittingHandler(parent=self.parent)\n o_fitting.display_image(data=_data)\n o_fitting.display_roi()\n self.parent.fitting_ui.ui.area.setVisible(True)\n o_fitting.fill_table()\n if not (self.parent.rotate_ui is None):\n o_rotate = self.parent.rotate_ui\n o_rotate.display_rotated_images()\n\n self.parent.image_view_settings[self.data_type]['state'] = _state\n\n def initialize_default_roi(self):\n if self.data_type == 'sample':\n self.add_origin_roi(self.parent.ui.image_view, self.parent.ui.image_view_roi)\n elif self.data_type == 'ob':\n self.add_origin_roi(self.parent.ui.ob_image_view, self.parent.ui.ob_image_view_roi)\n elif self.data_type == 'normalized':\n self.add_origin_roi(self.parent.ui.normalized_image_view, self.parent.ui.normalized_image_view_roi)\n\n def add_origin_roi(self, image_view, roi_id):\n image_view.addItem(roi_id)\n self.parent.list_roi_id[self.data_type] = [roi_id]\n\n def add_origin_label(self, image_ui):\n # origin label\n text_id = pg.TextItem(html=\"(0,0)\",\n anchor=(1, 1))\n image_ui.addItem(text_id)\n text_id.setPos(-5, -5)\n\n # x and y arrows directions\n y_arrow = pg.ArrowItem(angle=-90, tipAngle=35, baseAngle=0,\n headLen=20, tailLen=40, tailWidth=2, pen='y', brush=None)\n image_ui.addItem(y_arrow)\n y_arrow.setPos(0, 65)\n y_text = pg.TextItem(html=\"Y\")\n image_ui.addItem(y_text)\n y_text.setPos(-30, 20)\n\n x_arrow = pg.ArrowItem(angle=180, tipAngle=35, baseAngle=0,\n headLen=20, tailLen=40, tailWidth=2, pen='y', brush=None)\n image_ui.addItem(x_arrow)\n x_arrow.setPos(65, 0)\n x_text = pg.TextItem(html=\"X\")\n image_ui.addItem(x_text)\n x_text.setPos(20, -30)\n\n def refresh_roi(self):\n 
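# currently a stub; ROI positions are re-applied by display_bragg_edge\n        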
pass\n\n def clear_image(self, data_type='sample'):\n if data_type == 'sample':\n self.parent.ui.image_view.clear()\n elif data_type == 'ob':\n self.parent.ui.ob_image_view.clear()\n elif data_type == 'normalized':\n self.parent.ui.normalized_image_view.clear()\n\n def clear_plots(self, data_type='sample'):\n if data_type == 'sample':\n self.parent.ui.image_view.clear()\n self.parent.ui.bragg_edge_plot.clear()\n elif data_type == 'ob':\n self.parent.ui.ob_image_view.clear()\n self.parent.ui.ob_bragg_edge_plot.clear()\n elif data_type == 'normalized':\n self.parent.ui.normalized_image_view.clear()\n self.parent.ui.normalized_bragg_edge_plot.clear()\n\n def display_general_bragg_edge(self):\n data_type = utilities.get_tab_selected(parent=self.parent)\n self.data_type = data_type\n data = self.parent.data_metadata[data_type]['data']\n self.data = data\n self.display_bragg_edge()\n\n def save_roi(self, label, x0, y0, x1, y1, group, data_type, index):\n _width = np.abs(x1 - x0)\n _height = np.abs(y1 - y0)\n\n _list_roi = self.parent.list_roi[data_type]\n if _list_roi == []:\n _label = \"roi_label\"\n _group = \"0\"\n _list_roi = [_label, str(x0), str(y0), str(_width), str(_height), _group]\n self.parent.list_roi[data_type] = [_list_roi]\n else:\n _label = label\n _group = group\n _list_roi = [_label, str(x0), str(y0), str(_width), str(_height), _group]\n self.parent.list_roi[data_type][index] = _list_roi\n\n def update_roi_editor(self, index):\n\n o_roi_editor = self.parent.roi_editor_ui[self.data_type]\n o_roi_editor.refresh(row=index)\n\n # o_roi = RoiHandler(parent=self.parent, data_type=self.data_type)\n # row_to_activate = o_roi.get_roi_index_that_changed()\n # o_roi_editor.activate_row(row_to_activate)\n\n def extract_data(self, list_data_group, data):\n list_data = {'0': [],\n '1': [],\n '2': [],\n '3': []}\n\n for _group in list_data_group.keys():\n _list_roi = list_data_group[_group]\n if _list_roi == []:\n list_data[_group] = []\n else:\n for _data in data:\n # nbr_roi = len(_list_roi)\n _tmp_data = []\n for _roi in _list_roi:\n [x0, x1, y0, y1] = _roi\n\n if self.parent.ui.roi_add_button.isChecked():\n # _tmp_data.append(np.sum(_data[y0:y1, x0:x1]))\n _tmp_data.append(np.nansum(_data[x0:x1, y0:y1]))\n else:\n # _tmp_data.append(np.mean(_data[y0:y1, x0:x1]))\n _tmp_data.append(np.nanmean(_data[x0:x1, y0:y1]))\n\n if self.parent.ui.roi_add_button.isChecked():\n list_data[_group].append(np.nansum(_tmp_data))\n else:\n list_data[_group].append(np.mean(_tmp_data, axis=0))\n\n return list_data\n\n def get_row_parameters(self, roi_editor_ui, row):\n\n # # label\n _item = roi_editor_ui.tableWidget.item(row, 0)\n if _item is None:\n raise ValueError\n label = str(_item.text())\n\n # x0\n _item = roi_editor_ui.tableWidget.item(row, 1)\n if _item is None:\n raise ValueError\n x0 = int(str(_item.text()))\n\n # y0\n _item = roi_editor_ui.tableWidget.item(row, 2)\n if _item is None:\n raise ValueError\n y0 = int(str(_item.text()))\n\n # width\n _item = roi_editor_ui.tableWidget.item(row, 3)\n if _item is None:\n raise ValueError\n width = int(str(_item.text()))\n\n # height\n _item = roi_editor_ui.tableWidget.item(row, 4)\n if _item is None:\n raise ValueError\n height = int(str(_item.text()))\n\n # group\n _group_widget = roi_editor_ui.tableWidget.cellWidget(row, 5)\n if _group_widget is None:\n raise ValueError\n _index_selected = _group_widget.currentIndex()\n group = str(_index_selected)\n\n return [label, x0, y0, width, height, group]\n\n def clear_bragg_edge_plot(self):\n if self.data_type 
== 'sample':\n self.parent.ui.bragg_edge_plot.clear()\n elif self.data_type == 'ob':\n self.parent.ui.ob_bragg_edge_plot.clear()\n elif self.data_type == 'normalized':\n self.parent.ui.normalized_bragg_edge_plot.clear()\n\n def display_bragg_edge(self, mouse_selection=True):\n _data = self.data\n\n if _data == []: # clear data if no data\n self.clear_bragg_edge_plot()\n\n else: # retrieve dictionaries of roi_id and roi data (label, x, y, w, h, group)\n\n list_roi_id = self.parent.list_roi_id[self.data_type]\n list_roi = self.parent.list_roi[self.data_type]\n\n roi_editor_ui = self.parent.roi_editor_ui[self.data_type]\n if self.data_type == 'sample':\n _image_view = self.parent.ui.image_view\n _image_view_item = self.parent.ui.image_view.imageItem\n elif self.data_type == 'ob':\n _image_view = self.parent.ui.ob_image_view\n _image_view_item = self.parent.ui.ob_image_view.imageItem\n elif self.data_type == 'normalized':\n _image_view = self.parent.ui.normalized_image_view\n _image_view_item = self.parent.ui.normalized_image_view.imageItem\n\n # used here to group rois into their group for Bragg Edge plot\n list_data_group = {'0': [],\n '1': [],\n '2': [],\n '3': []}\n\n for _index, roi in enumerate(list_roi_id):\n\n if mouse_selection:\n if type(self.parent.live_data) == type(list()):\n self.parent.live_data = np.array(self.parent.live_data)\n\n try:\n region = roi.getArraySlice(self.parent.live_data,\n _image_view_item)\n except IndexError:\n return\n\n label = list_roi[_index][0]\n x0 = region[0][0].start\n x1 = region[0][0].stop - 1\n y0 = region[0][1].start\n y1 = region[0][1].stop - 1\n group = list_roi[_index][-1]\n\n if x1 == x0:\n x1 += 1\n if y1 == y0:\n y1 += 1\n\n else:\n if roi_editor_ui is None:\n [label, x0, y0, w, h, group] = list_roi[_index]\n x0 = int(x0)\n y0 = int(y0)\n w = int(w)\n h = int(h)\n\n else:\n try:\n [label, x0, y0, w, h, group] = self.get_row_parameters(roi_editor_ui.ui,\n _index)\n except ValueError:\n return\n\n x1 = x0 + w\n y1 = y0 + h\n roi.setPos([x0, y0], update=False, finish=False)\n roi.setSize([w, h], update=False, finish=False)\n\n # display ROI boxes\n roi.setPen(pen_color[group])\n\n _text_array = self.parent.list_label_roi_id[self.data_type]\n if _text_array == []:\n text_id = pg.TextItem(\n html='
' + label + '
',\n anchor=(-0.3, 1.3),\n border='w',\n fill=(0, 0, 255, 50))\n _image_view.addItem(text_id)\n text_id.setPos(x0, y0)\n self.parent.list_label_roi_id[self.data_type].append(text_id)\n else:\n text_id = self.parent.list_label_roi_id[self.data_type][_index]\n # text_id.setText(label)\n text_id.setPos(x0, y0)\n text_id.setHtml('
' + label + ' \\\n ''
')\n\n list_data_group[group].append([x0, x1, y0, y1])\n\n self.save_roi(label, x0, y0, x1, y1, group, self.data_type, _index)\n\n if mouse_selection:\n if not (roi_editor_ui is None):\n roi_editor_ui.ui.tableWidget.blockSignals(True)\n self.update_roi_editor(_index)\n roi_editor_ui.ui.tableWidget.blockSignals(False)\n\n # work over groups\n data = self.parent.data_metadata[self.data_type]['data']\n bragg_edges = self.extract_data(list_data_group,\n data)\n\n # check if xaxis can be in lambda, or tof\n # o_time_handler = TimeSpectraHandler(parent = self.parent)\n # o_time_handler.load()\n # tof_array = o_time_handler.tof_array\n if self.data_type == 'normalized':\n tof_array = self.parent.data_metadata['time_spectra']['normalized_data']\n lambda_array = self.parent.data_metadata['time_spectra']['normalized_lambda']\n else:\n tof_array = self.parent.data_metadata['time_spectra']['data']\n lambda_array = self.parent.data_metadata['time_spectra']['lambda']\n\n # # enable the right xaxis buttons\n o_gui = GuiHandler(parent=self.parent)\n # if tof_array == []:\n # tof_flag = False\n # else:\n # tof_flag = True\n # o_gui.enable_xaxis_button(tof_flag=tof_flag)\n\n list_files_selected = self.parent.list_file_selected[self.data_type]\n linear_region_left = list_files_selected[0]\n linear_region_right = list_files_selected[-1]\n\n # xaxis_choice = o_gui.get_xaxis_checked(data_type=self.data_type)\n\n # display of bottom bragg edge plot\n dictionary = self.display_images_and_bragg_edge(tof_array=tof_array,\n lambda_array=lambda_array,\n bragg_edges=bragg_edges)\n x_axis = dictionary['x_axis']\n [linear_region_left, linear_region_right] = dictionary['linear_region']\n self.parent.normalized_lambda_bragg_edge_x_axis = lambda_array * 1e10\n o_gui.xaxis_label()\n\n lr = pg.LinearRegionItem([linear_region_left, linear_region_right])\n lr.setZValue(-10)\n\n if self.data_type == 'sample':\n self.parent.ui.bragg_edge_plot.addItem(lr)\n elif self.data_type == 'ob':\n self.parent.ui.ob_bragg_edge_plot.addItem(lr)\n else:\n self.parent.ui.normalized_bragg_edge_plot.addItem(lr)\n self.parent.fitting_bragg_edge_x_axis = x_axis\n\n lr.sigRegionChangeFinished.connect(self.parent.bragg_edge_selection_changed)\n self.parent.list_bragg_edge_selection_id[self.data_type] = lr\n self.parent.current_bragg_edge_x_axis[self.data_type] = x_axis\n\n def display_images_and_bragg_edge(self, tof_array=[], lambda_array=[], bragg_edges=[]):\n\n data_type = self.data_type\n plot_ui = self.plot_ui[data_type]\n plot_ui.clear()\n\n list_files_selected = self.parent.list_file_selected[self.data_type]\n linear_region_left_index = int(list_files_selected[0])\n linear_region_right_index = int(list_files_selected[-1])\n linear_region_left = linear_region_left_index\n linear_region_right = linear_region_right_index\n\n x_axis = []\n plot_ui.setLabel(\"left\", \"Total Counts\")\n\n _symbol = 't'\n\n if tof_array == []:\n\n plot_ui.setLabel('bottom', 'File Index')\n\n for _key in bragg_edges.keys():\n _bragg_edge = bragg_edges[_key]\n if _bragg_edge == []:\n continue\n curve = plot_ui.plot(_bragg_edge, symbolPen=None, pen=pen_color[_key], symbol=_symbol, symbolSize=5)\n x_axis = np.arange(len(_bragg_edge))\n\n curvePoint = pg.CurvePoint(curve)\n plot_ui.addItem(curvePoint)\n _text = pg.TextItem(\"Group {}\".format(_key), anchor=(0.5, 0))\n _text.setParentItem(curvePoint)\n brush = QBrush()\n brush.setColor(roi_group_color[int(_key)])\n arrow = pg.ArrowItem(angle=0, brush=brush)\n arrow.setParentItem(curvePoint)\n 
curvePoint.setPos(x_axis[-1])\n\n else:\n\n tof_array = tof_array * 1e6\n\n o_gui = GuiHandler(parent=self.parent)\n xaxis_choice = o_gui.get_xaxis_checked(data_type=self.data_type)\n\n first_index = True\n\n for _key in bragg_edges.keys():\n _bragg_edge = bragg_edges[_key]\n if _bragg_edge == []:\n continue\n\n if xaxis_choice == 'file_index':\n curve = plot_ui.plot(_bragg_edge, pen=pen_color[_key],\n symbolPen=None,\n symbolSize=5,\n symbol=_symbol)\n x_axis = np.arange(len(_bragg_edge))\n\n elif xaxis_choice == 'tof':\n curve = plot_ui.plot(tof_array, _bragg_edge,\n pen=pen_color[_key],\n symbolPen=None,\n symbolSize=5,\n symbol=_symbol)\n x_axis = tof_array\n linear_region_left = tof_array[linear_region_left_index]\n linear_region_right = tof_array[linear_region_right_index]\n\n else: # lambda\n\n if first_index:\n lambda_array = lambda_array * 1e10\n\n curve = plot_ui.plot(lambda_array, _bragg_edge,\n pen=pen_color[_key],\n symbolPen=None,\n symbolSize=5,\n )\n x_axis = lambda_array\n\n linear_region_left = lambda_array[linear_region_left_index]\n linear_region_right = lambda_array[linear_region_right_index]\n\n if first_index:\n self.display_selected_element_bragg_edges(plot_ui=plot_ui,\n lambda_range=[lambda_array[0], lambda_array[-1]],\n ymax=np.max(_bragg_edge))\n first_index = False\n\n curvePoint = pg.CurvePoint(curve)\n plot_ui.addItem(curvePoint)\n _text = pg.TextItem(\"Group {}\".format(_key), anchor=(0.5, 0))\n _text.setParentItem(curvePoint)\n brush = QBrush()\n brush.setColor(roi_group_color[int(_key)])\n arrow = pg.ArrowItem(angle=0, brush=brush)\n arrow.setParentItem(curvePoint)\n\n if xaxis_choice == 'lambda':\n last_position = x_axis[-1]\n else:\n last_position = x_axis[-1]\n\n curvePoint.setPos(last_position)\n\n return {'x_axis': x_axis,\n 'linear_region': [linear_region_left, linear_region_right]}\n\n def display_selected_element_bragg_edges(self, plot_ui=plot_ui, lambda_range=[], ymax=0):\n\n if self.data_type:\n display_flag_ui = self.parent.ui.material_display_checkbox\n else:\n display_flag_ui = self.parent.ui.material_display_checkbox_2\n\n if not display_flag_ui.isChecked():\n return\n\n _selected_element_bragg_edges_array = self.parent.selected_element_bragg_edges_array\n _selected_element_hkl_array = self.parent.selected_element_hkl_array\n\n for _index, _x in enumerate(_selected_element_bragg_edges_array):\n if (_x >= lambda_range[0]) and (_x <= lambda_range[1]):\n _item = pg.InfiniteLine(_x, pen=pg.mkPen(\"c\"))\n plot_ui.addItem(_item)\n _hkl = _selected_element_hkl_array[_index]\n _hkl_formated = \"{},{},{}\".format(_hkl[0], _hkl[1], _hkl[2])\n _text = pg.TextItem(_hkl_formated, anchor=(0, 1), angle=45, color=pg.mkColor(\"c\"))\n _text.setPos(_x, ymax)\n plot_ui.addItem(_text)\n","sub_path":"ibeatles/step1/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":24569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"177871503","text":"class Solution(object):\n def permute(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n def f(nums, N, path, res):\n if not nums:\n return\n \n if N == 1:\n res.append(path + nums)\n else:\n for i in range(len(nums)):\n f(nums[0:i]+nums[i+1:], N-1, path+[nums[i]], res)\n \n res = []\n f(nums, len(nums), [], res)\n return 
res\n","sub_path":"leetcode/46_Permutations.py","file_name":"46_Permutations.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"211260896","text":"import requests\nimport json\nimport datetime as dt\nimport pandas as pd\nimport sqlite3\n\n\nclass optionChain:\n\n def __init__(self, symbol:str = None, date:int = None):\n self.url = 'https://query2.finance.yahoo.com/v7/finance/options/'\n self.symbol = symbol\n self.r = self.request_data(symbol, date)\n self.data_header= ['contractSymbol','strike','currency','lastPrice',\n 'change','percentChange','volume','openInterest',\n 'bid','ask','contractSize','expiration','lastTradeDate',\n 'impliedVolatility','inTheMoney']\n \n\n self.data, self.expiration_dates_unix, self.strikes, self.has_mini, self.quote, self.option_chain = self.parse_response(self.r)\n self.expiration_dates = [self._convert_date(date) for date in self.expiration_dates_unix]\n self.calls, self.puts = self.parse_all_options()\n \n\n\n def _convert_date(self, unix_date):\n return dt.datetime.utcfromtimestamp(unix_date)\n\n\n def request_data(self, symbol:str = None, date:int = None):\n if date == None:\n r = requests.get(rf'{self.url}{self.symbol}')\n elif date != None:\n r = requests.get(rf'{self.url}{self.symbol}?date={date}')\n return r\n\n\n def parse_response(self, response):\n## print(response)\n data = json.loads(response.text)['optionChain']['result'][0]\n expiration_dates_unix = data['expirationDates']\n strikes = data['strikes']\n has_mini = data['hasMiniOptions']\n quote = data['quote']\n try:\n option_chain = data['options'][0]\n except:\n raise SymbolDNEError(self.symbol)\n \n return data, expiration_dates_unix, strikes, has_mini, quote, option_chain\n\n \n## def _get_calls(self):\n## return self.option_chain['calls']\n##\n##\n## def _get_puts(self):\n## return self.option_chain['puts']\n\n\n \n def parse_option_data(self, data):\n \n big_dict = {}\n for key in self.data_header:\n big_dict[key] = []\n for d in data:\n try:\n big_dict[key].append(d[key])\n except:\n big_dict[key].append(None)\n \n df = pd.DataFrame(big_dict)\n df['expiration'] = pd.to_datetime(df['expiration'], unit = 's')\n df['lastTradeDate'] = pd.to_datetime(df['lastTradeDate'], unit = 's')\n df['date'] = dt.datetime.today()\n \n return df\n\n def parse_all_options(self):\n df_calls=pd.DataFrame()\n df_puts=pd.DataFrame()\n for date in self.expiration_dates_unix:\n r = self.request_data(self.symbol, date)\n _,_,_,_,_,option_chain = self.parse_response(r)\n calls = option_chain['calls']\n puts = option_chain['puts']\n df_calls = df_calls.append(self.parse_option_data(calls))\n df_calls['type']='C'\n df_puts = df_puts.append(self.parse_option_data(puts))\n df_puts['type']='P'\n return df_calls, df_puts\n\n \n def save_to_sql(self, db_dst, table_name):\n conn = sqlite3.connect(db_dst)\n self.calls.to_sql(table_name, conn, if_exists='append',index=False)\n self.puts.to_sql(table_name, conn, if_exists='append',index=False)\n \n \n\nclass SymbolDNEError(Exception):\n def __init__(self, symbol):\n self.symbol = symbol\n\n def __str__(self):\n return f\"The symbole '{self.symbol}' does not exist.\"\n","sub_path":"yfoptions_dl/yfoptions_dl.py","file_name":"yfoptions_dl.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"356270904","text":"import urllib3\nfrom bs4 import BeautifulSoup\nimport sys\n\nurl = 
'http://bgp.potaroo.net/cidr/autnums.html'\n\n## function that creates as dictionary from the url above\n## dictionary is AS numbers as keys and AS names as values\ndef as_dict(url):\n http = urllib3.PoolManager()\n print(\"pool managed\")\n print(\"...takes some time for http request\")\n r = http.request('GET', url)\n print(\"requested\")\n soup = BeautifulSoup(r.data, \"html.parser\")\n print(\"all souped up\")\n everything = {}\n for a in soup.find('pre').findChildren():\n z1 = a.string.strip()\n z1 = z1[2:]\n z2 = a.next.next.strip()\n everything[z1] = z2\n print(\"succesfully created dictionary\")\n print(\"...ready to go\")\n return everything\n \n \n\n\n","sub_path":"collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"304259731","text":"from reports import count_games, decide, get_latest, \\\ncount_by_genre, get_line_number_by_title\n\ndef export_reports(file_name_r, file_name_w, year, genre, title):\n doc = open(file_name_w, \"w\")\n answers = [count_games(file_name_r), decide(file_name_r, year), \\\n get_latest(file_name_r), count_by_genre(file_name_r, genre), \\\n get_line_number_by_title(file_name_r, title)]\n for i in range(len(answers)):\n if i == len(answers):\n doc.write(str(answers[i]))\n else:\n doc.write(str(answers[i]) + \"\\n\")\n doc.close()\n return \n# Export functions\n","sub_path":"export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"157554661","text":"# This script is to combine Antenna Schedule and VSN together\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nfrom bs4 import Comment\nimport connect\n\nconn = connect.conn\ncursor = connect.cursor\n\ncursor.execute('''SELECT EXISTS (SELECT relname FROM pg_class WHERE relname = 'capacity')''')\nif cursor.fetchall()[0][0] is False:\n print('TABLE capacity is to be created...\\n')\ncursor.execute('''create table IF NOT EXISTS capacity (\n id SERIAL PRIMARY KEY,\n whichAntenna varchar(40),\n hdd_slot varchar(10) NOT NULL,\n VSN varchar(20) NOT NULL,\n mtime varchar(40) NOT NULL,\n remainingGB decimal(10,3) NOT NULL,\n remainingPer decimal(3,1) NOT NULL,\n checkTime time(0) NOT NULL,\n created_at TIMESTAMPTZ DEFAULT Now(),\n schedule varchar(20),\n CONSTRAINT no_duplicate UNIQUE (VSN, mtime, checkTime, remainingGB)\n )''')\n\nconn.commit()\n\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/77.0.3865.90 Safari/537.36'}\n\n\nwhile True:\n cursor.execute('''SELECT station_name FROM stations''')\n stationNames = cursor.fetchall()\n\n # Each station has its own url, which only differs in one segment, i.e. /.../\n for stationName in stationNames:\n # tuple\n stationName = stationName[0]\n # .lower() is to convert uppercase to lowercase e.g. 
WETTZELL -> wettzell\n url = f\"https://vlbisysmon.evlbi.wettzell.de/monitoring_archive/fs_web_pages/{stationName.lower()}\" \\\n f\"/Mark5RemainingCapacity.html\"\n url2readSchedule = f\"https://vlbisysmon.evlbi.wettzell.de/monitoring_archive/fs_web_pages/ivsquickstatus.html\"\n\n page = requests.get(url, headers=headers)\n page2readSchedule = requests.get(url2readSchedule, headers=headers)\n\n soup = BeautifulSoup(page.text, 'html.parser')\n soup2readSchedule = BeautifulSoup(page2readSchedule.text, 'html.parser')\n\n moduleAB = None\n VSN = None\n selected = None\n mtime = None\n remainingGB = None\n remainingPer = None\n checkTime = None\n\n comments = soup.find_all(string=lambda text: isinstance(text, Comment))\n for comment in comments:\n if comment in ['ERC::SELECTED_MODULEA']:\n if comment.next_element.strip() == '>':\n moduleAB = 'A'\n for commentA in comments:\n if commentA in ['ERC::VSN_MODULEA']:\n VSN = commentA.next_element.strip()\n if commentA in ['ERC::TIME_MODULEA']:\n mtime = commentA.next_element.strip()\n if commentA in ['ERC::REMAININGGB_MODULEA']:\n remainingGB = commentA.next_element.strip()\n if commentA in ['ERC::REMAININGPERCENT_MODULEA']:\n remainingPer = commentA.next_element.strip()\n if commentA in ['ERC::CHECKTIME_MODULEA']:\n checkTime = commentA.next_element.strip()\n if comment in ['ERC::SELECTED_MODULEB']:\n if comment.next_element.strip() == '>':\n moduleAB = 'B'\n for commentB in comments:\n if commentB in ['ERC::VSN_MODULEB']:\n VSN = commentB.next_element.strip()\n if commentB in ['ERC::TIME_MODULEB']:\n mtime = commentB.next_element.strip()\n if commentB in ['ERC::REMAININGGB_MODULEB']:\n remainingGB = commentB.next_element.strip()\n if commentB in ['ERC::REMAININGPERCENT_MODULEB']:\n remainingPer = commentB.next_element.strip()\n if commentB in ['ERC::CHECKTIME_MODULEB']:\n checkTime = commentB.next_element.strip()\n\n if moduleAB is None:\n print(\"Neither Module A nor Module B is selected\")\n\n table = soup2readSchedule.find(lambda tag: tag.name == 'table')\n # this is the way to locate antenna code lack of labels\n rows = table.findAll(lambda tag: tag.name == 'tr')\n schedule = None\n for row in rows:\n antenna_links = row.findAll('a')\n for antenna_link in antenna_links:\n parent = antenna_link.parent\n antennaName = antenna_link.text.strip()\n vsn = parent.find_next_siblings()[7].text.strip()\n if antennaName == stationName and vsn == VSN:\n schedule = parent.find_next_siblings()[4].text.strip()\n if schedule == '':\n schedule = None\n data = [(stationName, moduleAB, VSN, mtime, remainingGB, remainingPer, checkTime, schedule)]\n print(\"current information\", data)\n # insert into the table capacity\n try: # ON CONFLICT (stationName, moduleAB, VSN, mtime, checkTime, remainingGB) DO NOTHING\n cursor.execute(\"insert into capacity(id, whichAntenna, hdd_slot, VSN, mtime, remainingGB, remainingPer, \"\n \"checkTime, schedule) \"\n \"values (DEFAULT, %s, %s, %s, %s, %s, %s, %s, %s)\",\n (stationName, moduleAB, VSN, mtime, remainingGB, remainingPer, checkTime, schedule))\n print(\"The current DATA has been successfully added to database!\\n\")\n except:\n print(\"Nothing to update.\\n\")\n finally:\n # without commit(), psycopg2.errors.InFailedSqlTransaction occurs\n conn.commit()\n\n time.sleep(5)\n # gap between each tracking\n","sub_path":"record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
+{"seq_id":"149518614","text":"\n\n\"\"\"\nThanks to Brice for this piece of code. Taken from https://stackoverflow.com/a/9969000/851699\n\n\"\"\"\nfrom collections import Iterable\nimport sys\nif sys.version_info < (3, 0):\n from Queue import Queue\nelse:\n from queue import Queue\nfrom threading import Thread\n\n\nclass Iteratorize(Iterable):\n \"\"\"\n Transforms a function that takes a callback\n into a lazy iterator (generator).\n \"\"\"\n\n def __init__(self, func):\n \"\"\"\n :param Callable[Callable, Any] func: A function that takes a callback as an argument then runs.\n \"\"\"\n self.mfunc = func\n self.q = Queue(maxsize=1)\n self.sentinel = object()\n\n def _callback(val):\n self.q.put(val)\n\n def gentask():\n ret = self.mfunc(_callback)\n self.q.put(self.sentinel)\n\n # start_new_thread(gentask, ())\n Thread(target=gentask).start()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n\n obj = self.q.get(True, None)\n if obj is self.sentinel:\n raise StopIteration\n else:\n return obj\n","sub_path":"artemis/general/iteratorize.py","file_name":"iteratorize.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"475713620","text":"# notes_4.py 26-Jul-2018\r\n\"\"\"\r\nSupport multiple patterns:\r\n 1. nospacepat1 nospacpat2 .... == all patterns must be in line\r\n 2. [|&] nospacepat1 nospacepat2 ... == | any of the pat, & all of the pat\r\n 3. pat1 [&|] pat2 [&|] pat3 ... all & pats plus, if any | atleast one of | pat\r\n\r\nWrite a \"Notes\" program. The program will display lines\r\nfrom a text file, containing a given text string.\r\nTest:\r\nfile name = \"people.notes\"\r\ntext = \"Watertown\"\r\n Implementation Iterations:\r\nSetup test file(s): \"test.notes\", \"people.notes\"\r\n 1.\tRead specific file e.g. \"test.notes\", printing out all lines\r\n 2.\tPrint only lines containing \"student\"\r\n How to match lines ? 
Google \"python search for substring\" ?\r\n Support case insensitive match (Student, STUDENT)\r\n 3.\tPrompt for, then accept file name, pattern\r\n 4.\t[Extra Credit] Support multiple text patterns\r\n\r\n\"\"\"\r\nimport re # Support Regular Expression Pattern Searching\r\n\r\n# Default values\r\ndef_file_name = \"test.notes\"\r\ndef_pattern = \"student\"\r\n\r\n#########################################################################\r\n# Utility Functions #\r\n# NOTE: Python requires function definitions come before function calls #\r\n#########################################################################\r\n\r\n\r\n\"\"\"\r\nSearch file with pattern\r\n\"\"\"\r\ndef search_pat(file_name, pattern):\r\n finp = open(file_name) # Assume no error since we have just succeeded\r\n def_pat_type = '&'\r\n # Use the leading marker, if any as the default choice OR, AND\r\n if len(pattern) > 0:\r\n ind = pattern[0]\r\n if ind == '&':\r\n def_pat_type = ind\r\n pattern = pattern[1:]\r\n elif ind == '|':\r\n def_pat_type = ind\r\n pattern = pattern[1:]\r\n\r\n # Check if rest of line is devoid of markers\r\n if pattern.find('&') < 0 and pattern.find('|') < 0:\r\n pats = []\r\n pat_list = pattern.split() # all space separated\r\n for pat in pat_list:\r\n pats.append((def_pat_type, pat))\r\n else:\r\n pats = re.findall(r'\\s*([|&])\\s*([^&|]*)', pattern) \r\n \r\n or_pats = [] # | pat - accept if no and pat missing \r\n and_pats = [] # & pat - accept only if present\r\n\r\n def_pat_type = '&' # Default type\r\n if pattern.find('|') >= 0:\r\n def_pat_type = '|' # If we find | we make the default\r\n \r\n for ind, pat in pats:\r\n if ind == '|':\r\n def_pat_type = ind\r\n or_pats.append(pat)\r\n elif ind == '&':\r\n and_pats.append(pat)\r\n def_pat_type = ind\r\n else:\r\n if def_pat_type == '|': # treat as default type\r\n or_pats.append(pat)\r\n else:\r\n and_pats.append(pat)\r\n\r\n pattern_lc = pattern.lower() # Force pattern to lower case\r\n for line in finp:\r\n line = line.rstrip() # All trailing white space\r\n line_lc = line.lower()\r\n and_found = 0\r\n or_found = 0\r\n for and_pat in and_pats:\r\n if line_lc.find(and_pat) > -1:\r\n and_found += 1\r\n for or_pat in or_pats:\r\n if line_lc.find(or_pat) > -1:\r\n or_found += 1\r\n if len(and_pats) > 0:\r\n if and_found == len(and_pats):\r\n if len(or_pats) > 0:\r\n if or_found > 0:\r\n print(line) # At least one of the ors\r\n else:\r\n print(line) # No ors to check\r\n elif or_found > 0:\r\n print(line)\r\n\r\n################################################\r\n# End of Utility Functions #\r\n################################################\r\n\r\n\r\n# Set to default values\r\npattern = def_pattern\r\n\r\nwhile True:\r\n file_name = def_file_name\r\n inp = input(\"Enter file name[\" + file_name + \"] \")\r\n inp = inp.rstrip()\r\n if inp == \"\":\r\n inp = file_name\r\n file_name = inp\r\n try:\r\n finp = open(file_name)\r\n break # Got opened file\r\n \r\n except IOError :\r\n print(\"Can't open file \", file_name)\r\n inp.close() # So we can reopen\r\n\r\n# Loop over patterns until &&& to quit\r\nquit_sign = \"&&&\"\r\nwhile True:\r\n inp = input(\"Enter pattern[\" + pattern + \"] \" + quit_sign + \" to quit: \")\r\n inp = inp.lstrip().rstrip()\r\n if inp == quit_sign:\r\n break\r\n \r\n if inp == \"\":\r\n inp = pattern\r\n pattern = inp\r\n search_pat(file_name, 
pattern)\r\n","sub_path":"exercises/files/notes/notes_4.py","file_name":"notes_4.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"573622124","text":"import sys\nimport random\nimport cv2\nfrom keras.preprocessing.image import ImageDataGenerator,img_to_array\nfrom keras.utils import to_categorical\nimport glob\nimport os\nimport glob\nimport numpy as np\n\ndef load_data(path,width,height,class_num):\n data = []\n labels = []\n FileList = glob.glob(path+'/*')\n imagepaths = []\n for file in FileList:\n image = glob.glob(file + '/*.png')\n imagepaths+=image\n random.seed(42)\n random.shuffle(imagepaths)\n for imagepath in imagepaths:\n image = cv2.imread(imagepath)\n image = cv2.resize(image, (width, height))\n image = img_to_array(image)\n data.append(image)\n label = int(imagepath.split(os.path.sep)[-2])\n labels.append(label)\n data = np.array(data, dtype=\"float\") / 255.0\n labels = np.array(labels)\n labels = to_categorical(labels, num_classes=class_num)\n return data, labels\n","sub_path":"traffic/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"161606291","text":"import argparse\n\nfrom Bio import SeqIO\nfrom Bio.Align.Applications import ClustalOmegaCommandline\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\n#from new_alg.del_gap import *\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--in_file', help='input file [csv]')\n parser.add_argument('--out_file', help='output file [fasta]')\n return parser.parse_args()\n\n\ndef hyphens(fasta):\n id_seqs = []\n for i in SeqIO.parse(fasta, format=\"fasta\"):\n id_seqs.append((i.id, str(i.seq)))\n ids, seqs = map(list, zip(*id_seqs))\n min_start = len(seqs[0])\n for seq in seqs:\n start = len(seq)\n i = len(seq) - 1\n while seq[i] == '-':\n print(seq[i])\n start = i\n i -= 1\n\n if start < min_start:\n min_start = start\n open(fasta, 'w').close()\n out = open(fasta, 'a')\n for i in range(len(seqs)):\n seqs[i] = seqs[i][0:min_start]\n record = SeqRecord(Seq(seqs[i]), id=ids[i])\n SeqIO.write(record, out, 'fasta')\n out.close()\n\ntmp = parse_args()\nfile_in = tmp.in_file\nfile_out = tmp.out_file\n\nt1 = []\nl1 = []\nt2 = []\nl2 = []\nt3 = []\nl3 = []\nt4 = []\nfile_in_op = open(file_in, \"r\")\nfor x in file_in_op.readlines():\n if (x.count(\",\") == 0):\n continue\n arr = x.strip().split(\",\")\n if (arr[-1] == \"1\"):\n arr[1] = str(Seq(arr[1]).reverse_complement())\n m = arr[1]\n a1, a2, a3, a4 = arr[6].split(), arr[7].split(), arr[8].split(), arr[9].split()\n s = [int(a1[0]) * 3, int(a1[1]) * 3 + 3, int(a2[0]) * 3, int(a2[1]) * 3 + 3, int(a3[0]) * 3, int(a3[1]) * 3 + 3, int(a4[0]) * 3, int(a4[1]) * 3 + 3]\n t11 = (SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[0]:s[1]])))\n l11 = (SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[1]:s[2]])))\n t21 = (SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[2]:s[3]])))\n l21 = (SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[3]:s[4]])))\n t31 = (SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[4]:s[5]])))\n l31 = (SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[5]:s[6]])))\n t41 = (SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[6]:s[7]])))\n if (len(l11) * len(l21) * len(l31) == 0):\n continue\n else:\n t1.append(SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[0]:s[1]])))\n 
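# keep the framework (t*) and CDR (l*) slices for reads that passed the length check\n        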
l1.append(SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[1]:s[2]])))\n t2.append(SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[2]:s[3]])))\n l2.append(SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[3]:s[4]])))\n t3.append(SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[4]:s[5]])))\n l3.append(SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[5]:s[6]])))\n t4.append(SeqRecord(id=arr[0], description=arr[0], seq=Seq(m[s[6]:s[7]])))\n\nSeqIO.write(t1, file_out + \"fr1\", format=\"fasta\")\nSeqIO.write(t2, file_out + \"fr2\", format=\"fasta\")\nSeqIO.write(t3, file_out + \"fr3\", format=\"fasta\")\nSeqIO.write(t4, file_out + \"fr4\", format=\"fasta\")\nSeqIO.write(l1, file_out + \"cdr1\", format=\"fasta\")\nSeqIO.write(l2, file_out + \"cdr2\", format=\"fasta\")\nSeqIO.write(l3, file_out + \"cdr3\", format=\"fasta\")\n\n\nclustalomega_cline = ClustalOmegaCommandline(infile=file_out+\"fr1\", outfile=file_out+\"fr10\", verbose=True, auto=True, force=True)\nclustalomega_cline()\nclustalomega_cline = ClustalOmegaCommandline(infile=file_out+\"fr2\", outfile=file_out+\"fr20\", verbose=True, auto=True, force=True)\nclustalomega_cline()\nclustalomega_cline = ClustalOmegaCommandline(infile=file_out+\"fr3\", outfile=file_out+\"fr30\", verbose=True, auto=True, force=True)\nclustalomega_cline()\nclustalomega_cline = ClustalOmegaCommandline(infile=file_out+\"fr4\", outfile=file_out+\"fr40\", verbose=True, auto=True, force=True)\nclustalomega_cline()\nclustalomega_cline = ClustalOmegaCommandline(infile=file_out+\"cdr1\", outfile=file_out+\"cdr10\", verbose=True, auto=True, force=True)\nclustalomega_cline()\nclustalomega_cline = ClustalOmegaCommandline(infile=file_out+\"cdr2\", outfile=file_out+\"cdr20\", verbose=True, auto=True, force=True)\nclustalomega_cline()\nclustalomega_cline = ClustalOmegaCommandline(infile=file_out+\"cdr3\", outfile=file_out+\"cdr30\", verbose=True, auto=True, force=True)\nclustalomega_cline()\nhyphens(file_out+\"fr10\")\nhyphens(file_out+\"fr20\")\nhyphens(file_out+\"fr30\")\nhyphens(file_out+\"fr40\")\n# delind(file_out+\"10\")\n# delind(file_out+\"20\")\n# delind(file_out+\"30\")\n# delind(file_out+\"40\")\n# delind(file_out+\"110\")\n# delind(file_out+\"220\")\n# delind(file_out+\"330\")\n#\nt1 = []\nt2 = []\nt3 = []\nt4 = []\nl1 = []\nl2 = []\nl3 = []\nfor i in SeqIO.parse(file_out+\"fr10\", format=\"fasta\"):\n t1.append(i)\nfor i in SeqIO.parse(file_out+\"fr20\", format=\"fasta\"):\n t2.append(i)\nfor i in SeqIO.parse(file_out+\"fr30\", format=\"fasta\"):\n t3.append(i)\nfor i in SeqIO.parse(file_out+\"fr40\", format=\"fasta\"):\n t4.append(i)\nfor i in SeqIO.parse(file_out+\"cdr10\", format=\"fasta\"):\n l1.append(i)\nfor i in SeqIO.parse(file_out+\"cdr20\", format=\"fasta\"):\n l2.append(i)\nfor i in SeqIO.parse(file_out+\"cdr30\", format=\"fasta\"):\n l3.append(i)\nout_str = []\n#lengths = [len(t1[0]), len(l1[0]), len(t2[0]), len(l2[0]), len(t3[0]), len(l3[0]), len(t4[0])]\nfor i in range(len(l1)):\n out_str.append(t1[i] + l1[i] + t2[i] + l2[i] + t3[i] + l3[i] + t4[i])\n\nSeqIO.write(out_str, file_out, format=\"fasta\")\n#SeqIO.write(lengths, file_out + \"lengths\", format=\"fasta\")\n\n\n","sub_path":"good_align.py","file_name":"good_align.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"51427001","text":"# coding:utf-8\nimport pickle\nimport logging\nfrom model.models import BlogInfo, Menu, ArticleType, Article, Source, Comment, Plugin, 
BlogView\nfrom sqlalchemy.orm import joinedload\nfrom model.site_info import SiteCollection\nfrom config import site_cache_keys\nimport tornado.gen\nfrom service.custom_service import BlogInfoService\nfrom service.menu_service import MenuService\n\n\"\"\"Caching strategy for the fields on this page\n   Site-level caching speeds up access, especially for the data shown on the\n   home page. These fields use two cache tiers: local cache -> redis cache.\n   Query strategy: check the local cache first; on a miss query the redis\n   cache; on another miss query the database and propagate the result back\n   up through each tier.\n   Update strategy: after data is written to the database, update the redis\n   cache and publish an update message for that field so every node\n   refreshes its local cache.\n   Cache calibration: the master node runs a scheduled task that recalibrates\n   the redis cache during low-traffic periods and notifies all nodes to update.\n   \"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\nclass SiteCacheService(object):\n\n    PUB_SUB_MSGS = dict(\n        blog_info_updated=\"blog_info_updated\",  # message broadcast when blog_info changes\n    )\n\n    @staticmethod\n    @tornado.gen.coroutine\n    def query_all(cache_manager, thread_do, db_session):\n        yield SiteCacheService.query_blog_info(cache_manager, thread_do, db_session)\n        yield SiteCacheService.query_menus(cache_manager, thread_do, db_session)\n        yield SiteCacheService.query_plugin(cache_manager, thread_do, db_session)\n        yield SiteCacheService.query_blog_view_count(cache_manager, thread_do, db_session)\n        yield SiteCacheService.query_article_count(cache_manager, thread_do, db_session)\n        yield SiteCacheService.query_article_sources(cache_manager, thread_do, db_session)\n        yield SiteCacheService.query_comment_count(cache_manager, thread_do, db_session)\n\n    @staticmethod\n    @tornado.gen.coroutine\n    def query_blog_info(cache_manager, thread_do, db_session):\n        if SiteCollection.title is None or SiteCollection.signature is None or SiteCollection.navbar is None:\n            # first-level cache (redis)\n            SiteCollection.title = yield cache_manager.call('GET', site_cache_keys['title'])\n            SiteCollection.signature = yield cache_manager.call('GET', site_cache_keys['signature'])\n            SiteCollection.navbar = yield cache_manager.call('GET', site_cache_keys['navbar'])\n        if SiteCollection.title is None or SiteCollection.signature is None or SiteCollection.navbar is None:\n            # second-level fallback (mysql)\n            blog_info = yield thread_do(BlogInfoService.get_blog_info, db_session)\n            yield SiteCacheService.update_blog_info(cache_manager, blog_info)\n\n    @staticmethod\n    @tornado.gen.coroutine\n    def query_menus(cache_manager, thread_do, db_session):\n        if SiteCollection.menus is None or SiteCollection.article_types_not_under_menu is None:\n            menu_json = yield cache_manager.call('GET', site_cache_keys['menus'])\n            if menu_json:\n                menus = pickle.loads(menu_json)\n                SiteCollection.menus = menus\n            if SiteCollection.menus is None:\n                # serialize the database result so it stays in sync with redis\n                db_menus = yield thread_do(MenuService.menu_init, db_session, show_types=True)\n                SiteCollection.menus = db_menus\n                menu_json = pickle.dumps(db_menus)\n            if SiteCollection.menus is not None:\n                yield cache_manager.call('SET', site_cache_keys['menus'], menu_json)\n\n        if SiteCollection.article_types_not_under_menu is None:\n            ats_json = yield cache_manager.call(\"GET\", site_cache_keys['article_types_not_under_menu'])\n            if ats_json:\n                ats = pickle.loads(ats_json)\n                SiteCollection.article_types_not_under_menu = ats\n            if SiteCollection.article_types_not_under_menu is None:\n                SiteCollection.article_types_not_under_menu = yield \\\n                    thread_do(article_type_not_under_menu_init, db_session)\n            if SiteCollection.article_types_not_under_menu is not None:\n                ats_json = pickle.dumps(SiteCollection.article_types_not_under_menu)\n                yield cache_manager.call(\"SET\", site_cache_keys['article_types_not_under_menu'], ats_json)\n\n    @staticmethod\n    @tornado.gen.coroutine\n    def query_plugin(cache_manager, thread_do, db_session):\n        \"\"\"Objects stored in redis must be serialized (pickled) first.\"\"\"\n        if SiteCollection.plugins is None:\n            
plugins_json = yield cache_manager.call(\"GET\", site_cache_keys['plugins'])\n if plugins_json:\n plugins = pickle.loads(plugins_json)\n SiteCollection.plugins = plugins\n if SiteCollection.plugins is None:\n SiteCollection.plugins = yield thread_do(plugin_init, db_session)\n if SiteCollection.plugins is not None:\n plugins_json = pickle.dumps(SiteCollection.plugins)\n yield cache_manager.call(\"SET\", site_cache_keys['plugins'], plugins_json)\n\n @staticmethod\n @tornado.gen.coroutine\n def query_blog_view_count(cache_manager, thread_do, db_session):\n if SiteCollection.pv is None or SiteCollection.uv is None:\n pv = yield cache_manager.call(\"GET\", site_cache_keys['pv'])\n uv = yield cache_manager.call(\"GET\", site_cache_keys['uv'])\n if pv is None or uv is None:\n blog_view = yield thread_do( blog_view_init, db_session)\n if blog_view:\n SiteCollection.pv = blog_view.pv\n SiteCollection.uv = blog_view.uv\n if SiteCollection.pv or SiteCollection.uv:\n yield cache_manager.call(\"SET\", site_cache_keys['pv'], SiteCollection.pv)\n yield cache_manager.call(\"SET\", site_cache_keys['uv'], SiteCollection.uv)\n else:\n SiteCollection.pv = 0\n SiteCollection.uv = 0\n else:\n SiteCollection.pv = int(pv)\n SiteCollection.uv = int(uv)\n\n @staticmethod\n @tornado.gen.coroutine\n def query_article_count(cache_manager, thread_do, db_session):\n if SiteCollection.article_count is None:\n article_count = yield cache_manager.call(\"GET\", site_cache_keys['article_count'])\n if article_count is not None:\n SiteCollection.article_count = int(article_count)\n if SiteCollection.article_count is None:\n SiteCollection.article_count = yield thread_do( article_count_init, db_session)\n if SiteCollection.article_count is not None:\n yield cache_manager.call(\"SET\", site_cache_keys['article_count'], SiteCollection.article_count)\n\n @staticmethod\n @tornado.gen.coroutine\n def query_comment_count(cache_manager, thread_do, db_session):\n if SiteCollection.comment_count is None:\n comment_count = yield cache_manager.call(\"GET\", site_cache_keys['comment_count'])\n if comment_count is not None:\n SiteCollection.comment_count = int(comment_count)\n if SiteCollection.comment_count is None:\n SiteCollection.comment_count = yield thread_do( comment_count_init, db_session)\n if SiteCollection.comment_count is not None:\n yield cache_manager.call(\"SET\", site_cache_keys['comment_count'], SiteCollection.comment_count)\n\n @staticmethod\n @tornado.gen.coroutine\n def query_article_sources(cache_manager, thread_do, db_session):\n if SiteCollection.article_sources is None:\n article_sources_json = yield cache_manager.call('GET', site_cache_keys['article_sources'])\n if article_sources_json is not None:\n SiteCollection.article_sources = pickle.loads(article_sources_json)\n if SiteCollection.article_sources is None:\n SiteCollection.article_sources = yield thread_do(article_source_init, db_session)\n if SiteCollection.article_sources is not None:\n article_sources_json = pickle.dumps(SiteCollection.article_sources)\n yield cache_manager.call(\"SET\", site_cache_keys['article_sources'], article_sources_json)\n\n @staticmethod\n @tornado.gen.coroutine\n def update_blog_info(cache_manager, blog_info, is_pub_all=False, pub_manager=None):\n SiteCollection.title = blog_info.title\n SiteCollection.signature = blog_info.signature\n SiteCollection.navbar = blog_info.navbar\n yield cache_manager.call(\"SET\", site_cache_keys['title'], blog_info.title)\n yield cache_manager.call(\"SET\", site_cache_keys['signature'], 
blog_info.signature)\n yield cache_manager.call(\"SET\", site_cache_keys['navbar'], blog_info.navbar)\n # 向其他订阅者发布更新\n if is_pub_all:\n yield pub_manager.pub_call(SiteCacheService.PUB_SUB_MSGS['blog_info_updated'])\n\n @staticmethod\n @tornado.gen.coroutine\n def update_plugins(cache_manager, plugins, is_pub_all=False, pub_manager=None):\n # plugins 按照 order排个序\n # plugins_sorted = sorted(plugins, key=lambda x: x.order)\n # SiteCollection.plugins = plugins_sorted\n SiteCollection.plugins = plugins\n plugins_json = pickle.dumps(plugins)\n if plugins_json:\n yield cache_manager.call('SET', site_cache_keys['plugins'], plugins_json)\n if is_pub_all:\n yield pub_manager.pub_call(SiteCacheService.PUB_SUB_MSGS['blog_info_updated'])\n else:\n logger.error('redis更新plugin失败, plugins_json:%s' % plugins_json)\n\n @staticmethod\n @tornado.gen.coroutine\n def update_article_types_not_under_menu(cache_manager, article_types_not_under_menu, is_pub_all=False, pub_manager=None):\n SiteCollection.article_types_not_under_menu = article_types_not_under_menu\n atnum_json = pickle.dumps(article_types_not_under_menu)\n if atnum_json:\n yield cache_manager.call('SET', site_cache_keys['article_types_not_under_menu'], atnum_json)\n if is_pub_all:\n yield pub_manager.pub_call(SiteCacheService.PUB_SUB_MSGS['blog_info_updated'])\n else:\n logger.error('redis更新plugin失败, atnum_json:%s' % atnum_json)\n\n @staticmethod\n @tornado.gen.coroutine\n def update_menus(cache_manager, menus, is_pub_all=False, pub_manager=None):\n SiteCollection.menus = menus\n menus_json = pickle.dumps(menus)\n if menus_json:\n yield cache_manager.call('SET', site_cache_keys['menus'], menus_json)\n if is_pub_all:\n yield pub_manager.pub_call(SiteCacheService.PUB_SUB_MSGS['blog_info_updated'])\n else:\n logger.error('redis更新menus失败, menus_json:%s' % menus_json)\n\n @staticmethod\n @tornado.gen.coroutine\n def update_by_sub_msg(msg, cache_manager, thread_do, db):\n if msg and msg == SiteCacheService.PUB_SUB_MSGS['blog_info_updated']:\n yield SiteCacheService.query_blog_info(cache_manager, thread_do, db)\n\n\ndef article_type_not_under_menu_init(db_session):\n # 查询没有子目录的父目录\n article_type_not_under_menu = db_session.query(ArticleType).options(joinedload(ArticleType.setting)) \\\n .filter(ArticleType.menu_id.is_(None)).all()\n return article_type_not_under_menu\n\n\n# 右侧卡片组初始化\ndef plugin_init(db_session):\n plugins = db_session.query(Plugin).order_by(Plugin.order.asc()).all()\n return plugins\n\n\ndef blog_view_init(db_session):\n blog_view = db_session.query(BlogView).first()\n return blog_view\n\n\ndef article_count_init(db_session):\n article_count = db_session.query(Article).count()\n return article_count\n\n\ndef comment_count_init(db_session):\n comment_count = db_session.query(Comment).count()\n return comment_count\n\n\ndef article_source_init(db_session):\n article_sources = db_session.query(Source).all()\n if article_sources:\n for source in article_sources:\n source.fetch_article_count()\n return article_sources","sub_path":"service/init_service.py","file_name":"init_service.py","file_ext":"py","file_size_in_byte":12227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"568412712","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 20 15:42:58 2017\n\n@author: JTay\n\"\"\"\n\nimport numpy as np\nimport sklearn.model_selection as ms\nfrom sklearn.neighbors import KNeighborsClassifier as knnC\nimport pandas as pd\nfrom helpers import basicResults,makeTimingCurve\nfrom 
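update_blog_info pushes the new values to redis and, when is_pub_all is set, publishes blog_info_updated so peer nodes can refresh their local copies through update_by_sub_msg. A hypothetical subscriber loop for the receiving side (get_message is an assumed API on the subscription manager, not something shown in this module):

import tornado.gen

@tornado.gen.coroutine
def listen_for_updates(sub_manager, cache_manager, thread_do, db):
    # Each node runs this loop; on a publish, the matching query_*
    # method repopulates the local cache from redis.
    while True:
        msg = yield sub_manager.get_message()  # assumed API
        yield SiteCacheService.update_by_sub_msg(msg, cache_manager, thread_do, db)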
sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectFromModel\nalphas = [10**-x for x in np.arange(1,9.01,1/2)]\n\npipeM = Pipeline([('Scale',StandardScaler()),\n ('Cull1',SelectFromModel(RandomForestClassifier(),threshold='median')),\n ('Cull2',SelectFromModel(RandomForestClassifier(),threshold='median')),\n ('Cull3',SelectFromModel(RandomForestClassifier(),threshold='median')),\n ('Cull4',SelectFromModel(RandomForestClassifier(),threshold='median')),\n ('KNN',knnC())]) \n\nif 1 == 1:\n bearings = pd.read_hdf('datasets.hdf','bearings') \n bearingsX = bearings.drop('course',1).copy().values\n bearingsY = bearings['course'].copy().values\n bearings_trgX, bearings_tstX, bearings_trgY, bearings_tstY = ms.train_test_split(bearingsX, bearingsY, test_size=0.3, random_state=0,stratify=bearingsY) \n d = bearingsX.shape[1]\n hiddens_bearings = [(h,)*l for l in [1,2,3] for h in [d,d//2,d*2]]\n pipeA = Pipeline([('Scale',StandardScaler()), \n ('KNN',knnC())]) \n params_bearings= {'KNN__metric':['manhattan','euclidean','chebyshev'],'KNN__n_neighbors':np.arange(1,51,3),'KNN__weights':['uniform','distance']}\n bearings_clf = basicResults(pipeA,bearings_trgX,bearings_trgY,bearings_tstX,bearings_tstY,params_bearings,'KNN','bearings') \n bearings_final_params=bearings_clf.best_params_\n pipeA.set_params(**bearings_final_params)\n makeTimingCurve(bearingsX,bearingsY,pipeA,'KNN','bearings')\n\nif 1 == 1:\n tae = pd.read_hdf('datasets.hdf','tae') \n taeX = tae.drop('turn',1).copy().values\n taeY = tae['turn'].copy().values\n tae_trgX, tae_tstX, tae_trgY, tae_tstY = ms.train_test_split(taeX, taeY, test_size=0.3, random_state=0,stratify=taeY) \n d = taeX.shape[1]\n hiddens_tae = [(h,)*l for l in [1,2,3] for h in [d,d//2,d*2]]\n pipeT = Pipeline([('Scale',StandardScaler()), \n ('KNN',knnC())]) \n params_tae= {'KNN__metric':['manhattan','euclidean','chebyshev'],'KNN__n_neighbors':np.arange(1,51,3),'KNN__weights':['uniform','distance']}\n tae_clf = basicResults(pipeT,tae_trgX,tae_trgY,tae_tstX,tae_tstY,params_tae,'KNN','tae') \n tae_final_params=tae_clf.best_params_\n pipeT.set_params(**tae_final_params)\n makeTimingCurve(taeX,taeY,pipeT,'KNN','tae')\n","sub_path":"KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"584442380","text":"\"\"\"\n:version 0.2\n:author Miguel Dominguez\n:email: miguel.a.dominguez@nokia.com\n\"\"\"\n\nimport argparse\nimport logging\nimport sys\n\nimport pandas as pd\nfrom vspk import v5_0 as vsdk\n\n\n# Information about how to use it: https://nuagenetworks.github.io/vspkdoc/index.html\n# Information about how to use the API calls: https://nuagenetworks.github.io/vsd-api-documentation/index.html\n\n# INFO: The user and password for the VSD API are the default ones.\ndef start_session(ip: str, username: str = 'csproot', password: str = 'csproot'):\n \"\"\"\n Initialize a session with the VSD API and returns a nuage user\n :param username:\n :param password:\n :param ip:\n :return session.user:\n \"\"\"\n\n try:\n logging.info('Establishing connection to VSD API...')\n session = vsdk.NUVSDSession(username=username, password=password,\n enterprise='csp',\n api_url=f\"https://{ip}:8443\")\n session.start()\n logging.info('Connection established successfully!')\n return session.user\n except Exception as error:\n logging.error(f'Failed to 
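basicResults and makeTimingCurve come from the author's helpers module and are not shown; the grid search they presumably wrap can be reproduced with plain scikit-learn. A self-contained sketch on a stand-in dataset:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_iris(return_X_y=True)  # stand-in for the hdf5 datasets
pipe = Pipeline([('Scale', StandardScaler()), ('KNN', KNeighborsClassifier())])
params = {'KNN__metric': ['manhattan', 'euclidean', 'chebyshev'],
          'KNN__n_neighbors': np.arange(1, 51, 3),
          'KNN__weights': ['uniform', 'distance']}
search = GridSearchCV(pipe, params, cv=5).fit(X, y)
print(search.best_params_)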
connect to VSD API server.\\n {error}\\n')\n sys.exit(1)\n\n\ndef create_rate_limiter(name, description, committed_information_rate,\n peak_information_rate, peak_burst_size,\n enterprise=False):\n \"\"\"\n Creation of a Rate Limiter\n Link: https://nuagenetworks.github.io/vspkdoc/v5_0/nunsgateway.html\n :param name:\n :param description:\n :param committed_information_rate:\n :param peak_information_rate:\n :param peak_burst_size:\n :param enterprise:\n An optional parameter or parameter set up with default values\n must be outlined at the end of the args list. We set up this\n argument as optional since we can go without of it\n if rate limiter creation is performed at Platform level.\n :return:\n \"\"\"\n\n # Building the Rate Limiter object\n rl = vsdk.nuratelimiter.NURateLimiter(name=name,\n description=description,\n committed_information_rate=committed_information_rate,\n peak_information_rate=peak_information_rate,\n peak_burst_size=peak_burst_size)\n try:\n # If enterprise argument is not provided, rate limiter creation is\n # performed at platform level\n if enterprise == False:\n nuage_user.create_child(rl)\n logging.info(f'Rate limiter: {name} with: CIR: {committed_information_rate}, '\n f'PIR: {peak_information_rate}, Burst Size: {peak_burst_size}; '\n f'has been created successfully.')\n else:\n enterprise.create_child(rl)\n em = str(enterprise.name)\n logging.info(f'Enterprise: {em}, Rate limiter: {name}'\n f' with: CIR: {committed_information_rate}, '\n f'PIR: {peak_information_rate}, Burst Size: {peak_burst_size}; '\n f'has been created successfully.')\n except Exception as error:\n logging.error(f'Failed to create {name} rate limiter. {error}')\n\n\ndef delete_rate_limiter(name, enterprise=False):\n \"\"\"\n Deletion of a Rate Limiter\n :param name:\n :param enterprise:\n The same as the creation function. This argument is optional.\n :return:\n \"\"\"\n\n # Fetching the rate limiter based on its name\n if enterprise == False:\n rl = nuage_user.rate_limiters.get_first(filter=f'name is \"{name}\"')\n else:\n rl = enterprise.rate_limiters.get_first(filter=f'name is \"{name}\"')\n\n try:\n rl.delete()\n if enterprise == False:\n logging.info(f'Rate limiter: {name}, deleted successfully!')\n else:\n logging.info(f'Enterprise: {enterprise.name}, Rate limiter: {name}, '\n f'deleted successfully!')\n except Exception as error:\n logging.error(f'Failed to delete {name} rate limiter. 
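create_rate_limiter and delete_rate_limiter use False as the "no enterprise" sentinel and compare it with ==; None is the conventional sentinel and makes the dispatch read more cleanly. A sketch of the same logic:

def create_child_at(nuage_user, child, enterprise=None):
    # Platform level when no enterprise is given, enterprise level otherwise.
    target = enterprise if enterprise is not None else nuage_user
    target.create_child(child)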
{error}')\n\n\ndef find_by_name(name, array):\n for element in array:\n if element.name == name:\n return element\n return None\n\n\nif __name__ == '__main__':\n # Checking if the required arguments are provided\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--api_address', type=str, required=True)\n parser.add_argument('-f', '--csv_file', type=str, required=True)\n parser.add_argument('-d', '--delete', action='store_true', required=False)\n parser.add_argument('-e', '--enterprise', action='store_true', required=False)\n ns = parser.parse_args()\n # Establishing session\n logging.basicConfig(format=\"%(asctime)s: %(message)s\", level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n VSD_API_ADDRESS = ns.api_address\n nuage_user = start_session(VSD_API_ADDRESS)\n # Loading CSV file\n CSV_FILE = ns.csv_file\n logging.info('Reading the CSV file of the environment ...')\n data = pd.read_csv(CSV_FILE, encoding='latin1')\n\n # If -e argument is provided, the script performs Rate Limiters creation by\n # Enterprise, otherwise, creation is performed at Platform level\n if ns.enterprise == True:\n # Fetching all the enterprises registered on the VSD\n enterprises = nuage_user.enterprises.get()\n\n # If argument -d is provided, it activates delete function, otherwise,\n # create function is called\n if ns.delete == False:\n # Creation at Enterprise level\n for index, row in data.iterrows():\n enterprise = find_by_name(row['ORGANIZATION'], enterprises)\n create_rate_limiter(row['NAME'], row['DESCRIPTION'], row['CIR'],\n row['PIR'], row['BURST_SIZE'], enterprise)\n else:\n # Deletion at Enterprise level\n for index, row in data.iterrows():\n enterprise = find_by_name(row['ORGANIZATION'], enterprises)\n delete_rate_limiter(row['NAME'], enterprise)\n else:\n if ns.delete == False:\n # Creation at Platform level\n for index, row in data.iterrows():\n create_rate_limiter(row['NAME'], row['DESCRIPTION'], row['CIR'],\n row['PIR'], row['BURST_SIZE'])\n else:\n # Deletion at Platform level\n for index, row in data.iterrows():\n delete_rate_limiter(row['NAME'])","sub_path":"rate_limiter.py","file_name":"rate_limiter.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"504783014","text":"'''\nExample using the MAX22191PMB.\nThis scripts toggles output of MAX22191PMB.\nCreated on 9.03.2021\n\n@author: JH\n'''\nfrom pyb import Pin\nimport time\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.info(\"MAX22191PMB example running\")\n\nDI1 = Pin(Pin.cpu.C1,Pin.OUT_PP)\nDI2 = Pin(Pin.cpu.A7, Pin.OUT_PP)\n\nwhile(True):\n for cursor in '|/-\\\\':\n DI1_lvl = DI1.value()\n DI2_lvl = DI2.value()\n text = cursor+\" DI1 state: \" + str(DI1_lvl) + \"; DI2 state: \" + str(DI2_lvl)\n print(text, end='\\r')\n time.sleep(0.2)\n","sub_path":"PyTrinamicMicro/platforms/motionpy/examples/modules/max/max22191pmb.py","file_name":"max22191pmb.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"623875755","text":"import string\n\ndef open_file(filename):\n '''Opens a file'''\n\n try:\n file_object = open(filename, 'r')\n return file_object\n except FileNotFoundError:\n return None\n\n\ndef read_file(file_object):\n '''Reads file and returns data_dict'''\n\n data_dict = {}\n\n for line in file_object:\n key,value = line.split(':')\n\n # Take the whitespaces in the end of each line out of the values\n if value[-1] in 
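The MAX22191PMB docstring says the script toggles the outputs, but the loop above only reads DI1/DI2. If toggling is actually intended, a sketch using the same pyb API (calling value() with an argument sets the pin; DI1, DI2 and time as set up above):

while True:
    DI1.value(not DI1.value())  # flip DI1
    DI2.value(not DI2.value())  # flip DI2
    time.sleep(0.2)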
string.whitespace:\n value = value[0:-1]\n data_dict[key] = value\n\n return data_dict\n\n\ndef read_message(message, data_dict):\n '''Reads the message and prints out the translation'''\n\n a_str = ''\n punctuation_letters = 0\n word_list = message.split()\n\n for word in word_list:\n # Count the punctuation letters in each word\n for letter in word:\n if letter in string.punctuation:\n punctuation_letters +=1\n \n # Check if the word contains more than one punctuation, if it does, keep the word the same\n if punctuation_letters>1:\n a_str += word + ' '\n\n # If the word has 1 or less punctuation letter, check if the last letter is a punctuation and if it is,\n # cut it off to check if the word is in the dict\n else:\n\n if word[-1] in string.punctuation:\n last = word[-1]\n word = word[0:-1]\n else:\n last = ''\n\n if word in data_dict:\n a_str += data_dict[word] + last + ' '\n else:\n a_str += word + last + ' '\n \n # Initialize the counter before going to the next word\n punctuation_letters = 0\n\n print(a_str)\n\n\ndef main():\n ''' The main program'''\n\n filename = input('Enter name of mapping file: ')\n file_object = open_file(filename)\n\n # Only if the file exists\n if file_object != None:\n data_dict = read_file(file_object)\n message = input('Enter a message: ')\n\n while message != 'q':\n read_message(message, data_dict)\n message = input('Enter a message: ')\n \n file_object.close()\n \nmain()","sub_path":"Hlutapróf 3/translate_skila.py","file_name":"translate_skila.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"600061251","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 27 09:24:19 2017\n\n@author: claire\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n#from sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegression\n\nimport mne\nfrom mne.decoding import (SlidingEstimator, GeneralizingEstimator,\n cross_val_multiscore, LinearModel, get_coef)\nimport os\nfrom sklearn.preprocessing import LabelEncoder\n\ncond= 'diam'\nana_path = '/home/claire/DATA/Data_Face_House/Group Analysis/Ana_Cues/'\n\nexclude = []\nall_epochs=list()\n# We start by exploring the frequence content of our epochs.\nfor subject_id in range(1,12):#,12):\n if subject_id in exclude:\n continue\n subject = 'S%02d' %subject_id\n data_path = os.path.join('/home/claire/DATA/Data_Face_House/' + subject + '/EEG/Ana_Cues/')\n fname_in = os.path.join(data_path, '%s-epo.fif' %subject)\n epochs=mne.read_epochs(fname_in)\n #epochs.interpolate_bads()\n all_epochs.append(epochs)\n \nepochs = mne.concatenate_epochs(all_epochs)\n\nepochs=epochs[cond]\n\nepochs.crop(tmin=-0.2, tmax=0.5)\n\n# fit and time decoder\nle=LabelEncoder()\n\nX = epochs.get_data() # MEG signals: n_epochs, n_channels, n_times\ny = le.fit_transform(epochs.events[:, 2]) # target: Audio left or right\n\nclf = make_pipeline(StandardScaler(), LogisticRegression())\n\n# define the Temporal Generalization object\ntime_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc')\n\nscores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=6)\n\n# Mean scores across cross-validation splits\nscores = np.mean(scores, axis=0)\n\n\nclass_balance = np.mean(y == y[0])\nclass_balance = max(class_balance, 1. 
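read_file above trims at most one trailing whitespace character from each value; str.strip() handles any amount of surrounding whitespace, and splitting with split(':', 1) keeps colons inside values intact. A minimal alternative:

def read_file_stripped(file_object):
    '''Same key:value parsing, robust to arbitrary surrounding whitespace.'''
    data = {}
    for line in file_object:
        key, value = line.split(':', 1)
        data[key.strip()] = value.strip()
    return data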
- class_balance)\n# Plot the diagonal (it's exactly the same as the time-by-time decoding above)\nfig, ax = plt.subplots()\nax.plot(epochs.times, np.diag(scores), label='score')\nax.axhline(class_balance, color='k', linestyle='--', label='chance')\nax.set_xlabel('Times')\nax.set_ylabel('AUC')\nax.legend()\nax.axvline(.0, color='k', linestyle='-')\nax.set_title('Decoding EEG sensors over time - %s subject %s' %(cond,subject))\nplt.show()\n\nplt.savefig(ana_path + ' gen_across_time_%s_%s.pdf' %(cond,subject), bbox_to_inches='tight')\n\n# Plot the full matrix\nfig, ax = plt.subplots(1, 1)\nim = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',\n extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)\nax.set_xlabel('Testing Time (s)')\nax.set_ylabel('Training Time (s)')\nax.set_title('Temporal Generalization - %s subject %s' %(cond, subject))\nax.axvline(0, color='k')\nax.axhline(0, color='k')\nplt.colorbar(im, ax=ax)\nplt.show()\nplt.savefig(ana_path + ' gen_across_time_matrix_%s_%s.pdf' %(cond,subject), bbox_to_inches='tight')\n","sub_path":"Cues/run_decoding_gen_across_time.py","file_name":"run_decoding_gen_across_time.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"574805416","text":"#------------- Question 7 ------------------------------------------------#\n# python program to take list of words and returns the length of\n# longest one.\n# ------------ Solution ---------------------------------------------------#\n# using split, length, for loop and if\n\n# taking the input string from the user and storing the value in user_input\nuser_input = input (\"Enter the line of strings:\")\nsplitted_input = user_input.split()\n\nlongest_one = len(splitted_input[0])\n\nfor i in splitted_input:\n if len(i) > longest_one:\n longest_one = len(i)\nprint('The length of longest one from the input is:', longest_one)","sub_path":"DatatypeSeven.py","file_name":"DatatypeSeven.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"159397662","text":"import numpy\nimport pandas as pd\nimport random\nfrom tqdm.auto import tqdm\nimport spacy\nfrom spacy import displacy\nfrom spacy.util import minibatch, compounding\nfrom spacy.training import Example\nfrom spacy.scorer import Scorer\nfrom sklearn.base import BaseEstimator\nfrom utilities import load_cleaned_data, split_data, DROPOUT, ITERATIONS, draw_prf_graph\n\nnumpy.random.seed(0)\n\n\ndef load_spacy():\n nlp = spacy.load(\"en_core_web_sm\")\n print(\"spaCy version: \", spacy.__version__)\n # Getting the pipeline component\n ner = nlp.get_pipe(\"ner\")\n return ner, nlp\n\n\nclass NerModel(BaseEstimator):\n def __init__(self, ner, nlp, model=None, n_iter=64, dropout=0.1, **model_hyper_parameters):\n super().__init__()\n self.ner = ner\n self.nlp = nlp\n self.model = model\n self.n_iter = n_iter\n self.dropout = dropout\n\n def fit(self, train_data):\n ''' train the Named Entity Recognition model\n\n :param train_data: processed training data\n :return: None\n '''\n # Adding labels to the NER\n for _, annotations in train_data:\n for ent in annotations.get(\"entities\"):\n self.ner.add_label(ent[2])\n\n # Disable pipeline components that are not changed\n pipe_exceptions = [\"ner\"]\n unaffected_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n\n scorer = Scorer()\n\n # Store prediction and gold standard ref. 
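The longest-word loop in DatatypeSeven.py above can be written directly with max() and a key function; an equivalent one-liner (like the original, it assumes at least one word was entered):

words = input("Enter the line of strings:").split()
print('The length of longest one from the input is:', len(max(words, key=len)))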
for each sentence\n # (to be used by Scorer)\n example_list = []\n\n # Store the PRF scores for every iteration\n train_scores = []\n\n # Train the NER model\n with nlp.select_pipes(enable=pipe_exceptions, disable=unaffected_pipes):\n # Create a list of Examples objects\n examples = []\n\n for text, annots in train_data:\n examples.append(Example.from_dict(nlp.make_doc(text), annots))\n\n # optimizer = nlp.create_optimizer()\n # get_examples = lambda: examples\n # optimizer = nlp.initialize(get_examples)\n\n for iteration in range(ITERATIONS):\n print(\"Iteration: \", iteration)\n # shuffling examples before every iteration\n random.shuffle(examples)\n losses = {}\n\n # optimizer = nlp.resume_training()\n\n # batch up the examples using spaCy's minibatch\n batches = minibatch(examples, size=compounding(4.0, 32.0, 1.001))\n for batch in batches:\n nlp.update(\n batch,\n drop=DROPOUT, # dropout - make it harder to memorise data\n losses=losses\n )\n # print(batch)\n # print(\"Losses\", losses)\n\n # After training every iteration, calculate scores\n example_list = []\n for text, annot in train_data:\n # Create a Doc of our text\n # doc_gold_text = nlp.make_doc(text)\n pred_value = nlp(text)\n # reference = (Example.from_dict(doc_gold_text, annot))\n gold_standard = {\"entities\": annot[\"entities\"]}\n example_list.append(Example.from_dict(pred_value, gold_standard))\n\n # Generate per-entity scores by comparing predicted with gold-standard values\n scores = scorer.score(examples=example_list)\n train_scores.append(scores)\n\n draw_prf_graph(train_scores)\n self.nlp.to_disk(\"./saved_model\")\n\n\n def evaluate(self, test_data):\n ''' test the trained NER model\n\n :param test_data: processed test data\n :return: None\n '''\n for example in test_data:\n print(example[0])\n doc = self.nlp(example[0])\n print(\"Entities\", [(ent.text, ent.label_) for ent in doc.ents])\n\n scorer = Scorer(self.nlp)\n example_list = []\n\n # Get the PRF scores for test_data\n for text, annot in test_data:\n # Create a Doc of our text\n doc_gold_text = nlp.make_doc(text)\n\n # Create gold-standard using the Doc of text\n # and original (correct) entities\n gold_standard = {\"text\": doc_gold_text, \"entities\": annot[\"entities\"]}\n\n # Get the predictions of current test data sentence\n pred_value = self.nlp(text)\n\n # Create and append to the example list (of type Example) the prediction\n # as well as the gold standard (reference)\n example_list.append(Example.from_dict(pred_value, gold_standard))\n\n # Generate per-entity scores by comparing predicted with gold-standard values\n scores = scorer.score(examples=example_list)\n\n print(\"All scores: \", scores)\n\n print(\"\\nents_p (aka Precision): \", scores['ents_p'])\n print(\"ents_r (aka Recall): \", scores['ents_r'])\n print(\"ents_f (aka fscore): \", scores['ents_f'])\n\n print(\"\\nINSTR: \", scores['ents_per_type']['INSTR'])\n print(\"QLTY: \", scores['ents_per_type']['QLTY'])\n print(\"EDGE: \", scores['ents_per_type']['EDGE'])\n print(\"\\n\")\n\n\n def predict(self, X):\n ''' make inferences on unseen data\n\n :param X: sentence to make inferences on\n :return: None\n '''\n self.nlp = spacy.load(\"./saved_model\")\n doc = self.nlp(X)\n print(\"Entities\", [(ent.text, ent.label_) for ent in doc.ents])\n\n # def predict_proba(self):\n # '''\n #\n # :return:\n # '''\n\nif __name__ == '__main__':\n\n ner, nlp = load_spacy()\n DATA = load_cleaned_data()\n TRAIN_DATA, TEST_DATA = split_data(DATA)\n ner = NerModel(ner, nlp, n_iter=ITERATIONS, 
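The evaluation pattern used in fit() and evaluate() above — build Example objects pairing a prediction Doc with the gold annotations, then hand the list to Scorer.score() — looks like this in isolation (toy sketch; nlp is a loaded spaCy pipeline):

from spacy.scorer import Scorer
from spacy.training import Example

def prf_scores(nlp, annotated):
    # annotated: iterable of (text, {"entities": [(start, end, label), ...]})
    examples = []
    for text, annot in annotated:
        pred = nlp(text)  # model prediction for this sentence
        examples.append(Example.from_dict(pred, {"entities": annot["entities"]}))
    return Scorer().score(examples=examples)  # dict with ents_p / ents_r / ents_f, etc.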
dropout=DROPOUT)\n ner.fit(TRAIN_DATA)\n ner.evaluate(TEST_DATA)\n\n sentence = 'I really like the distortion in this guitar'\n ner.predict(sentence)\n\n\n\n","sub_path":"named_entity_recognition_model/ner_model/ner_model.py","file_name":"ner_model.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"391760592","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport imp\nimport logging\nfrom os import walk\nfrom os.path import realpath, dirname, join, isdir\n\nWORKDIR = realpath(join(dirname(__file__)))\n\n\ndef build_cli(parent_subparser):\n conf_choices = [conf_type for conf_type in available_conf_types()]\n conf_choices += [\"all\"]\n\n conf_parser = parent_subparser.add_parser(\n \"conf\",\n help=\"Configuration related commands\")\n\n conf_subparser = conf_parser.add_subparsers()\n\n install_parser = conf_subparser.add_parser(\n \"install\",\n help=\"Configuration install command\")\n install_parser.set_defaults(func=install)\n install_parser.add_argument(\n \"TYPE\",\n help=\"Configuration type to install\",\n choices=conf_choices)\n\n remove_parser = conf_subparser.add_parser(\n \"remove\",\n help=\"Configuration removal command\")\n remove_parser.set_defaults(func=remove)\n remove_parser.add_argument(\n \"TYPE\",\n help=\"Configuration type to remove\",\n choices=conf_choices)\n\n\ndef available_conf_types():\n target_dirs = filter(\n lambda x: not x.startswith((\".\", \"_\")),\n next(walk(WORKDIR))[1])\n\n for element in target_dirs:\n if isdir(join(WORKDIR, element)):\n yield element\n\n\ndef install(tools_module, args):\n if args.TYPE == \"all\":\n for type in [conf_type for conf_type in available_conf_types()]:\n conf_cmd(tools_module, \"install\", type)\n else:\n conf_cmd(tools_module, \"install\", args.TYPE)\n\n\ndef remove(tools_module, args):\n conf_cmd(tools_module, \"remove\", args.TYPE)\n\n\ndef conf_cmd(tools_module, cmd, type):\n module_path = join(WORKDIR, type, tools_module.ENTRYFILE)\n module = imp.load_source(type, module_path)\n\n try:\n func = getattr(module, cmd)\n except AttributeError:\n logging.info(\"Module {} lacks function {} \".format(\n module_path,\n cmd))\n else:\n func(tools_module)\n","sub_path":"conf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"60518587","text":"import sys\nimport numpy as np\nimport time\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nsys.path.append(\"../pyBSE/\")\nimport pybse\nimport dart_board\nfrom dart_board import sf_history\n\n\n# Values for mock system 3\n# Input values: 11.01 7.42 744.19 0.50 167.69 1.79 2.08 83.2559 -69.9377 36.59\n# Output values: 1.31 7.43 53.45 0.458 36.30 1.140e-12 25.58 13 1\n\nLMC_metallicity = 0.008\n\nsystem_kwargs = {\"M2\" : 7.84, \"M2_err\" : 0.25,\n \"P_orb\" : 14.11, \"P_orb_err\" : 1.0,\n \"ecc\" : 0.47, \"ecc_err\" : 0.05,\n \"L_x\" : 1.94e33, \"L_x_err\" : 1.0e32,\n \"ra\" : 83.5744461 , \"dec\" : -69.4876344}\npub = dart_board.DartBoard(\"HMXB\", evolve_binary=pybse.evolve,\n ln_prior_pos=sf_history.lmc.prior_lmc, nwalkers=320,\n threads=20, ntemps=10,\n metallicity=LMC_metallicity, thin=100,\n system_kwargs=system_kwargs)\n\n# Darts need to be in ln\npub.aim_darts(N_iterations=200000, a_set='low')\n\n\nstart_time = time.time()\npub.throw_darts(nburn=2, nsteps=150000)\nprint(\"Simulation took\",time.time()-start_time,\"seconds.\")\n\n\n# Since 
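conf_cmd in conf/__init__.py loads each configuration module with imp.load_source; the imp module has been deprecated since Python 3.4 and removed in 3.12. The importlib equivalent, as a drop-in sketch:

import importlib.util

def load_source(module_name, module_path):
    # importlib replacement for imp.load_source.
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module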
emcee_PT does not have a blobs function, we must include the following calculation\nif pub.chains.ndim == 4:\n\n print(\"Generating derived values...\")\n\n ntemps, nchains, nsteps, nvar = pub.chains.shape\n pub.derived = np.zeros(shape=(ntemps, nchains, nsteps, 9))\n\n for i in range(ntemps):\n for j in range(nchains):\n for k in range(nsteps):\n\n x_i = pub.chains[i,j,k]\n\n ln_M1, ln_M2, ln_a, ecc, v_kick_1, theta_kick_1, phi_kick_1, ra, dec, ln_t = x_i\n M1 = np.exp(ln_M1)\n M2 = np.exp(ln_M2)\n a = np.exp(ln_a)\n time = np.exp(ln_t)\n\n P_orb = dart_board.posterior.A_to_P(M1, M2, a)\n\n output = pybse.evolve(M1, M2, P_orb, ecc, v_kick_1, theta_kick_1, phi_kick_1,\n v_kick_1, theta_kick_1, phi_kick_1,\n time, LMC_metallicity, False)\n\n pub.derived[i,j,k] = np.array([output])\n\n print(\"...finished.\")\n\n\n# Acceptance fraction\nprint(\"Acceptance fractions:\",pub.sampler.acceptance_fraction)\n\n# Autocorrelation length\ntry:\n print(\"Autocorrelation length:\", pub.sample.acor)\nexcept:\n print(\"Acceptance fraction is too low.\")\n\n\n\n# Save outputs\nnp.save(\"../data/mock_3_low_chain.npy\", pub.chains)\nnp.save(\"../data/mock_3_low_derived.npy\", pub.derived)\nnp.save(\"../data/mock_3_low_lnprobability.npy\", pub.lnprobability)\n","sub_path":"paper/scripts/mock_3_low.py","file_name":"mock_3_low.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"556671472","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport chainer\nfrom chainer import serializers\nfrom chainer import initializers\nimport chainer.functions as F\nimport chainer.links as L\nimport math\nimport os\nimport numpy as np\n\nclass Module(chainer.Chain):\n\n def __init__(self, n_in, n_out, stride=1):\n \n self.dtype = np.float32\n w = 1/np.sqrt(2)\n initW = initializers.HeNormal(scale=w)\n initbias = initializers.Zero()\n super(Module, self).__init__(\n conv1=L.Convolution2D(n_in, n_out, 3, stride, 1, 1, initialW=initW, initial_bias=initbias),\n bn1=L.BatchNormalization(n_out,dtype=self.dtype),\n conv2=L.Convolution2D(n_out, n_out, 3, 1, 1, 1, initialW=initW, initial_bias=initbias),\n bn2=L.BatchNormalization(n_out,dtype=self.dtype),\n )\n\n def __call__(self, x, train):\n h = F.relu(self.bn1(self.conv1(x), test=not train))\n h = self.bn2(self.conv2(h), test=not train)\n if x.data.shape != h.data.shape:\n xp = chainer.cuda.get_array_module(x.data)\n if x.data.shape[2:] != h.data.shape[2:]:\n x = F.average_pooling_2d(x, 1, 2)\n if x.data.shape[1] != h.data.shape[1]:\n x = F.concat((x, x * 0))\n return F.relu(h + x)\n\nclass Block(chainer.Chain):\n\n def __init__(self, n_in, n_out, n, stride=1):\n super(Block, self).__init__()\n links = [('m0', Module(n_in, n_out, stride))]\n links += [('m{}'.format(i + 1), Module(n_out, n_out))\n for i in range(n - 1)]\n for link in links:\n self.add_link(*link)\n self.forward = links\n\n def __call__(self, x, train):\n h = x\n for name, _ in self.forward:\n h = getattr(self, name)(h, train)\n xp = chainer.cuda.get_array_module(x.data)\n #print('start')\n #print(h.shape)\n #print(x.shape)\n if x.data.shape[2:] != h.data.shape[2:]:\n x = F.average_pooling_2d(x, 1, 2)\n if x.data.shape[1] != h.data.shape[1]:\n x = F.concat((x,x * 0))\n #print('end')\n #print(h.shape)\n #print(x.shape)\n return F.relu(h+x)\n\n\nclass ResNet(chainer.Chain):\n\n def __init__(self, n=5):\n super(ResNet, self).__init__()\n self.dtype = np.float32\n w = 1/np.sqrt(2)\n initW = 
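Two details worth noting in mock_3_low.py above: inside the derived-values loop, time = np.exp(ln_t) rebinds the imported time module for the rest of the loop, and pub.sample.acor is presumably a typo for pub.sampler.acor (the except clause hides it). A shadow-free unpacking helper, as a sketch:

import numpy as np

def unpack_sample(x_i):
    # Unpack one walker sample; `t_birth` avoids clobbering the time module.
    (ln_M1, ln_M2, ln_a, ecc,
     v_kick_1, theta_kick_1, phi_kick_1, ra, dec, ln_t) = x_i
    return np.exp(ln_M1), np.exp(ln_M2), np.exp(ln_a), ecc, np.exp(ln_t)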
initializers.HeNormal(scale=w)\n initbias = initializers.Zero()\n \n links = [('conv1', L.Convolution2D(3, 16, 3, 1, 1, initialW=initW, initial_bias=initbias)),\n ('bn2', L.BatchNormalization(16,dtype=self.dtype)),\n ('_relu3', F.ReLU()),\n ('res4', Block(16, 16, n)),\n ('res5', Block(16, 32, n, 2)),\n ('res6', Block(32, 64, n, 2)),\n ('_apool7', F.AveragePooling2D(8, 1, 0, False, True)),\n ('fc8', L.Linear(64, 10,initialW=initW, initial_bias=initbias))]\n for i,link in enumerate(links):\n if 'res' in link[0] and os.path.isfile(link[0]+'.hdf5'):\n self.add_link(*link)\n serializers.load_hdf5(link[0]+'.hdf5',getattr(self,link[0]))\n elif not link[0].startswith('_'):\n self.add_link(*link)\n self.forward = links\n self.train = True\n\n def save(self):\n for name, f in self.forward:\n if 'res' in name:\n serializers.save_hdf5(name+'.hdf5',getattr(self,name))\n\n def clear(self):\n self.loss = None\n self.accuracy = None\n\n def __call__(self, x, t):\n self.clear()\n for name, f in self.forward:\n # print name\n if 'res' in name:\n x = getattr(self, name)(x, self.train)\n elif name.startswith('bn'):\n x = getattr(self, name)(x, not self.train)\n elif name.startswith('_'):\n x = f(x)\n else:\n x = getattr(self, name)(x)\n # print x.data.shape\n if self.train:\n self.loss = F.softmax_cross_entropy(x, t)\n self.accuracy = F.accuracy(x, t)\n return self.loss\n else:\n return x\n\nmodel = ResNet()\n","sub_path":"IE598_ProjectSubmission/code/beyond_resnets/ResResNet/N05/ResNet.py","file_name":"ResNet.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"38943160","text":"\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .detect_pipe import remove_noise\nfrom .project_algorithm import main\nimport logging\nfrom logging import handlers\n# 날짜없이 그냥 로그파일을 만들때\nlogging.basicConfig(filename='ImageProcessing.log',\n level=logging.DEBUG, format='%(asctime)s:%(message)s')\n\n\n@api_view(['POST'])\ndef pipe_caliber(request):\n if request.method == 'POST':\n image_root = request.data['Path']\n distance = opencv_pipe_detect(image_root)\n return Response({'Caliber_pixle': distance})\n\n\n@api_view(['POST'])\ndef pipe_depth_cal(request):\n depth_list = list()\n if request.method == 'POST':\n img_root = request.data['Path']\n actual_external_edge_diameter = request.data['Edge']\n actual_external_pipe_diameter = request.data['Caliber']\n depth, pipe_depth, curve = remove_noise(\n img_root, actual_external_edge_diameter, actual_external_pipe_diameter)\n if depth < 0:\n depth = 0\n if pipe_depth < 0:\n pipe_depth = 0\n return Response(\n {\n 'DepthToSurface': depth,\n 'DepthToPipe': pipe_depth,\n 'Type': curve[0],\n 'Degree': curve[1],\n }\n )\n\n\n@api_view(['POST'])\ndef pipe_assignment(request):\n if request.method == 'POST':\n image_root = request.data['Path']\n logging.debug('Path: {}'.format(image_root))\n try:\n depth, pipe_depth, degree, pipe_type = main(image_root)\n except:\n return Response({\n 'DepthToPipe': 0,\n 'PixelDistance': 0,\n 'Type': 0,\n 'Degree': 0\n })\n\n return Response(\n {\n 'DepthToPipe': depth,\n 'PixelDistance': pipe_depth,\n 'Type': pipe_type,\n 'Degree': degree\n }\n )\n","sub_path":"api_server/caliber_pipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"493976900","text":"#-*-coding:utf8-*-\nimport pandas as 
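The shape-matching in Module and Block above — a stride-2 average pool when the spatial dimensions shrink, plus concatenating x * 0 to zero-pad the channels — is the parameter-free ("option A") ResNet shortcut. Factored out, using the same chainer calls as above:

import chainer.functions as F

def match_shortcut(x, h):
    # Make the skip connection x broadcastable onto the residual branch h.
    if x.shape[2:] != h.shape[2:]:
        x = F.average_pooling_2d(x, 1, 2)  # ksize=1, stride=2 halves H and W
    if x.shape[1] != h.shape[1]:
        x = F.concat((x, x * 0))           # double the channels with zeros
    return x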
pd\nimport matplotlib.pyplot as plt\nfrom pylab import *\nimport numpy as np\n\nname1880 = pd.read_csv('ch02/names/yob1880.txt', names=['name', 'sex', 'birth'])\n# print name1880\n# print name1880.groupby('sex').birth.sum()\nyears =range (1880,2011)\npieces = []\ncolumns =['name', 'sex', 'births']\n#合并数据 合并到一个表里\nfor year in years:\n path = 'ch02/names/yob%d.txt'%year\n frame = pd.read_csv(path,names=columns)\n frame['year'] = year\n pieces.append(frame)\nnames = pd.concat(pieces, ignore_index=True)\n\ntotal_births= names.pivot_table('births',index='year', columns='sex', aggfunc=sum)\n# show(total_births.plot(title='Total birth by sex and year'))\ndef add_prop(group):\n # Integer division floors\n births = group.births.astype(float)\n\n group['prop'] = births / births.sum()\n return group\nnames = names.groupby(['year', 'sex']).apply(add_prop)\n\ndef get_top1000(group):\n return group.sort_values(by= 'births', ascending=False)[:1000]\ngrouped = names.groupby(['year', 'sex'])\n\ntop1000 = grouped.apply(get_top1000)\nboys = top1000[top1000.sex=='M']\ngirls = top1000[top1000.sex=='F']\ntotal_births = top1000.pivot_table('births',index='year',columns= 'name', aggfunc=sum)\n# print total_births\nsubset = total_births[['John', 'Harry', 'Mary', 'Marilyn']]\nsubset.plot(subplots=True, figsize=(12, 10), grid=False,title=\"Number of births per year\")\n# show()\ntable = top1000.pivot_table('prop',index = 'year', columns = 'sex', aggfunc= sum)\n# table.plot(title=\"Sum of table1000.prop by year and sex\",yticks=np.linspace(0,1.2,13),xticks=range(1880,2020,10))\n# show()\ndf = boys[boys.year ==2010]\nprop_cumsum= df.sort_values(by='prop', ascending=False).prop.cumsum()\ndef get_quantile_count(group,q=0.5):\n group = group.sort_values(by='prop',ascending=False)\n return group.prop.cumsum().values.searchsorted(q) +1\ndiversity = top1000.groupby(['year', 'sex']).apply(get_quantile_count)\ndiversity = diversity.unstack('sex')\n# show(diversity.plot(title=\"Number of popular name in top 50%\"))\n\n#从name列获取最后一个字母\n\nget_last_letter = lambda x: x[-1]\nlast_letters = names.name.map(get_last_letter)\nlast_letters.name = 'last_letter'\n\ntable = names.pivot_table('births',index = last_letters, columns = ['sex', 'year'],aggfunc = sum)\nsubtable = table.reindex(columns=[1910, 1960, 2010], level='year')\nletter_prop = subtable / subtable.sum().astype(float)\nfig, axes =plt.subplots(2, 1, figsize=(10, 8))\nletter_prop['M'].plot(kind = 'bar', rot = 0, ax = axes[0], title = 'Male')\nletter_prop['F'].plot(kind = 'bar', rot = 0, ax = axes[1], title = 'Female', legend = False)\nletter_prop = table / table.sum().astype(float)\n\ndny_ts = letter_prop.ix[['d', 'n', 'y'], 'M'].T\n# print dny_ts.head()\ndny_ts.plot()\n# show()\nall_names = top1000.name.unique()\nmask = np.array(['lesl' in x.lower() for x in all_names])\nlesley_like = all_names[mask]\nfiltered = top1000[top1000.name.isin(lesley_like)]\nfiltered.groupby('name').births.sum()\ntable = filtered.pivot_table('births', index = 'year', columns = 'sex', aggfunc = 'sum')\ntable = table.div(table.sum(1),axis = 0)\ntable.tail()\n# plt.close('all')\ntable.plot(style={'M':'k-','F':'k--'})\nshow()","sub_path":"python_data_03.py","file_name":"python_data_03.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"331395159","text":"from pantheon import pantheon\nimport asyncio\nimport rg_api_key\n\nserver = \"euw1\"\napi_key = rg_api_key.riot_api_key\n\ndef requestsLog(url, status, 
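add_prop normalises births within each (year, sex) group via groupby/apply; groupby(...).transform('sum') yields the same column without rebuilding the frame. On a toy frame:

import pandas as pd

df = pd.DataFrame({'year': [1880, 1880, 1880],
                   'sex': ['F', 'F', 'M'],
                   'births': [7065, 2604, 9655]})
# Proportion of each name's births within its (year, sex) group.
df['prop'] = df['births'] / df.groupby(['year', 'sex'])['births'].transform('sum')
print(df)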
headers):\n print(url)\n print(status)\n print(headers)\n\npanth = pantheon.Pantheon(server, api_key, errorHandling=True, requestsLoggingFunction=requestsLog, debug=True)\n\nasync def getSummonerId(name):\n try:\n data = await panth.getSummonerByName(name)\n return (data['id'],data['accountId'])\n except Exception as e:\n print(e)\n\n\nasync def getRecentMatchlist(accountId):\n try:\n data = await panth.getMatchlist(accountId, params={\"endIndex\":10})\n return data\n except Exception as e:\n print(e)\n\nasync def getRecentMatches(accountId):\n try:\n matchlist = await getRecentMatchlist(accountId)\n tasks = [panth.getMatch(match['gameId']) for match in matchlist['matches']]\n return await asyncio.gather(*tasks)\n except Exception as e:\n print(e)\n\nasync def getRecentTimeline(accountId):\n try:\n matchlist = await getRecentMatchlist(accountId)\n tasks = [panth.getTimeline(match['gameId']) for match in matchlist['matches']]\n return await asyncio.gather(*tasks)\n except Exception as e:\n print(e)\n\n\nname = \"exkira\"\n\nloop = asyncio.get_event_loop() \n\n(summonerId, accountId) = loop.run_until_complete(getSummonerId(name))\nprint(summonerId)\nprint(accountId)\nloop.run_until_complete(getRecentTimeline(accountId))\n\n# print(tasks[0]['frames'][0]['participantFrames']['1'][1]['currentGold'])","sub_path":"pantheonScript.py","file_name":"pantheonScript.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"571419243","text":"import os\n\nfrom django.core.files.base import ContentFile\nfrom django.test import RequestFactory\nfrom mock import patch\n\nfrom onadata.apps.main.tests.test_base import TestBase\nfrom onadata.apps.api.viewsets.stats_viewset import StatsViewSet\nfrom onadata.apps.api.viewsets.submissionstats_viewset import\\\n SubmissionStatsViewSet\nfrom onadata.apps.logger.models import XForm\nfrom onadata.libs.utils.logger_tools import publish_xml_form, create_instance\n\n\nclass TestStatsViewSet(TestBase):\n\n def setUp(self):\n TestBase.setUp(self)\n self._create_user_and_login()\n self.factory = RequestFactory()\n self.extra = {\n 'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}\n\n @patch('onadata.apps.logger.models.instance.submission_time')\n def test_form_list(self, mock_time):\n self._set_mock_time(mock_time)\n self._publish_transportation_form()\n self._make_submissions()\n view = SubmissionStatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/', **self.extra)\n response = view(request)\n self.assertEqual(response.status_code, 200)\n formid = self.xform.pk\n data = {\n u'transportation_2011_07_25':\n 'http://testserver/api/v1/stats/submissions/bob/%s' % formid\n }\n self.assertDictEqual(response.data, data)\n request = self.factory.get('/?group=_xform_id_string', **self.extra)\n response = view(request)\n response = view(request, owner='bob', formid=formid)\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(response.data, list)\n data = {\n u'count': 4\n }\n self.assertDictContainsSubset(data, response.data[0])\n\n def test_anon_form_list(self):\n self._publish_transportation_form()\n self._make_submissions()\n view = SubmissionStatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/')\n response = view(request)\n self.assertEqual(response.status_code, 401)\n\n def _contributions_form_submissions(self):\n count = XForm.objects.count()\n path = os.path.join(os.path.dirname(__file__),\n '..', 'fixtures', 'forms', 
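getRecentMatches and getRecentTimeline both follow the same fan-out shape: build one coroutine per match id, then await them together. The pattern in isolation:

import asyncio

async def fetch_all(fetch_one, ids):
    # One coroutine per id, awaited concurrently.
    tasks = [fetch_one(i) for i in ids]
    return await asyncio.gather(*tasks)

# usage sketch (names from the script above):
# loop.run_until_complete(fetch_all(panth.getMatch, game_ids))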
'contributions')\n form_path = os.path.join(path, 'contributions.xml')\n f = open(form_path)\n xml_file = ContentFile(f.read())\n f.close()\n xml_file.name = 'contributions.xml'\n self.xform = publish_xml_form(xml_file, self.user)\n self.assertTrue(XForm.objects.count() > count)\n instances_path = os.path.join(path, 'instances')\n for uuid in os.listdir(instances_path):\n s_path = os.path.join(instances_path, uuid, 'submission.xml')\n create_instance(self.user.username, open(s_path), [])\n self.assertEqual(self.xform.instances.count(), 6)\n\n def test_median_api(self):\n self._contributions_form_submissions()\n view = StatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/?method=median', **self.extra)\n formid = self.xform.pk\n response = view(request, owner='bob', formid=formid)\n data = {u'age': 28.5, u'amount': 1100.0}\n self.assertDictContainsSubset(data, response.data)\n\n def test_mean_api(self):\n self._contributions_form_submissions()\n view = StatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/?method=mean', **self.extra)\n formid = self.xform.pk\n response = view(request, owner='bob', formid=formid)\n data = {u'age': 28.17, u'amount': 1455.0}\n self.assertDictContainsSubset(data, response.data)\n\n def test_mode_api(self):\n self._contributions_form_submissions()\n view = StatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/?method=mode', **self.extra)\n formid = self.xform.pk\n response = view(request, owner='bob', formid=formid)\n data = {u'age': 24, u'amount': 430.0}\n self.assertDictContainsSubset(data, response.data)\n\n def test_range_api(self):\n self._contributions_form_submissions()\n view = StatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/?method=range', **self.extra)\n formid = self.xform.pk\n response = view(request, owner='bob', formid=formid)\n data = {u'age': {u'range': 10, u'max': 34, u'min': 24},\n u'amount': {u'range': 2770, u'max': 3200, u'min': 430}}\n self.assertDictContainsSubset(data, response.data)\n\n def test_bad_field(self):\n self._contributions_form_submissions()\n view = StatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/?method=median&field=INVALID',\n **self.extra)\n formid = self.xform.pk\n response = view(request, owner='bob', formid=formid)\n self.assertEqual(response.status_code, 400)\n\n def test_all_stats_api(self):\n self._contributions_form_submissions()\n view = StatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/', **self.extra)\n response = view(request)\n self.assertEqual(response.status_code, 200)\n formid = self.xform.pk\n data = {\n u'contributions':\n 'http://testserver/api/v1/stats/bob/%s' % formid\n }\n self.assertDictContainsSubset(data, response.data)\n response = view(request, owner='bob', formid=formid)\n data = {}\n data['age'] = {\n 'mean': 28.17,\n 'median': 28.5,\n 'mode': 24,\n 'max': 34,\n 'min': 24,\n 'range': 10\n }\n request = self.factory.get('/?field=age', **self.extra)\n age_response = view(request, owner='bob', formid=formid)\n self.assertEqual(data, age_response.data)\n data['amount'] = {\n 'mean': 1455,\n 'median': 1100.0,\n 'mode': 430,\n 'max': 3200,\n 'min': 430,\n 'range': 2770\n }\n self.assertDictContainsSubset(data, response.data)\n\n @patch('onadata.apps.logger.models.instance.submission_time')\n def test_same_id_string_form_different_users(self, mock_time):\n self._set_mock_time(mock_time)\n\n # as bob\n self._publish_transportation_form()\n\n # as demo user\n self._create_user_and_login('demo')\n 
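assertDictContainsSubset, used throughout these tests, has been deprecated since Python 3.2; the same check can be phrased with dict-view set comparison:

def assert_subset(expected, actual):
    # True when every (key, value) pair of `expected` appears in `actual`.
    assert expected.items() <= actual.items(), (expected, actual)

assert_subset({'count': 4}, {'count': 4, 'extra': 1})  # passes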
self.extra = {\n 'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}\n\n self._publish_transportation_form()\n self._make_submissions()\n view = SubmissionStatsViewSet.as_view({'get': 'list'})\n request = self.factory.get('/', **self.extra)\n response = view(request)\n self.assertEqual(response.status_code, 200)\n formid = self.xform.id_string\n data = {\n u'transportation_2011_07_25':\n 'http://testserver/api/v1/stats/submissions/demo/%s'\n % self.xform.pk\n }\n self.assertDictEqual(response.data, data)\n request = self.factory.get('/?group=_xform_id_string', **self.extra)\n response = view(request)\n response = view(request, owner='bob', formid=formid)\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(response.data, list)\n data = {\n u'count': 4\n }\n self.assertDictContainsSubset(data, response.data[0])\n","sub_path":"onadata/apps/api/tests/viewsets/test_stats_viewset.py","file_name":"test_stats_viewset.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"333768199","text":"#!/usr/local/bin/python\n\n\"\"\"\n__author__ = \"Ryan Lober\"\n__credits__ = \"Olivier Sigaud\"\n__version__ = \"1.0\"\n__maintainer__ = \"Ryan Lober\"\n__email__ = \"rlober@gmail.com\"\n\"\"\"\n\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\ndef plotFA(fa_object, data):\n\t\"\"\"\n\tThis function can be used to plot all of the relevant approximator data and performance statistic, as well as an overlay with the original function.\"\n\t\n\t:param fa_object: a trained function approximator object\n\t:param data: original noisy data set\n\t\"\"\"\n\tnumFeats = fa_object.numFeatures\n\t\n\t# Reconstruct the original function\n\tNp = np.shape(data)[1]\n\tx_values = data[0,:]\n\ty_noisy = data[1,:]\n\ty_values = 1 - x_values - np.sin(-2.*math.pi*np.power(x_values,3.))*np.cos(-2.*math.pi*np.power(x_values,3.))*np.exp(-np.power(x_values,4.))\n\n\t# Get the output of each of the features\n\tif fa_object.method == 'LWLS':\n\t\tphi = fa_object.featureOutput(x_values)\n\t\ty_feats = (np.dot(phi.transpose(), fa_object.theta)).transpose() #[numFeats x Ns]\n\t\ty_featWeights = fa_object.getWeights(x_values)\n\n\telse:\n\t\ty_feats = fa_object.featureOutput(x_values)\n\n\t# Get the predicted values from the FA\n\ty_approx = fa_object.functionApproximatorOutput(x_values)\n\n\t\n\t# Get execution time info\n\ttotalTime = 0.0\n\ttimeline = [totalTime]\n\tfor t in fa_object.timeHistory:\n\t\ttotalTime += t\n\t\ttimeline.append(totalTime)\n\t\n\ttrainingTime = timeline[-1:][0]\n\titerTime = np.mean(fa_object.timeHistory)\n\t\n\t# Get total error\n\ttotalError = np.sum(np.abs(y_values - y_approx))\n\t\n\t\n\t### Plots\n\t############################################################################\n\t\n\t# Set up various subplots\n\tplt.figure(num=1, figsize=(16, 9), dpi=80, facecolor='w', edgecolor='k')\n\tax_main = plt.subplot2grid((3,3), (0,0), colspan=2, rowspan=3)\n\tax_cost = plt.subplot2grid((3,3), (0,2))\n\tax_cpu = plt.subplot2grid((3,3), (1,2))\n\tax_mem = plt.subplot2grid((3,3), (2,2))\n\n\t# Main plot with data and FA \n\tf_real, = ax_main.plot(x_values, y_values)\n\tf_noisy, = ax_main.plot(x_values, y_noisy, '*')\n\tf_approx, = ax_main.plot(x_values, y_approx, 'r')\n\tfor i in range(numFeats):\n\t\tif fa_object.method == 'LWLS':\n\t\t\tfor j in range(Np):\n\t\t\t\tfeature_plots, = ax_main.plot(x_values[j], y_feats[i,j], '_', mec='k', 
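plotFA and animPlotFA both rebuild the ground-truth target inline; factoring it into one function removes the duplication (same formula as in the source):

import math
import numpy as np

def target_function(x):
    # 1 - x - sin(-2*pi*x^3) * cos(-2*pi*x^3) * exp(-x^4)
    u = -2.0 * math.pi * np.power(x, 3.0)
    return 1 - x - np.sin(u) * np.cos(u) * np.exp(-np.power(x, 4.0))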
alpha=y_featWeights[i,j], mew=2)\n\t\telse:\n\t\t\tfeature_plots, = ax_main.plot(x_values, y_feats[i,:]*fa_object.theta[i], 'k--')\n\n\t\n\t\n\t\n\tax_main.legend((f_real, f_noisy, f_approx, feature_plots), (r'$f_{real}$', r'$y_{noisy}$', r'$\\hat{f}$',r'$\\phi * \\Theta$'))\n\tax_main.set_title('Function approximator output')\n\tax_main.set_xlabel('input')\n\tax_main.set_ylabel('output')\n\tax_main.set_ylim([-1., 1.5])\n\t\n\tstatString = 'FA Error: '+str(round(totalError*1000)/1000)+'\\nTraining Time: '+str(round(trainingTime*1E6)/1E3)+' ms\\nMean Iteration Time: ' + str(round(iterTime*1E6)/1E3) +' ms'\n\t#ax_main.annotate(statString, xy=(0.1, 0.05), textcoords='axes fraction')\n\tbbox_props = dict(boxstyle=\"round\", fc=\"w\", ec=\"0.5\", alpha=0.7)\n\tax_main.text(0.05, -.9, statString, ha=\"left\", va=\"bottom\", size=12,\n bbox=bbox_props)\n\n\t# Cost curve (if using an iterative method)\n\tif fa_object.iterationCount > 1 and np.size(fa_object.deltaHistory) > 1: \n\t\tax_cost.plot(range(fa_object.iterationCount), fa_object.deltaHistory)\n\t\tax_cost.axhline(y = fa_object.threshold, c='red', ls='--', lw=3)\n\t\n\tft = 9\n\tax_cost.set_xlabel('Iteration', fontsize=ft)\n\tax_cost.set_ylabel('Cost', fontsize=ft)\n\tax_cost.set_ylim([0,0.01])\n\tax_cost.tick_params(axis='both', which='major', labelsize=ft)\n\t\n\t\n\t\n\t\n\t\n\tcpu_labels = []\n\tax_cpu.plot(timeline, fa_object.cpuHistory)\n\tfor cpu in range(np.size(fa_object.cpuHistory[0])):\n\t\tcpu_labels.append('core_'+str(cpu))\n\t\n\n\tax_cpu.legend(cpu_labels, bbox_to_anchor=(1.3, 1.00))\n\tax_cpu.set_xlabel('Time (sec)', fontsize=ft)\n\tax_cpu.set_ylabel('CPU usage (%)', fontsize=ft)\n\tax_cpu.tick_params(axis='both', which='major', labelsize=ft)\n\tax_cpu.set_ylim([-10, 110])\n\n\t\n\tax_mem.plot(timeline, fa_object.memHistory)\n\tax_mem.set_xlabel('Time (sec)', fontsize=ft)\n\tax_mem.set_ylabel('RAM usage (%)', fontsize=ft)\n\tax_mem.tick_params(axis='both', which='major', labelsize=ft)\n\t\n\tplt.subplots_adjust(wspace=0.25, hspace=0.25)\n\tplt.show()\n\t\n\t# if fa_object.method == 'Incremental':\n# \t\tanimFAPlot(fa_object, data)\n\t\t\n\n\n\t\t\ndef animPlotFA(fa_object, data):\n\t\"\"\"\n\tFor iterative methods, this function can be used to plot the evolution of the approximator over its training.\"\n\t\n\t:param fa_object: a trained function approximator object\n\t:param data: original noisy data set\n\t\"\"\"\n\n\n\tassert(fa_object.method == 'Incremental')\n\tNp = np.shape(data)[1]\n\tx_values = data[0,:]\n\ty_noisy = data[1,:]\n\ty_values = 1 - x_values - np.sin(-2.*math.pi*np.power(x_values,3.))*np.cos(-2.*math.pi*np.power(x_values,3.))*np.exp(-np.power(x_values,4.))\n\tnumFeats = fa_object.numFeatures\n\t\n\tfig2 = plt.figure()\n\tax = fig2.add_subplot(1, 1, 1)\n\t#num=2, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')\n\tf_r, = ax.plot(x_values, y_values)\n\tf_n, = ax.plot(x_values, y_noisy, '*')\n\ty_iter = fa_object.functionApproximatorOutput(x_values, fa_object.thetaHistory[0])\n\tf_app, = ax.plot(x_values, y_iter, 'r')\n\ty_feats = fa_object.featureOutput(x_values)\n\tax.set_title('Function approximator output')\n\tax.set_xlabel('input')\n\tax.set_ylabel('output')\n\tax.set_ylim([-1., 1.5])\n\t\n\n\t\t\n\tdef animate(j, fa_object, x_values):\n\t\ty_it = fa_object.functionApproximatorOutput(x_values, fa_object.thetaHistory[j])\n\t\tf_app.set_ydata(y_it) # update the data\n\t\t \n\t\t# for i in range(numFeats):\n# \t\t\tfeature_plots, = ax.plot(x_values, y_feats[i,:]*fa_object.thetaHistory[j][i], 
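The animation at the end of animPlotFA reduces to the standard FuncAnimation pattern: a callback that mutates an artist, called once per frame index. A standalone minimal version:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)
line, = ax.plot(x, np.sin(x))

def animate(frame):
    line.set_ydata(np.sin(x + frame / 10.0))  # update the artist in place
    return line,

ani = animation.FuncAnimation(fig, animate, frames=100, interval=30, blit=False)
plt.show()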
'k--')\n\t\tstatString = 'Iteration: '+str(j)\n\t\tbbox_props = dict(boxstyle=\"round\", fc=\"w\", ec=\"0.5\", alpha=0.7)\n\t\tax.text(0.05, -.9, statString, ha=\"left\", va=\"bottom\", size=12, bbox=bbox_props)\n\t\treturn f_app, #feature_plots\n\n\n\t\n\tNt = len(fa_object.deltaHistory)\n\tNsteps = int(round(Nt/100))\n\t\n\tani = animation.FuncAnimation(fig2, animate, np.arange(0,Nt,Nsteps), fargs=(fa_object, x_values),\n\t\tinterval=1, blit=False, repeat=True)\n\tplt.show()\n\n","sub_path":"IAR/TME Regression/functionApproximator_PlottingTools.py","file_name":"functionApproximator_PlottingTools.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"31663674","text":"import unittest\n\n\ndef sink(ar: list, k: int, n: int) -> None: \n while 2 * k <= n:\n j = 2 * k\n\n if j < n and ar[j] < ar[j + 1]:\n j = j + 1\n\n if ar[k] > ar[j]:\n break\n \n ar[k], ar[j] = ar[j], ar[k]\n k = j\n\n\n# def swim(ar: list, k: int, n: int):\n# while k > 1 and ar[k // 2] < ar[k]:\n# ar[k // 2], ar[k] = ar[k], ar[k // 2]\n# k = k // 2\n\n\ndef sort(ar: list):\n n = len(ar) - 1\n\n # 初始化前一半,使之有序\n for i in range(n // 2, 0, -1):\n sink(ar, i, n)\n # print(ar)\n while n > 1:\n ar[1], ar[n] = ar[n], ar[1]\n n = n - 1\n sink(ar, 1, n)\n\n\ns = [None, 'S', 'O', 'R', 'T', 'E', 'X', 'A', 'M', 'P', 'L', 'E']\n\n\nclass Test(unittest.TestCase):\n def test_normal(self):\n sort(s)\n self.assertEqual(s, [None, 'A', 'E', 'E', 'L', 'M', 'O', 'P', 'R', 'S', 'T', 'X'])\n\n\nunittest.main()","sub_path":"th_alogri_python/charapter-2/heap-sort.py","file_name":"heap-sort.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"445716038","text":"\"\"\"Contains tests for the cerberus tool.\n\nAll tests should be run in Python 2 using python2 -m pytest\n\"\"\"\nimport cerberus\n\n\ndef test_list(capsys, test_case):\n \"\"\"Run a test on the cerberus list command.\"\"\"\n cerberus.list_data(None, test_case[\"data\"])\n out, _ = capsys.readouterr()\n assert out.rstrip() == \"\\n\".join(test_case[\"list_test\"])\n\n\ndef test_add_file_single(test_case):\n \"\"\"Test adding a single file to a project.\"\"\"\n data = test_case[\"data\"]\n parser = cerberus.create_parser()\n args = parser.parse_args([\"add-file\", \"autorun.exe\"])\n output = cerberus.add_file(args, data)\n assert \"autorun.exe\" in output[\"files\"]\n\n\ndef test_add_file_multiple(test_case):\n \"\"\"Test adding multiple files to a project.\"\"\"\n data = test_case[\"data\"]\n parser = cerberus.create_parser()\n args = parser.parse_args([\"add-file\", \"autorun.exe\", \"notavirus.bat\"])\n output = cerberus.add_file(args, data)\n assert (\"autorun.exe\" in output[\"files\"]\n and \"notavirus.bat\" in output[\"files\"])\n\n\ndef test_remove_file(capsys, test_case):\n \"\"\"Test removing a file from the project.\"\"\"\n data = test_case[\"data\"]\n parser = cerberus.create_parser()\n files = test_case[\"remove-file_test\"][\"files\"]\n expected_output = \"\\n\".join(test_case[\"remove-file_test\"][\"results\"])\n expected_files = test_case[\"remove-file_test\"][\"final_files\"]\n args = parser.parse_args([\"remove-file\"] + files)\n processed_data = cerberus.remove_files(args, data)\n messeges, _ = capsys.readouterr()\n assert messeges.strip() == expected_output\n assert set(processed_data[\"files\"]) == 
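Beyond the single unittest case, the 1-indexed heapsort above (index 0 is a None sentinel) is easy to check against sorted() on random input; a quick property-style test, assuming sort() from heap-sort.py is in scope:

import random

for _ in range(100):
    data = [None] + [random.randint(0, 99) for _ in range(random.randint(1, 50))]
    expected = [None] + sorted(data[1:])
    sort(data)  # the heapsort defined above
    assert data == expected, data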
set(expected_files)\n","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"264304006","text":"from gbfs import *\nimport scipy.io as sio\nimport numpy as np\n\ndef super_pixel(datafile,total_f):\n\tdata=sio.loadmat('../../datasets/'+datafile)\n\tX=data['X']\n\tY=data['Y']\n\tn,d=np.shape(X)\n\tn,obj=np.shape(Y)\n\tF=np.ones((d,obj))\n\tfor i in range(obj):\n\t\tF[:,i]=np.reshape(GBFS_parm_sweep(X,np.reshape(Y[:,i],(n,1)),total_f),(d,))\n\t\t\n\tF=1-F\n\tsio.savemat('../../results/'+datafile+'_'+str(total_f)+'_gbfs.mat',dict(F=F,b=data['b'])) \t\n","sub_path":"code/lime-experiments-master/GBFS/super_pixel.py","file_name":"super_pixel.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"114842418","text":"from datetime import datetime\nfrom unittest.mock import patch\n\nimport botocore\nfrom boto3 import session\n\nfrom prowler.providers.aws.lib.audit_info.audit_info import AWS_Audit_Info\nfrom prowler.providers.aws.services.inspector2.inspector2_service import Inspector2\n\nAWS_REGION = \"us-east-1\"\nAWS_ACCOUNT_ID = \"123456789012\"\nFINDING_ARN = (\n \"arn:aws:inspector2:us-east-1:123456789012:finding/0e436649379db5f327e3cf5bb4421d76\"\n)\n\n# Mocking Calls\nmake_api_call = botocore.client.BaseClient._make_api_call\n\n\ndef mock_make_api_call(self, operation_name, kwargs):\n \"\"\"We have to mock every AWS API call using Boto3\"\"\"\n if operation_name == \"BatchGetAccountStatus\":\n return {\n \"accounts\": [\n {\n \"accountId\": \"string\",\n \"resourceState\": {\n \"ec2\": {\n \"errorCode\": \"ALREADY_ENABLED\",\n \"errorMessage\": \"string\",\n \"status\": \"ENABLED\",\n },\n \"ecr\": {\n \"errorCode\": \"ALREADY_ENABLED\",\n \"errorMessage\": \"string\",\n \"status\": \"ENABLED\",\n },\n \"lambda\": {\n \"errorCode\": \"ALREADY_ENABLED\",\n \"errorMessage\": \"string\",\n \"status\": \"ENABLED\",\n },\n },\n \"state\": {\n \"errorCode\": \"ALREADY_ENABLED\",\n \"errorMessage\": \"string\",\n \"status\": \"ENABLED\",\n },\n }\n ]\n }\n if operation_name == \"ListFindings\":\n return {\n \"findings\": [\n {\n \"awsAccountId\": AWS_ACCOUNT_ID,\n \"findingArn\": FINDING_ARN,\n \"description\": \"Finding Description\",\n \"severity\": \"MEDIUM\",\n \"status\": \"ACTIVE\",\n \"title\": \"CVE-2022-40897 - setuptools\",\n \"type\": \"PACKAGE_VULNERABILITY\",\n \"updatedAt\": datetime(2024, 1, 1),\n }\n ]\n }\n\n return make_api_call(self, operation_name, kwargs)\n\n\ndef mock_generate_regional_clients(service, audit_info):\n regional_client = audit_info.audit_session.client(service, region_name=AWS_REGION)\n regional_client.region = AWS_REGION\n return {AWS_REGION: regional_client}\n\n\n# Patch every AWS call using Boto3 and generate_regional_clients to have 1 client\n@patch(\"botocore.client.BaseClient._make_api_call\", new=mock_make_api_call)\n@patch(\n \"prowler.providers.aws.services.inspector2.inspector2_service.generate_regional_clients\",\n new=mock_generate_regional_clients,\n)\nclass Test_Inspector2_Service:\n\n # Mocked Audit Info\n def set_mocked_audit_info(self):\n audit_info = AWS_Audit_Info(\n session_config=None,\n original_session=None,\n audit_session=session.Session(\n profile_name=None,\n botocore_session=None,\n ),\n audited_account=None,\n audited_user_id=None,\n audited_partition=\"aws\",\n audited_identity_arn=None,\n 
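The interception idiom in the Inspector2 tests — keep a reference to the real BaseClient._make_api_call, patch in a dispatcher keyed on operation_name, and fall through for everything else — in minimal form (ListBuckets here is just an illustrative operation):

import botocore.client
from unittest.mock import patch

real_call = botocore.client.BaseClient._make_api_call

def fake_call(self, operation_name, kwargs):
    # Stub only the operations under test; delegate the rest.
    if operation_name == "ListBuckets":
        return {"Buckets": []}
    return real_call(self, operation_name, kwargs)

# usage sketch:
# with patch("botocore.client.BaseClient._make_api_call", new=fake_call):
#     boto3.client("s3").list_buckets()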
profile=None,\n profile_region=None,\n credentials=None,\n assumed_role_info=None,\n audited_regions=None,\n organizations_metadata=None,\n audit_resources=None,\n )\n return audit_info\n\n def test__get_client__(self):\n audit_info = self.set_mocked_audit_info()\n ssmincidents = Inspector2(audit_info)\n assert (\n ssmincidents.regional_clients[AWS_REGION].__class__.__name__ == \"Inspector2\"\n )\n\n def test__get_service__(self):\n audit_info = self.set_mocked_audit_info()\n ssmincidents = Inspector2(audit_info)\n assert ssmincidents.service == \"inspector2\"\n\n def test__batch_get_account_status__(self):\n audit_info = self.set_mocked_audit_info()\n ssmincidents = Inspector2(audit_info)\n assert len(ssmincidents.inspectors) == 1\n assert ssmincidents.inspectors[0].id == \"Inspector2\"\n assert ssmincidents.inspectors[0].region == AWS_REGION\n assert ssmincidents.inspectors[0].status == \"ENABLED\"\n\n def test__list_findings__(self):\n audit_info = self.set_mocked_audit_info()\n ssmincidents = Inspector2(audit_info)\n assert len(ssmincidents.inspectors[0].findings) == 1\n assert ssmincidents.inspectors[0].findings[0].arn == FINDING_ARN\n assert ssmincidents.inspectors[0].findings[0].region == AWS_REGION\n assert ssmincidents.inspectors[0].findings[0].severity == \"MEDIUM\"\n assert ssmincidents.inspectors[0].findings[0].status == \"ACTIVE\"\n assert (\n ssmincidents.inspectors[0].findings[0].title\n == \"CVE-2022-40897 - setuptools\"\n )\n","sub_path":"tests/providers/aws/services/inspector2/inspector2_service_test.py","file_name":"inspector2_service_test.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"421464828","text":"def max_array(array):\r\n \"\"\"Works only on one-dimensional numeric arrays.\r\n\r\n It takes a numeric array and returns the contiguous subsequence with the maximum sum.\r\n \"\"\"\r\n n = len(array)\r\n max_arr = []\r\n max_sum = 0\r\n if n == 0:\r\n return max_arr\r\n elif max(array) <= 0:\r\n max_arr.append(max(array))\r\n return max_arr\r\n for i in range(0, n):\r\n cur_sum = 0\r\n for k in range(i, n):\r\n cur_sum += array[k]\r\n if cur_sum > max_sum:\r\n max_arr = array[i: k + 1]\r\n max_sum = cur_sum\r\n return max_arr","sub_path":"Budda1.py","file_name":"Budda1.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"264304006","text":"# -*- coding:utf-8 -*-\n\nimport sys\nimport re\nimport pymongo\nimport time\nimport pymysql\nfrom pymysql import escape_string\n\nclass Chapter2Db():\n def __init__(self, chapter, conn):\n self.conn = conn\n self.cursor = conn.cursor()\n self.chapter = chapter\n self.chapter_01 = [] # records the first-level chapters\n self.chapter_02 = [] # records parent chapter + this chapter, plus the textbook\n self.chapter_03 = [] # records parent chapter + this chapter\n self.chapter_04 = [] # records parent chapter + this chapter\n self.grade_re = r'(一|二|三|四|五|六|七|八|九)年级'\n\n def deal_chapter_first(self, content_list):\n chapter = {}\n # special case: for many Chinese-language textbooks the first level is Unit 1\n aa = '**'.join([content_list[4],content_list[3]])\n if aa in self.chapter_01:\n return ''\n else:\n # write both name and book into chapter_01 at the same time\n self.chapter_01.append('**'.join([content_list[4],content_list[3]]))\n chapter['name'] = content_list[4]\n chapter['parent_source_id'] = 0 # the first level has parent_id = 0\n chapter['period'] = content_list[0]\n chapter['subject'] = content_list[1]\n chapter['version'] = content_list[2]\n chapter['level'] = 1\n grade_re = re.search(self.grade_re, content_list[3])\n if grade_re:\n grade = grade_re.group()\n chapter['grade'] = grade\n 
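# store the textbook title so each chapter row can be tied back to its book\n 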
chapter['text_book'] = content_list[3]\n return chapter\n\n def deal_chapter_second(self, content_list):\n # check whether we have reached the innermost chapter level\n if len(content_list) < 6:\n return ''\n chapter = {}\n # record every innermost chapter; on 菁优网 parent chapter names differ while child chapter names can repeat, so dedupe on the parent+child pair\n parent_chapter = '**'.join([content_list[4], content_list[5]])\n if parent_chapter in self.chapter_02:\n return ''\n else:\n self.chapter_02.append(parent_chapter)\n chapter['parent_chapter'] = parent_chapter\n chapter['name'] = content_list[5]\n chapter['period'] = content_list[0]\n chapter['subject'] = content_list[1]\n chapter['version'] = content_list[2]\n chapter['level'] = 2\n grade_re = re.search(self.grade_re, content_list[3])\n if grade_re:\n grade = grade_re.group()\n chapter['grade'] = grade\n chapter['text_book'] = content_list[3]\n # check whether this is the innermost chapter level\n return chapter\n\n def deal_chapter_third(self, content_list):\n if len(content_list) < 7:\n return ''\n chapter = {}\n # record every innermost chapter; on 菁优网 parent chapter names differ while child chapter names can repeat, so dedupe on the parent+child pair\n parent_chapter = '**'.join([content_list[5], content_list[6]])\n if parent_chapter in self.chapter_03:\n return ''\n else:\n self.chapter_03.append(parent_chapter)\n chapter['parent_chapter'] = parent_chapter\n chapter['name'] = content_list[6]\n chapter['period'] = content_list[0]\n chapter['subject'] = content_list[1]\n chapter['version'] = content_list[2]\n chapter['level'] = 3\n grade_re = re.search(self.grade_re, content_list[3])\n if grade_re:\n grade = grade_re.group()\n chapter['grade'] = grade\n chapter['text_book'] = content_list[3]\n return chapter\n\n def deal_chapter_four(self, content_list):\n if len(content_list) < 8:\n return ''\n chapter = {}\n # record every innermost chapter; on 菁优网 parent chapter names differ while child chapter names can repeat, so dedupe on the parent+child pair\n parent_chapter = '**'.join([content_list[6], content_list[7]])\n if parent_chapter in self.chapter_04:\n return ''\n else:\n self.chapter_04.append(parent_chapter)\n chapter['parent_chapter'] = parent_chapter\n chapter['name'] = content_list[7]\n chapter['period'] = content_list[0]\n chapter['subject'] = content_list[1]\n chapter['version'] = content_list[2]\n chapter['level'] = 4\n grade_re = re.search(self.grade_re, content_list[3])\n if grade_re:\n grade = grade_re.group()\n chapter['grade'] = grade\n chapter['text_book'] = content_list[3]\n # check whether this is the innermost chapter level\n return chapter\n\n def insert_one(self, chapter):\n if 'parent_source_id' in chapter.keys():\n # first level only\n sql = 'insert into chapters(period,subject,version,grade,name,level,parent_source_id,textbook) ' \\\n 'values(\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\");' % \\\n (chapter['period'], chapter['subject'], chapter['version'], chapter['grade'], escape_string(chapter['name']),\n chapter['level'], chapter['parent_source_id'], chapter['text_book'])\n else:\n # neither the first level nor the last level\n sql = 'insert into chapters(period,subject,version,grade,name,level,textbook,parent_chapter) ' \\\n 'values(\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\");' % \\\n (chapter['period'], chapter['subject'], chapter['version'], chapter['grade'], escape_string(chapter['name']),\n chapter['level'], chapter['text_book'],escape_string(chapter['parent_chapter']))\n try:\n self.cursor.execute(sql)\n self.cursor.connection.commit()\n except Exception as err:\n import pdb\n pdb.set_trace()\n for key, value in chapter.items():\n print(key, value)\n print(err)\n self.conn.rollback()\n\n def deal_chapter(self, content_list):\n # handle the first-level chapter first\n chapter_01 = self.deal_chapter_first(content_list)\n if chapter_01:\n self.insert_one(dict(chapter_01))\n # then the second level\n chapter_02 = 
self.deal_chapter_second(content_list)\n if chapter_02:\n self.insert_one(chapter_02)\n chapter_03 = self.deal_chapter_third(content_list)\n if chapter_03:\n self.insert_one(chapter_03)\n chapter_04 = self.deal_chapter_four(content_list)\n if chapter_04:\n self.insert_one(chapter_04)\n\n def process(self):\n num = 0\n for line in self.chapter:\n num += 1\n print(num)\n # if num == 603:\n # import pdb\n # pdb.set_trace()\n line = line.replace('\\t\\t','\\t')\n content_list = line.strip().split('\\t')\n self.deal_chapter(content_list)\n\ndef help(msg):\n print(msg)\n print('Usage:')\n print('python %s chapter_chinese.txt' % (sys.argv[0]))\n\ndef main():\n if len(sys.argv) < 2:\n help('params error')\n sys.exit(-1)\n chapter = sys.argv[1]\n try:\n chapter = open(chapter, 'r')\n except Exception as err:\n raise Exception('an exception occurred while opening chapter_02.txt')\n conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root', password='xyd911211', db='last', charset='utf8')\n chapter2db = Chapter2Db(chapter, conn)\n chapter2db.process()\n conn.close()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"GIT/crawl_aiyunxiao/relation/chapter.py","file_name":"chapter.py","file_ext":"py","file_size_in_byte":7359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"137234808","text":"import PySimpleGUI as sg\r\nimport sqlite3\r\nimport random\r\nimport os\r\n\r\nlogin=sqlite3.connect(\"example.db\")\r\nc=login.cursor()\r\n\r\ndef SearchPlayer(pname):\r\n try:\r\n sqliteConnection = sqlite3.connect('example.db')\r\n cursor = sqliteConnection.cursor()\r\n print(\"Database Connected! Ready to Search Player\")\r\n\r\n sqlite_search = \"\"\"SELECT * from 'players' where (name like ?);\"\"\"\r\n cursor.execute(sqlite_search, (pname,))\r\n sqliteConnection.commit()\r\n records = cursor.fetchall()\r\n \r\n #print(records)\r\n if len(records)>0:\r\n if records[0][3]!=0:\r\n avg=round(records[0][4]/records[0][3],4)\r\n else:\r\n avg=\"NaN\"\r\n if records[0][5]!=0:\r\n sr=round(records[0][4]*100/records[0][5],4)\r\n else:\r\n sr=\"NaN\"\r\n sg.Popup(\"Name: \"+records[0][0],\"Country: \"+records[0][1],\"Matches Played: \"+str(records[0][2]),\"Innings: \"+str(records[0][3]),\r\n \"Runs Scored: \"+str(records[0][4]),\"Average: \"+str(avg),\"Strike Rate: \"+str(sr),\"Balls Faced: \"+str(records[0][5]),\"Fours: \"+\r\n str(records[0][6]),\"Sixes: \"+str(records[0][7]),\"High Score: \"+str(records[0][8]),\r\n \"Balls Bowled: \"+str(records[0][9]),\"Wickets: \"+str(records[0][10]),\"Runs Given: \"+str(records[0][11]),\r\n \"Catches: \"+str(records[0][12])\r\n )\r\n else:\r\n sg.Popup(\"Player not found\")\r\n cursor.close()\r\n \r\n\r\n except sqlite3.Error as error:\r\n print(\"Failed to find player in sqlite table\", error)\r\n finally:\r\n if (sqliteConnection):\r\n sqliteConnection.close()\r\n print(\"The SQLite connection is closed\")\r\n\r\nlayout = [[sg.Text('Enter Player Name')],\r\n [sg.InputText(),sg.Button('Search')],\r\n \r\n [sg.Button('Main Menu')]]\r\n# Create the Window\r\nwindow = sg.Window('Search Player Details', layout)\r\n# Event Loop to process \"events\" and get the \"values\" of the inputs\r\nwhile True:\r\n event, values = window.read()\r\n print(values[0])\r\n if event=='Main Menu':\r\n window.close()\r\n os.system('usermenu.py') \r\n break\r\n print('You entered ', values[0])\r\n SearchPlayer(values[0]) 
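# look up the player in the database and show the stats popup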
\r\nwindow.close()\r\n\r\n\r\n","sub_path":"searchuser.py","file_name":"searchuser.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"82876461","text":"game_list=[]\nwhile True:\n option=int(input(\"\"\"\n 1-Add a Game,\n 2-Remove a Game,\n 3-Insert a Game\n 4-Exit (Quit)\n Make your choice.... 1,2, 3, or 4.....\n \"\"\"))\n\n if option==1:\n print(\"This option allows you to add a game to your list.\")\n print(game_list)\n game=input(\"What game would you like to add? \")\n game_list.append(game)\n print(game_list)\n\n elif option==2:\n print(\"This option allows you to remove a game from your list.\")\n print(game_list)\n \n while True:\n game=input(\"Which game would you like to remove from your list? \")\n if game in game_list:\n game_list.remove(game)\n print(game_list)\n break\n else:\n print(\"That game is not in the list.\")\n\n elif option==3:\n print(\"This option allows you to insert a game at a given position.\")\n while True:\n game=input(\"What game would you like to insert? \")\n pos=int(input(\"What position would you like to insert it at? \"))\n pos-=1\n if pos FIVE_MINUTES:\n logging.info('{} downtime {} to {}'.format(\n delta, last_datetime, this_datetime))\n\n\ndef parse_line(line, events, dt):\n height_str = line\n predicted_is_high = None\n predicted_is_low = None\n utc_datetime = dt.replace(tzinfo=pytz.UTC)\n if utc_datetime in events.keys():\n values = events[utc_datetime]\n if values[0]:\n predicted_is_high = True\n else:\n predicted_is_low = True\n height_m = float(height_str) / 1000.0\n return (Row(utc_datetime, predicted_tide_level=height_m,\n predicted_is_high=predicted_is_high,\n predicted_is_low=predicted_is_low))\n\n\ndef parse_header(line):\n (port_number_str, port_name_str, latitude_str, longitude_str, date_str,\n time_interval_minutes_str, tz_str) = line.split(\",\")\n port_number = int(port_number_str.strip('\"'))\n datetime_str = date_str + \" 00:00:00\"\n time_delta = datetime.timedelta(minutes=int(time_interval_minutes_str))\n utc_datetime = datetime.datetime.strptime(\n datetime_str, _DATETIME_FORMAT_2).replace(tzinfo=pytz.UTC)\n return (Header(port_number, port_name_str.strip('\"'),\n float(latitude_str), float(longitude_str),\n utc_datetime, time_delta, tz_str))\n\n\ndef chunks(l):\n for i in range(0, len(l), 3):\n yield l[i:i + 3]\n\n\ndef make_events_dict(elist, dt_str):\n num_events = len(elist)\n events_dict = {}\n for i in range(num_events):\n sub_str = \" \" + elist[i][0][0:2] + \":\" + elist[i][0][2:4] + \":00\"\n full_dt_str = dt_str.replace(\" 00:00:00\", sub_str)\n dt = datetime.datetime.strptime(\n full_dt_str, _DATETIME_FORMAT_3).replace(tzinfo=pytz.UTC)\n high = True if elist[i][1] == 'H' else False\n height = float(elist[i][2])\n events_dict[dt] = [high, height]\n return events_dict\n\n\ndef parse_tide_events(line, dt):\n str_list = line.split(\",\")\n events_list = list(chunks(str_list))\n dt_str = dt.strftime(_DATETIME_FORMAT_2)\n events_dict = make_events_dict(events_list, dt_str)\n return events_dict\n","sub_path":"tide_wrangler/parsers/ukho.py","file_name":"ukho.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"43063567","text":"import os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef get_cityscapes_labels():\r\n return np.array([[ 0, 0, 0], [128, 64, 128], [244, 35, 232], [ 70, 70, 70],\r\n [102, 102, 156], [190, 
153, 153], [153, 153, 153], [250, 170, 30],\r\n [220, 220, 0], [107, 142, 35], [152, 251, 152], [ 70, 130, 180],\r\n [220, 20, 60], [255, 0, 0], [ 0, 0, 142], [ 0, 0, 70],\r\n [ 0, 60, 100], [ 0, 80, 100], [ 0, 0, 230], [119, 11, 32]])\r\n\r\ndef encode_segmap(colormap):\r\n colormap = tensor2numpy(colormap) * 255.0\r\n dst = np.zeros((colormap.shape[0], colormap.shape[1], 1), dtype=np.uint8)\r\n for ii, color_id in enumerate(get_cityscapes_labels()):\r\n dst[np.where(np.all(colormap == color_id, axis=-1))[:2]] = ii\r\n return dst\r\n\r\ndef split_class(encoded_label):\r\n n_class = 20\r\n dst = np.zeros((encoded_label.shape[0], encoded_label.shape[1], n_class), dtype=np.uint8)\r\n for iter in range(1, n_class):\r\n pixels = np.where(encoded_label == iter)\r\n dst[pixels[0], pixels[1], iter] = 1\r\n return dst\r\n\r\ndef tensor2numpy(tensor):\r\n return tensor.numpy().transpose((1, 2, 0))\r\n\r\ndef split_colormap(colormap):\r\n labelmap = encode_segmap(colormap)\r\n classmap = split_class(labelmap)\r\n return classmap\r\n\r\ndef replace_segmap(label1, label2, ch):\r\n dst = label1.copy()\r\n dst[:,:,ch] = label2[:,:,ch].copy()\r\n return dst\r\n\r\ndef merge_class(splited_label):\r\n n_class = splited_label.shape[2]\r\n dst = splited_label[:,:,0].copy()\r\n for ch in range(1, n_class):\r\n pixels = np.where(splited_label[:,:,ch] == 1)\r\n dst[pixels] = ch\r\n return dst\r\n\r\ndef decode_segmap(encoded_label):\r\n n_class = 20\r\n label_colours = get_cityscapes_labels()\r\n r = encoded_label.copy()\r\n g = encoded_label.copy()\r\n b = encoded_label.copy()\r\n for ll in range(0, n_class):\r\n r[encoded_label == ll] = label_colours[ll, 0]\r\n g[encoded_label == ll] = label_colours[ll, 1]\r\n b[encoded_label == ll] = label_colours[ll, 2]\r\n rgb = np.zeros((encoded_label.shape[0], encoded_label.shape[1], 3))\r\n rgb[:, :, 0] = r / 255.0\r\n rgb[:, :, 1] = g / 255.0\r\n rgb[:, :, 2] = b / 255.0\r\n return rgb\r\n\r\ndef merge_classmap(classmap):\r\n classmap = tensor2numpy(classmap)\r\n labelmap = merge_class(classmap)\r\n colormap = decode_segmap(labelmap)\r\n return colormap\r\n\r\ndef merge_classmap_batch(classmap_batch):\r\n batch_size = classmap_batch.size(0)\r\n load_size = classmap_batch.size(2)\r\n colormap_batch = np.zeros((batch_size, load_size, load_size, 3), dtype=np.float32)\r\n for b in range(batch_size):\r\n colormap_batch[b] = merge_classmap(classmap_batch[b])\r\n\r\n return colormap_batch\r\n","sub_path":"data/edit_semantic_label.py","file_name":"edit_semantic_label.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"588274918","text":"'''\nCreated on 7 sept. 2018\n\n@author: Pascal Jakobi\n\n\n\n Possible tag types :\n . notApplicable - not applicable\n . restrictive - bit set of tag categories where all of the selected tag categories are required in the security clearance.\n . enumerated - integer set of tag categories, with tag further refined by the enumType.\n . permissive - bit set of tag categories where at least one of the selected tag categories are required in the security clearance\n . 
tagType7 - informative\n Tag types are required.\n ''' \nimport syslog\nfrom gettext import gettext as _ # assumed i18n hook; _() is used for the warning message below\n\nclass TagType(object):\n def __init__(self, fname, tagType):\n '''\n Tag type has a defined set of values as per xsd.\n '''\n if tagType.lower() == 'notApplicable'.lower(): self.value = 'notApplicable'\n elif tagType.lower() == 'restrictive'.lower(): self.value = 'restrictive'\n elif tagType.lower() == 'enumerated'.lower(): self.value = 'enumerated'\n elif tagType.lower() == 'permissive'.lower(): self.value = 'permissive'\n elif tagType.lower() == 'tagType7'.lower(): self.value = 'tagType7'\n else: \n syslog.syslog(syslog.LOG_WARNING,_('{0} : invalid tag type: {1}').format(fname,tagType))\n return None\n \n def get(self):\n return self.value","sub_path":"LabelServer/spif/TagType.py","file_name":"TagType.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"474507701","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Image',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('title', models.CharField(max_length=255)),\n ('image', models.ImageField(upload_to='static/media/')),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('updated_date', models.DateTimeField(auto_now=True)),\n ('is_active', models.BooleanField(default=True)),\n ('is_default_image', models.BooleanField(default=False)),\n ('mobile_version', models.BooleanField(default=False)),\n ('tablet_version', models.BooleanField(default=False)),\n ],\n options={\n 'verbose_name': 'Image',\n 'verbose_name_plural': 'Images',\n },\n ),\n ]\n","sub_path":"images/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"148169789","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\ndef main(args):\n a = int(input(\"Enter the triangle height: \"))\n c = \"*\"\n\n for x in range(1, a + 1, 1):\n b = x + (x - 1)\n e = (2 * x) - 5\n d = a - x\n if x > 1 and x < a:\n print(d * \" \", c, e * \" \", c)\n else:\n print(d * \" \", b * c)\n\n # for x in range(1, a + 1, 1):\n # b = x + (x - 1)\n # d = a - x\n # print(d * \" \", b * c)\n\n return 0\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"pyton/draw_trojkat.py","file_name":"draw_trojkat.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"398084197","text":"def main():\n\n while True:\n l = int(input())\n if l == 0:\n break\n in_street = [int(i) for i in input().split()][::-1]\n side_street = []\n cur_req = 1\n out_street = []\n possible = True\n while len(in_street) != 0 or len(side_street) != 0:\n # print(\"in_street\", in_street)\n # print(\"side_street\", side_street)\n # print(\"out_street\", out_street)\n if len(in_street) != 0 and in_street[-1] == cur_req:\n # print(\"in -> out\")\n in_street.pop()\n out_street.append(cur_req)\n cur_req += 1\n elif len(side_street) != 0 and side_street[-1] == cur_req:\n # print(\"side -> out\")\n side_street.pop()\n out_street.append(cur_req)\n cur_req += 1\n elif len(in_street) != 0:\n # print(\"in -> side\")\n cur = in_street[-1]\n in_street.pop()\n side_street.append(cur)\n else:\n possible = False\n break\n if not possible:\n print(\"no\")\n else:\n print(\"yes\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"stpar.py","file_name":"stpar.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"551386611","text":"from django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Wonder\nfrom logbook.serializers import WonderSerializer\n\nWONDER_URL = reverse('logbook:wonder-list')\n\n\nclass PublicWonderApiTests(TestCase):\n \"\"\"Test Wonder API for public users\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n\n def test_login_required(self):\n \"\"\"Test that login is required for retrieving wonders\"\"\"\n res = self.client.get(WONDER_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateWonderApiTests(TestCase):\n \"\"\"Test Wonder API for authorized users\"\"\"\n\n def setUp(self):\n self.user = get_user_model().objects.create_user(\n 'test123@gmail.com',\n 'password123'\n )\n self.client = APIClient()\n self.client.force_authenticate(self.user)\n\n def test_retrieve_wonders(self):\n \"\"\"Test retrieve wonders\"\"\"\n Wonder.objects.create(\n user=self.user,\n name='gray whale',\n category='animal'\n )\n Wonder.objects.create(\n user=self.user,\n name='akitsushima',\n category='wreck'\n )\n\n res = self.client.get(WONDER_URL)\n wonders = Wonder.objects.all().order_by('-name')\n serializer = WonderSerializer(wonders, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n\n def test_retrieve_wonders_to_limited_user(self):\n \"\"\"Test that retrieved wonders are limited to the authenticated user\"\"\"\n user2 = get_user_model().objects.create_user(\n 'test456@gmail.com',\n 'password456'\n )\n wonder = Wonder.objects.create(\n user=self.user,\n name='sperm 
whale',\n category='animal'\n )\n Wonder.objects.create(\n user=user2,\n name='turtle',\n category='animal'\n )\n\n res = self.client.get(WONDER_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n # res.data[0].name doesn't work\n self.assertEqual(res.data[0]['name'], wonder.name)\n\n def test_create_wonder_successful(self):\n \"\"\"Test create wonder successful\"\"\"\n payload = {\n 'name': 'eel',\n 'category': 'animal'\n }\n res = self.client.post(WONDER_URL, payload)\n\n wonder = Wonder.objects.filter(\n user=self.user,\n name=payload['name']\n )\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(wonder.exists())\n\n def test_create_wonder_failed(self):\n \"\"\"Test create wonder failed\"\"\"\n payload = {\n 'name': ''\n }\n res = self.client.post(WONDER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n","sub_path":"app/logbook/tests/test_wonder_api.py","file_name":"test_wonder_api.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"443062000","text":"import threading\nimport random\nimport time\nimport os\n\nlock = threading.Lock() # guards concurrent appends to the shared list\n\ndef writeData(tid, list, data):\n global lock\n\n if len(list) >= 10:\n return\n\n lock.acquire()\n list.append(data)\n lock.release()\n\ndef produceData(tid, list):\n\n while True:\n if len(list) < 10:\n data = []\n for i in range(4):\n data.append(random.randint(1, 100))\n print('tid = %d' %tid, end = ' ')\n print(data)\n writeData(tid, list, data)\n else:\n return\n\n'''\nfilename = 'thread-data.txt'\n\nfile = open(filename, 'a+')\n'''\n\ndef test():\n list = []\n for k in range(4):\n new_thread = threading.Thread(target=produceData,args=(k, list, ))\n new_thread.setDaemon(True)\n new_thread.start()\n \n print(\"list init finish\")\n print(len(list))\n print(list)\n\ntest()","sub_path":"python/code/test/thread-test.py","file_name":"thread-test.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"118301262","text":"import itertools\nimport copy\nfrom collections import defaultdict\nimport json\nimport math\n\nimport executor\nfrom scipy import stats\nimport numpy as np\n\n\n\n\nclass SkewAnalyzer:\n @staticmethod\n def calculate_factor(access_list):\n '''\n from a paper\n '''\n num_partitions = len(access_list)\n total = sum(access_list)\n # print(\"total\", total)\n best = 1 / num_partitions\n # print(\"best\", best)\n skew = 0\n for index in range(num_partitions):\n ratio = access_list[index] / total\n # print(index, ratio)\n if ratio < best:\n ratio = best + ((1 - ratio / best) * (1 - best))\n skew += math.log(ratio / best, 10)\n return skew / (math.log(1 / best, 10) * num_partitions)\n\n @staticmethod\n def calculate_factor_scipy(access_list):\n '''\n from scipy\n '''\n return stats.skew(access_list)\n\n @staticmethod\n def calculate_factor_std(access_list):\n return np.std(access_list)\n\n @staticmethod\n def calculate_max_mean(access_list):\n return max(access_list) / np.mean(access_list)\n\n @staticmethod\n def calculate_min_max(access_list):\n return min(access_list) / max(access_list)\n\n def calculate_factor_myown(access_list):\n import numpy as np\n avg = np.mean(access_list)\n return sum([abs(n - avg) 
for n in access_list]) / len(access_list)\n\n def calculate_factor_myown_percentage(access_list):\n import numpy as np\n avg = np.mean(access_list)\n return sum([abs(n - avg) for n in access_list]) / avg\n\n def calculate_bottleneck(access_list):\n import numpy as np\n load_avg = np.mean(access_list)\n load_max = max(access_list)\n return load_max / load_avg\n\n\nclass ColorAnalyzer:\n @staticmethod\n def calculate_colors(dp_records):\n return np.mean([len(set(partition_list)) for partition_list in dp_records.values()])\n\n\n\ndef calculate_load(dp_records, num_replicas_list, af_list):\n partition_load_records = dp_simulation.calculate_partition_loads(num_replicas_list, af_list)\n node_load_records = defaultdict(lambda: 0)\n for node_id, partition_list in dp_records.items():\n for partition_id in partition_list:\n node_load_records[node_id] += partition_load_records[partition_id]\n return node_load_records\n\ndef calculate_partition_load(dp_records, num_replicas_list, af_list):\n partition_load_records = dp_simulation.calculate_partition_loads(num_replicas_list, af_list)\n return partition_load_records\n\n\ndef generate_workload(workload_size, num_partitions, workload):\n workload_config = {\n 'kind': 'nature',\n 'num_chunks': num_partitions,\n }\n workload_config.update(workload)\n objects = [n for n in range(1, num_partitions+1)]\n w = SelectionModel.new(workload_config, objects)\n record = {}\n for n in objects:\n record[n] = 0\n for i in range(workload_size):\n n = w.select()\n record[n] += 1\n access_list = []\n for n in objects:\n access_list.append(record[n])\n #print(json.dumps(record, indent=4, sort_keys=True))\n return access_list\n\ndef test1():\n workload_list = {\n \"uniform-base\": {\"type\": \"uniform\"},\n #\"beta-least\": {\"type\": \"beta\", \"alpha\": 1, \"beta\": 1},\n #\"beta-less\": {\"type\": \"beta\", \"alpha\": 1.5, \"beta\": 1.5},\n \"beta-base\": {\"type\": \"beta\", \"alpha\": 2, \"beta\": 2},\n #\"beta-more\": {\"type\": \"beta\", \"alpha\": 4, \"beta\": 4},\n #\"beta-most\": {\"type\": \"beta\", \"alpha\": 5, \"beta\": 5},\n #\"normal-base\": {\"type\": \"normal\", \"loc\": 0, \"scale\": 1},\n #\"powerlaw-least\": {\"type\": \"powerlaw\", \"shape\": 2},\n #\"powerlaw-less\": {\"type\": \"powerlaw\", \"shape\": 2.5},\n \"powerlaw-base\": {\"type\": \"powerlaw\", \"shape\": 3},\n #\"powerlaw-more\": {\"type\": \"powerlaw\", \"shape\": 4},\n #\"powerlaw-most\": {\"type\": \"powerlaw\", \"shape\": 5},\n #\"gamma-least\": {\"type\": \"gamma\", \"shape\": 7},\n #\"gamma-less\": {\"type\": \"gamma\", \"shape\": 6},\n #\"gamma-base\": {\"type\": \"gamma\", \"shape\": 5},\n \"gamma-more\": {\"type\": \"gamma\", \"shape\": 4},\n \"gamma-most\": {\"type\": \"gamma\", \"shape\": 3},\n }\n workload_type = 'gamma'\n generate_workload(16, workload_list[workload_type])\n\n\ndef analyze_skew(M, N, k, scheme, af_list):\n\n dp_records, num_replicas_list = dp_simulation.run(M, N, k, scheme, af_list=af_list, show_output=False)\n\n # load per node\n load_records = calculate_load(dp_records, num_replicas_list, af_list)\n load_list = list(load_records.values())\n\n return {\n 'max-mean': SkewAnalyzer.calculate_max_mean(load_list),\n 'min-max': SkewAnalyzer.calculate_min_max(load_list),\n 'avg-colors': ColorAnalyzer.calculate_colors(dp_records)\n }\n\ndef analyze_partition_skew(M, N, k, scheme, af_list):\n\n dp_records, num_replicas_list = dp_simulation.run(M, N, k, scheme, af_list=af_list, show_output=False)\n\n partition_load_records = calculate_partition_load(dp_records, 
num_replicas_list, af_list)\n print('#######################')\n print(partition_load_records)\n print('#######################')\n #print(\"@@\", sum(partition_load_records[n] * num_replicas_list[n] for n in range(len(num_replicas_list))))\n total_replicas = sum(num_replicas_list)\n total_load = sum(af_list)\n expected_load_per_replica = (total_load / total_replicas) / (N/M)\n print(\"replica list:\")\n print(num_replicas_list)\n print(\"total replicas={}, total_load={}, load per replica={}\".format(total_replicas, total_load, expected_load_per_replica))\n partition_load_delta_list = [abs(partition_load_records[n] - expected_load_per_replica) for n in range(len(num_replicas_list))]\n #print(\"^^\", partition_load_delta_list)\n print(\"**\", sum(partition_load_delta_list))\n\n return calculate_skew_factor(partition_load_records)\n\ndef analyze_placement(M, N, k, scheme, af_list):\n dp_records, num_replicas_list = dp_simulation.run(M, N, k, scheme, af_list=af_list, show_output=False)\n\n load_records = calculate_load(dp_records, num_replicas_list, af_list)\n return calculate_skew_factor(list(load_records.values()))\n # return calculate_bottleneck(list(load_records.values()))\n\n\ndef main():\n init.setup_logging(default_level=logging.DEBUG, config_path=\"conf/logging.yaml\", log_dir=\"logs\", component=\"simulation\")\n script_dir = os.path.dirname(os.path.realpath(__file__))\n\n workload_list = {\n #\"uniform-base\": {\"type\": \"uniform\"},\n # \"beta-least\": {\"type\": \"beta\", \"alpha\": 1, \"beta\": 1},\n # \"beta-less\": {\"type\": \"beta\", \"alpha\": 1.5, \"beta\": 1.5},\n #\"beta-base\": {\"type\": \"beta\", \"alpha\": 2, \"beta\": 2},\n # \"beta-more\": {\"type\": \"beta\", \"alpha\": 4, \"beta\": 4},\n # \"beta-most\": {\"type\": \"beta\", \"alpha\": 5, \"beta\": 5},\n #\"normal-base\": {\"type\": \"normal\", \"loc\": 0, \"scale\": 1},\n # \"powerlaw-least\": {\"type\": \"powerlaw\", \"shape\": 2},\n # \"powerlaw-less\": {\"type\": \"powerlaw\", \"shape\": 2.5},\n \"powerlaw-base\": {\"type\": \"powerlaw\", \"shape\": 3},\n # \"powerlaw-more\": {\"type\": \"powerlaw\", \"shape\": 4},\n # \"powerlaw-most\": {\"type\": \"powerlaw\", \"shape\": 5},\n # \"gamma-least\": {\"type\": \"gamma\", \"shape\": 7},\n # \"gamma-less\": {\"type\": \"gamma\", \"shape\": 6},\n #\"gamma-base\": {\"type\": \"gamma\", \"shape\": 5},\n # \"gamma-more\": {\"type\": \"gamma\", \"shape\": 4},\n # \"gamma-most\": {\"type\": \"gamma\", \"shape\": 3},\n }\n\n iterations = 1\n\n M = 4\n N = 8\n #scheme_list = ['rainbow', 'monochromatic']\n scheme_list = ['rainbow']\n #scheme_list = ['monochromatic']\n #workload_type_list = ['uniform', 'beta', 'normal', 'powerlaw', 'gamma']\n #workload_type_list = ['normal']\n workload_type_list = ['powerlaw']\n #workload_skew_list = ['least', 'less', 'base', 'more', 'most']\n workload_skew_list = ['base']\n\n workload_size = 30000\n #k_list = [1, 4, 8, 16, 32, 64, 128]\n #k_list = [1, 4, 8, 16]\n #k_list = [4, 8]\n #k_list = [8, 16]\n k_list = [1, 4]\n\n\n\n # start multi-iteration simulation\n\n\n # reuse workload\n workload_records = {}\n for i in range(iterations):\n for workload_type in workload_type_list:\n num_partitions = M * k_list[-1]\n workload_name = \"{}-base\".format(workload_type)\n af_list = generate_workload(workload_size, num_partitions, workload_list[workload_name])\n workload_records[(workload_name, i)] = af_list\n\n for scheme in scheme_list:\n skew_records = {}\n for k in k_list:\n skew_records[k] = {}\n for workload_type, workload_skew in 
itertools.product(workload_type_list, workload_skew_list):\n skew_score_list = []\n for i in range(iterations):\n workload_name = \"{}-{}\".format(workload_type, workload_skew)\n af_list = workload_records[(workload_name, i)]\n print(\"##\", sum(af_list))\n k_largest = k_list[-1]\n aggregate_ratio = int(k_largest / k)\n num_partitions = M * k\n partition_workload = [sum(af_list[i*aggregate_ratio:(i+1)*aggregate_ratio])for i in range(num_partitions)]\n print(\"$$\", sum(partition_workload))\n #skew_score = analyze_skew(M, N, k, scheme, partition_workload)\n skew_score = analyze_partition_skew(M, N, k, scheme, partition_workload)\n skew_score_list.append(skew_score)\n skew_records[k][workload_name] = sum(skew_score_list) / len(skew_score_list)\n\n print('=================')\n print(json.dumps(skew_records, indent=True, sort_keys=True))\n\n default_setting = ExperimentConfig.new()\n default_setting.set_config('experiment.id', 'E29')\n output_dir = os.path.join(\n default_setting['experiment.result_dir'],\n default_setting['experiment.id']\n )\n output_path = os.path.join(output_dir, \"M{}_N{}_k{}_s{}_{}.json\".format(M, N, k_list[-1], workload_size, scheme))\n #output_path = os.path.join(output_dir, \"{}.json\".format(scheme))\n\n executor.execute(\"mkdir -p %s\" % output_dir)\n with open(output_path, 'w') as f:\n json.dump(skew_records, f, indent=True)\n\n\nif __name__ == \"__main__\":\n main()\n #test1()\n","sub_path":"mybenchmark/E32/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":10563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"344202151","text":"from direccion import Address\ncases = [\n ('645 VIRGINIA PARK ST', '645 VIRGINIA PARK, 48202'),\n ('400 RIVER PLACE DRIVE', '400 RIVER PLACE, 48207'),\n ('2808 Cambridge St, Detroit mi', '2808 Cambridge Ave, Detroit, 48221'),\n ('2808 Cambridge St, Detroit, mi', '2808 Cambridge Ave, Detroit, 48221')\n]\n\nimport nose.tools\n\ndef test_address():\n for c in cases:\n yield nose.tools.assert_equals, c[1], Address(c[0]).geocode()['attributes']['Match_addr']\n\ndef test_notify_fail():\n Address('123 Fake St', notify_fail=True).geocode()","sub_path":"direccion/tests/test_address.py","file_name":"test_address.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"87869096","text":"# DESCRIPTION: To load/write modes(and eigenvalues) from/to .dat(WEBnma mode file)\n# AUTHOR: dandan.xue@uib.no\n# DATE:\n\nimport numpy as np\nfrom .pdb import mass_protein, read_pdb\n\nHEADER = \\\n \"Normal modes text file from WEBnma\\n\" + \\\n \"Row 5: Eigenvalues(including first 6 trivial modes)\\n\" +\\\n \"Row 6: Sequence according to input PDB file (e.g. 
A.Gly123, \" + \\\n \"where A is chain, Gly is Glycine and 123 is position in sequence)\\n\" + \\\n \"Row 7 to last row: Normal modes vectors (columns: modes index, rows:\" + \\\n \"vectors X1,Y1,Z1, ..., Xn,Yn,Zn where n is the number of atoms)\"\n\n\nDELM = ' ' # delimiter used in modefiles \nDIG = 3 # digits kept for eigenvalues or modes in modefiles\n\n\ndef omega_to_lambda(w):\n \"\"\"\n Switch omega(raw eigenvalues in modefiles) squared values to \n lambda(frequencies)\n \"\"\"\n return abs(w)**0.5 / (2 * np.pi)\n\n\n\nT = 300\nUnits_k_B = 1.3806513e-23 * 6.0221367e23 / 1.0e3 \n# Transform a raw mode (MMTK definition) into a Vibrational mode\ndef rawmode_to_vibrational(mode_arr, freq, weights):\n amplitude = np.sqrt(2 * T * Units_k_B) / (2 * np.pi * freq)\n v_mode = [coord * amplitude / weights[i//3] for i, coord in enumerate(mode_arr)]\n return np.array(v_mode)\n\n\n# support WEBnma2 & 3 format and any others that fulfill the following rules\n'''\nChecking rules:\n1. the last 3*N lines are considered mode data (where N is C-alpha atom number)\n and one mode resides one column (delimited by white space)\n2. eigenvalues (in one line) are somewhere above the mode data \n3. residues (in one line) have full information, i.e., .\n'''\ndef verify_modes(filename, pdbfile) -> bool:\n pdb_info = read_pdb(pdbfile)\n ca_num = len(pdb_info.residues_full)\n \n with open(filename) as f:\n content = f.readlines()\n if len(content) < ca_num * 3:\n return False\n \n modes = content[-(ca_num*3):]\n mode_num = len(modes[0].strip().split(DELM))\n for m in modes[1:]:\n ls = m.strip().split(DELM)\n if len(ls) != mode_num:\n return False\n for e in ls:\n try:\n e = float(e)\n except ValueError:\n return False\n\n eigenvalues = None\n residues_full = None\n pdb_residues = DELM.join(pdb_info.residues_full).lower()\n for l in content[:-(ca_num * 3)]: \n if l.lower().strip() == pdb_residues:\n residues_full = l \n else:\n ls = l.strip().split(DELM)\n if len(ls) == mode_num:\n for e in ls:\n try:\n e = float(e)\n except ValueError:\n return False\n eigenvalues = ls\n return (eigenvalues is not None) and (residues_full is not None)\n\n \n\n# Load all eigenvalues and modes **including** the first 6 trivial ones\n# Modes are in 2D array keeping the same shape/position as they are in the modefile\n# i.e., Mode 6 is modes[:,6]\n# note: default load frequencies, rather than raw eigenvalues\n# note: specify format 'webnma2' will keep only 3 digits\ndef load_modes(filename, freq=True, vibr = False, load_res=False, format='webnma3'):\n f = open(filename)\n content = f.readlines() # including '\\n' also\n content = [l.strip() for l in content]\n f.close()\n \n if format[:6] == 'webnma':\n eigenvalues = [float(x) for x in content[4].split(DELM)]\n modes = [[float(v) for v in row.split(DELM)] for row in content[6:]]\n if format == 'webnma2':\n eigenvalues = [round(e, DIG) for e in eigenvalues]\n modes = [[round(v, DIG) for v in row] for row in modes]\n else:\n raise Exception('Unsupported modefile format: ' + format)\n \n if freq:\n eigenvalues = [omega_to_lambda(e) for e in eigenvalues]\n \n residues_full = None \n\n if vibr:\n if not freq:\n eigenvalues = [omega_to_lambda(e) for e in eigenvalues]\n residues_full = [r for r in content[5].split(DELM)]\n weights = mass_protein(residues_full, full=True)\n modes = np.array(modes)\n for i in range(6, len(eigenvalues)):\n modes[:,i] = rawmode_to_vibrational(modes[:,i], eigenvalues[i], weights)\n \n if load_res:\n if residues_full is None:\n residues_full = [r for r in 
content[5].split(DELM)]\n return np.array(eigenvalues), np.array(modes), np.array(residues_full)\n else:\n return np.array(eigenvalues), np.array(modes)\n\n \n# check if the modes in two modefiles are equal:\n# eigenvalues must be numerically equal, modes can be equal or have different directions\ndef comp_modefile(f1, f2, atol=1e-02):\n e1,m1 = load_modes(f1, False, format='webnma2')\n e2,m2 = load_modes(f2, False, format='webnma2')\n \n l = min(len(e1), len(e2))\n e1 = e1[:l]\n e2 = e2[:l]\n test1 = np.allclose(e1, e2, atol=atol)\n if test1:\n print('Eigenvalues are equal.')\n else:\n print('Eigenvalues are NOT equal.')\n return False\n\n l = min(m1.shape[1], m2.shape[1])\n m1 = m1[:, 6:l]\n m2 = m2[:, 6:l]\n \n test2 = np.allclose(m1,m2,atol=atol)\n if test2:\n print('Modes are equal.')\n return True\n else:\n test3 = np.allclose(abs(m1),abs(m2),atol=atol)\n if test3:\n print('Modes have equal scalars, but different directions.')\n return True\n else:\n print('Modes are NOT equal.')\n return False\n \n \n\ndef write_modefile(eigenvalues, modes, filename, residues):\n array2str = lambda arr: [str(round(a, DIG)) for a in arr]\n content = [ HEADER,\n DELM.join(array2str(eigenvalues)),\n DELM.join(residues)]\n for row in modes:\n content.append(DELM.join(array2str(row)))\n\n f = open(filename,'w')\n f.writelines(\"\\n\".join(content))\n f.close()\n\n \nif __name__ == '__main__':\n f = 'test_modefile.txt'\n write_modefile(np.array([1,2,3]), np.array([[4],[5],[6]]), f, 'xxx')\n print(load_modes(f))\n","sub_path":"utils/modefiles.py","file_name":"modefiles.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"250811150","text":"try:\n import parasail\nexcept ImportError:\n import sys, os\n myPath = os.path.dirname(os.path.abspath(__file__))\n sys.path.insert(0, myPath + '/../')\n import parasail\n\ndef test1():\n p = parasail.ssw_init(\"asdf\", parasail.blosum62, 1)\n r = parasail.ssw_profile(p, \"asdf\", 10, 1)\n\n print(p.s1)\n print(p.s1Len)\n print(r.cigarLen)\n print(r.cigar[0])\n\n r = parasail.sw_trace(\"asdf\", \"asdf\", 10, 1, parasail.blosum62)\n c = r.cigar\n print(c.len)\n print(c.seq[0])\n print(c.decode)\n\n p = parasail.profile_create_8(\"asdf\", parasail.blosum62)\n r = parasail.sw_trace_striped_profile_8(p, \"asdf\", 10, 1)\n c = r.cigar\n print(c.len)\n print(c.seq[0])\n\n r = parasail.sw_trace(\"asdf\", \"asdf\", 10, 1, parasail.blosum62)\n print(r.query)\n print(r.ref)\n\nif __name__ == '__main__':\n test1()\n","sub_path":"tests/test_ssw.py","file_name":"test_ssw.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
{"seq_id":"198737386","text":"cells = input().split(\"#\")\namount_of_water = int(input())\n\ntotal_effort = 0\ntotal_fire = 0\n\nprint(\"Cells:\")\n\nfor cell_str in cells:\n cell = cell_str.split(\" = \")\n cell_type = cell[0]\n cell_value = int(cell[1])\n is_valid = False\n\n if cell_type == \"High\" and 81 <= cell_value <= 125:\n is_valid = True\n elif cell_type == \"Medium\" and 51 <= cell_value <= 80:\n is_valid = True\n elif cell_type == \"Low\" and 1 <= cell_value <= 50:\n is_valid = True\n\n if is_valid and amount_of_water - cell_value >= 0:\n print(f\" - {cell_value}\")\n total_fire += cell_value\n total_effort += cell_value * 0.25\n amount_of_water -= cell_value\n\nprint(f\"Effort: {total_effort:.2f}\")\nprint(f\"Total Fire: {total_fire}\")","sub_path":"Programing 
Fundamentals Python/03. List Basics Exercise/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"127285414","text":"import os\nimport sys\nimport warnings\nimport numpy as np\nfrom astropy.wcs import WCS\nfrom astroquery.mast import Tesscut\nfrom astroquery.mast import Catalogs\nfrom astropy.coordinates import SkyCoord\nfrom astropy.table import Table, hstack\n\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\nclass Source(object):\n \"\"\"\n Get FFI cut using TESScut\n\n Parameters\n ----------\n name : str or float\n Target identifier (e.g. \"NGC 7654\" or \"M31\"),\n or coordinate in the format of ra dec (e.g. 351.40691 61.646657)\n size : int, optional\n The side length in pixel of TESScut image\n sector : int, optional\n The sector for which data should be returned. If None, returns the first observed sector\n search_gaia : boolean, optional\n Whether to search gaia targets in the field\n\n Attributes\n ----------\n z : numpy.ndarray\n Parametrized x (z // size) and y (z % size)\n wcs : astropy.wcs.WCS class\n World Coordinate Systems information of the FFI\n time : numpy.ndarray (1d)\n Time of each frame\n flux : numpy.ndarray (3d)\n Fluxes of each frame, spanning time space\n flux_err : numpy.ndarray (3d)\n Flux errors of each frame, spanning time space\n gaia : astropy.table.table.Table class\n Gaia information including ra, dec, brightness, projection on TESS FFI, etc.\n \"\"\"\n # variable parameters\n nstars = None\n star_index = [0]\n cguess = [0., 0., 0., 1., 0., 1., 3.]\n var_to_bounds = [(-0.5, 0.5), (-0.5, 0.5), (-0.1, 0.1), (0, 10.0), (-0.5, 0.5), (0, 10.0), (0, np.inf)]\n cut_size = 11\n gaia_cut = None\n flux_cut = None\n flux_cut_err = None\n inner_star = []\n\n def __init__(self, name, size=15, sector=None, search_gaia=True, mag_threshold=15):\n super(Source, self).__init__()\n self.name = name\n self.size = size\n self.z = np.arange(self.size ** 2)\n self.mag_threshold = mag_threshold\n catalog = Catalogs.query_object(self.name, radius=self.size * 21 * 0.707 / 3600, catalog=\"TIC\")\n ra = catalog[0]['ra']\n dec = catalog[0]['dec']\n coord = SkyCoord(ra, dec, unit=\"deg\")\n hdulist = Tesscut.get_cutouts(coord, self.size)\n sector_table = Tesscut.get_sectors(coord)\n self.sector_table = sector_table\n if sector is None:\n self.sector = sector_table['sector'][0]\n hdu = hdulist[0]\n else:\n self.sector = sector\n hdu = hdulist[list(sector_table['sector']).index(sector)]\n wcs = WCS(hdu[2].header)\n data_time = hdu[1].data['TIME']\n data_flux = hdu[1].data['FLUX']\n data_flux_err = hdu[1].data['FLUX_ERR']\n data_quality = hdu[1].data['QUALITY']\n\n data_time = data_time[np.where(data_quality == 0)]\n data_flux = data_flux[np.where(data_quality == 0), :, :][0]\n data_flux_err = data_flux_err[np.where(data_quality == 0), :, :][0]\n self.wcs = wcs\n self.time = data_time\n self.flux = data_flux\n self.flux_err = data_flux_err\n if search_gaia:\n catalogdata = Catalogs.query_object(self.name, radius=(self.size + 2) * 21 * 0.707 / 3600, catalog=\"Gaia\")\n # catalogdata.sort(\"phot_g_mean_mag\")\n gaia_targets = catalogdata[\n 'designation', 'phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag', 'ra', 'dec']\n x = np.zeros(len(gaia_targets))\n y = np.zeros(len(gaia_targets))\n tess_mag = np.zeros(len(gaia_targets))\n for i, designation in enumerate(gaia_targets['designation']):\n pixel = 
self.wcs.all_world2pix(\n np.array([gaia_targets['ra'][i], gaia_targets['dec'][i]]).reshape((1, 2)), 0)\n x[i] = pixel[0][0]\n y[i] = pixel[0][1]\n dif = gaia_targets['phot_bp_mean_mag'][i] - gaia_targets['phot_rp_mean_mag'][i]\n tess_mag[i] = gaia_targets['phot_g_mean_mag'][\n i] - 0.00522555 * dif ** 3 + 0.0891337 * dif ** 2 - 0.633923 * dif + 0.0324473\n if np.isnan(tess_mag[i]):\n tess_mag[i] = gaia_targets['phot_g_mean_mag'][i] - 0.430\n tess_flux = 10 ** (- tess_mag / 2.5)\n t = Table()\n t[f'tess_mag'] = tess_mag\n t[f'tess_flux'] = tess_flux\n t[f'tess_flux_ratio'] = tess_flux / np.max(tess_flux)\n t[f'Sector_{self.sector}_x'] = x\n t[f'Sector_{self.sector}_y'] = y\n t['variability'] = np.zeros(len(gaia_targets), dtype=int)\n gaia_targets = hstack([gaia_targets, t])\n gaia_targets.sort('tess_mag')\n\n gaia_table = Table(names=gaia_targets.colnames,\n dtype=('str', 'float64', 'float64', 'float64', 'float64', 'float64',\n 'float64', 'float64', 'float64', 'float64', 'float64', 'float64'))\n x_tess = gaia_targets[f'Sector_{self.sector}_x']\n y_tess = gaia_targets[f'Sector_{self.sector}_y']\n for i in range(len(gaia_targets)):\n if -2 < x_tess[i] < self.size + 1 and -2 < y_tess[i] < self.size + 1:\n gaia_table.add_row(gaia_targets[i])\n self.gaia = gaia_table\n\n if np.min(self.gaia['tess_mag']) > self.mag_threshold:\n print('Magnitude threshold too strict: no star in the field is brighter than it. Try a larger magnitude value.')\n self.nstars = 1\n else:\n nstars = np.where(self.gaia['tess_mag'] < self.mag_threshold)[0][-1]\n self.nstars = nstars\n\n x_table = gaia_table[f'Sector_{self.sector}_x']\n y_table = gaia_table[f'Sector_{self.sector}_y']\n for i in range(self.nstars):\n if -2 < x_table[i] < self.size + 1 and -2 < y_table[i] < self.size + 1:\n self.inner_star.append(i)\n else:\n self.gaia = None\n\n def star_idx(self, star_idx=None):\n \"\"\"\n Choose stars of interest (primarily for PSF fitting).\n\n Attributes\n ----------\n nstars : int\n Number of stars of interest, cut by a magnitude threshold\n star_index : list or str\n Star indexes for PSF fitting, list of indexes, int, None, or 'all'\n mag_threshold : int or float\n Magnitude cutoff; only stars brighter than this are fit\n \"\"\"\n\n if star_idx is None:\n self.star_index = np.array([], dtype=int)\n elif star_idx == 'all':\n self.star_index = np.arange(self.nstars - 1)\n elif type(star_idx) == int:\n self.star_index = np.array([star_idx])\n elif type(star_idx) == list and all(isinstance(n, (int, np.int64)) for n in star_idx):\n self.star_index = np.array(star_idx)\n elif type(star_idx) == np.ndarray and all(isinstance(n, (int, np.int64)) for n in set(star_idx)):\n self.star_index = star_idx\n else:\n raise TypeError(\"Star index (star_index) type should be a list or np.array of ints, int, None or 'all'. \")\n\n def ffi(self):\n self.z = np.arange(self.size ** 2)\n self.star_index = np.array([], dtype=int)\n self.nstars = np.where(self.gaia['tess_mag'] < self.mag_threshold)[0][-1]\n self.gaia_cut = None\n\n def cut(self, star_idx: int):\n \"\"\"\n The frame is divided into 9 regions: center, 4 corners and 4 edges. 
By deciding which\n region a star is at, returns a cut of its neighborhood.\n \"\"\"\n\n self.z = np.arange(121)\n x = self.gaia[f'Sector_{self.sector}_x']\n y = self.gaia[f'Sector_{self.sector}_y']\n x_mid = int(min(self.size - 6, max(x[star_idx], 5)))\n y_mid = int(min(self.size - 6, max(y[star_idx], 5)))\n self.flux_cut = self.flux[:, y_mid - 5:y_mid + 6, x_mid - 5:x_mid + 6]\n # self.flux_cut_err = self.flux_err[:, x_mid - 5:x_mid + 6, y_mid - 5:y_mid + 6]\n in_frame = (np.abs(x_mid - x) < 7) & (np.abs(y_mid - y) < 7)\n t = self.gaia[in_frame]\n\n t[f'Sector_{self.sector}_x'][:] = t[f'Sector_{self.sector}_x'] - x_mid + 5\n t[f'Sector_{self.sector}_y'][:] = t[f'Sector_{self.sector}_y'] - y_mid + 5\n self.gaia_cut = t\n self.star_index = [np.where(self.gaia['designation'][star_idx] == self.gaia_cut['designation'])[0][0]]\n nstars = np.where(self.gaia['tess_mag'] < self.mag_threshold)[0][-1]\n self.nstars = nstars\n\n\nif __name__ == '__main__':\n # target = Source('NGC 7654')\n source = Source('351.1378304925981 61.311247660185245', size=3, sector=24)\n","sub_path":"SEBIT/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":8688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"123931784","text":"import sys\nimport os\n\nsys.path.append('../GPU_INSCY')\nimport python.inscy as INSCY\nimport time\nimport numpy as np\n\nmethod = sys.argv[1]\nexperiment = sys.argv[2]\nrepeats = int(sys.argv[3])\nreal_no_clusters = int(sys.argv[4])\nexperiment_2nd = None\nif len(sys.argv) > 5:\n experiment_2nd = sys.argv[5]\nlarge = False\nif len(sys.argv) > 6:\n large = bool(int(sys.argv[6]))\n\nparams = {\"n\": [1500],\n \"c\": [4],\n \"d\": [15],\n \"N_size\": [0.01],\n \"F\": [1.],\n \"r\": [1.],\n \"num_obj\": [8],\n \"min_size\": [0.05],\n \"order\": [0],#-1],\n \"repeats\": repeats,\n \"real_no_clusters\": real_no_clusters,\n \"real_cls\":[real_no_clusters]}\n\n\nname = \"\"\n\nif method == \"INSCY\":\n function = INSCY.CPU\n name += \"INSCY\"\nif method == \"GPU-INSCY\":\n function = INSCY.GPU5\n name += \"GPU_INSCY\"\nif method == \"GPU-INSCY*\":\n function = INSCY.GPU_star\n name += \"GPU_INSCY_star\"\nif method == \"GPU-INSCY-memory\":\n function = INSCY.GPU_memory\n name += \"GPU_INSCY_memory\"\n\nif experiment == \"real_cls\":\n name += \"_real_cls\"\n params[\"real_cls\"] = [5, 10, 20, 30, 40]\n if large:\n name += \"_large\"\n params[\"n\"] = [25000]\nif experiment == \"n\":\n name += \"_n\"\n params[\"n\"] = [500, 1000, 2000, 4000, 8000]#, 2500, 5000, 10000]\n if large:\n name += \"_large\"\n params[\"n\"] = [500, 1000, 2000, 4000, 8000] + [i*8000 for i in range(2, 26)]\nif experiment == \"d\":\n name += \"_d\"\n params[\"d\"] = [5, 10, 15, 20, 25, 30]\n if large:\n name += \"_large\"\n params[\"n\"] = [25000]\n params[\"d\"] = [i*5 for i in range(1, 21)]\n\nif experiment == \"c\":\n name += \"_c\"\n params[\"c\"] = [2, 4, 6, 8, 10]\nif experiment == \"N_size\":\n name += \"_N_size\"\n params[\"N_size\"] = [0.001, 0.005, 0.01, 0.02]#, 0.05]\nif experiment == \"F\":\n name += \"_F\"\n params[\"F\"] = [.5, 1., 1.5, 2., 2.5]\nif experiment == \"r\":\n name += \"_r\"\n params[\"r\"] = [0., .2, .5, .7, 1.]\nif experiment == \"num_obj\":\n name += \"_num_obj\"\n params[\"num_obj\"] = [2, 4, 8, 16]\nif experiment == \"min_size\":\n name += \"_min_size\"\n params[\"min_size\"] = [0.01, 0.05, 0.10]\nif experiment == \"order\":\n name += \"_order\"\n params[\"order\"] = [0, 1, -1]\n\nif experiment_2nd == \"n\":\n name += \"_n\"\n 
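# the secondary axis overrides the n values chosen by the primary experiment\n 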
params[\"n\"] = [500, 1000, 2000, 2500, 5000, 10000]\nif experiment_2nd == \"d\":\n name += \"_d\"\n params[\"d\"] = [2, 5, 10, 15, 25]\nif experiment_2nd == \"c\":\n name += \"_c\"\n params[\"c\"] = [2, 4, 6, 8, 10]\nif experiment_2nd == \"N_size\":\n name += \"_N_size\"\n params[\"N_size\"] = [0.001, 0.005, 0.01, 0.02, 0.05]\nif experiment_2nd == \"F\":\n name += \"_F\"\n params[\"F\"] = [.5, 1., 1.5, 2., 2.5]\nif experiment_2nd == \"r\":\n name += \"_r\"\n params[\"r\"] = [0., .2, .5, .7, 1.]\nif experiment_2nd == \"num_obj\":\n name += \"_num_obj\"\n params[\"num_obj\"] = [2, 4, 8, 16]\nif experiment_2nd == \"min_size\":\n name += \"_min_size\"\n params[\"min_size\"] = [0.01, 0.05, 0.10]\nif experiment_2nd == \"order\":\n name += \"_order\"\n params[\"order\"] = [0, 1, -1]\n\nexperiment_file = 'experiments_data/' + name + '.npz'\nnp.savez(experiment_file, params=params)\n\nfor n in params[\"n\"]:\n for d in params[\"d\"]:\n for c in params[\"c\"]:\n for N_size in params[\"N_size\"]:\n for F in params[\"F\"]:\n for r in params[\"r\"]:\n for num_obj in params[\"num_obj\"]:\n for min_size in params[\"min_size\"]:\n for order in params[\"order\"]:\n for real_no_clusters in params[\"real_cls\"]:\n\n run_file = 'experiments_data/runs/' + method + \\\n \"n\" + str(n) + \"d\" + str(d) + \"c\" + str(c) + \\\n \"N_size\" + str(N_size) + \"F\" + str(F) + \"r\" + str(r) + \\\n \"num_obj\" + str(num_obj) + \"min_size\" + str(min_size) + \\\n \"order\" + str(order)\n\n if real_no_clusters != 4:\n run_file += 'cl' + str(real_no_clusters)\n run_file += '.npz'\n\n if os.path.exists(run_file):\n print(\"Experiment already preformed!\", method,\n \"n\", n, \"d\", d, \"c\", c, \"N_size\", N_size, \"F\", F, \"r\", r,\n \"num_obj\", num_obj, \"min_size\", min_size, \"order\", order)\n else:\n print(\"Running experiment...\", method,\n \"n\", n, \"d\", d, \"c\", c, \"N_size\", N_size, \"F\", F, \"r\", r,\n \"num_obj\", num_obj, \"min_size\", min_size, \"order\", order)\n times = []\n no_clusters = []\n subspaces_list = []\n clusterings_list = []\n for i in range(repeats):\n X = INSCY.load_synt(d, n, real_no_clusters, i)\n\n t0 = time.time()\n subspaces, clusterings = function(X, N_size, F, num_obj, int(n * min_size),\n r,\n number_of_cells=c, rectangular=True,\n entropy_order=order)\n\n\n t = time.time() - t0\n times.append(t)\n print(\"Finished \" + name + \", took: %.4fs\" % (time.time() - t0), i + 1, \"/\",\n repeats)\n no = INSCY.count_number_of_clusters(subspaces, clusterings)\n no_clusters.append(no)\n subspaces_list.append(subspaces)\n clusterings_list.append(clusterings)\n print(i, n, d, \"number of clusters:\", no)\n del X\n np.savez(run_file, times=times, no_clusters=no_clusters, subspaces_list=subspaces_list, clusterings_list=clusterings_list)\n","sub_path":"experiments/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"438876660","text":"import codecs\nimport requests\n\nAPI_URL = \"http://173.249.51.133:5000/osuTR-API/v1/get_messages\"\nLOG_PATH = \"../log/osutr-log.txt\"\n\ndef RequestChatLog(url = API_URL):\n\n request_content = {\"lineIndex\": \"1\"}\n try:\n r = requests.post(API_URL, timeout = 10.0, json=request_content)\n data = r.json()\n\n if data['success'] == True:\n return data['messages']\n else:\n print(\"chat API returned false\")\n return None\n except Exception as e:\n print(\"error while requesting chat : \" , e)\n return None\n\ndef GetMessages():\n \n 
msgs = RequestChatLog()\n if msgs is not None:\n file = codecs.open(LOG_PATH, \"w+\", \"utf-8\")\n for msg in msgs:\n file.write(msg)\n file.close()\n\n return LOG_PATH\n else:\n return None","sub_path":"src/chatCrawler.py","file_name":"chatCrawler.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"269557870","text":"from abc import abstractmethod\nfrom logging import Logger\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, NoReturn, Tuple, Union\n\nfrom numpy import inf\nimport torch\nfrom torch.nn import DataParallel, Module\nfrom torch.optim.optimizer import Optimizer\n\nfrom logger import TensorboardWriter\nfrom parse_config import ConfigParser\n\n\nclass BaseTrainer:\n \"\"\"\n Base class for all trainers.\n\n Attributes\n ----------\n config : parse_config.ConfigParser\n The config parsing object.\n\n device : torch.device\n The device the model will be trained on.\n\n epochs : int\n Number of epochs to train over.\n\n logger : logging.Logger\n Logging object.\n\n loss_fn : callable\n Loss function.\n\n loss_args : dict of {str, Any}\n Keyword arguments of the loss function.\n\n metric_fns : list of callable\n List of metric functions.\n\n metric_args : list of dict of {str, Any}\n List of keyword arguments of the metric functions, matched by index.\n\n mnt_best : float\n Current best recorded metric.\n\n mnt_mode : str\n What to monitor (\"off\", \"max\", or \"min\").\n\n model : torch.nn.Module\n The model.\n\n monitor : str\n Whether or not to monitor metrics (for early stopping).\n\n optimizer : torch.optim.Optimizer\n The optimizer.\n\n save_period : int\n How often (in epochs) to save a checkpoint.\n\n writer : logger.TensorboardWriter\n Writer object for TensorBoard logging.\n\n Methods\n -------\n train()\n Full training logic.\n \"\"\"\n def __init__(\n self,\n model: Module,\n loss_fn: Callable,\n loss_args: Dict[str, Any],\n metric_fns: List[Callable],\n metric_args: List[Dict[str, Any]],\n optimizer: Optimizer,\n config: ConfigParser,\n ):\n\n self.config: ConfigParser = config\n self.logger: Logger = config.get_logger(\"trainer\", config[\"trainer\"][\"verbosity\"])\n\n # Setup GPU device if available.\n self.device: torch.device\n device_ids: List[int]\n self.device, device_ids = self._prepare_device(config[\"n_gpu\"])\n\n # Move model into configured device(s).\n self.model: Module = model.to(self.device)\n if len(device_ids) > 1:\n self.model = DataParallel(model, device_ids=device_ids)\n\n # Set loss function and arguments.\n self.loss_fn: Callable = loss_fn\n self.loss_args: Dict[str, Any] = loss_args\n\n # Set all metric functions and associated arguments.\n self.metric_fns: List[Callable] = metric_fns\n self.metric_args: List[Dict[str, Any]] = metric_args\n\n # Set optimizer.\n self.optimizer: Optimizer = optimizer\n\n # Set training configuration.\n cfg_trainer: Dict[str, Any] = config[\"trainer\"]\n self.epochs: int = cfg_trainer[\"epochs\"]\n self.save_period: int = cfg_trainer[\"save_period\"]\n self.monitor: str = cfg_trainer.get(\"monitor\", \"off\")\n\n # Configuration to monitor model performance and save best.\n if self.monitor == \"off\":\n self.mnt_mode: str = \"off\"\n self.mnt_best: float = 0\n else:\n self.mnt_metric: str\n self.mnt_mode, self.mnt_metric = self.monitor.split()\n assert self.mnt_mode in [\"min\", \"max\"]\n\n self.mnt_best = inf if self.mnt_mode == \"min\" else -inf\n self.early_stop: float = cfg_trainer.get(\"early_stop\", inf)\n\n 
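# Epoch counter and checkpoint directory; a typical \"trainer\" config section\n # (hypothetical values) looks like {\"epochs\": 100, \"save_period\": 5, \"monitor\":\n # \"min val_loss\", \"early_stop\": 10, \"tensorboard\": true, \"verbosity\": 2}.\n 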
self.start_epoch: int = 1\n self.checkpoint_dir: Path = config.save_dir\n\n # Setup visualization writer instance.\n self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer[\"tensorboard\"])\n\n if config.resume is not None:\n self._resume_checkpoint(config.resume)\n\n @abstractmethod\n def _train_epoch(self, epoch: int) -> Union[Dict[str, Any], NoReturn]:\n \"\"\"\n Training logic for an epoch. If not implemented in child class, raise `NotImplementedError`.\n\n Parameters\n ----------\n epoch : int\n The current epoch.\n\n Returns\n -------\n dict\n A dictionary containing the logged information.\n\n Raises\n ------\n NotImplementedError\n If not implemented in child class.\n \"\"\"\n raise NotImplementedError\n\n def train(self) -> None:\n \"\"\"Full training logic.\"\"\"\n for epoch in range(self.start_epoch, self.epochs + 1):\n result: dict = self._train_epoch(epoch)\n\n # Save logged information in log dict.\n log: Dict[str, float] = {\"epoch\": epoch}\n key: str\n value: Union[float, List[float]]\n for key, value in result.items():\n if key == \"metrics\":\n assert isinstance(value, list)\n i: int\n mtr: str\n log.update({mtr.__name__: value[i] for i, mtr in enumerate(self.metric_fns)})\n\n elif key == \"val_metrics\":\n assert isinstance(value, list)\n log.update(\n {\"val_\" + mtr.__name__: value[i] for i, mtr in enumerate(self.metric_fns)}\n )\n\n else:\n assert isinstance(value, float)\n log[key] = value\n\n # Print logged info to stdout.\n for key, value in log.items():\n self.logger.info(\" {:15s}: {}\".format(str(key), value))\n\n # Evaluate model performance in accordance with the configured metric, save best\n # checkpoint as model_best.\n best: bool = False\n if self.mnt_mode != \"off\":\n try:\n # Check whether model performance improved or not, according to specified\n # metric(mnt_metric).\n improved: bool = (\n self.mnt_mode == \"min\" and log[self.mnt_metric] <= self.mnt_best\n ) or (self.mnt_mode == \"max\" and log[self.mnt_metric] >= self.mnt_best)\n\n except KeyError:\n self.logger.warning(\n \"Warning: Metric '{}' is not found.\\n\".format(self.mnt_metric)\n + \"Model performance monitoring is disabled.\"\n )\n self.mnt_mode = \"off\"\n improved = False\n not_improved_count: int = 0\n\n if improved:\n self.mnt_best = log[self.mnt_metric]\n not_improved_count = 0\n best = True\n else:\n not_improved_count += 1\n\n if not_improved_count > self.early_stop:\n self.logger.info(\n \"Validation performance didn't improve for {} \".format(self.early_stop)\n + \"epochs.\\nTraining stops.\"\n )\n break\n\n if epoch % self.save_period == 0:\n self._save_checkpoint(epoch, save_best=best)\n\n def _prepare_device(self, n_gpu_use: int) -> Tuple[torch.device, List[int]]:\n \"\"\"\n Setup GPU device if available, move model into configured device.\n\n Parameters\n ----------\n n_gpu_use : int\n The number of GPUs to use.\n\n Returns\n -------\n tuple\n A tuple of the device in use and a list of device IDs.\n \"\"\"\n n_gpu: int = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\n \"Warning: There's no GPU available on this machine, \"\n + \"training will be performed on CPU.\"\n )\n n_gpu_use = 0\n\n if n_gpu_use > n_gpu:\n self.logger.warning(\n \"Warning: The number of GPU's configured to use is {}, \".format(n_gpu_use)\n + \"but only {} are available on this machine.\".format(n_gpu)\n )\n n_gpu_use = n_gpu\n\n device: torch.device = torch.device(\"cuda:0\" if n_gpu_use > 0 else \"cpu\")\n list_ids: List[int] = 
list(range(n_gpu_use))\n return device, list_ids\n\n def _save_checkpoint(self, epoch: int, save_best: bool = False) -> None:\n \"\"\"\n Saving current state as a checkpoint.\n\n Parameters\n ----------\n epoch : int\n The current epoch.\n\n save_best : bool, optional\n If True, the saved checkpoint is renamed to \"model_best.pth\" (default is False).\n \"\"\"\n arch: str = type(self.model).__name__\n state: Dict[str, Any] = {\n \"arch\": arch,\n \"epoch\": epoch,\n \"state_dict\": self.model.state_dict(),\n \"optimizer\": self.optimizer.state_dict(),\n \"monitor_best\": self.mnt_best,\n \"config\": self.config,\n }\n filename: str = str(self.checkpoint_dir / \"checkpoint-epoch{}.pth\".format(epoch))\n torch.save(state, filename)\n\n self.logger.info(\"Saving checkpoint: {} ...\".format(filename))\n if save_best:\n best_path: str = str(self.checkpoint_dir / \"model_best.pth\")\n torch.save(state, best_path)\n self.logger.info(\"Saving current best: model_best.pth ...\")\n\n def _resume_checkpoint(self, resume_path: Union[Path, str]) -> None:\n \"\"\"\n Resume from saved checkpoints.\n\n Parameters\n ----------\n resume_path : pathlib.Path\n File path of the checkpoint to resume form.\n \"\"\"\n resume_path = str(resume_path)\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint: dict = torch.load(resume_path)\n\n self.start_epoch = checkpoint[\"epoch\"] + 1\n self.mnt_best = checkpoint[\"monitor_best\"]\n\n # Load architecture params from checkpoint.\n if checkpoint[\"config\"][\"arch\"] != self.config[\"arch\"]:\n self.logger.warning(\n \"Warning: Architecture configuration given in config file is different from that of\"\n + \" checkpoint. This may yield an exception while state_dict is being loaded.\"\n )\n self.model.load_state_dict(checkpoint[\"state_dict\"])\n\n # Load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint[\"config\"][\"optimizer\"][\"type\"] != self.config[\"optimizer\"][\"type\"]:\n self.logger.warning(\n \"Warning: Optimizer type given in config file is different from that of checkpoint.\"\n + \" Optimizer parameters not being resumed.\"\n )\n else:\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n\n self.logger.info(\n \"Checkpoint loaded. 
Resume training from epoch {}\".format(self.start_epoch)\n )\n","sub_path":"base/base_trainer.py","file_name":"base_trainer.py","file_ext":"py","file_size_in_byte":10898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"80605829","text":"'''\nThis should play the same role that speech act plays for posts.\n\nMake it easier to filter responses based on tags assigned and the\ndate of the latest post with a given tag assignment.\n\n'''\nfrom solariat.db.abstract import Document\nfrom .channel.base import Channel\nfrom solariat.db.fields import (\n NumField, ObjectIdField, StringField,\n ListField, ReferenceField, DateTimeField\n)\nfrom solariat.utils.timeslot import datetime_to_timeslot\n\n\ndef _create_response_tag(response, post, tag):\n create_args = dict(response_id=response.id,\n channel=response.channel.id,\n post=post.id,\n tag=tag.id,\n assignee=response.assignee,\n post_date=datetime_to_timeslot(post.created_at, 'hour'),\n status=response.status,\n intention_name=response.intention_name,\n intention_confidence=response.intention_confidence,\n punks=response.punks,\n starred=response.starred,\n message_type=response.message_type,\n relevance=response.relevance,\n actionability=response.actionability,\n skipped_list=response.skipped_list)\n r_t = ResponseTag(**create_args)\n r_t.save()\n\ndef handle_add_post(response, post):\n \"\"\"\n Handle the case where a new post is added to a response object.\n \"\"\"\n for tag in post.accepted_smart_tags:\n # If there is a previous response/tag assignment remove it, since this\n # just became the latest one.\n try:\n r_t = ResponseTag.objects.get(response_id=response.id, tag=tag.id)\n r_t.delete()\n except ResponseTag.DoesNotExist:\n pass\n _create_response_tag(response, post, tag)\n\ndef handle_remove_tag(response, post, tag):\n \"\"\"\n When we remove a tag from a post, if that was the latest one\n then we need to search for a previous one (if exists). Otherwise\n nothing to do.\n \"\"\"\n try:\n r_t = ResponseTag.objects.get(response_id=response.id, tag=tag.id,\n post=post.id)\n r_t.delete()\n except ResponseTag.DoesNotExist:\n pass\n for n_post in reversed(response.posts):\n if tag in n_post.accepted_smart_tags and post != n_post:\n # This is the first post which still has the given `tag`\n # applied to it.\n _create_response_tag(response, n_post, tag)\n break\n\ndef handle_add_tag(response, post, tag):\n \"\"\"\n Whenever a new tag is added to a post from this response, we should\n check if this is a post which is more current than any existing response tags.\n \"\"\"\n create_r_t = False\n try:\n r_t = ResponseTag.objects.get(response_id=response.id, tag=tag.id)\n if r_t.post_date < datetime_to_timeslot(post.created_at, 'hour'):\n r_t.delete()\n create_r_t = True\n except ResponseTag.DoesNotExist:\n create_r_t = True\n if create_r_t:\n _create_response_tag(response, post, tag)\n\ndef upsert_response_tags(response):\n \"\"\"\n Given a response object, refresh the entire response tag that matches.\n\n This should be made as efficient as possible. 
For now, just destroy everything\n previously created and recreate new entities when tags are updated.\n \"\"\"\n from ..utils.post import get_service_channel\n existing = ResponseTag.objects.find(response_id=str(response.id))\n for r_t in existing:\n r_t.delete()\n\n done_tags = []\n for post in reversed(response.posts):\n for tag in post.accepted_smart_tags:\n if (get_service_channel(Channel.objects.get(tag.parent_channel)) == response.service_channel\n and tag.id not in done_tags):\n done_tags.append(tag.id)\n _create_response_tag(response, post, tag)\n\n\nclass ResponseTag(Document):\n\n response_id = StringField(db_field='r_id', required=True) # This will be the same as the response so we can quickly get them\n channel = ReferenceField(Channel, db_field='cl')\n post = ObjectIdField(db_field='pt', required=True)\n tag = ObjectIdField(db_field='tc', required=True)\n assignee = ObjectIdField(db_field='ur')\n post_date = NumField(db_field='ts', required=True)\n assignment_expires_at = DateTimeField(db_field='ae')\n status = StringField(db_field='ss', default='pending')\n intention_name = StringField(db_field='in')\n skipped_list = ListField(ObjectIdField(), db_field='sl')\n intention_confidence = NumField(db_field='ic', default=0.0)\n punks = ListField(StringField(), db_field='ps')\n starred = ListField(ObjectIdField(), db_field='sd')\n message_type = NumField(db_field='mtp', default=0)\n relevance = NumField(db_field='re', default=0.0)\n actionability = NumField(db_field='ay', default=0.0)\n\n indexes = [('response_id'), ('tag')]\n\n","sub_path":"db/response_tag.py","file_name":"response_tag.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"525921552","text":"from __future__ import print_function\nimport sys\n\nif sys.version[0] == \"3\":\n from html.parser import HTMLParser\nelse:\n from HTMLParser import HTMLParser\n\nimport tools\nimport parser_types\n\nself_closed_tags = (\"link\", \"meta\", \"input\", \"br\", \"img\")\nshort_data_length = 20\n\nclass BlockHTMLParser(HTMLParser):\n\n def __init__( self, init_tag, init_tag_attr, verbose, debug, output, func_name ):\n HTMLParser.__init__(self)\n\n #### Flags ####\n self.__flag = False\n self.__short_data = None\n self.__last_tag = parser_types.LastTag()\n\n #### Counters ####\n self.__level = 0 # Nesting level to determine when to show the close tag.\n self.__offset = parser_types.Offset()\n\n #### Initial values ####\n self.__init_tag = init_tag # Tag for search.\n self.__init_tag_attr = init_tag_attr # default is None\n self.__debug = debug\n self.__verbose = verbose\n\n #### Output ####\n self.__output = output\n if self.__debug:\n print(self.__output, file = sys.stderr)\n\n #### Function for tag ####\n self.__my_func = None # Set default value.\n if func_name:\n import functions\n self.__my_func = functions.func_list[func_name]\n \n #### Verbose message ####\n self.show_comment(text=\"PARSER_BEGIN\", end=\"\\n\") \n\n\n def __del__(self):\n self.show_comment(start=\"\\n\", text=\"PARSER_END\")\n\n\n # Closure method (for the flag).\n def set_last_tag(self, tag):\n self.__last_tag.set_last_tag(tag, self.__flag)\n def get_last_tag(self):\n return self.__last_tag.get_last_tag()\n\n\n ###################\n ### Handle tags ###\n ###################\n\n def handle_startendtag(self, tag, attrs):\n\n # Search for query like \"link\" or \"meta\" and so on.\n # If the tag matches then try to show it and switch off the flag.\n if not self.__flag:\n\n if not 
self.check_tag(tag, attrs):\n return\n \n tag_attrs = tools.make_tag_attrs(attrs, self.__debug, self.__flag)\n self.show_selfclosed_tag(u\"<{tag_name} {tag_attrs}/>\".format(tag_name = tag, tag_attrs = tag_attrs))\n\n # Swithc off flag after selfclosed tag.\n self.__flag = False\n\n else:\n # Display selfclose tag when flag is set (Ex: \"img\" inside \"div\").\n tag_attrs = tools.make_tag_attrs(attrs, self.__debug, self.__flag)\n self.show_selfclosed_tag(u\"<{tag_name} {tag_attrs}/>\".format(tag_name = tag, tag_attrs = tag_attrs))\n\n self.set_last_tag(\"self_close\")\n\n\n\n def handle_starttag(self, tag, attrs):\n\n ## Handle when some wrong tags appear here. ##\n ## Ex: \n if tag in self_closed_tags:\n self.handle_startendtag(tag, attrs)\n return\n\n if not self.check_tag(tag, attrs):\n return\n\n # If specific function is set for this tag.\n if self.__my_func and tag == self.__my_func[0]:\n tag_attrs = self.__my_func[1](tag, attrs)\n self.show_start_tag(u\"<{tag_name} {tag_attrs}>\".format(tag_name = tag, tag_attrs = tag_attrs))\n\n # Else process a tag without function.\n else:\n # Show tag without attributes. \n if not len(attrs):\n self.show_start_tag(u\"<{tag}>\".format(tag = tag))\n\n # Show tag with attributes.\n else:\n tag_attrs = tools.make_tag_attrs(attrs, self.__debug, self.__flag)\n self.show_start_tag(u\"<{tag_name} {tag_attrs}>\".format(tag_name = tag, tag_attrs = tag_attrs))\n \n\n # Increase offset after open tag always.\n self.__offset.increase(self.__flag)\n\n self.set_last_tag(\"open\")\n\n\n def handle_endtag(self, tag):\n\n # Always Decrease offset after end tag.\n self.__offset.decrease(self.__flag)\n\n self.show_end_tag(u\"\".format(tag = tag)) \n self.check_end_tag(tag)\n\n self.set_last_tag(\"close\")\n\n\n def handle_data(self, data):\n if not data.isspace():\n self.show_data(data.strip())\n self.set_last_tag(\"data\")\n\n else:\n # Show close tag on the same line if\n # data is short or data is empty.\n self.__short_data = False\n\n\n\n ################\n #### Output ####\n ################\n\n # Show start tag from new line always.\n # Add new line before start tag if previous tag was start tag too,\n # Not show new line after start tag.\n def show_start_tag(self, full_tag_string):\n #if not self.__flag: return\n\n # Get tag length.\n tag_length = self.__offset.get_offset(full_tag_string)\n\n last_tag = self.get_last_tag()\n\n if last_tag in (\"open\", ):\n print(u\"\\n{0:>{w}}\".format(full_tag_string, w = tag_length), end='', file = self.__output ) \n else:\n print(u\"{0:>{w}}\".format(full_tag_string, w = tag_length), end='', file = self.__output ) \n\n if self.__debug:\n tools.show_tag_debug(full_tag_string, self.__level, self.__offset.cur_offset(), self.__last_tag )\n\n # Show selfclose tag from new line always.\n # Add new line before the tag last tag was opening tag.\n def show_selfclosed_tag(self, full_tag_string):\n #if not self.__flag: return\n\n # Get tag length.\n tag_length = self.__offset.get_offset(full_tag_string)\n\n if self.get_last_tag() == \"open\":\n print(u\"\\n{tag:>{w}}\".format(tag = full_tag_string, w = tag_length), file = self.__output )\n else:\n print(u\"{tag:>{w}}\".format(tag = full_tag_string, w = tag_length), file = self.__output )\n\n if self.__debug:\n tools.show_tag_debug(full_tag_string, self.__level, self.__offset.cur_offset(), self.get_last_tag())\n\n\n # Show end tag from new line if short_data is not set.\n # Show new line after end tag always.\n def show_end_tag(self, full_tag_string):\n if not self.__flag: 
return\n\n last_tag = self.get_last_tag()\n\n # Show tags with no data on the same line and print newline.\n if last_tag == \"open\" and not self.__short_data:\n print(u\"{}\".format(full_tag_string), file = self.__output )\n\n # Show tags with short data on the same line and print newline.\n elif last_tag == \"data\" and self.__short_data:\n print(u\"{}\".format(full_tag_string), file = self.__output )\n\n else:\n # In any another way show close tag from new line.\n tag_length = self.__offset.get_offset(full_tag_string)\n print(u\"{0:>{w}}\".format(full_tag_string, w = tag_length), file = self.__output ) \n\n if self.__debug:\n tools.show_tag_debug(full_tag_string, self.__level, self.__offset.cur_offset(), last_tag)\n\n\n # Show data from new line if data is long, and show new\n # line after it if so.\n def show_data(self, data_string):\n if not self.__flag: return\n\n # Short data show without new line.\n if len(data_string) <= short_data_length:\n print(u\"{}\".format(data_string), end='', file = self.__output )\n self.__short_data = True\n\n else:\n # Long data start fomr new line with offset.\n # Get data length.\n offset = self.__offset.get_offset(data_string)\n print(u\"\\n{0:>{w}}\".format(data_string, w = offset), file = self.__output )\n self.__short_data = False\n\n\n if self.__debug:\n tools.show_data_debug(data_string, self.__offset.cur_offset())\n\n\n def show_comment(self, start=\"\" , text=\"\", end=\"\"):\n if self.__verbose >= 1:\n print(u\"{start}{end}\".format(start=start, text=text, end=end), file = self.__output )\n\n\n ###################\n #### Checkings ####\n ###################\n\n def check_start_tag_with_attrs(self, tag, attrs):\n \"\"\" Check tags with attributes. \"\"\"\n\n if self.__init_tag == tag and tools.find_attr_in_cur_attrs(self.__init_tag_attr, attrs):\n if not self.__flag: \n self.__flag = True # Start to display html code.\n\n elif self.__init_tag == tag and self.__flag:\n self.__level += 1 # If meet that tag again, then add new nesting level.\n\n\n def check_start_tag(self, tag):\n \"\"\" Check tags without atributes. 
\"\"\"\n\n if self.__init_tag == tag: \n if not self.__flag:\n self.__flag = True\n else:\n self.__level += 1\n\n def check_end_tag(self, tag):\n if self.__init_tag == tag and self.__flag:\n\n if self.__level == 0:\n self.__flag = False\n\n else:\n self.__level -= 1\n\n def check_startendtag(self, tag):\n if tag == self.__init_tag:\n if not self.__flag:\n self.__flag = True\n\n def check_startendtag_with_attrs(self, tag, attrs):\n if self.__init_tag == tag and tools.find_attr_in_cur_attrs(self.__init_tag_attr, attrs):\n if not self.__flag: \n self.__flag = True # Start to display html code.\n\n\n def check_tag(self, tag, attrs):\n # Purpose of this method is to check if \"tag\" is same as self.__initial_tag,\n # And if so then change the flag or offset.\n # This function returns True if \"tag\" is same as self.__initital_tag,\n # and False otherwise.\n\n # Dont't need to check anything if flag is set, just show and control nesting.\n if self.__flag:\n if tag == self.__init_tag:\n self.__level += 1\n return True\n\n # Check current tag and return false if not the same.\n # All checkings with flag == False.\n if tag == self.__init_tag:\n\n # If attribute is not set (-a)\n if not self.__init_tag_attr:\n\n ##### Check tag without attributes #####\n self.__flag = True\n return True\n \n elif tools.find_attr_in_cur_attrs(self.__init_tag_attr, attrs):\n self.__flag = True\n return True\n\n # The tag was not found.\n return False\n\n","sub_path":"html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":10325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"606376993","text":"import numpy as np\nimport math\nimport cv2\n\n\nclass HomographyMapper:\n \"\"\"\n Applies homography mapping and inverse, either to points or images\n \"\"\"\n\n def __init__(self, M, output_size=None):\n \"\"\"\n\n :param M: 3x3 numpy homography matrix\n :param output_size: (w, h) optional output size tuple for transforming images\n \"\"\"\n\n self.M = M\n self.output_size = output_size\n\n def _reshape_to_2c(self, pts):\n \"\"\"\n\n :param pts: (N x 2) array\n :return: pts_2c: (N x 1 x 2) array for working with cv2.fisheye\n \"\"\"\n\n Np = pts.shape[0]\n pts_2c = np.full((Np, 1, 2), np.nan)\n for i in range(Np):\n pts_2c[i, 0, :] = pts[i, :]\n\n return pts_2c\n\n def _reshape_from_2c(self, pts_2c):\n \"\"\"\n\n :param pts_2c: (N x 1 x 2) array for working with cv2.fisheye\n :return: pts: (N x 2) array\n \"\"\"\n\n Np = pts_2c.shape[0]\n pts = np.full((Np, 2), np.nan)\n for i in range(Np):\n pts[i, :] = pts_2c[i, 0, :]\n\n return pts\n\n def map_pts(self, src):\n\n src_2c = self._reshape_to_2c(src)\n nan_inds = []\n for (i, pt) in enumerate(src_2c):\n if np.isnan(pt[0][0]) or np.isnan(pt[0][1]):\n nan_inds.append(i)\n dst_2c = cv2.perspectiveTransform(src=src_2c, dst=None, m=self.M)\n dst = self._reshape_from_2c(dst_2c)\n for i in nan_inds:\n dst[i] = [np.nan, np.nan]\n return dst\n\n def map_pts_inv(self, dst):\n\n dst_2c = self._reshape_to_2c(dst)\n nan_inds = []\n for (i, pt) in enumerate(dst_2c):\n if np.isnan(pt[0][0]) or np.isnan(pt[0][1]):\n nan_inds.append(i)\n src_2c = cv2.perspectiveTransform(src=dst_2c, dst=None, m=np.linalg.inv(self.M))\n src = self._reshape_from_2c(src_2c)\n for i in nan_inds:\n src[i] = [np.nan, np.nan]\n return src\n\n def map_img(self, src):\n\n if self.output_size is not None:\n output_size = self.output_size\n else:\n output_size = (src.shape[1], src.shape[0])\n\n dst = cv2.warpPerspective(src=src, M=self.M, 
dsize=output_size)\n return dst\n\n def map_img_inv(self, src):\n\n if self.output_size is not None:\n output_size = self.output_size\n else:\n output_size = (src.shape[1], src.shape[0])\n\n dst = cv2.warpPerspective(src=src, M=np.linalg.inv(self.M), dsize=output_size)\n return dst\n","sub_path":"src/mappers/homography_mapper.py","file_name":"homography_mapper.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"36428747","text":"# task type can be either 'classification' or 'regression', based on the target feature in the dataset\nTASK_TYPE = 'regression'\n\n# the header (all column names) of the input data file(s)\nHEADERS = ['key','x','y','alpha','beta','target']\n\n# the default values of all the columns of the input data, to help TF detect the data types of the columns\nHEADER_DEFAULTS = [[0.], [0.], [0.], [''], [''], [0.]]\n\n# column of type int or float\nNUMERIC_FEATURE_NAMES = [\"x\", \"y\"]\n\n# categorical features with few values (to be encoded as one-hot indicators)\nCATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {'alpha': ['ax01','ax02'], 'beta': ['bx01', 'bx02']}\n\n# categorical features with many values (to be treated using embedding)\nCATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET = {}\n\n# all the categorical feature names\nCATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys()) \\\n + list(CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET.keys())\n\n# all the feature names to be used in the model\nFEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES\n\n# target feature name (response or class variable)\nTARGET_NAME = 'target'\n\n# the class values target feature in a classification dataset\nTARGET_LABELS = []\n\n# column to be ignores (e.g. keys, constants, etc.)\nUNUSED_FEATURE_NAMES = ['key']","sub_path":"examples/synthetic-regression/trainer/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"91110320","text":"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport io\nimport random\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tensorboardX import SummaryWriter\n\nfrom tqdm import tqdm, trange\nfrom tempfile import TemporaryDirectory\nimport json\nfrom pathlib import Path\n\nfrom pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer,\n XLMConfig, XLMForSequenceClassification,\n XLMTokenizer, XLNetConfig,\n XLNetForSequenceClassification,\n XLNetTokenizer)\n\nfrom pytorch_transformers import AdamW, WarmupLinearSchedule\n\nfrom utils_doc_vp import (compute_metrics, convert_vp_examples_to_features, convert_vp_example_to_features_w_multi_hot_labels, convert_vp_example_to_features, output_modes, processors)\nfrom checkpoint_utils import (update_checkpoint_dict, clean_outdated_checkpoints)\nfrom modeling_bert import BertForVocabPrediction, BertForVocabPredictionAvgPool\n\n\nlogger = logging.getLogger(__name__)\n\n# ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig)), ())\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig)), ())\n\nMODEL_CLASSES = {\n 'bert': (BertConfig, BertForVocabPrediction, BertTokenizer),\n 'bert-avg-pool': (BertConfig, BertForVocabPredictionAvgPool, BertTokenizer),\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef save_model(args, global_step, model):\n # init dir\n output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # save\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n\ndef doc_vocab2multi_hot(doc_vocab, vocab_size):\n \"\"\" \n Input doc_vocab: batch_size * max_doc_vocab_size\n Return multi-hot tensors: doc_vocab_mh: batch_size * vocab_size\n \n \"\"\"\n # logger.info('doc_vocab: {}'.format(doc_vocab.size()))\n doc_vocab_mh = torch.zeros([len(doc_vocab), vocab_size], dtype=torch.float32)\n # todo: test index with ipython\n for doc_idx, vocab_ids in enumerate(doc_vocab):\n # logger.info('vocab_ids: {}'.format(vocab_ids.size()))\n vocab_ids = [int(i) for i in vocab_ids if i != -1]\n # vocab_ids = (vocab_ids != -1).nonzero().squeeze(1)\n # 
logger.info('vocab_ids [first 5, list]: {}'.format(vocab_ids[:5]))\n doc_vocab_mh[doc_idx, vocab_ids] = 1.0\n\n return doc_vocab_mh\n\n\ndef train(args, model, tokenizer):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter(log_dir=args.log_dir)\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n temp_path = os.path.join(args.data_dir, 'temp')\n if not os.path.exists(temp_path):\n os.makedirs(temp_path)\n train_dataset = PregeneratedDataset(training_path=args.data_dir, \n tokenizer=tokenizer,\n num_samples=args.num_training_examples,\n max_doc_vocab_size=args.max_doc_vocab_size,\n seq_len=args.max_seq_length,\n reduce_memory=args.reduce_memory,\n temp_path=temp_path)\n\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_dataset)\n else:\n train_sampler = DistributedSampler(train_dataset)\n \n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n # no_decay = ['bias', 'LayerNorm.weight'] # original\n no_decay = ['bias', 'gamma', 'beta']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n best_score = 0\n\n checkpoint_dict = dict() # {n_batches: f1}\n output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')\n headline = 'Step\\tTrain\\n'\n io.open(output_eval_file, 'a', encoding='utf-8').write(headline)\n\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n set_seed(args) # Added here for reproductibility (even between python 2 and 3)\n for epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n model.train()\n # turn doc vocab ids to multi-hot matrix\n batch[3] = doc_vocab2multi_hot(doc_vocab=batch[3], vocab_size=tokenizer.vocab_size)\n batch = tuple(t.to(args.device) for t in batch)\n # logger.info('Put batch onto args.device: {}'.format(args.device))\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids\n 'doc_vocab': batch[3]}\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)\n # logger.info('loss: {}'.format(loss))\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n scheduler.step() # Update learning rate schedule\n optimizer.step()\n model.zero_grad()\n global_step += 1\n\n # this part is changed\n avg_loss = None\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\n avg_loss = (tr_loss - logging_loss) / args.logging_steps\n tb_writer.add_scalar('train_loss', avg_loss, global_step)\n logging_loss = tr_loss\n\n record = '{}\\t{:.4f}\\n'.format(global_step, avg_loss)\n io.open(output_eval_file, 'a', encoding='utf-8').write(record)\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # logger.info('global_step: {}, save_steps: {}, global_step % args.save_steps: {}'.format(global_step, args.save_steps, global_step % args.save_steps))\n save_model(args, global_step, model)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n\n save_model(args, global_step, model) # save model at the end of each epoch\n\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n \n return global_step, tr_loss / global_step\n\n\nclass PregeneratedDatasetWithMultiHotLabels(Dataset):\n def __init__(self, training_path, tokenizer, num_samples, seq_len, 
reduce_memory=False, temp_path=None):\n self.vocab = tokenizer.vocab\n self.vocab_size = len(self.vocab)\n\n data_file = Path(training_path) / 'all.json'\n assert data_file.is_file()\n\n self.temp_dir = None\n self.working_dir = None\n if reduce_memory:\n self.temp_dir = TemporaryDirectory(dir=temp_path)\n self.working_dir = Path(self.temp_dir.name)\n logger.info(f'Initialized working_dir: {self.working_dir}')\n input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.int32)\n logger.info('Initialized placeholders with memmap: input_ids')\n input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n logger.info('Initialized placeholders with memmap: input_masks')\n segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n logger.info('Initialized placeholders with memmap: segment_ids')\n label_ids = np.memmap(filename=self.working_dir/'label_ids.memmap',\n shape=(num_samples, tokenizer.vocab_size), mode='w+', dtype=np.int32)\n label_ids[:] = -1\n logger.info('Initialized placeholders with memmap: label_ids')\n else:\n input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)\n input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)\n segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)\n label_ids = np.full(shape=(num_samples, tokenizer.vocab_size), dtype=np.int32, fill_value=-1)\n \n with data_file.open() as f:\n for i, line in enumerate(tqdm(f, total=num_samples, desc=\"Training examples\")):\n line = line.strip()\n example = json.loads(line)\n features = convert_vp_example_to_features_w_multi_hot_labels(example=example, max_seq_length=seq_len, tokenizer=tokenizer)\n input_ids[i] = features.input_ids\n segment_ids[i] = features.segment_ids\n input_masks[i] = features.input_mask\n label_ids[i] = features.label_ids\n assert i == num_samples - 1 # Assert that the sample count metric was true\n logging.info(\"Loading complete!\")\n self.num_samples = num_samples\n self.seq_len = seq_len\n self.input_ids = input_ids\n self.input_masks = input_masks\n self.segment_ids = segment_ids\n self.label_ids = label_ids\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, item):\n return (torch.tensor(self.input_ids[item].astype(np.int64)),\n torch.tensor(self.input_masks[item].astype(np.int64)),\n torch.tensor(self.segment_ids[item].astype(np.int64)),\n torch.tensor(self.label_ids[item].astype(np.float)))\n\n\n\nclass PregeneratedDataset(Dataset):\n def __init__(self, training_path, tokenizer, num_samples, max_doc_vocab_size, seq_len, reduce_memory=False, temp_path=None):\n data_file = Path(training_path) / 'all.json'\n assert data_file.is_file()\n\n self.temp_dir = None\n self.working_dir = None\n if reduce_memory:\n self.temp_dir = TemporaryDirectory(dir=temp_path)\n self.working_dir = Path(self.temp_dir.name)\n logger.info(f'Initialized working_dir: {self.working_dir}')\n input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.int32)\n logger.info('Initialized placeholders with memmap: input_ids')\n input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n logger.info('Initialized placeholders with memmap: input_masks')\n segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',\n 
shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n logger.info('Initialized placeholders with memmap: segment_ids')\n label_ids = np.memmap(filename=self.working_dir/'label_ids.memmap',\n shape=(num_samples, max_doc_vocab_size), mode='w+', dtype=np.int32)\n label_ids[:] = -1\n logger.info('Initialized placeholders with memmap: label_ids')\n else:\n input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)\n input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)\n segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)\n label_ids = np.full(shape=(num_samples, max_doc_vocab_size), dtype=np.int32, fill_value=-1)\n \n with data_file.open() as f:\n for i, line in enumerate(tqdm(f, total=num_samples, desc=\"Training examples\")):\n line = line.strip()\n example = json.loads(line)\n features = convert_vp_example_to_features(example=example, \n max_seq_length=seq_len,\n max_doc_vocab_size=max_doc_vocab_size,\n tokenizer=tokenizer)\n input_ids[i] = features.input_ids\n segment_ids[i] = features.segment_ids\n input_masks[i] = features.input_mask\n label_ids[i] = features.label_ids\n assert i == num_samples - 1 # Assert that the sample count metric was true\n logging.info(\"Loading complete!\")\n self.num_samples = num_samples\n self.max_doc_vocab_size = max_doc_vocab_size\n\n self.seq_len = seq_len\n self.input_ids = input_ids\n self.input_masks = input_masks\n self.segment_ids = segment_ids\n self.label_ids = label_ids\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, item):\n return (torch.tensor(self.input_ids[item].astype(np.int64)),\n torch.tensor(self.input_masks[item].astype(np.int64)),\n torch.tensor(self.segment_ids[item].astype(np.int64)),\n torch.tensor(self.label_ids[item].astype(np.int64)))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()))\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS))\n parser.add_argument(\"--task_name\", default=None, type=str, required=True,\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()))\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n parser.add_argument(\"--log_dir\", default=None, type=str, required=True,\n help=\"The log directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\", default=512, type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n parser.add_argument(\"--max_doc_vocab_size\", default=2048, type=int,\n help=\"The maximum size of vocabulary that a document can cover. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--reduce_memory\", action=\"store_true\",\n help=\"Store training data as on-disc memmaps to massively reduce memory usage\")\n\n parser.add_argument(\"--num_training_examples\", default=1604146, type=int,\n help=\"Training set size.\")\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument('--logging_steps', type=int, default=50,\n help=\"Log every X updates steps.\")\n parser.add_argument('--save_steps', type=int, default=50,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\")\n parser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument('--overwrite_cache', action='store_true',\n help=\"Overwrite the cached training and evaluation sets\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\n parser.add_argument('--fp16_opt_level', type=str, default='O1',\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"For distributed training: local_rank\")\n parser.add_argument('--server_ip', type=str, default='', help=\"For distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(args.output_dir))\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\n\n # Set seed\n set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)\n logging.getLogger(\"pytorch_transformers.tokenization_utils\").setLevel(logging.ERROR)\n num_labels = tokenizer.vocab_size\n\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, \n num_labels=num_labels, \n finetuning_task=args.task_name)\n model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n global_step, tr_loss = train(args, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n # if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n # os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if hasattr(model, 'module') else model # Take care 
of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n model.to(args.device)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/doc_vp/run_doc_vp.py","file_name":"run_doc_vp.py","file_ext":"py","file_size_in_byte":28887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"108251000","text":"from typing import Dict\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom deep_table.augmentation import Cutmix, Mixup\nfrom deep_table.nn.layers.mlp import SimpleMLPLayer\nfrom deep_table.nn.models.base import BaseModel\nfrom deep_table.nn.models.loss import InfoNCELoss\n\n\nclass SAINTPretrainModel(BaseModel):\n \"\"\"SAINT pretrain model.\n\n References:\n G. Somepalli, M. Goldblum, A. Schwarzschild, C. B. Bruss and T. Goldstein,\n “SAINT: Improved Neural Networks for Tabular Data via Row Attention and Contrastive Pre-Training,”\n ArXiv:2106.01342 [cs.LG], 2021. \n \"\"\"\n\n def __init__(\n self,\n encoder,\n dim_embed: int = 16,\n mask_prob: float = 0.1,\n alpha: float = 0.9,\n temp_param: float = 0.7,\n lambda_: float = 10,\n dim_z: int = 2,\n dim_g_hidden: int = 256,\n dim_mlp_hidden: int = 256,\n **kwargs,\n ) -> None:\n \"\"\"\n Args:\n encoder (`Encoder`): Encoder module used in training.\n dim_feature_estimator (int): Size of dimension in the hidden layer of MLP.\n The model predicts the original features using this MLP module.\n Defaults to 256.\n prob_row (float):\n Probability of applying row-wise swapping.\n Defaults to 0.2.\n prob_column (float):\n Probability of applying column-wise swapping.\n \"\"\"\n self.save_hyperparameters(ignore=\"encoder\")\n super(SAINTPretrainModel, self).__init__(encoder, **kwargs)\n\n def _build_network(self) -> None:\n self.cutmix = Cutmix(self.hparams.mask_prob)\n self.mixup = Mixup(self.hparams.alpha)\n\n self.num_categorical_features = self.encoder.num_categorical_features\n self.num_continuous_features = self.encoder.num_continuous_features\n self.num_categories = self.encoder.num_categories\n self.num_features = (\n self.num_categorical_features + self.num_continuous_features + 1\n )\n dim_representation = self.encoder.dim_out(is_pretrain=True)\n\n self.g1 = SimpleMLPLayer(\n in_dim=dim_representation,\n out_dim=self.hparams.dim_z,\n hidden_dim=self.hparams.dim_g_hidden,\n )\n self.g2 = SimpleMLPLayer(\n in_dim=dim_representation,\n out_dim=self.hparams.dim_z,\n hidden_dim=self.hparams.dim_g_hidden,\n )\n\n layers = []\n for i in range(self.num_features):\n if i < self.num_continuous_features:\n layers.append(\n SimpleMLPLayer(\n in_dim=dim_representation,\n out_dim=1,\n hidden_dim=self.hparams.dim_mlp_hidden,\n )\n )\n else:\n layers.append(\n SimpleMLPLayer(\n in_dim=dim_representation,\n out_dim=self.num_categories,\n hidden_dim=self.hparams.dim_mlp_hidden,\n )\n )\n\n self.feature_wise_mlp = nn.ModuleList(layers)\n\n def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]:\n x_origin = self._encoder(x)\n x_noisy = self._encoder(x, add_noise=True)\n\n if x_origin.dim() == 3:\n x_origin = x_origin.flatten(1)\n x_noisy 
= x_noisy.flatten(1)\n else:\n assert x_origin.dim() == 2\n\n z_origin = self.g1(x_origin)\n z_noisy = self.g2(x_noisy)\n x_reconstructed = [mlp(x_noisy) for mlp in self.feature_wise_mlp]\n return {\n \"z_origin\": z_origin,\n \"z_noisy\": z_noisy,\n \"con_reconstructed\": torch.stack(\n x_reconstructed[: self.num_continuous_features], 1\n )\n if self.num_continuous_features > 0\n else None,\n \"cat_reconstructed\": torch.stack(\n x_reconstructed[self.num_continuous_features :], 1\n )\n if self.num_categorical_features > 0\n else None,\n }\n\n def _encoder(self, x: Dict[str, Tensor], add_noise: bool = False) -> Tensor:\n if add_noise:\n if (self.num_continuous_features > 0) and (\n self.num_categorical_features > 0\n ):\n x_cat, x_con = self.cutmix(x[\"categorical\"], x[\"continuous\"])\n x = {\"categorical\": x_cat, \"continuous\": x_con}\n elif self.num_continuous_features > 0:\n x[\"continuous\"] = self.cutmix(x[\"continuous\"])[0]\n elif self.num_categorical_features > 0:\n x[\"categorical\"] = self.cutmix(x[\"categorical\"])[0]\n x = self.encoder.forward_embedding(x)\n x[\"in_backbone\"] = self.mixup(x[\"in_backbone\"])[0]\n else:\n x = self.encoder.forward_embedding(x)\n x = self.encoder.forward_backbone(x, is_pretrain=True)\n return x\n\n def training_step(self, batch: Dict[str, Tensor], batch_idx: int) -> Tensor:\n outputs = self(batch)\n loss = self.calculate_loss(batch, outputs, tag=\"train\")\n return loss\n\n def validation_step(self, batch: Dict[str, Tensor], batch_idx: int) -> None:\n outputs = self(batch)\n _ = self.calculate_loss(batch, outputs, tag=\"val\")\n\n def _setup_loss(self) -> None:\n self.contranstive_loss = InfoNCELoss()\n self.mse_loss = nn.MSELoss()\n self.cross_entropy_loss = nn.CrossEntropyLoss()\n\n def calculate_loss(\n self, batch: Dict[str, Tensor], outputs: Dict[str, Tensor], tag: str = \"train\"\n ) -> Tensor:\n contrastive_loss = self.contranstive_loss(\n outputs[\"z_origin\"], outputs[\"z_noisy\"], self.hparams.temp_param\n )\n denoising_loss = 0\n if outputs[\"con_reconstructed\"] is not None:\n denoising_loss = denoising_loss + self.mse_loss(\n outputs[\"con_reconstructed\"].squeeze(), batch[\"continuous\"]\n )\n for i in range(self.num_categorical_features):\n denoising_loss = denoising_loss + self.cross_entropy_loss(\n outputs[\"cat_reconstructed\"][:, i], batch[\"categorical\"][:, i]\n )\n loss = contrastive_loss + self.hparams.lambda_ * denoising_loss\n\n self.log(\n f\"{tag}_contrastive_loss\",\n contrastive_loss,\n on_epoch=(tag == \"valid\"),\n on_step=(tag == \"train\"),\n logger=True,\n prog_bar=True,\n )\n self.log(\n f\"{tag}_denoising_loss\",\n denoising_loss,\n on_epoch=(tag == \"valid\"),\n on_step=(tag == \"train\"),\n logger=True,\n prog_bar=True,\n )\n self.log(\n f\"{tag}_loss\",\n loss,\n on_epoch=(tag == \"val\"),\n on_step=(tag == \"train\"),\n logger=True,\n prog_bar=True,\n )\n return loss\n","sub_path":"deep_table/nn/models/pretraining/saint.py","file_name":"saint.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"641111193","text":"__author__ = 'Pragash'\nfrom stateNodes import stateNodes\n\npayoff = [[0 for x in range(3)] for y in range(3)]\nplayerPiece = \"X\"\ncompPiece = \"O\"\n\ndef initPayoff() :\n for i in range(3) :\n for j in range(3) :\n if j==1 and i == 1 :\n payoff[i][j] = 4\n elif i%2 == 0 and j%2 == 0 :\n payoff[i][j] = 3\n else :\n payoff[i][j] = 2\n\n\ndef printTable(data) :\n output = \"\"\n print(\"The table 
: \\n\")\n for i in range(0,len(data)) :\n for j in range(0,len(data[i])) :\n output += \"\\t\"+str(data[i][j])\n output += \"\\n\"\n print(output)\n\ndef playerMove(gameTable,x,piece) :\n r= x[0]\n c= x[1]\n payoff[r][c] = 0\n if gameTable[r][c] == \"-\" :\n gameTable[r][c] = piece\n return True\n else :\n print(\"Invalid move, try again\")\n return False\n\ndef initState(gameTable,Nodes) :\n\n return Nodes.addState(gameTable)\n\ndef utilityTest(comp,human) :\n pass\n\n\ndef terminalTest(state) :\n comp = []\n human = []\n for i in range(3) :\n for j in range(3) :\n if state[i][j] != \"-\" :\n if state[i][j] == \"X\" :\n human.append((i,j))\n else :\n comp.append((i,j))\n if len(comp) < 3 and len(human) < 3 :\n return 0\n else :\n return utilityTest(comp,human)\n\n pass\n\n\ndef checkDiagonal(r,c,table) :\n\n pairs = [(0,0),(0,2),(2,0),(2,2)]\n for i,j in pairs :\n if table[1][1] == playerPiece and table[(i+2)%4][(j+2)%4] == playerPiece :\n payoff[i][j] = 5\n break\n elif table[1][1] == compPiece and table[(i+2)%4][(j+2)%4] == compPiece :\n payoff[i][j] = 6\n break\n else :\n pass\n\n return payoff[r][c]\n\ndef checkVertical(r,c,table) :\n for i in range(3) :\n for j in range(3) :\n if table[(i+1)%3][j] == playerPiece and table[(i+2)%3][j] == playerPiece :\n payoff[i][j] = 5\n break\n elif table[(i+1)%3][j] == compPiece and table[(i+2)%3][j] == compPiece :\n payoff[i][j] = 6\n break\n else :\n pass\n return payoff[r][c]\n\ndef checkHorizontal(r,c,table) :\n for i in range(3) :\n for j in range(3) :\n if table[i][(j+1)%3] == playerPiece and table[i][(j+2)%3] == playerPiece :\n payoff[i][j] = 5\n break\n elif table[i][(j+1)%3] == compPiece and table[i][(j+2)%3] == compPiece :\n payoff[i][j] = 6\n break\n else :\n pass\n return payoff[r][c]\n\n\ndef checkIntersect(r,c,table) :\n for i in range(3) :\n for j in range(3) :\n if table[i][j] == \"-\" :\n if (table[(i+1)%3][j] == \"-\" and table[(i+2)%3][j] == playerPiece) or (table[(i+1)%3][j] == playerPiece and table[(i+2)%3][j] == \"-\") :\n if (table[i][(j+1)%3] == \"-\" and table[i][(j+2)%3] == playerPiece) or (table[i][(j+1)%3] == playerPiece and table[i][(j+2)%3] == \"-\") :\n payoff[i][j] = 7\n break\n return payoff[r][c]\n\ndef calculatePayoff(x,node) :\n r = x[0]\n c = x[1]\n #print(r,c)\n Table =[[None for m in range(3)] for n in range(3)]\n for i in range(3) :\n for j in range(3) :\n Table[i][j] = node.nodeTable[i][j]\n #print(Table)\n x = checkDiagonal(r,c,Table)\n y = checkVertical(r,c,Table)\n z = checkHorizontal(r,c,Table)\n m = checkIntersect(r,c,Table)\n\n #print(x,y,z)\n\n return max(x,y,z,m)\n\n\ndef calCompMove(stateNode,piece,Nodes) :\n tempTable = [[None for x in range(3)] for y in range(3)]\n for i in range(3) :\n for j in range(3) :\n tempTable[i][j] = stateNode.nodeTable[i][j]\n\n for i in range(3) :\n for j in range(3) :\n if tempTable[i][j] == \"-\" :\n tempTable[i][j] = piece\n poff = calculatePayoff((i,j),stateNode)\n tempNode = Nodes.addState(tempTable,poff)\n Nodes.addNode(tempNode)\n tempTable[i][j] = \"-\"\n #printTable(tempNode.nodeTable)\n\n\ndef main() :\n print(\"WELCOME TO TIC TAK TOE GAME\")\n gameTable = [[\"-\" for x in range(3)] for y in range(3)]\n \"\"\"--------INIT PAYOFF--------------\"\"\"\n initPayoff()\n Nodes = stateNodes()\n\n\n count = 0\n while count <= 3 :\n r = int(input(\"PLAYERS MOVE (enter row):\"))\n c = int(input(\"PLAYERS MOVE (enter column):\"))\n playerMove(gameTable,(r,c),playerPiece)\n printTable(gameTable)\n stateNode = initState(gameTable,Nodes)\n Nodes.clearNodes()\n\n 
calCompMove(stateNode,compPiece,Nodes)\n maxPayoff = Nodes.nodeList[0].payoff\n compMoveNode = Nodes.nodeList[0]\n Nodes.printTable()\n for node in Nodes.nodeList :\n if node.payoff > maxPayoff :\n maxPayoff = node.payoff\n compMoveNode = node\n else :\n continue\n for i in range(3) :\n for j in range(3) :\n gameTable[i][j] = compMoveNode.nodeTable[i][j]\n printTable(gameTable)\n Nodes.clearNodes()\n\n count += 1\n\nif __name__ == \"__main__\" :\n main()","sub_path":"IntelligentSystemsAssignments/IS_A2/ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"484647251","text":"from ferryschedules import gsheet\nfrom ferryschedules import cache\n\nclass Sitemap:\n SITEMAP_WORKSHEET_NUMBER = 0\n SCHEDULE_ID_COLUMN = 1\n ROOT_COLUMN = 2\n SLUG_COLUMN = 3\n ANCHOR_TEXT_COLUMN = 4\n SHORT_DESCRIPTION_COLUMN=5\n BREADCRUMB_TEXT_COLUMN=6\n\n # Retrieve sitemap worksheet from google sheet\n def __init__(self):\n self.worksheet = gsheet.get_worksheet(self.SITEMAP_WORKSHEET_NUMBER)\n\n\n def retrieve_all_links(self, california=False, newyork=False, washington=False):\n \n self.link_lists = cache.get('cached_link_lists')\n if self.link_lists == None:\n self.link_lists = []\n self.link_lists.append(self.worksheet.col_values(self.SCHEDULE_ID_COLUMN))\n self.link_lists.append(self.worksheet.col_values(self.ROOT_COLUMN))\n self.link_lists.append(self.worksheet.col_values(self.SLUG_COLUMN))\n self.link_lists.append(self.worksheet.col_values(self.ANCHOR_TEXT_COLUMN))\n self.link_lists.append(self.worksheet.col_values(self.SHORT_DESCRIPTION_COLUMN))\n self.link_lists.append(self.worksheet.col_values(self.BREADCRUMB_TEXT_COLUMN))\n # Transpose link list to pair up each link's info into its own list\n self.link_lists = list(map(list, zip(*self.link_lists)))\n cache.set('cached_link_lists', self.link_lists)\n\n if california:\n self.ca_links = []\n for link in self.link_lists:\n if link[1] == '/ca/':\n self.ca_links.append(link)\n return self.ca_links\n elif newyork:\n self.ny_links = []\n for link in self.link_lists:\n if link[1] == '/ny/':\n self.ny_links.append(link)\n return self.ny_links\n elif washington:\n self.wa_links = []\n for link in self.link_lists:\n if link[1] == '/wa/':\n self.wa_links.append(link)\n return self.wa_links\n else:\n return self.link_lists","sub_path":"ferryschedules/models/sitemap.py","file_name":"sitemap.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"206428942","text":"import os\nimport sys\nfrom keras.utils.np_utils import to_categorical\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras import applications\nfrom keras import callbacks\nfrom keras import optimizers\nfrom keras.callbacks import EarlyStopping\nfrom keras import backend as K\nimport tensorflow as tf\nimport numpy as np\nimport scipy.io as sio\nimport time\n\ndef main():\n start_time = time.time()\n input_base = '/srv/home/lerandc/outputs/712_STO/'\n input_sub_folder = ['0_0/','05_0/','025_025/','1_0/','1_1/','2_0/','2_2/','3_0/'] \n result_path = '/srv/home/lerandc/CNN/models/072418_vgg16_softmax/attempt2/'\n\n x_train_list = []\n y_train_list = []\n\n sx, sy = 0, 0\n\n for current_folder in input_sub_folder:\n input_folder = input_base + current_folder\n input_images = [image for image in 
os.listdir(input_folder) if 'Sr_PACBED' in image]\n\n for image in input_images:\n cmp = image.split('_')\n if ('noise' in image):\n label = int(cmp[-2][:])\n else:\n label = int(cmp[-1][:-4]) \n \n if (('noise100' in image)):\n\n img = np.load(input_folder + image).astype(dtype=np.float64)\n img = scale_range(img,0,1)\n img = img.astype(dtype=np.float32)\n img_size = img.shape[0]\n sx, sy = img.shape[0], img.shape[1]\n new_channel = np.zeros((img_size, img_size))\n img_stack = np.dstack((img, new_channel, new_channel))\n\n x_train_list.append(img_stack)\n y_train_list.append(label)\n\n nb_train_samples = len(x_train_list)\n print('Image loaded')\n print('input shape: ')\n print(sx, sy)\n print('training number: ')\n print(nb_train_samples)\n nb_class = len(set(y_train_list))\n x_train = np.concatenate([arr[np.newaxis] for arr in x_train_list])\n y_train = to_categorical(y_train_list, num_classes=nb_class)\n print('Size of image array in bytes')\n print(x_train.nbytes)\n np.save(result_path + 'y_train.npy', y_train)\n\n\n logs = [log for log in os.listdir(result_path) if 'log' in log]\n max_index = 0\n for log in logs:\n cur = int(log.split('_')[1])\n if cur > max_index:\n max_index = cur\n max_index = max_index + 1\n\n batch_size = 32\n # step 1\n save_bottleneck_features(x_train, y_train, batch_size, nb_train_samples,result_path)\n\n # step 2\n epochs = 12\n batch_size = 32 # batch size 32 works for the fullsize simulation library which has 19968 total files, total number of training file must be integer times of batch_size\n train_top_model(y_train, nb_class, max_index, epochs, batch_size, input_folder, result_path)\n\n # step 3\n epochs = 50\n batch_size = 32\n fine_tune(x_train, y_train, sx, sy, max_index, epochs, batch_size, input_folder, result_path)\n\n print('Total computing time is: ')\n print(int((time.time() - start_time) * 100) / 100.0)\n\n\ndef save_bottleneck_features(x_train, y_train, batch_size, nb_train_samples,result_path):\n model = applications.VGG16(include_top=False, weights='imagenet')\n print('before featurewise center')\n \n datagen = ImageDataGenerator(\n featurewise_center=True,\n rotation_range=90,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.1,\n horizontal_flip=1,\n vertical_flip=1,\n shear_range=0.05)\n \n\n datagen = ImageDataGenerator(\n featurewise_center=True)\n\n datagen.fit(x_train)\n print('made it past featurewise center')\n generator = datagen.flow(\n x_train,\n y_train,\n batch_size=batch_size,\n shuffle=False)\n print('made it past generator')\n\n bottleneck_features_train = model.predict_generator(\n generator, nb_train_samples // batch_size)\n print('made it past the bottleneck features')\n np.save(result_path + 'bottleneck_features_train.npy',\n bottleneck_features_train)\n\ndef train_top_model(y_train, nb_class, max_index, epochs, batch_size, input_folder, result_path):\n train_data = np.load(result_path + 'bottleneck_features_train.npy')\n train_labels = y_train\n print(train_data.shape, train_labels.shape)\n model = Sequential()\n model.add(Flatten(input_shape=train_data.shape[1:]))\n model.add(Dropout(0.3))\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.3))\n model.add(Dense(nb_class, activation='softmax'))\n\n # compile setting:\n lr = 0.001\n decay = 1e-6\n momentum = 0.9\n optimizer = optimizers.SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)\n loss = 'categorical_crossentropy'\n model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n \n bottleneck_log = result_path + 
'training_' + str(max_index) + '_bnfeature_log.csv'\n csv_logger_bnfeature = callbacks.CSVLogger(bottleneck_log)\n earlystop = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=3, verbose=1, mode='auto')\n\n model.fit(train_data,train_labels,epochs=epochs,batch_size=batch_size,shuffle=True,\n callbacks=[csv_logger_bnfeature, earlystop],verbose=2,validation_split=0.2)\n\n with open(bottleneck_log, 'a') as log:\n log.write('\\n')\n log.write('input images: ' + input_folder + '\\n')\n log.write('batch_size:' + str(batch_size) + '\\n')\n log.write('learning rate: ' + str(lr) + '\\n')\n log.write('learning rate decay: ' + str(decay) + '\\n')\n log.write('momentum: ' + str(momentum) + '\\n')\n log.write('loss: ' + loss + '\\n')\n\n model.save_weights(result_path + 'bottleneck_fc_model.h5')\n\ndef fine_tune(train_data, train_labels, sx, sy, max_index, epochs, batch_size, input_folder, result_path):\n print(train_data.shape, train_labels.shape)\n\n model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(sx, sy, 3))\n print('Model loaded')\n\n top_model = Sequential()\n top_model.add(Flatten(input_shape=model.output_shape[1:]))\n top_model.add(Dropout(0.3))\n top_model.add(Dense(256, activation='relu'))\n top_model.add(Dropout(0.3))\n top_model.add(Dense(52, activation='softmax'))\n\n top_model.load_weights(result_path + 'bottleneck_fc_model.h5')\n\n new_model = Sequential()\n for l in model.layers:\n new_model.add(l)\n new_model.add(top_model)\n\n # compile settings\n lr = 0.0001\n decay = 1e-6\n momentum = 0.9\n optimizer = optimizers.SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)\n loss = 'categorical_crossentropy'\n new_model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n\n finetune_log = result_path + 'training_' + str(max_index) + '_finetune_log.csv'\n csv_logger_finetune = callbacks.CSVLogger(finetune_log)\n earlystop = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=5, verbose=1, mode='auto')\n\n datagen = ImageDataGenerator(\n featurewise_center=True,\n rotation_range=90,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.1,\n horizontal_flip=1,\n vertical_flip=1,\n shear_range=0.05)\n\n datagen.fit(train_data)\n\n generator = datagen.flow(\n train_data,\n train_labels,\n batch_size=batch_size,\n shuffle=True)\n\n validation_generator = datagen.flow(\n train_data,\n train_labels,\n batch_size=batch_size,\n shuffle=True)\n\n new_model.fit_generator(generator,epochs=epochs,steps_per_epoch=len(train_data) // 32,validation_data=validation_generator,validation_steps=(len(train_data)//5)//32,\n callbacks=[csv_logger_finetune, earlystop],verbose=2)\n\n #new_model.fit(train_data, train_labels, epochs=epochs, batch_size=batch_size, shuffle=True, validation_split=0.2,\n #callbacks=[csv_logger_finetune, earlystop])\n\n with open(finetune_log, 'a') as log:\n log.write('\\n')\n log.write('input images: ' + input_folder + '\\n')\n log.write('batch_size:' + str(batch_size) + '\\n')\n log.write('learning rate: ' + str(lr) + '\\n')\n log.write('learning rate decay: ' + str(decay) + '\\n')\n log.write('momentum: ' + str(momentum) + '\\n')\n log.write('loss: ' + loss + '\\n')\n\n new_model.save(result_path + 'FinalModel.h5') # save the final model for future loading and prediction\n\n\ndef scale_range (input, min, max):\n input += -(np.min(input))\n input /= np.max(input) / (max - min)\n input += min\n return input \n\n# step 4 make predictions using experiment results\n\nif __name__ == '__main__':\n 
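# pin the GPU for this run; the device index is read from sys.argv[1] (usage assumption: python Classification_training.py 0)\n 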
os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(sys.argv[1])\n main()\n","sub_path":"Classification_training.py","file_name":"Classification_training.py","file_ext":"py","file_size_in_byte":8829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"427792045","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\n\n# analysis complete in this script include a re-analysis of a condensed dataset:\n# avg age\n# student count\n# most common ages\n# gender diversity\n# student postal area count\n# student attendance statuses count\n# student course count\n# time series plot experiment\n\n\ndf = pd.read_csv('../../Datasets/Created:Modified/Att_Condensed.csv')\n\n\ndf.head(3)\n\n\nstudentAge = df.groupby(['StudentID'])['Age'].mean()\nprint('Average Age:', round(np.mean(studentAge), 2))\n\n\nstudentCount = df.StudentID.unique()\nprint('Student Count:', studentCount)\n# 76\n\n\n\ncounts = Counter(studentAge)\n# remove/change the 3 to see all counts of ages - could be used for infographic\nageCount = dict(counts.most_common())\nprint('Most Common Ages:', ageCount)\n\n\n\nstudentGender = df.groupby(['StudentID'])['Gender'].max()\ncounts = Counter(studentGender)\ngenCount = dict(counts.most_common())\n\nprint('Gender Diversity:', genCount)\n\n\n\nstudentPO = df.groupby(['StudentID'])['PostalArea'].max()\ncounts = Counter(studentPO)\npoCount = dict(counts.most_common(50))\n\nprint('Student PO:', poCount)\n\n\n\nattStatus = df.groupby(['ID'])['Status'].max()\ncounts = Counter(attStatus)\nstatusCount = dict(counts.most_common())\n\nprint('Attendance by Status:', statusCount)\n\n\n\ncourse = df.groupby(['StudentID'])['CourseCode'].max()\ncounts = Counter(course)\ncourseCounts = dict(counts.most_common())\n\nprint('Courses:', courseCounts)\n\n\n\ndf.plot(x='classDateTime', y='Age')\n\n\n\nfig = plt.figure()\nax.plot(df['classDateTime'])\nax.set_yticklabels(df['Status'])\n\nplt.show()\n\n\n\n\n\n","sub_path":"ReAnalysisAttendanceCondensedDataset.py","file_name":"ReAnalysisAttendanceCondensedDataset.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"40729696","text":"# -*- coding: utf-8 -*-\r\nfrom aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\r\nfrom aliyunsdkdysmsapi.request.v20170525 import QuerySendDetailsRequest\r\nfrom aliyunsdkcore.client import AcsClient\r\nimport uuid\r\n\"\"\"\r\n短信产品-发送短信接口\r\nCreated on 2017-06-12\r\n\"\"\"\r\nREGION = \"cn-hangzhou\"\r\n# ACCESS_KEY_ID/ACCESS_KEY_SECRET 根据实际申请的账号信息进行替换\r\nACCESS_KEY_ID = \"LTAIkoaJot3E219D\"\r\nACCESS_KEY_SECRET = \"Ph0UZyYnTI8BpTWZSnoxHDjbAVk14p\"\r\nSIGN_NAME='爱上积分'\r\n\r\nacs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)\r\n# 请参考本文档步骤2\r\ndef send_sms(business_id, phone_number, sign_name, template_code, template_param=None):\r\n smsRequest = SendSmsRequest.SendSmsRequest()\r\n # 申请的短信模板编码,必填\r\n smsRequest.set_TemplateCode(template_code)\r\n # 短信模板变量参数,友情提示:如果JSON中需要带换行符,请参照标准的JSON协议对换行符的要求,比如短信内容中包含\\r\\n的情况在JSON中需要表示成\\\\r\\\\n,否则会导致JSON在服务端解析失败\r\n if template_param is not None:\r\n smsRequest.set_TemplateParam(template_param)\r\n # 设置业务请求流水号,必填。\r\n smsRequest.set_OutId(business_id)\r\n # 短信签名\r\n smsRequest.set_SignName(sign_name);\r\n # 
短信发送的号码,必填。支持以逗号分隔的形式进行批量调用,批量上限为1000个手机号码,批量调用相对于单条调用及时性稍有延迟,验证码类型的短信推荐使用单条调用的方式\r\n smsRequest.set_PhoneNumbers(phone_number)\r\n # 发送请求\r\n smsResponse = acs_client.do_action_with_exception(smsRequest)\r\n return smsResponse\r\n\r\ndef sendSMSTemplate(phone, code, template_id):\r\n business_id = uuid.uuid1()\r\n params = \"{{'code':'{}'}}\".format(code)\r\n send_sms( business_id, phone, SIGN_NAME,template_id,params)\r\n\r\n","sub_path":"i3jf/authx/SMSValidation/AliDayu/SendTemplateSMS.py","file_name":"SendTemplateSMS.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"399013246","text":"from setuptools import setup\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nwith open(\"aql/__init__.py\") as f:\n for line in f:\n if line.startswith(\"__version__\"):\n version = line.split('\"')[1]\n\nsetup(\n name=\"aql\",\n description=\"asyncio query generator\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n version=version,\n author=\"John Reese\",\n author_email=\"john@noswap.com\",\n url=\"https://github.com/jreese/aql\",\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries\",\n ],\n license=\"MIT\",\n packages=[\"aql\", \"aql.engines\", \"aql.tests\"],\n setup_requires=[\"setuptools>=38.6.0\"],\n install_requires=[\"attrs\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"398110013","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/techrec/cli.py\n# Compiled at: 2019-11-15 16:32:42\nimport os, sys\nfrom argparse import ArgumentParser, Action\nfrom datetime import datetime\nimport logging\nlogging.basicConfig(stream=sys.stdout)\nlogger = logging.getLogger('cli')\nCWD = os.getcwd()\nfrom . import forge\nfrom . import maint\nfrom .config_manager import get_config\nfrom . 
import server\n\ndef pre_check_permissions():\n\n def is_writable(d):\n return os.access(d, os.W_OK)\n\n if is_writable(get_config()['AUDIO_INPUT']):\n yield \"Audio input '%s' writable\" % get_config()['AUDIO_INPUT']\n if not os.access(get_config()['AUDIO_INPUT'], os.R_OK):\n yield \"Audio input '%s' unreadable\" % get_config()['AUDIO_INPUT']\n sys.exit(10)\n if is_writable(os.getcwd()):\n yield 'Code writable'\n if not is_writable(get_config()['AUDIO_OUTPUT']):\n yield \"Audio output '%s' not writable\" % get_config()['AUDIO_OUTPUT']\n logger.critical('Aborting')\n sys.exit(10)\n\n\ndef pre_check_user():\n if os.geteuid() == 0:\n yield \"You're running as root; this is dangerous\"\n\n\ndef pre_check_ffmpeg():\n path = get_config()['FFMPEG_PATH']\n if not path.startswith('/'):\n yield 'FFMPEG_PATH is not absolute: %s' % path\n from subprocess import check_output\n try:\n check_output([path, '-version'])\n except OSError:\n yield 'FFMPEG not found as ' + path\n\n elif not os.path.exists(path):\n yield 'FFMPEG not found in ' + path\n\n\nclass DateTimeAction(Action):\n\n def __call__(self, parser, namespace, values, option_string=None):\n if len(values) == 15 or len(values) == 13:\n parsed_val = datetime.strptime(values, '%Y%m%d-%H%M%S')\n else:\n raise ValueError(\"'%s' is not a valid datetime\" % values)\n setattr(namespace, self.dest, parsed_val)\n\n\ndef common_pre():\n prechecks = [\n pre_check_user, pre_check_permissions, pre_check_ffmpeg]\n configs = ['default_config.py']\n if 'TECHREC_CONFIG' in os.environ:\n for conf in os.environ['TECHREC_CONFIG'].split(':'):\n if not conf:\n continue\n path = os.path.realpath(conf)\n if not os.path.exists(path):\n logger.warn(\"Configuration file '%s' does not exist; skipping\" % path)\n continue\n configs.append(path)\n\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n for conf in configs:\n get_config().from_pyfile(conf)\n\n for check in prechecks:\n for warn in check():\n logger.warn(warn)\n\n\ndef main():\n parser = ArgumentParser(description='creates mp3 from live recordings')\n parser.add_argument('--verbose', '-v', action='count', default=0, help='Increase verbosity; can be used multiple times')\n parser.add_argument('--pretend', '-p', action='store_true', default=False, help='Only pretend; no real action will be done')\n sub = parser.add_subparsers(title='main subcommands', description='valid subcommands')\n serve_p = sub.add_parser('serve', help='Start an HTTP server')\n serve_p.set_defaults(func=server.main_cmd)\n forge_p = sub.add_parser('forge', help='Create an audio file')\n forge_p.add_argument('starttime', metavar='START', help='Start time, espressed as 19450425_1200 (%%Y%%m%%d-%%H%%M%%S)', action=DateTimeAction)\n forge_p.add_argument('endtime', metavar='END', help='End time, espressed as 19450425_1200 (%%Y%%m%%d-%%H%%M%%S)', action=DateTimeAction)\n forge_p.add_argument('-o', metavar='OUTFILE', dest='outfile', default='out.mp3', help='Path of the output mp3')\n forge_p.set_defaults(func=forge.main_cmd)\n cleanold_p = sub.add_parser('cleanold', help='Remove old files from DB', description='Will remove oldfiles with no filename from DB')\n cleanold_p.add_argument('-t', metavar='MINAGE', dest='minage', default='14', type=int, help='Minimum age (in days) for removal')\n cleanold_p.set_defaults(func=maint.cleanold_cmd)\n options = parser.parse_args()\n options.cwd = CWD\n if options.verbose < 1:\n logging.basicConfig(level=logging.WARNING)\n elif options.verbose == 1:\n logging.basicConfig(level=logging.INFO)\n elif 
options.verbose >= 2:\n logging.basicConfig(level=logging.DEBUG)\n if options.verbose > 2:\n logging.info('giving verbose flag >2 times is useless')\n common_pre()\n options.func(options)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/techrec-1.2.0-py2.7/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"401132894","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport time\n\nfrom timer_decorator import timer\nfrom rons_tutorial_formatting import *\n\nprint_block_separator()\n\n\ndef logger(old_function):\n\n def new_function(*args):\n print (\"LOG: args = %s\" % str(args))\n return old_function(*args)\n\n return new_function\n\n\ndef func1(s):\n return s.upper()\n\n\n@logger\ndef func2(s):\n return s * 3\\\n\n\n\n@logger\ndef func3(x, y):\n return \"Got coordinates: ({}, {})\".format(x, y)\n\n\ndef foo1(function_handler):\n res = function_handler(\"hello\")\n print (res)\n\nfoo1(func1)\n\n\ndef foo():\n def yaa():\n print (\"Hello from hell!!!\")\n yaa()\n\n\nprint (func1(\"ron\"))\n\nend_block()\n\nfunc1 = logger(func1)\n\nstart_block()\n\nprint (func1(\"ron2\"))\n\nend_block()\n\nstart_block()\n\nprint (func2(\"duplicate this ! \"))\n\nend_block()\n\nstart_block()\n\nprint (func3(7, 12))\n\nend_block()\n\nstart_block()\n\n\n@timer\ndef slow_function(outer, inner):\n result = list()\n for i in range(1, outer):\n for j in range(1, inner):\n result.append(\"i,j = {},{}\".format(i, j))\n # time.sleep(1)\n return result\n\nmy_result = slow_function(300, 400)\n\nprint (my_result.__len__())\nprint (timer(slow_function)(300, 400).__len__())\nprint (timer(timer(slow_function))(300, 400).__len__())\nprint (timer(timer(timer(slow_function)))(300, 400).__len__())\nprint (timer(timer(timer(timer(slow_function))))(300, 400).__len__())\n\nend_block()\n\n","sub_path":"Python/AdvancedTutorials/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"487382937","text":"# multistate sales calculator\n\norder_subtotal = float(input(\"What is your order total? \"))\nuser_state = input(\"What state are you shipping to (abbreviation)? \")\nuser_state = user_state.upper()\n\nif user_state == \"WI\":\n\tuser_county = input(\"What county? 
\")\n\tuser_county = user_county.upper()\n\t\n\tif user_county == \"EAU CLAIRE\":\n\t\tprint(\"0.05% tax\")\n\telif user_county == \"DUNN\":\n\t\tprint(\"0.4% tax\")\n\telse:\n\t\tprint(\"No tax :D\")\nelif user_state == \"IL\":\n\tprint(\"8% tax for all counties :C\")\nelse:\n\tprint(\"No tax :D\")\n","sub_path":"20/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"306991629","text":"\"\"\"\nDeletes the specific key from the numpy database.\n\"\"\"\n\nimport numpy as np\n\ngg = 1\nwhile gg != '0':\n # User inputs and load database\n Drive = input('Drive: ').upper()\n Drive = Drive + ':'\n Job = input('Enter Job Number: ')\n JobNumber = \"44OP-\" + str(Job)\n EFSJobDict = np.load(Drive + \"\\\\00_CN_(RENAME)_v3\\\\JobList.npy\").item()\n\n # Delete key values in the npy database\n try:\n del EFSJobDict[JobNumber]\n np.save(Drive + \"00_CN_(RENAME)_v3\\\\JobList.npy\", EFSJobDict)\n except KeyError:\n print('This job does not exist in the database.')\n np.save(Drive + \"00_CN_(RENAME)_v3\\\\JobList.npy\", EFSJobDict)\n gg = input(\"Press 'enter' to continue, Press '0' to exit: \")\n\n\n","sub_path":"Edit_Database.py","file_name":"Edit_Database.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"213540332","text":"import sqlite3\r\nimport unittest\r\nimport sys\r\n\r\n# Note: Use the tables.sql file to build the database tables BEFORE running this or any other tests!\r\n\r\nsys.path.append(\"../\")\r\n\r\nfrom lootbag import Lootbag\r\n\r\nclass Testing(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n self.lootbag = Lootbag('../bag_o_loot.db')\r\n\r\n# ==================================================================\r\n\r\n# 1. test if items can be added to bag, and assigned to a child.\r\n\r\n def test01_add_toy(self):\r\n # function returns same ID that is passed in\r\n self.assertEqual(self.lootbag.add(\"Hotwheels\", 1), 1)\r\n # database table will have toyID of 1\r\n self.assertEqual(self.lootbag.get_toyID(\"Hotwheels\"), 1)\r\n # database table will return childID of 1 when toy name passed in\r\n self.assertEqual(self.lootbag.ls(1, True), \"Hotwheels\")\r\n\r\n# ==================================================================\r\n\r\n# 2. Items can be removed from bag, per child.\r\n# Removing a toy from the bag should not be allowed. A child's name must be specified.\r\n\r\n def test02_remove_toy(self):\r\n # database has a toy with the name Hotwheels, and its ID is 1\r\n self.assertEqual(self.lootbag.get_toyID(\"Hotwheels\"), 1)\r\n # database has a child \"Brendan\" with an ID of 1\r\n self.assertEqual(self.lootbag.get_childID(\"Brendan\"), 1)\r\n # removing a toy without child name specified will throw error\r\n self.assertEqual(self.lootbag.remove(-1, 1), \"No child specified\")\r\n # removing the toy results in database response of toyID = None\r\n self.lootbag.remove(1)\r\n self.assertEqual(self.lootbag.get_toyID(\"Hotwheels\"), None)\r\n\r\n# 3. Must be able to list all children who are getting a toy.\r\n# 4. 
Must be able to list all toys for a given child's name.\r\n\r\n def test03_child_list(self):\r\n # Three children receive are added in this example\r\n self.lootbag.add(\"Rocket\", 1)\r\n self.lootbag.add(\"Robot\", 2)\r\n self.lootbag.add(\"Plane\", 5)\r\n # list function will fail to execute if child arg is not specified\r\n self.assertEqual(self.lootbag.ls(None, False), \"No child ID specified\")\r\n # a list of three names is returned if listing all children (id = false for a list of multiple children) - note delivered status of TRUE is required\r\n self.lootbag.update_child_delivery_status(True, 1)\r\n self.lootbag.update_child_delivery_status(True, 2)\r\n self.lootbag.update_child_delivery_status(True, 5)\r\n self.assertEqual(self.lootbag.ls(False, False), [\"Brendan\",\"Zac\",\"Brad\"])\r\n # a child's toys are displayed when child ID is explicitly provided\r\n self.assertEqual(self.lootbag.ls(1, False), \"Rocket\")\r\n\r\n# 5. Must be able to set the delivered property of a child's toys -- which defaults to false-- to true.\r\n\r\n def test04_update_delivered(self):\r\n # add item for user with ID = 4\r\n self.lootbag.add(\"Skateboard\", 4)\r\n # update user 4 delivered status yields\r\n self.lootbag.delivered(4)\r\n # childID 4 (\"Richard\") toy appears in list all method\r\n self.assertEqual(self.lootbag.ls(False, False), [\"Brendan\",\"Zac\",\"Richard\",\"Brad\"])\r\n\r\nif __name__ == '__main__':\r\n unittest.main()","sub_path":"test/unittest_file.py","file_name":"unittest_file.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"403460354","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint \n\ntau=-10\nE=5\n\ndef F(x,t):\n return -x/tau+E\n\nt=np.linspace(0,10,100)\nsol1=odeint(F,0,t)\n\nplt.plot(t,sol1)\nplt.show()","sub_path":"tp_premier_ordre.py","file_name":"tp_premier_ordre.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"181895138","text":"import re\r\n\r\ndef cipher(s):\r\n\tciphered = ''\r\n\tfor c in s:\r\n\t\tif re.match(r'[a-z]', c):\r\n\t\t\tciphered += chr(219 - ord(c))\r\n\t\telse:\r\n\t\t\tciphered += c\r\n\r\n\tprint(ciphered)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tcipher(input())\r\n","sub_path":"08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"8901854","text":"#\n# Copyright (c) 2016, Prometheus Research, LLC\n#\n\n\nfrom rex.deploy import get_cluster, sql_name, sql_qname\nimport weakref\nimport collections\n\n\nclass ComparableMeta(type):\n\n def __call__(cls, *args, **kwds):\n cache = cls._object_cache\n basis = cls.__basis__(*args, **kwds)\n try:\n return cache[basis]\n except KeyError:\n return cache.setdefault(\n basis,\n type.__call__(cls, *args, **kwds))\n pass\n\n\nclass Comparable(object, metaclass=ComparableMeta):\n\n __slots__ = ('__weakref__',)\n\n _object_cache = weakref.WeakValueDictionary()\n\n @classmethod\n def __basis__(cls):\n return (cls,)\n\n def __init__(self):\n pass\n\n def __repr__(self):\n return \"%s()\" % self.__class__.__name__\n\n\nclass InputMode(Comparable):\n\n __slots__ = ()\n\n def __or__(self, other):\n if not isinstance(other, InputMode):\n return NotImplemented\n return self\n\n\nclass OutputMode(Comparable):\n\n __slots__ = ('optional', 'plural')\n\n 
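# the (optional, plural) pair returned by __basis__ below is the cache key ComparableMeta uses to intern instances\n 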
@classmethod\n def __basis__(cls, optional=False, plural=False):\n return (cls, optional, plural)\n\n def __init__(self, optional=False, plural=False):\n self.optional = optional\n self.plural = plural\n\n def __repr__(self):\n args = []\n if self.optional is not False:\n args.append(\"optional=%r\" % self.optional)\n if self.plural is not False:\n args.append(\"plural=%r\" % self.plural)\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(args))\n\n def __lt__(self, other):\n if not isinstance(other, OutputMode):\n return NotImplemented\n return (self != other and\n self.optional <= other.optional and\n self.plural <= other.plural)\n\n def __le__(self, other):\n if not isinstance(other, OutputMode):\n return NotImplemented\n return (self.optional <= other.optional and\n self.plural <= other.plural)\n\n def __ge__(self, other):\n if not isinstance(other, OutputMode):\n return NotImplemented\n return (self.optional >= other.optional and\n self.plural >= other.plural)\n\n def __gt__(self, other):\n if not isinstance(other, OutputMode):\n return NotImplemented\n return (self != other and\n self.optional >= other.optional and\n self.plural >= other.plural)\n\n def __or__(self, other):\n if not isinstance(other, OutputMode):\n return NotImplemented\n return OutputMode(\n optional=(self.optional or other.optional),\n plural=(self.plural or other.plural))\n\n\nclass Domain(Comparable):\n\n __slots__ = ()\n\n @classmethod\n def convert(cls, value):\n if isinstance(value, Domain):\n return value\n if isinstance(value, str):\n return AtomicDomain(value)\n if value == ():\n return AtomicDomain(\"Void\")\n if isinstance(value, tuple):\n if all([isinstance(item, str) for item in value]):\n return CategoricalDomain(value)\n if all([isinstance(item, Output) for item in value]):\n return DataSetDomain(value)\n raise TypeError(value)\n\n def __le__(self, other):\n if not isinstance(other, Domain):\n return NotImplemented\n return (self == other or\n isinstance(self, NullDomain) or\n isinstance(other, AnyDomain))\n\n def __or__(self, other):\n if not isinstance(other, Domain):\n return NotImplemented\n if self == other or isinstance(self, NullDomain):\n return other\n if isinstance(other, NullDomain):\n return self\n return AnyDomain()\n\n\nclass NullDomain(Domain):\n\n __slots__ = ()\n\n\nclass AnyDomain(Domain):\n\n __slots__ = ()\n\n\nclass AtomicDomain(Domain):\n\n __slots__ = ('name',)\n\n @classmethod\n def __basis__(cls, name):\n return (cls, name)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.name)\n\n\nclass EntityDomain(Domain):\n\n __slots__ = ('source',)\n\n @classmethod\n def __basis__(cls, source):\n return (cls, source)\n\n def __init__(self, source):\n self.source = source\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.source)\n\n\nclass CategoricalDomain(Domain):\n\n __slots__ = ('labels',)\n\n @classmethod\n def __basis__(cls, labels):\n return (cls, labels)\n\n def __init__(self, labels):\n self.labels = labels\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.labels)\n\n\nclass DataSetDomain(Domain):\n\n __slots__ = ('fields',)\n\n @classmethod\n def __basis__(cls, fields):\n return (cls, fields)\n\n def __init__(self, fields):\n self.fields = fields\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.fields)\n\n\nnull_t = NullDomain()\nany_t = AnyDomain()\nvoid_t = AtomicDomain('Void')\nboolean_t = 
AtomicDomain('Boolean')\ntext_t = AtomicDomain('Text')\ninteger_t = AtomicDomain('Integer')\nfloat_t = AtomicDomain('Float')\ndate_t = AtomicDomain('Date')\ndatetime_t = AtomicDomain('DateTime')\ntime_t = AtomicDomain('Time')\n\n\nclass Input(Comparable):\n\n __slots__ = ('domain', 'mode')\n\n @classmethod\n def __basis__(cls, domain, mode=None):\n if not isinstance(domain, Domain):\n domain = Domain.convert(domain)\n if mode is None:\n mode = InputMode()\n return (cls, domain, mode)\n\n def __init__(self, domain, mode=None):\n if not isinstance(domain, Domain):\n domain = Domain.convert(domain)\n if mode is None:\n mode = InputMode()\n self.domain = domain\n self.mode = mode\n\n def __repr__(self):\n args = [repr(self.domain)]\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(args))\n\n def __or__(self, other):\n if not isinstance(other, Input):\n return NotImplemented\n return Input(self.domain|other.domain, self.mode|other.mode)\n\n\nclass Output(Comparable):\n\n __slots__ = ('domain', 'mode', 'optional', 'plural')\n\n @classmethod\n def __basis__(cls, domain, mode=None, optional=False, plural=False):\n if not isinstance(domain, Domain):\n domain = Domain.convert(domain)\n if mode is None:\n mode = OutputMode(optional=optional, plural=plural)\n return (cls, domain, mode)\n\n def __init__(self, domain, mode=None, optional=False, plural=False):\n if not isinstance(domain, Domain):\n domain = Domain.convert(domain)\n if mode is None:\n mode = OutputMode(optional=optional, plural=plural)\n self.domain = domain\n self.mode = mode\n self.optional = mode.optional\n self.plural = mode.plural\n\n def __repr__(self):\n args = [repr(self.domain)]\n if self.optional is not False:\n args.append(\"optional=%r\" % self.optional)\n if self.plural is not False:\n args.append(\"plural=%r\" % self.plural)\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(args))\n\n def __or__(self, other):\n if not isinstance(other, Output):\n return NotImplemented\n return Output(self.domain|other.domain, self.mode|other.mode)\n\n\nSQLSchema = collections.namedtuple('SQLSchema', ['name'])\nSQLTable = collections.namedtuple('SQLTable', ['schema', 'name'])\nSQLColumn = collections.namedtuple('SQLColumn', ['table', 'name'])\nSQLKey = collections.namedtuple('SQLKey', ['table', 'names'])\n\n\nclass Column(object):\n\n __slots__ = ('idxs', 'vals')\n\n def __init__(self, idxs, vals):\n self.idxs = idxs\n self.vals = vals\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (self.__class__.__name__, self.idxs, self.vals)\n\n\nclass DataSet(object):\n\n __slots__ = ('cols', 'length')\n\n def __init__(self, cols, length):\n self.cols = cols\n self.length = length\n\n def __repr__(self):\n return \"%s(%r, length=%r)\" % (\n self.__class__.__name__, self.cols, self.length)\n\n def __len__(self):\n return self.length\n\n\nclass Pipe(Comparable):\n\n __slots__ = ('input', 'output')\n\n def __rshift__(self, other):\n if not isinstance(other, Pipe):\n return NotImplemented\n return ComposePipe(self, other)\n\n def __call__(self):\n env = {}\n self.prepare(env)\n col = self.evaluate([()], env)\n self.finalize(env)\n return col\n\n def prepare(self, env):\n pass\n\n def finalize(self, env):\n pass\n\n def evaluate(self, ivals, env):\n raise NotImplementedError(\"%s.evaluate()\" % self.__class__.__name__)\n\n\nclass SQLPipe(Pipe):\n\n __slots__ = ()\n\n def prepare(self, env):\n if 'driver' not in env:\n cluster = get_cluster()\n driver = cluster.drive()\n env['driver'] = driver\n\n def finalize(self, env):\n if 'driver' 
in env:\n driver = env['driver']\n driver.close()\n del env['driver']\n\n\nclass SQLTablePipe(SQLPipe):\n\n __slots__ = ('table',)\n\n @classmethod\n def __basis__(cls, table):\n return (cls, table)\n\n def __init__(self, table):\n self.table = table\n self.input = Input(void_t)\n self.output = Output(EntityDomain(table), optional=True, plural=True)\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.table)\n\n def evaluate(self, ivals, env):\n if not ivals:\n return Column([0], [])\n driver = env['driver']\n catalog = driver.get_catalog()\n table = catalog[self.table.schema.name][self.table.name]\n sql = \"\"\"SELECT %s FROM %s\"\"\" % (\n sql_name([column.name for column in table]),\n sql_qname((table.schema.name, table.name)))\n rows = driver.submit(sql)\n cols = []\n for pos in range(len(table)):\n idx = 0\n idxs = [0]\n vals = []\n for ival in ivals:\n for row in rows:\n val = row[pos]\n if val is not None:\n idx += 1\n vals.append(val)\n idxs.append(idx)\n col = Column(idxs, vals)\n cols.append(col)\n data = DataSet(cols, len(rows))\n idx = 0\n idxs = [0]\n for ival in ivals:\n idx += len(rows)\n idxs.append(idx)\n return Column(idxs, data)\n\n\nclass SQLColumnPipe(SQLPipe):\n\n __slots__ = ('column', 'domain', 'optional')\n\n @classmethod\n def __basis__(cls, column, domain, optional=False):\n domain = Domain.convert(domain)\n return (cls, column, domain, optional)\n\n def __init__(self, column, domain, optional=False):\n domain = Domain.convert(domain)\n self.column = column\n self.domain = domain\n self.optional = optional\n self.input = Input(EntityDomain(column.table))\n self.output = Output(domain, optional=optional)\n\n def __repr__(self):\n args = [repr(self.column), repr(self.domain)]\n if self.optional is not False:\n args.append(\"optional=%r\" % self.optional)\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(args))\n\n def evaluate(self, ivals, env):\n if not ivals:\n return Column([0], [])\n driver = env['driver']\n catalog = driver.get_catalog()\n table = catalog[self.column.table.schema.name][self.column.table.name]\n column = table[self.column.name]\n return ivals.cols[column.position]\n\n\nclass SQLLinkPipe(SQLPipe):\n\n __slots__ = ('origin', 'target', 'optional', 'plural')\n\n @classmethod\n def __basis__(cls, origin, target, optional=False, plural=False):\n return (origin, target, optional, plural)\n\n def __init__(self, origin, target, optional=False, plural=False):\n self.origin = origin\n self.target = target\n self.optional = optional\n self.plural = plural\n self.input = Input(EntityDomain(origin.table))\n self.output = Output(\n EntityDomain(target.table), optional=optional, plural=plural)\n\n def __repr__(self):\n args = [repr(self.origin), repr(self.target)]\n if self.optional is not False:\n args.append(\"optional=%r\" % self.optional)\n if self.plural is not False:\n args.append(\"plural=%r\" % self.plural)\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(args))\n\n\nclass ComposePipe(Pipe):\n\n __slots__ = ('p', 'q')\n\n @classmethod\n def __basis__(cls, p, q):\n return (cls, p, q)\n\n def __init__(self, p, q):\n assert p.output.domain <= q.input.domain\n self.p = p\n self.q = q\n self.input = Input(p.input.domain, p.input.mode|q.input.mode)\n self.output = Output(q.output.domain, p.output.mode|q.output.mode)\n\n def __repr__(self):\n return \"(%r >> %r)\" % (self.p, self.q)\n\n def prepare(self, env):\n self.p.prepare(env)\n self.q.prepare(env)\n\n def finalize(self, env):\n self.p.finalize(env)\n 
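# for SQL-backed stages, p.finalize() closes the shared driver and drops it from env, so q.finalize() becomes a no-op\n 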
self.q.finalize(env)\n\n def evaluate(self, ivals, env):\n pcol = self.p.evaluate(ivals, env)\n qcol = self.q.evaluate(pcol.vals, env)\n idxs = [qcol.idxs[i] for i in pcol.idxs]\n vals = qcol.vals\n return Column(idxs, vals)\n\n\nclass ConstPipe(Pipe):\n\n __slots__ = ('value', 'domain')\n\n @classmethod\n def __basis__(cls, value, domain):\n domain = Domain.convert(domain)\n return (cls, value, domain)\n\n def __init__(self, value, domain):\n domain = Domain.convert(domain)\n self.value = value\n self.domain = domain\n self.input = Input(())\n self.output = Output(domain)\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (self.__class__.__name__, self.value, self.domain)\n\n def evaluate(self, ivals, env):\n N = len(ivals)\n return Column(list(range(0, N+1)), [self.value]*N)\n\n\nclass NullPipe(Pipe):\n\n __slots__ = ()\n\n def __init__(self):\n self.input = Input(void_t)\n self.output = Output(null_t, optional=True)\n\n def evaluate(self, ivals, env):\n N = len(ivals)\n return Column([0]*(N+1), [])\n\n\nclass EmptyPipe(Pipe):\n\n __slots__ = ()\n\n def __init__(self):\n self.input = Input(void_t)\n self.output = Output(null_t, optional=True, plural=True)\n\n def evaluate(self, ivals, env):\n N = len(ivals)\n return Column([0]*(N+1), [])\n\n\nclass VoidPipe(Pipe):\n\n __slots__ = ('domain',)\n\n @classmethod\n def __basis__(cls, domain):\n domain = Domain.convert(domain)\n return (cls, domain)\n\n def __init__(self, domain):\n domain = Domain.convert(domain)\n self.domain = domain\n self.input = Input(domain)\n self.output = Output(void_t)\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.domain)\n\n def evaluate(self, ivals, env):\n N = len(ivals)\n return Column(list(range(0, N+1)), [()]*N)\n\n\nclass HerePipe(Pipe):\n\n __slots__ = ('domain',)\n\n @classmethod\n def __basis__(cls, domain):\n domain = Domain.convert(domain)\n return (cls, domain)\n\n def __init__(self, domain):\n domain = Domain.convert(domain)\n self.domain = domain\n self.input = Input(domain)\n self.output = Output(domain)\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.domain)\n\n def evaluate(self, ivals, env):\n N = len(ivals)\n return Column(list(range(0, N+1)), ivals)\n\n\nclass DataSetPipe(Pipe):\n\n __slots__ = ('generators',)\n\n @classmethod\n def __basis__(cls, generators):\n return (cls, generators)\n\n def __init__(self, generators):\n self.generators = generators\n input = Input(any_t)\n for generator in generators:\n input = input|generator.input\n self.input = input\n self.output = Output(\n DataSetDomain(\n tuple([generator.output for generator in generators])))\n\n def __repr__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.generators)\n\n def prepare(self, env):\n for generator in self.generators:\n generator.prepare(env)\n\n def finalize(self, env):\n for generator in self.generators:\n generator.finalize(env)\n\n def evaluate(self, ivals, env):\n cols = [generator.evaluate(ivals, env)\n for generator in self.generators]\n return Column(list(range(0, len(ivals)+1)), DataSet(cols, len(ivals)))\n\n\nclass FieldPipe(Pipe):\n\n __slots__ = ('fields', 'index')\n\n @classmethod\n def __basis__(cls, fields, index):\n return (cls, fields, index)\n\n def __init__(self, fields, index):\n self.fields = fields\n self.index = index\n self.input = Input(DataSetDomain(fields))\n self.output = fields[index]\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (self.__class__.__name__, self.fields, self.index)\n\n def evaluate(self, ivals, env):\n 
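# ivals arrives here as a DataSet, so field access is a straight projection onto the selected column\n 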
return ivals.cols[self.index]\n\n\nclass FilterPipe(Pipe):\n\n __slots__ = ('base', 'predicate')\n\n @classmethod\n def __basis__(cls, base, predicate):\n return (cls, base, predicate)\n\n def __init__(self, base, predicate):\n assert base.output.domain <= predicate.input.domain\n assert predicate.output.domain <= boolean_t\n assert base.output.plural and not predicate.output.plural\n self.base = base\n self.predicate = predicate\n self.input = Input(\n base.input.domain,\n base.input.mode|predicate.input.mode)\n self.output = base.output\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (\n self.__class__.__name__, self.base, self.predicate)\n\n\nclass SortPipe(Pipe):\n\n __slots__ = ('base', 'keys', 'orders')\n\n @classmethod\n def __basis__(cls, base, keys, orders):\n return (cls, base, keys, orders)\n\n def __init__(self, base, keys, orders):\n assert base.output.plural\n for key in keys:\n assert base.output.domain <= key.input.domain\n assert not key.output.plural\n self.base = base\n self.keys = keys\n self.orders = orders\n input = base.input\n for key in keys:\n input = Input(input.domain, input.mode|key.input.mode)\n self.input = input\n self.output = base.output\n\n def __repr__(self):\n return \"%s(%r, %r, %r)\" % (\n self.__class__.__name__, self.base, self.keys, self.orders)\n\n\nclass GroupPipe(Pipe):\n\n __slots__ = ('base', 'keys')\n\n @classmethod\n def __basis__(cls, base, keys):\n return (cls, base, keys)\n\n def __init__(self, base, keys):\n assert base.output.plural\n for key in keys:\n assert base.output.domain <= key.input.domain\n assert not key.output.plural\n self.base = base\n self.keys = keys\n input = base.input\n for key in keys:\n input = Input(input.domain, input.mode|key.input.mode)\n self.input = input\n fields = []\n for key in keys:\n fields.append(Output(key.output.domain))\n fields.append(Output(base.output.domain, plural=True))\n self.output = Output(\n DataSetDomain(tuple(fields)),\n optional=True, plural=True)\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (self.__class__.__name__, self.base, self.keys)\n\n\nSignature = collections.namedtuple('Signature', ['name', 'domains', 'range'])\n\nint_add_sig = Signature('+', (integer_t, integer_t), integer_t)\nint_sub_sig = Signature('-', (integer_t, integer_t), integer_t)\nint_mul_sig = Signature('*', (integer_t, integer_t), integer_t)\nint_div_sig = Signature('/', (integer_t, integer_t), integer_t)\n\nconcat_sig = Signature('+', (text_t, text_t), text_t)\n\ndate_year_sig = Signature('year', (date_t,), integer_t)\ndate_month_sig = Signature('month', (date_t,), integer_t)\ndate_day_sig = Signature('day', (date_t,), integer_t)\n\ndatetime_year_sig = Signature('year', (datetime_t,), integer_t)\ndatetime_month_sig = Signature('month', (datetime_t,), integer_t)\ndatetime_day_sig = Signature('day', (datetime_t,), integer_t)\ndatetime_hour_sig = Signature('hour', (datetime_t,), integer_t)\ndatetime_minute_sig = Signature('minute', (datetime_t,), integer_t)\ndatetime_second_sig = Signature('second', (datetime_t,), float_t)\n\ntime_hour_sig = Signature('hour', (time_t,), integer_t)\ntime_minute_sig = Signature('minute', (time_t,), integer_t)\ntime_second_sig = Signature('second', (time_t,), float_t)\n\nnot_sig = Signature('!', (boolean_t,), boolean_t)\nand_sig = Signature('&', (boolean_t, boolean_t), boolean_t)\nor_sig = Signature('|', (boolean_t, boolean_t), boolean_t)\n\nint_lt_sig = Signature('<', (integer_t, integer_t), boolean_t)\nint_le_sig = Signature('<=', (integer_t, integer_t), boolean_t)\nint_eq_sig = 
Signature('=', (integer_t, integer_t), boolean_t)\nint_ne_sig = Signature('!=', (integer_t, integer_t), boolean_t)\nint_ge_sig = Signature('>=', (integer_t, integer_t), boolean_t)\nint_gt_sig = Signature('>', (integer_t, integer_t), boolean_t)\n\ntext_lt_sig = Signature('<', (text_t, text_t), boolean_t)\ntext_le_sig = Signature('<=', (text_t, text_t), boolean_t)\ntext_eq_sig = Signature('=', (text_t, text_t), boolean_t)\ntext_ne_sig = Signature('!=', (text_t, text_t), boolean_t)\ntext_ge_sig = Signature('>=', (text_t, text_t), boolean_t)\ntext_gt_sig = Signature('>', (text_t, text_t), boolean_t)\n\ncount_sig = Signature('count', (any_t,), integer_t)\nexists_sig = Signature('exists', (any_t,), boolean_t)\nany_sig = Signature('any', (boolean_t,), boolean_t)\nall_sig = Signature('all', (boolean_t,), boolean_t)\nmin_sig = Signature('min', (integer_t,), integer_t)\nmax_sig = Signature('max', (integer_t,), integer_t)\nsum_sig = Signature('sum', (integer_t,), integer_t)\n\n\nclass FormulaPipe(Pipe):\n\n __slots__ = ('signature', 'arguments')\n\n @classmethod\n def __basis__(cls, signature, arguments):\n return (cls, signature, arguments)\n\n def __init__(self, signature, arguments):\n assert len(signature.domains) == len(arguments)\n for domain, argument in zip(signature.domains, arguments):\n assert argument.output.domain <= domain\n self.signature = signature\n self.arguments = arguments\n input = Input(any_t)\n for argument in arguments:\n input = input|argument.input\n self.input = input\n mode = OutputMode()\n for argument in arguments:\n mode = mode|argument.output.mode\n self.output = Output(signature.range, mode)\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (\n self.__class__.__name__, self.signature, self.arguments)\n\n\n\nclass AggregatePipe(FormulaPipe):\n\n __slots__ = ()\n\n def __init__(self, signature, arguments):\n assert len(signature.domains) == len(arguments)\n for domain, argument in zip(signature.domains, arguments):\n assert argument.output.domain <= domain\n self.signature = signature\n self.arguments = arguments\n input = Input(any_t)\n for argument in arguments:\n input = input|argument.input\n self.input = input\n self.output = Output(signature.range)\n\n\nclass OptionalAggregatePipe(AggregatePipe):\n\n __slots__ = ()\n\n def __init__(self, signature, arguments):\n assert len(signature.domains) == len(arguments)\n for domain, argument in zip(signature.domains, arguments):\n assert argument.output.domain <= domain\n self.signature = signature\n self.arguments = arguments\n input = Input(any_t)\n for argument in arguments:\n input = input|argument.input\n self.input = input\n optional = False\n for argument in arguments:\n if argument.output.mode.optional:\n optional = True\n break\n self.output = Output(signature.range, optional=optional)\n\n\n","sub_path":"src/rex.query/src/rex/query/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":24285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} {"seq_id":"552222209","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport requests\nimport os\nimport sys\nimport tempfile\nimport shutil\nfrom bs4 import BeautifulSoup\nfrom concurrent.futures import ThreadPoolExecutor\nfrom argparse import ArgumentParser\n\nclass MaomiAV:\n\n req_timeout = 10\n headers = {\n \"User-Agent\": (\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) Apple\"\n \"WebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3\"\n \"202.94 Safari/537.36\")\n }\n\n def __init__(self, url, jobs, proxies):\n 
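# plain video URLs are rewritten to the '<base>/play-<id>' form the site serves (pattern inferred from the rsplit below)\n 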
self.url = url\n if \"play-\" not in self.url:\n self.url = \"%s/play-%s\" % tuple(self.url.rsplit(\"/\", 1))\n self.jobs = int(jobs)\n self.proxies = proxies\n self.bs4_parser = select_bs4_parser()\n if not self.bs4_parser:\n print(\"\\nFailed to run this program!\")\n print(\"\\nPlease install at least one parser \"\n \"in \\\"lxml\\\" and \\\"html5lib\\\"!\")\n sys.exit()\n\n def run(self):\n self.get_bs()\n self.get_m3u8()\n self.temp_dir = tempfile.mkdtemp(prefix=\"mmav_\")\n dload_file_all(self.jobs, self.temp_dir, self.proxies, self.m3u8_tss_urls)\n self.merge_ts()\n remove_path(self.temp_dir)\n print(\"\\nDone!\")\n\n def get_m3u8(self):\n m3u8_script = self.bs.find(\"head\").find(\"script\").get_text().split(\"\\n\")\n self.m3u8_info = {}\n for line in m3u8_script:\n if not line.strip():\n continue\n if \"var video\" in line:\n self.m3u8_info[\"end\"] = line.split()[-1][1:-2]\n if \"var m3u8_host\" in line:\n self.m3u8_info[\"head\"] = line.split()[-1][1:-2]\n if \"var m3u8_host1\" in line:\n self.m3u8_info[\"head1\"] = line.split()[-1][1:-2]\n if \"var m3u8_host2\" in line:\n self.m3u8_info[\"head2\"] = line.split()[-1][1:-2]\n m3u8_req = requests.get(\n url=self.m3u8_info[\"head1\"] + self.m3u8_info[\"end\"],\n headers=self.headers,\n timeout=self.req_timeout,\n proxies={\"http\": self.proxies, \"https\": self.proxies}\n )\n m3u8_req.encoding = \"utf-8\"\n self.m3u8_tss_names = [line.strip() for line in m3u8_req.text.split() if not line.startswith(\"#\")]\n self.m3u8_tss_urls = [self.merge_m3u8_url(line.strip()) for line in self.m3u8_tss_names]\n\n def merge_m3u8_url(self, file_name):\n return self.m3u8_info[\"head1\"] + self.m3u8_info[\"end\"].rsplit(\"/\", 1)[0] + \"/\" + file_name\n\n def merge_ts(self):\n dst_filename = adj_dir_name(self.bs.find(\"title\").get_text()) + \".mp4\"\n print(\"File name: \" + dst_filename)\n print(\"\\nMerge files...\")\n workdir_bak = os.getcwd()\n os.chdir(self.temp_dir)\n if len(self.m3u8_tss_names) > 100:\n # 列表分割\n names_split = [self.m3u8_tss_names[i:100+i]\n for i in range(0, len(self.m3u8_tss_names), 100)]\n files_split = []\n i = 0\n for names in names_split:\n files_split.append(\"tmp.%s\" % i)\n merge_files(names, files_split[-1])\n i += 1\n merge_files(files_split, dst_filename)\n else:\n merge_files(self.m3u8_tss_names, dst_filename)\n file2dir(os.path.join(self.temp_dir, dst_filename), workdir_bak)\n os.chdir(workdir_bak)\n\n def get_bs(self):\n # 使用浏览器 UA 来请求页面\n req = requests.get(url=self.url,\n headers=self.headers,\n timeout=self.req_timeout,\n proxies={\"http\": self.proxies, \"https\": self.proxies})\n req.encoding = \"utf-8\"\n self.bs = BeautifulSoup(req.text, self.bs4_parser)\n\ndef select_bs4_parser():\n # 选择 BS4 解析器(优先使用lxml)\n try:\n import lxml\n del lxml\n return \"lxml\"\n except ModuleNotFoundError:\n try:\n import html5lib\n del html5lib\n return \"html5lib\"\n except ModuleNotFoundError:\n return\n\ndef dload_file_all(max_threads_num, temp_dir, proxies, urls):\n\n def dload_file(proxies, url):\n # 下载文件\n file_name = url.split(\"/\")[-1]\n try:\n r = requests.get(url, timeout=15,\n proxies={\"http\": proxies, \"https\": proxies})\n except:\n try:\n r = requests.get(url, timeout=15,\n proxies={\"http\": proxies, \"https\": proxies})\n except:\n return \"\", file_name, \"timeout\"\n if r.ok:\n return r.content, file_name, r.status_code\n return \"\", file_name, r.status_code\n\n dl_done_num = 0\n # 神奇的多线程下载\n with ThreadPoolExecutor(max_threads_num) as executor1:\n for req in executor1.map(dload_file, [proxies] * 
len(urls), urls):\n fcontent, file_name, status_code = req\n if fcontent:\n dload_path = os.path.join(temp_dir, file_name)\n with open(dload_path, 'wb') as f:\n f.write(fcontent)\n dl_done_num += 1\n sys.stderr.write(\"Progress: [%s / %s] (%s%%)\\r\"\n % (dl_done_num, len(urls),\n dl_done_num * 100 // len(urls)))\n else:\n raise Exception(\"\\nFailed to download %s! Status: %s\\n\"\n % (file_name, status_code))\n clean_line()\n\ndef merge_files(files, dst):\n # Concatenate the segment files into one\n if os.name == \"nt\":\n cmd_str = \"copy /b %s %s >nul\" % (\"+\".join(files), dst)\n else:\n cmd_str = \"cat %s > %s\" % (\" \".join(files), dst)\n os.system(cmd_str)\n\ndef clean_line():\n # Blank out the current terminal line\n sys.stderr.write(\"%-*s\\r\" % (os.get_terminal_size().columns, \" \"))\n\ndef adj_dir_name(dir_name):\n # Strip characters that are not allowed in file names\n for char in (\" \", \"?\", \"/\", \"\\\\\", \":\", \"*\", \"\\\"\", \"<\", \">\", \"|\"):\n dir_name = dir_name.replace(char, \"\")\n return dir_name.strip()\n\ndef mkdir(path):\n # Create the directory, removing a file of the same name if present\n if os.path.exists(path):\n if not os.path.isdir(path):\n try:\n os.remove(path)\n except:\n pass\n else:\n return\n os.makedirs(path)\n\ndef file2file(src, dst, move=False):\n # Copy a file to a file path\n # When move is True, move the file instead of copying it\n mkdir(os.path.split(dst)[0])\n if move:\n shutil.move(src, dst)\n else:\n shutil.copyfile(src, dst)\n return dst\n\ndef file2dir(src, dst, move=False):\n # Copy a file into a directory (keeping its name)\n # When move is True, remove the source after copying\n mkdir(dst)\n shutil.copy(src, dst)\n if move:\n os.remove(src)\n return os.path.join(dst, os.path.split(src)[1])\n\ndef remove_path(path):\n # Remove a file or directory (if it exists)\n if os.path.isdir(path):\n shutil.rmtree(path)\n elif os.path.exists(path):\n os.remove(path)\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"url\", help=\"url for webview\")\n parser.add_argument(\"-j\", \"--jobs\", type=int, default=4, help=\"number of download jobs (default: 4)\")\n parser.add_argument(\"-p\", \"--proxies\", default=\"\", help=\"use proxy (address:port)(default: None)\")\n\n args = parser.parse_args()\n\n a = MaomiAV(args.url, args.jobs, args.proxies)\n a.run()\n\nif __name__ == '__main__':\n main()\n","sub_path":"mmav.py","file_name":"mmav.py","file_ext":"py","file_size_in_byte":7487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"71155587","text":"#!/usr/bin/python\n\n# Example: >>%run EcoliPputida2\n# >>avgfitness,sdfitness=EcoliPputidaFLYCOP_oneConf(...) \n# Goal: individual test to improve the consortium {E.coli-P.putida}, depending on the initial sucrose and fructose concentrations that E. coli secretes and the initial E. coli & KT biomasses \n# Run through the function EcoliPputidaFLYCOP_oneConf\n\n\nimport cobra\nimport os, stat\nimport pandas as pd\nimport tabulate\nimport re\nimport sys\nimport getopt\nimport os.path\nimport copy\nimport csv\nimport math\nimport cobra.flux_analysis.variability\nimport massedit\nimport subprocess\nimport shutil, errno\nimport statistics\nimport optlang\nfrom cobra import Reaction\nfrom cobra import Metabolite\n\n# import gurobipy\n# import spec\n\n################################################################\n### FUNCTION initialize_models ################################# \ndef initialize_models():\n # Only to run the 1st time, to build the models!!\n if not(os.path.exists('ModelsInput/iEC1364_W_p_coumarate.xml')) or not(os.path.exists('ModelsInput/iJN1463_naringenin_mod.xml')):\n print('ERROR! 
Missing iEC1364_W_p_coumarate.xml or iJN1463_naringenin_mod.xml (GEMs of the consortium strains) in ModelsInput!')\n else:\n path=os.getcwd() # original path == \"MicrobialCommunities\"\n os.chdir('ModelsInput')\n \n # ---------------------------------------------------------------------------\n # E. coli WT for taking sucrose and excreting fructose\n # ---------------------------------------------------------------------------\n model=cobra.io.read_sbml_model(\"iEC1364_W_p_coumarate.xml\")\n \n # Replace brackets with compartment location (e.g. \"[c]\") in metabolite ids by '_' (e.g. \"_c\") \n for metabolite in model.metabolites:\n metabolite.id = re.sub('__91__c__93__',r'[c]',metabolite.id)\n metabolite.id = re.sub('__91__p__93__$',r'[p]',metabolite.id)\n metabolite.id = re.sub('__91__e__93__',r'[e]',metabolite.id)\n # metabolite.id = re.sub('__',r'_',metabolite.id)\n metabolite.compartment = ''\n # To solve possible problems in changing names \n model.repair()\n cobra.io.save_matlab_model(model,\"iEC1364_W_p_coumaratemod.mat\")\n del(model)\n model = cobra.io.load_matlab_model(\"iEC1364_W_p_coumaratemod.mat\")\n \n # Replace brackets with compartment location (e.g. \"[c]\") in rxn ids by '_' (e.g. \"_c\") \n for rxn in model.reactions:\n rxn.id = re.sub('__40__p__41__',r'(p)',rxn.id)\n rxn.id = re.sub('__40__c__41__',r'(c)',rxn.id)\n rxn.id = re.sub('__40__e__41__',r'(e)',rxn.id) \n # To solve possible problems in changing names \n model.repair()\n cobra.io.save_matlab_model(model,\"iEC1364_W_p_coumaratemod.mat\")\n del(model)\n model = cobra.io.load_matlab_model(\"iEC1364_W_p_coumaratemod.mat\")\n \n # Put sucrose as carbon source and maximize uptake, later changed by the parameter 'sucr1'\n model.reactions.get_by_id(\"EX_sucr(e)\").bounds=(-15,0)\n # OXYGEN UPTAKE\n model.reactions.get_by_id(\"EX_o2(e)\").bounds = (-20, 0)\n \n # MAKE SURE FRUCTOSE METABOLISM IS SHUT DOWN\n model.reactions.get_by_id(\"XYLI2\").bounds = (0, 0)\n model.reactions.get_by_id(\"HEX7\").bounds = (0, 0) \n model.reactions.get_by_id(\"FRUpts2pp\").bounds = (0, 0)\n model.reactions.get_by_id(\"FRUptspp\").bounds = (0, 0)\n \n # ACTIVATED REACTION: FFSD: h2o[c] + suc6p[c] --> fru[c] + g6p[c]\n model.reactions.get_by_id(\"FFSD\").bounds = (0, 1000)\n # To un-limit the fructose production, for the flux variability analysis\n model.reactions.get_by_id('FRUtpp').bounds=(-1000,1000) \n model.reactions.get_by_id('FRUtex').bounds=(-1000,1000) \n model.reactions.get_by_id('EX_fru(e)').bounds=(-1000,1000)\n \n cobra.io.save_matlab_model(model,\"iEC1364_W_p_coumaratemod_tmp.mat\")\n del(model)\n # ---------------------------------------------------------------------------\n \n \n # ---------------------------------------------------------------------------\n # P.putida KT 2440 model for taking fructose and secreting B12 // TODO: B12 secretion\n # model=cobra.io.read_sbml_model('iJN1463_naringenin_mod.xml') # Model composed by Iván, based on \"iJN1463 - P.put_malonate.xml\"\n model=cobra.io.read_sbml_model('iJN1463_naringenin_corrected.xml') # David's original model\n\n # Replace brackets with compartment location (e.g. \"[c]\") in metabolite ids by '_' (e.g. 
\"_c\") \n for metabolite in model.metabolites:\n metabolite.id = re.sub('__91__c__93__',r'[c]',metabolite.id)\n metabolite.id = re.sub('__91__p__93__$',r'[p]',metabolite.id)\n metabolite.id = re.sub('__91__e__93__',r'[e]',metabolite.id)\n # metabolite.id = re.sub('__',r'_',metabolite.id)\n metabolite.compartment = ''\n # To solve possible problems in changing names \n model.repair()\n cobra.io.save_matlab_model(model,\"iJN1463_naringenin_mod.mat\")\n del(model)\n model=cobra.io.load_matlab_model('iJN1463_naringenin_mod.mat') \n \n # Replace brackets with compartment location (e.g. \"[c]\") in rxn ids by '_' (e.g. \"_c\") \n for rxn in model.reactions:\n rxn.id = re.sub('__40__p__41__',r'(p)',rxn.id)\n rxn.id = re.sub('__40__c__41__',r'(c)',rxn.id)\n rxn.id = re.sub('__40__e__41__',r'(e)',rxn.id) \n # To solve possible problems in changing names \n model.repair()\n cobra.io.save_matlab_model(model,\"iJN1463_naringenin_mod.mat\")\n del(model)\n model=cobra.io.load_matlab_model('iJN1463_naringenin_mod.mat') \n \n \n # -------------------------------------------------------------------------\n # MODEL ADJUSTEMENTS\n # model.reactions.get_by_id(\"EX_sucr(e)\").bounds = (0, 0) # Este modelo no puede captar sacarosa\n \n # FRU reactions\n model.reactions.get_by_id(\"EX_fru(e)\").bounds=(-15,0) # Maximize uptake, maximum upper bound // DAVID: (-8, 0) // Later changed by the parameter 'frc2'\n model.reactions.get_by_id(\"FRUtex\").bounds = (0, 1000)\n # model.reactions.FRUtex.bounds=(0,8)\n\n # PREVENT P.putidaKT FROM TAKING glc[e] from the media\n model.reactions.get_by_id(\"EX_glc__D(e)\").bounds = (0, 0)\n # OXYGEN UPTAKE\n model.reactions.get_by_id(\"EX_o2(e)\").bounds = (-20, 0)\n \n # pCOUMARATE UPTAKE --> ¿Cambios sugeridos? Tomar todo el T4hcinnm posible (disponible en el medio)\n model.reactions.get_by_id(\"EX_T4hcinnm(e)\").bounds = (-1000, 0)\n model.reactions.get_by_id(\"T4HCINNMtex\").bounds = (0, 1000)\n model.reactions.get_by_id(\"T4HCINNMtpp\").bounds = (0, 1000)\n model.reactions.get_by_id(\"4CMCOAS\").bounds = (0, 0) # T4hcinnm[c] + atp[c] + coa[c] --> amp[c] + coucoa[c] + ppi[c]\n \n # MALON reactions - no MALON secretion\n model.reactions.get_by_id(\"EX_malon(e)\").bounds = (0, 0)\n model.reactions.get_by_id(\"MALONtex\").bounds = (0, 0)\n model.reactions.get_by_id(\"MALONpp\").bounds = (0, 0)\n model.reactions.get_by_id(\"MALONHY\").bounds = (0, 0) # Reacción de hidrólisis de malcoa[c] --> malon[c]\n \n # NARINGENIN reactions\n model.reactions.get_by_id(\"matB\").bounds = (0, 1000) # Ajuste posterior, de otro modo KT prioriza producción de naringenina, deja de crecer y se consume en el medio\n model.reactions.get_by_id(\"naringenintpp\").bounds = (0, 1000)\n model.reactions.get_by_id(\"naringenintex\").bounds = (0, 1000)\n # -------------------------------------------------------------------------\n \n cobra.io.save_matlab_model(model,\"iJN1463_naringenin_mod_tmp.mat\")\n del(model)\n # ---------------------------------------------------------------------------\n\n os.chdir(path)\n# end-def initialize_models\n################################################################ \n\n\n################################################################ \n### FUNCTION mat_to_comets ##################################### \n# mat_to_comets(modelPath)\ndef mat_to_comets(matInputFile):\n model=cobra.io.load_matlab_model(matInputFile)\n # Open output file:\n with open(matInputFile+'.txt', mode='w') as f:\n # Print the S matrix\n f.write(\"SMATRIX \"+str(len(model.metabolites))+\" 
\"+str(len(model.reactions))+\"\\n\")\n for x in range(len(model.metabolites)):\n for y in range(len(model.reactions)):\n if (model.metabolites[x] in model.reactions[y].metabolites):\n coeff=model.reactions[y].get_coefficient(model.metabolites[x])\n f.write(\" \"+str(x+1)+\" \"+str(y+1)+\" \"+str(coeff)+\"\\n\")\n f.write(\"//\\n\")\n \n # Print the bounds\n f.write(\"BOUNDS -1000 1000\\n\");\n for y in range(len(model.reactions)):\n lb=model.reactions[y].lower_bound\n up=model.reactions[y].upper_bound\n f.write(\" \"+str(y+1)+\" \"+str(lb)+\" \"+str(up)+\"\\n\")\n f.write(\"//\\n\")\n \n # Print the objective reaction\n f.write('OBJECTIVE\\n')\n for y in range(len(model.reactions)):\n if (model.reactions[y] in model.objective): # Cambio línea ejecución Docker\n # if (str(model.reactions[y].id) in str(model.objective.expression)): # Cambio línea ejecución MiOrdenador\n indexObj=y+1\n f.write(\" \"+str(indexObj)+\"\\n\")\n f.write(\"//\\n\")\n \n # Print metabolite names\n f.write(\"METABOLITE_NAMES\\n\")\n for x in range(len(model.metabolites)):\n f.write(\" \"+model.metabolites[x].id+\"\\n\")\n f.write(\"//\\n\")\n\n # Print reaction names\n f.write(\"REACTION_NAMES\\n\")\n for y in range(len(model.reactions)):\n f.write(\" \"+model.reactions[y].id+\"\\n\")\n f.write(\"//\\n\")\n\n # Print exchange reactions\n f.write(\"EXCHANGE_REACTIONS\\n\")\n for y in range(len(model.reactions)):\n if (model.reactions[y].id.find('EX_')==0):\n f.write(\" \"+str(y+1))\n f.write(\"\\n//\\n\") \n del(model)\n### end-function-mat_to_comets \n################################################################\n\n\n################################################################\n### FUNCTION EcoliPputidaOneConf ############################\ndef EcoliPputidaFLYCOP_oneConf(sucr1, Ecbiomass, frc2, KTbiomass, fitFunc='MaxNaringenin', maxCycles = 240, dirPlot='', repeat=5): \n '''\n Call: avgFitness, sdFitness = EcoliPputidaFLYCOP_oneConf(sucr1, Ecbiomass, frc2, KTbiomass)\n Start with no more than 5 repeats (1st trial)\n\n INPUTS: sucr1: lower bound of sucrose uptake in model 1 (E.coli) (mM)\n Ecbiomass: initial E. coli biomass (gL-1)\n frc2: lower bound of fructose uptake in model 2 (P.putida) (mM)\n KTbiomass: initial P. putida KT biomass (gL-1)\n \n fitFunc: fitness function to optimize. 'MaxNaringenin', maximize Naringenin production by P. putida KT2440 (mM)\n \n maxCycles: cycles in COMETS run, stated in file 'layout_template'. It is not used in the Python scripts (wrapper, individualTest). 
If desired to change, see 'layout_template'\n dirPlot: copy of the graphs with several run results.\n repeat: number of runs with the same configuration (COMETS, not number of SMAC iterations)\n \n POTENTIAL LIMITATION OF NUTRIENTS: nh4 and/or pi\n please, modify initial concentrations in layout_template\n \n OUTPUT: avgFitness: average fitness of 'repeat' COMETS runs with the same configuration (due to it is not deterministic)\n sdFitness: standard deviation of fitness during 'repeat' COMETS runs (see above)\n '''\n\n if not(os.path.exists('ModelsInput/iEC1364_W_p_coumaratemod_tmp.mat')): # or os.path.exists('ModelsInput/P.put_malonatemod_tmp.mat')): \n initialize_models()\n print(\"Inicializamos modelos\\n\")\n\n print(\"Fitness function: \", fitFunc)\n # print(os.getcwd())\n\n # Single GEMs parameter modifications\n # =================================== \n if not(os.path.exists('iEC1364_W_p_coumaratemod_tmp.mat.txt')) or not (os.path.exists('iJN1463_naringenin_mod_tmp.mat.txt')):\n \n # ========================================================================= \n # MODEL ADAPTATION TO THE PARAMETERS PASSED TO THE 'EcoliPputidaFLYCOP_oneConf' function\n # E. coli\n \n model=cobra.io.load_matlab_model('ModelsInput/iEC1364_W_p_coumaratemod_tmp.mat')\n # model.objective = \"BIOMASS_Ec_iJO1366_core_53p95M\" # Cambiar el objetivo del modelo para optimizar biomasa (core), un objetivo realista\n model.objective = \"BIOMASS_Ec_iJO1366_WT_53p95M\" # WT, en lugar de 'core'\n \n # This reaction ('EX_sucr(e)') controls the global sucr exchange flux for E. coli\n model.reactions.get_by_id(\"EX_sucr(e)\").bounds=(sucr1, 0)\n # The rest of reactions depend on the sucr flux already specified\n model.reactions.get_by_id(\"SUCtpp\").bounds=(0, 1000) # sucr[p] --> sucr[c] \n model.reactions.get_by_id(\"SUCRtpp\").bounds=(0, 1000) # sucr[p] --> sucr[c]\n model.reactions.get_by_id(\"SUCRtex\").bounds=(0, 1000) # sucr[e] --> sucr[p]\n model.optimize()\n \n # -------------------------------------------------------------------------\n # FLUX VARIABILITY ANALYSIS: optimizar producción de FRUCTOSA / pCUMARATO al tiempo que crece E. 
coli (objective: BIOMASS_core)\n # Excretar fructosa / pCA aunque se pierda una parte de la capacidad de crecimiento máximo (obj.: BIOMASS) == 20% sobre objetivo global\n dictOptValueFru = cobra.flux_analysis.flux_variability_analysis(model, {'EX_fru(e)'}, fraction_of_optimum=(1-0.20))\n dictOptValuepCA = cobra.flux_analysis.flux_variability_analysis(model, {'EX_T4hcinnm(e)'}, fraction_of_optimum=((1-0.20)))\n \n # FRUCTOSA\n # ======================\n FruExLimit=dictOptValueFru['EX_fru(e)']['maximum']\n model.reactions.get_by_id(\"FRUtpp\").bounds=(0, FruExLimit) # FRUtpp: fru[c] --> fru[p] // Bounds: (0, 3.973750000000021)\n model.reactions.get_by_id(\"FRUtex\").bounds=(-FruExLimit, 0) # FRUtex: fru[e] <-- fru[p] // Bounds: (-3.973750000000021, 0)\n model.reactions.get_by_id(\"EX_fru(e)\").bounds=(FruExLimit, FruExLimit) # valor único, óptimo encontrado // PONER SIEMPRE EL MISMO NÚMERO\n cobra.io.save_matlab_model(model,'iEC1364_W_p_coumaratemod_tmp.mat')\n \n # pCUMARATO\n # ======================\n model.reactions.get_by_id('TAL').bounds=(0,1000)\n pCALimit=dictOptValuepCA['EX_T4hcinnm(e)']['maximum']\n model.reactions.get_by_id('T4HCINNMtpp').bounds=(pCALimit,1000)\n model.reactions.get_by_id('T4HCINNMtex').bounds=(pCALimit,1000)\n model.reactions.get_by_id('EX_T4hcinnm(e)').bounds=(pCALimit,pCALimit) # valor único, óptimo encontrado // PONER SIEMPRE EL MISMO NÚMERO\n cobra.io.save_matlab_model(model,'iEC1364_W_p_coumaratemod_tmp.mat')\n # -------------------------------------------------------------------------\n \n model.optimize()\n cobra.io.save_matlab_model(model,'iEC1364_W_p_coumaratemod_tmp.mat')\n del(model) \n \n \n # =========================================================================\n # MODEL ADAPTATION TO THE PARAMETERS PASSED TO THE 'EcoliPputidaFLYCOP_oneConf' function\n # P. putida\n \n model=cobra.io.load_matlab_model('ModelsInput/iJN1463_naringenin_mod_tmp.mat')\n # model.objective = \"BIOMASS_KT2440_Core2\" # Cambiar el objetivo del modelo para optimizar biomasa (core), un objetivo realista\n model.objective = \"BIOMASS_KT2440_WT3\" # WT, en lugar de 'core' - asegurar objetivo biomasa (clave)\n \n # This reaction ('EX_fru(e)') controls the global fru exchange flux for P. putida KT\n model.reactions.get_by_id(\"EX_fru(e)\").bounds=(frc2, 0)\n # The rest of reactions depend on the 'fru' flux already specified\n model.reactions.get_by_id(\"FRUtex\").bounds=(0, 1000) # fru[e] --> fru[p]\n model.reactions.get_by_id(\"FRUptspp\").bounds=(0, 1000) # fru[p] + pep[c] --> f1p[c] + pyr[c]\n model.optimize()\n \n # -------------------------------------------------------------------------\n # FLUX VARIABILITY ANALYSIS: optimizar producción de NARINGENINA al tiempo que crece P. 
putida Kt (objective: BIOMASS_WT)\n # Excretar naringenina aunque se pierda una parte de la capacidad de crecimiento máximo (obj.: BIOMASS) == 15% sobre objetivo global // David (jupyter notebook, en definición de modelo)\n dictNarValue=cobra.flux_analysis.variability.flux_variability_analysis(model,{'EX_nar(e)'},fraction_of_optimum=(1 - 0.15)) # EXCRECIÓN NARINGENINA\n NarLimit=dictNarValue['EX_nar(e)']['maximum']\n model.reactions.get_by_id('matB').bounds=(0, NarLimit)\n model.reactions.get_by_id('naringenintpp').bounds=(NarLimit,1000)\n model.reactions.get_by_id('naringenintex').bounds=(NarLimit,1000)\n model.reactions.get_by_id('EX_nar(e)').bounds=(NarLimit,NarLimit) # PONER SIEMPRE EL MISMO NÚMERO\n # -------------------------------------------------------------------------\n \n model.optimize()\n cobra.io.save_matlab_model(model,'iJN1463_naringenin_mod_tmp.mat')\n del(model)\n \n# -----------------------------------------------------------------------------\n# -----------------------------------------------------------------------------\n # MAT to COMETS\n mat_to_comets('iEC1364_W_p_coumaratemod_tmp.mat')\n mat_to_comets('iJN1463_naringenin_mod_tmp.mat')\n# -----------------------------------------------------------------------------\n# -----------------------------------------------------------------------------\n # Community parameter modifications\n # ================================= \n # 4.- [shell script] Write automatically the COMETS parameter about initial biomass of strains // Initial biomass --> parameter subject to optimization (.pcs)\n massedit.edit_files(['EcPp2_layout_template2.txt'],[\"re.sub(r'XXX','\"+str(Ecbiomass)+\"',line)\"], dry_run=False) # dry_run = False --> guardar archivo modificado // True, mostrar diferencias. En ese caso, ¿guarda?\n massedit.edit_files(['EcPp2_layout_template2.txt'],[\"re.sub(r'YYY','\"+str(KTbiomass)+\"',line)\"], dry_run=False)\n # end-if building models\n# -----------------------------------------------------------------------------\n# -----------------------------------------------------------------------------\n\n\n # [COMETS by command line] Run COMETS\n if not(os.path.exists('IndividualRunsResults')):\n os.makedirs('IndividualRunsResults')\n totfitness=0\n sumpCA=0 # pCA quantity variable (production by E. 
coli)\n sumNar=0 # Nar quantity variable (production by P.putida KT)\n fitnessList=[] # List with the different values for 'totfitness' in every execution ('n' repeats)\n \n # path=os.getcwd()\n # print(path)\n # print(\"\\n---------------------------\")\n for i in range(repeat):\n with open(\"output.txt\", \"w\") as f:\n subprocess.run(args=['./comets_scr', 'comets_script_template'], stdout=f, stderr=subprocess.STDOUT) # ejecutar COMETS con layout --> media_log_template / total_biomass_log_template / flux_log_template\n \n # [R call] Run script to generate one graph:subprocess.call\n title=str(sucr1)+'_'+str(Ecbiomass)+'_'+str(frc2)+'_'+str(KTbiomass) \n # print(title)\n \n subprocess.run(['../../Scripts/plot_biomassX2_vs_4mediaItem_modGS_limNut.sh template2 sucr T4hcinnm fru nar nh4 pi '+str(maxCycles)+' '+title+' blue black darkmagenta yellow orange aquamarine EcoliWT KT2440'], shell=True)\n # 3 4 5 6 7 8 Green Red\n \n # Compute fitness (measure to optimize):\n # (A) DETERMINE ENDCYCLE: when sucrose is exhausted {}\n with open(\"biomass_vs_sucr_T4hcinnm_fru_nar_nh4_pi_template2.txt\", \"r\") as sources: # Creado en ejecución COMETS 'subprocess.run()'\n lines = sources.readlines() \n iniPointV=lines[0].split() # Initial line, initial values \n iniBiomass=float(iniPointV[1])+float(iniPointV[2]) # Biomass sum: Ecbiomass + KTbiomass\n endCycle=0\n \n # Endcycle occurs when sucrose is exhausted. Otherwise, 'endcycle' = last cycle\n for line in lines:\n sucrConc=float(line.split()[3])\n endCycle=int(line.split()[0])\n if sucrConc == float(0.0):\n break;\n \n # (B) FINAL BIOMASS // FINAL CONCENTRATIONS: pCA, Nar, limiting nutrients\n finalLineV=lines[endCycle].split() # Line where the 'endcycle' is reached // Either sucrConc = 0.0, either encycle = last_cycle\n final_Ecbiomass = float(finalLineV[1])\n final_KTbiomass = float(finalLineV[2])\n \n totpCA=float(finalLineV[4]) \n totNar=float(finalLineV[6])\n Final_nh4=float(finalLineV[7]) # First limiting nutrient\n Final_pi=float(finalLineV[8]) # Second limiting nutrient\n \n # PRINTING\n print(\"Execution: \"+str(i+1)+\" of \"+str(repeat)+\". CYCLE: \"+str(endCycle))\n print(\"Final line: \", finalLineV, \" Final cycle: \", endCycle)\n print(\"T4hcinnm: \"+str(totpCA)+\"\\t\\t\"+\"Nar: \"+str(totNar))\n print(\"Final Ec biomass: \", final_Ecbiomass, \"\\tFinal KT biomass: \", final_KTbiomass)\n finalBiomass = final_Ecbiomass + final_KTbiomass\n \n # (C) COMPUTE FITNESS: maximize pCA AND/OR Nar\n fitTime=1-(float(endCycle)/float(maxCycles)) # maxCycles == Number of total cycles, stated in the layout_template\n fitpCA=float(totpCA/final_Ecbiomass) # Normalized with respect to the final E. coli biomass (mM / g biomasa)\n fitNar=float(totNar/final_KTbiomass) # Normalized with respect to the final P. 
putida KT biomass (mM / g biomasa)\n \n # PENDIENTE PRECISAR\n fitness=fitNar\n\n print(\"Fitness: \"+str(round(fitness,6))+\" in cycle \"+str(endCycle), \"\\n\")\n\n totfitness += fitness # 'n' repeats\n fitnessList.append(fitness) # List with fitness values in 'n' repeats\n sumpCA += totpCA # Total pCA for 'n' repeats\n sumNar += totNar # Total Nar for 'n' repeats\n\n # ---------------------------------------------------------------------\n # Copy individual solution\n file='IndividualRunsResults/'+'biomass_vs_sucr_T4hcinnm_fru_nar_nh4_pi_run'+str(i+1)+'_'+str(fitness)+'_'+str(endCycle)+'.pdf'\n shutil.move('biomass_vs_sucr_T4hcinnm_fru_nar_nh4_pi_template2_plot.pdf',file) \n if(dirPlot != ''):\n file2=dirPlot+'biomass_vs_sucr_T4hcinnm_fru_nar_nh4_pi'+str(sucr1)+'_'+str(Ecbiomass)+'_'+str(frc2)+'_'+str(KTbiomass)+'_run'+str(i+1)+'_'+str(fitness)+'_'+str(endCycle)+'.pdf'\n shutil.move(file,file2)\n \n file='IndividualRunsResults/'+'total_biomass_log_run'+str(i+1)+'.txt'\n shutil.move('total_biomass_log_template2.txt',file)\n file='IndividualRunsResults/'+'media_log_run'+str(i+1)+'.txt'\n shutil.move('media_log_template2.txt',file)\n file='IndividualRunsResults/'+'flux_log_run'+str(i+1)+'.txt'\n shutil.move('flux_log_template2.txt',file) \n\n avgfitness=totfitness/repeat # 'totfitness' average in 'n' repeats\n if(repeat>1):\n sdfitness=statistics.stdev(fitnessList) # standard deviations for 'n' values\n else:\n sdfitness=0.0\n \n avgpCA = sumpCA/repeat\n avgNar = sumNar/repeat\n # --------------------------------------------------------------------------- \n # Display results in terminal\n print(\"Fitness_function\\tconfiguration\\t\\tfitness\\t\\tsd\\t\\tpCA(mM)\\tFinalEc(gL-1)\\tNar(mM)\\tFinalKT(gL-1)\\tendCycle\") # UNITS: mM (metabolites) // gL-1 (biomass)\n \n print(fitFunc+\"\\t\\t\"+str(sucr1)+','+str(Ecbiomass)+','+str(frc2)+','+str(KTbiomass)+\"\\t\"+\n str(round(avgfitness,6))+\"\\t\\t\"+str(round(sdfitness, 6))+\n \"\\t\"+str(round(avgpCA,6))+\"\\t\\t\"+str(round(final_Ecbiomass, 4))+\"\\t\\t\"+str(round(avgNar,6))+\"\\t\"+str(round(final_KTbiomass, 4))+\"\\t\"+str(endCycle)+\"\\n\")\n \n \n # Save results in 'configurationsResults(...).txt' file\n if not os.path.isfile(dirPlot+\"configurationsResults\"+fitFunc+\".txt\"): \n myfile = open(dirPlot+\"configurationsResults\"+fitFunc+\".txt\", \"a\")\n myfile.write(\"Fitness_function\\tconfiguration\\tfitness\\tsd\\tpCA_mM\\tFinalEc_gL\\tNar_mM\\tFinalKT_gL\\tendCycle\\tNH4_mM\\tpi_mM\\n\")\n \n myfile.write(fitFunc+\"\\t\"+str(sucr1)+','+str(Ecbiomass)+','+str(frc2)+','+str(KTbiomass)+\n \"\\t\"+str(round(avgfitness, 6))+\"\\t\"+str(round(sdfitness, 6))+\n \"\\t\"+str(round(avgpCA, 6))+\"\\t\"+str(round(final_Ecbiomass, 4))+\"\\t\"+str(round(avgNar,6))+\"\\t\"+str(round(final_KTbiomass, 4))+\"\\t\"+str(endCycle)+\n \"\\t\"+str(round(Final_nh4, 4))+\"\\t\"+str(round(Final_pi, 4))+\"\\n\")\n myfile.close()\n \n else:\n myfile = open(dirPlot+\"configurationsResults\"+fitFunc+\".txt\", \"a\")\n \n myfile.write(fitFunc+\"\\t\"+str(sucr1)+','+str(Ecbiomass)+','+str(frc2)+','+str(KTbiomass)+\n \"\\t\"+str(round(avgfitness, 6))+\"\\t\"+str(round(sdfitness, 6))+\n \"\\t\"+str(round(avgpCA, 6))+\"\\t\"+str(round(final_Ecbiomass, 4))+\"\\t\"+str(round(avgNar,6))+\"\\t\"+str(round(final_KTbiomass, 4))+\"\\t\"+str(endCycle)+\n \"\\t\"+str(round(Final_nh4, 4))+\"\\t\"+str(round(Final_pi, 4))+\"\\n\")\n myfile.close()\n \n \n print(\"Avg.fitness(sd): \"+str(avgfitness)+\" (+/-\"+str(sdfitness)+\")\")\n if(sdfitness>(0.1 * avgfitness)): # 
Correction if SD is too high. Maximum allowed SD ~ 10% (avgfitness)\n avgfitness=0.0\n \n# -----------------------------------------------------------------------------\n# -----------------------------------------------------------------------------\n# MODEL SUMMARY\n model = cobra.io.load_matlab_model('iEC1364_W_p_coumaratemod_tmp.mat')\n model.optimize()\n print(\"\\nMODEL SUMMARY E. coli\")\n print(model.summary())\n print()\n del(model)\n \n model = cobra.io.load_matlab_model('iJN1463_naringenin_mod_tmp.mat')\n model.optimize()\n print(\"\\nMODEL SUMMARY KT\")\n print(model.summary())\n print()\n del(model)\n# -----------------------------------------------------------------------------\n# -----------------------------------------------------------------------------\n \n return avgfitness,sdfitness\n# end-def EcoliPputidaOneConf\n################################################################\n\n\n\n\n","sub_path":"Project3_EcPp2_NutrientLimitations/Scripts/EcPp2_limNut.py","file_name":"EcPp2_limNut.py","file_ext":"py","file_size_in_byte":26249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"230471474","text":"\"\"\"PDBTMlink\n\nauthor: jbonet\ndate: 02/2014\n\n@oliva's lab\n\"\"\"\n\n\"\"\"\nImport Standard Libraries\n\"\"\"\nimport os, re\nimport urllib\n\n\"\"\"\nDependences in SBI library\n\"\"\"\nfrom SBI.databases import PDBTMftp\nfrom SBI.beans import Path\nfrom SBI.beans import File\n\nclass PDBTMlink(object):\n\n def __init__(self, local = None):\n self._local = os.path.abspath(local)\n self._main = None\n self._pdbtmfile = None\n if local is not None:\n self.local = local\n\n \"\"\"ATTRIBUTES\"\"\"\n @property\n def local(self): return self._local\n @local.setter\n def local(self, value):\n self._local = os.path.abspath(value)\n self._pdbtmfile = os.path.join(self._local, 'pdbtm.gz')\n\n @property\n def source(self):\n return PDBTMftp['show']\n\n \"\"\"BOOLEANS\"\"\"\n @property\n def has_local(self): return self._local is not None\n\n \"\"\"METHODS\"\"\"\n def download(self):\n if not self.has_local:\n raise NameError('A local drugBank database directory must be defined.')\n\n Path.mkdir(self.local)\n here = os.getcwd()\n os.chdir(self.local)\n os.system(\"svn export {0}\".format(PDBTMftp['svn']))\n\n self._process()\n\n return True\n\n @property\n def localTM(self):\n tmoFile = File(self._pdbtmfile, 'r')\n for tm_line in tmoFile.descriptor:\n yield tm_line\n\n \"\"\"PRIVATE METHODS\"\"\"\n def _process(self):\n tmoFile = File(self._pdbtmfile,'w', True)\n for xmlfile in Path.list_files(os.path.join(self._local,'pdbtm/database/'), '*.xml'):\n xmldata = TM(pdb = os.path.splitext(os.path.split(xmlfile)[1])[0].upper())\n skip_chains = set()\n read = False\n fdxml = open(xmlfile)\n for line in fdxml:\n if line.startswith(' '): xmldata.tmres = line\n elif line.startswith(' '): read = False\n fdxml.close()\n if len(xmldata.chains) > 0:\n tmoFile.write(str(xmldata)+\"\\n\")\n tmoFile.close()\n\n\nclass TM(object):\n section_types = {'1':'Side1', '2':'Side2', 'i':'Inside', 'o':'Outside',\n 'B':'Beta-strand', 'H':'alpha-helix', 'C':'coil',\n 'I':'membrane-inside', 'L':'membrane-loop', 'F':'interfacial helix', 'U':'unknown localizations'}\n\n def __init__(self, pdb= None, inline = None):\n if inline is not None: inline = inline.strip().split('\\t')\n\n self._pdb = pdb if inline is None else eval(inline[0])\n self._tmres = '' if inline is None else eval(inline[1])\n self._tmtype = '' if inline is None else eval(inline[2])\n 
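# Note: these fields round-trip through str()/eval() against the lines written out by _process(); ast.literal_eval would be a safer parser for such input.\n 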
self._kwres = '' if inline is None else eval(inline[3])\n self._side = '' if inline is None else eval(inline[4])\n self._chains = {} if inline is None else eval(inline[5])\n\n @property\n def pdb(self): return self._pdb\n @property\n def tmres(self): return self._tmres\n @tmres.setter\n def tmres(self, value): self._tmres = TM._clean_xml(value)\n @property\n def tmtype(self): return self._tmtype\n @tmtype.setter\n def tmtype(self, value): self._tmtype = TM._clean_xml(value)\n @property\n def kwres(self): return self._kwres\n @kwres.setter\n def kwres(self, value): self._kwres = TM._clean_xml(value)\n @property\n def side(self): return self._side\n @side.setter\n def side(self, value): self._side = value\n @property\n def chains(self): return self._chains\n\n def set_chain(self, key, value = None):\n if value is None:\n self._chains.setdefault(key,[])\n else:\n self._chains[key].append(value)\n\n @staticmethod\n def _clean_xml(xml):\n return re.search(\">([\\S\\s]+) Dict[str, str]:\n \"\"\"Calls subprocess.Popen and returns the process.\n\n Args:\n command_to_run (str): The command to run in Popen.\n timeout_sec (int): Timeout in seconds.\n\n Returns:\n Dict[str, str]: A named tuple containing return_code, stdout, and stderr.\n\n Raises:\n Various exceptions.\n \"\"\"\n if not command_to_run:\n command_to_run = ''\n\n try:\n LOGGER.write('command: ' + command_to_run)\n process = subprocess.Popen(\n command_to_run,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n start_new_session=True)\n outs, errs = process.communicate(timeout=timeout_sec)\n except subprocess.CalledProcessError as cpe:\n LOGGER.write(\n '{0}\\n{1}\\n{2}'.format(cpe.cmd, cpe.output, cpe.returncode), 'error')\n raise\n except subprocess.TimeoutExpired as t_e:\n LOGGER.write('{0}\\n{1}\\n{2}'.format(t_e.cmd, t_e.output, t_e.timeout), 'error')\n raise\n except Exception as g_e:\n LOGGER.write(str(g_e), 'error')\n raise\n else:\n LOGGER.write('''[-] command result: return_code: {0} stdout: {1} stderr: {2} '''.format(\n process.returncode, outs, errs))\n\n return {'return_code': process.returncode, 'stdout': outs, 'stderr': errs}\n","sub_path":"syslinkats/framework/local/local_shell_commands.py","file_name":"local_shell_commands.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"342456907","text":"import datetime\nfrom testcase.test_base import TestBase,exception_monitor # 导包可以导入函数,这里将装饰器放在测试基类里,方便调用\nfrom page.form_page.list_page import ListPage\nfrom page.form_page.Design_page import DesignPage\nfrom page.form_page.allocation_right_page import AllocationRightPage\nfrom page.form_page.data_manage_page import DataManagePage\nfrom time import sleep\nclass Data_Source_Case(TestBase):\n\n url='http://cd.sysdsoft.cn:10003/#/formViewList?userId=475FC3FA-9833-416D-A1C5-E1E885E24C23&userOrgDutyId=90BF1987-E638-42BB-B491-69EF774FF1FD&feopapi1=userOrgDutyId,userId,platform&feopapi2=userOrgDutyId,userId&platform=NPF'\n\n @exception_monitor('./results')\n def test_data_source(self):\n listpage=ListPage(self.driver)\n listpage.open(self.url)\n sleep(3)\n listpage.build_form()\n designpage = DesignPage(self.driver)\n designpage.form_rename('自动化测试')\n designpage.card_container()\n designpage.card_container1()\n designpage.text_single()\n designpage.text_module()\n designpage.date_field()\n designpage.num_field()\n designpage.accessory_field()\n designpage.radio_button()\n designpage.check_box()\n 
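# The designpage calls below appear to add the remaining control types to the form design.\n 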
designpage.pulldown_list()\n designpage.dropdown_check()\n designpage.cross_line()\n designpage.on_off()\n designpage.tree_selection()\n designpage.serial_num()\n designpage.address_field()\n designpage.member_field()\n designpage.dept_field()\n designpage.form_save()\n print('完成表单设计')\n sleep(3)\n print('开始分配权限')\n sleep(3)\n designpage.allocation_right()\n print('点击“分配选项”')\n rightpage=AllocationRightPage(self.driver)\n\n sleep(4)\n rightpage.search_allocation_right()\n sleep(3)\n rightpage.seatch_sendkeys('系统管理员')\n sleep(1)\n rightpage.select_role()\n sleep(1000)\n rightpage.add_right()\n sleep(1000)\n rightpage.field_permissions()\n sleep(1000)\n rightpage.editable()\n rightpage.allocation_right_save()\n sleep(3000)\n rightpage.data_manage()\n sleep(3000)\n data=DataManagePage(self.driver)\n data.add\n\n\n","sub_path":"ui_web_auto_pytest/uiautomation_weibiaodan/testcase/data_source_case.py","file_name":"data_source_case.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"406144762","text":"# importing the required modules \n# \nfrom kafka import KafkaConsumer\nfrom json import loads\nfrom time import sleep\nfrom json import dumps\nfrom kafka import KafkaProducer\nfrom _thread import *\nimport json \nimport schedule \nimport sys\nimport datetime\n\nimport time\nimport collections \nimport os\n# topic_own=\"pandey\"\n\nsys.path.insert(0, \"platform/communication_module\")\n\ncurpath=str(os.path.dirname(os.path.realpath(__file__)))\n\nos.system(\"python3 \"+curpath+\"/dashboard/dashboard.py &\")\n\nfile=open(curpath+\"/dashboard/running.txt\",\"w\")\nfile.close()\nimport communication_module as cm\n\n# file=open(\"running.txt\",\"a+\")\n\ncurrent_process=None\nprint(\"\\n[Schedular] - started\\n\")\n## using a global deque with a first-come, first-served schedule.\ndq = collections.deque() \n\nqueue=[]\nmeta_data=None\n\n#function to send data to the service life cycle module when its turn comes.\ndef to_servicelifecycle():\n\n\n global meta_data\n global dq\n curpath=str(os.path.dirname(os.path.realpath(__file__)))\n file=open(curpath+\"/dashboard/running.txt\",\"a\")\n file.write(str(meta_data[\"algoid\"])+\" at location \"+meta_data[\"location\"]+\" is running \"+ \"from \"+meta_data[\"start_time\"])\n file.close()\n cm.Schedular_to_ServiceLifeCycle_Producer_interface(meta_data)\n\nglobal x,y,z,a\n\n#the functions below are called according to the scheduling requirement of the user\n\ndef regular(days,start_time,duration,algo):\n global x\n x=schedule.every().days.at(start_time).do((to_servicelifecycle))\n\n\ndef notregular(days,start_time,duration,algo):\n global y\n y=schedule.every().days.at(start_time).do((to_servicelifecycle)) \n\n\n# def immediate(duration,algo):\n# global z\n# z=schedule.every().day.do(to_servicelifecycle) \n\n\ndef period(duration,start,algo):\n global a\n\n name=algo\n this_module = sys.modules[__name__]\n \n a=schedule.every(int(duration)).minutes.at(start).do((to_servicelifecycle))\n\n\n#If priority is high, push the data to the front of the queue\ndef inputq(msg):\n global dq\n \n\n\n if(msg[\"priority\"]==\"high\"):\n # print(\"hey ya\")\n dq.appendleft(msg)\n else:\n dq.append(msg)\n # print(\"message\",msg)\n\n#function to receive meta_data from the application manager\ndef to_recv():\n cm.ApplicationManager_to_Scheduler_interface(inputq)\n\n\n#function to check if there is any pending service to be scheduled. 
For example a service is to be scheduled on \"tuesdays\",\n#this function will ensure that it pushed in deque and considered \ndef pending():\n while True: \n \n schedule.run_pending() \n time.sleep(1) \n\ni=0\nstart_new_thread(to_recv,())\n\n## Calling respective functions according to the scheduling information received\ndef main():\n while(1):\n #print(\"Hello\")\n global dq\n global meta_data\n \n\n while(len(dq)>0):\n\n #print(\"Hello 1\")\n global i\n i=i+1\n\n\n meta_data=dq.popleft()\n # file=open(\"running.txt\",\"a+\")\n # file.write(str(meta_data[\"algoid\"])+\" at location \"+meta_data[\"location\"]+\" is running \"+ \"from \"+meta_data[\"start_time\"])\n\n \n if(meta_data[\"form\"]==\"run\"):\n\n\n if (meta_data[\"days\"]==\"everyday\" and meta_data[\"request_type\"]!=\"immediate\" ):\n regular(meta_data[\"days\"],meta_data[\"start_time\"],meta_data[\"duration\"],meta_data[\"algo\"])\n \n\n elif (meta_data[\"days\"]!=\"everyday\" and meta_data[\"days\"]!=\"\"):\n notregular(meta_data[\"days\"],meta_data[\"start_time\"],meta_data[\"duration\"],meta_data[\"algo\"])\n\n elif(meta_data[\"request_type\"]==\"immediate\"):\n # print(\"heyyyyyyyyyyyyyy\")\n # global file\n print(\"\\n [Schedular] : Scheduling immediately Service with id : \",meta_data[\"algoid\"])\n curpath=str(os.path.dirname(os.path.realpath(__file__)))\n file=open(curpath+\"/dashboard/running.txt\",\"a\")\n # file=open(\"running.txt\",\"a\")\n \n file.write(str(meta_data[\"algoid\"])+\" at location \"+meta_data[\"location\"]+\" is scheduled immediately\\n\")\n file.close()\n\n cm.Schedular_to_ServiceLifeCycle_Producer_interface(meta_data)\n # immediate(meta_data[\"duration\"],meta_data[\"algo\"])\n\n\n elif(meta_data[\"request_type\"]==\"periodic\"):\n print(\"in periodic scheduling\")\n period(meta_data[\"duration\"],meta_data[\"start_time\"],meta_data[\"algo\"])\n\n start_new_thread(pending,())\n\n\n else:\n schedule.cancel_job(eval(meta_data[\"algo\"]))\n\n\n\n \n\n \nif __name__ == \"__main__\":\n main()\n\n \n\n\n\n \n\n","sub_path":"platform/Scheduler/sched.py","file_name":"sched.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"153154740","text":"\"\"\"Test pydeCONZ session class.\n\npytest --cov-report term-missing --cov=pydeconz.gateway tests/test_gateway.py\n\"\"\"\n\nfrom unittest.mock import AsyncMock, Mock, patch\nimport pytest\n\nfrom pydeconz import (\n DeconzSession,\n RequestError,\n ResponseError,\n ERRORS,\n pydeconzException,\n)\n\nimport aiohttp\nfrom aioresponses import aioresponses\n\nAPI_KEY = \"1234567890\"\nHOST = \"127.0.0.1\"\nPORT = \"80\"\n\n\n@pytest.fixture\ndef mock_aioresponse():\n with aioresponses() as m:\n yield m\n\n\nasync def test_websocket_not_setup():\n \"\"\"Test websocket method is not set up if websocket port is not provided.\"\"\"\n session = DeconzSession(aiohttp.ClientSession(), HOST, PORT, API_KEY)\n\n with patch(\"pydeconz.gateway.WSClient\") as mock_wsclient:\n session.start()\n assert not session.websocket\n mock_wsclient.assert_not_called()\n\n session.close()\n\n\nasync def test_websocket_setup(mock_aioresponse):\n \"\"\"Test websocket methods work.\"\"\"\n session = DeconzSession(aiohttp.ClientSession(), HOST, PORT, API_KEY)\n\n with patch(\"pydeconz.gateway.WSClient\") as mock_wsclient:\n session.start(websocketport=443)\n assert session.websocket\n mock_wsclient.assert_called()\n session.websocket.start.assert_called()\n\n session.close()\n 
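# close() should in turn stop the websocket client created by start().\n 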
session.websocket.stop.assert_called()\n\n\nasync def test_websocket_config_provided_websocket_port(mock_aioresponse):\n \"\"\"Test websocket methods work.\"\"\"\n session = DeconzSession(aiohttp.ClientSession(), HOST, PORT, API_KEY)\n\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}\",\n payload={\n \"config\": {\"websocketport\": 8080},\n \"groups\": {},\n \"lights\": {},\n \"sensors\": {},\n },\n content_type=\"application/json\",\n status=200,\n )\n\n await session.initialize()\n\n with patch(\"pydeconz.gateway.WSClient\") as mock_wsclient:\n session.start()\n mock_wsclient.assert_called()\n session.websocket.start.assert_called()\n\n session.close()\n session.websocket.stop.assert_called()\n\n\nasync def test_initialize(mock_aioresponse):\n \"\"\"Test initialize creates devices as expected.\"\"\"\n session = DeconzSession(aiohttp.ClientSession(), HOST, PORT, API_KEY)\n init_response = {\n \"config\": {\"bridgeid\": \"012345\"},\n \"groups\": {\n \"g1\": {\n \"id\": \"gid\",\n \"scenes\": [{\"id\": \"sc1\", \"name\": \"scene1\"}],\n \"lights\": [],\n }\n },\n \"lights\": {\"l1\": {\"type\": \"light\"}},\n \"sensors\": {\"s1\": {\"type\": \"sensor\"}},\n }\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}\",\n payload=init_response,\n content_type=\"application/json\",\n status=200,\n )\n\n await session.initialize()\n\n assert session.config.bridgeid == \"012345\"\n\n assert \"g1\" in session.groups\n assert \"sc1\" in session.groups[\"g1\"].scenes\n assert \"l1\" in session.lights\n assert \"s1\" in session.sensors\n assert \"gid_sc1\" in session.scenes\n\n assert session.groups[\"g1\"].id == \"gid\"\n assert session.groups[\"g1\"].deconz_id == \"/groups/g1\"\n assert session.groups[\"g1\"].scenes[\"sc1\"].id == \"sc1\"\n assert session.lights[\"l1\"].deconz_id == \"/lights/l1\"\n assert session.sensors[\"s1\"].deconz_id == \"/sensors/s1\"\n assert session.scenes == {\"gid_sc1\": session.groups[\"g1\"].scenes[\"sc1\"]}\n\n await session.session.close()\n\n\nasync def test_refresh_state(mock_aioresponse):\n \"\"\"Test refresh_state creates devices as expected.\"\"\"\n session = DeconzSession(aiohttp.ClientSession(), HOST, PORT, API_KEY)\n init_response = {\n \"config\": {},\n \"groups\": {},\n \"lights\": {},\n \"sensors\": {},\n }\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}\",\n payload=init_response,\n content_type=\"application/json\",\n status=200,\n )\n\n await session.initialize()\n\n assert session.config.bridgeid == \"0000000000000000\"\n assert len(session.groups.values()) == 0\n assert len(session.lights.values()) == 0\n assert len(session.sensors.values()) == 0\n assert len(session.scenes.values()) == 0\n\n refresh_response = {\n \"config\": {\"bridgeid\": \"012345\"},\n \"groups\": {\n \"g1\": {\n \"id\": \"gid\",\n \"scenes\": [{\"id\": \"sc1\", \"name\": \"scene1\"}],\n \"lights\": [],\n }\n },\n \"lights\": {\"l1\": {\"type\": \"light\"}},\n \"sensors\": {\"s1\": {\"type\": \"sensor\"}},\n }\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}\",\n payload=refresh_response,\n content_type=\"application/json\",\n status=200,\n )\n\n await session.refresh_state()\n\n assert session.config.bridgeid == \"0000000000000000\"\n\n assert \"g1\" in session.groups\n assert \"sc1\" in session.groups[\"g1\"].scenes\n assert \"l1\" in session.lights\n assert \"s1\" in session.sensors\n assert \"gid_sc1\" in session.scenes\n\n assert session.groups[\"g1\"].id == \"gid\"\n assert session.groups[\"g1\"].deconz_id == 
\"/groups/g1\"\n assert session.groups[\"g1\"].scenes[\"sc1\"].id == \"sc1\"\n assert session.lights[\"l1\"].deconz_id == \"/lights/l1\"\n assert session.sensors[\"s1\"].deconz_id == \"/sensors/s1\"\n assert session.scenes == {\"gid_sc1\": session.groups[\"g1\"].scenes[\"sc1\"]}\n\n await session.session.close()\n\n\nasync def test_request(mock_aioresponse):\n \"\"\"Test request method and all its exceptions.\"\"\"\n session = DeconzSession(aiohttp.ClientSession(), HOST, PORT, API_KEY)\n\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}\",\n content_type=\"application/json\",\n payload={\"result\": \"success\"},\n )\n assert await session.request(\"get\", \"\") == {\"result\": \"success\"}\n\n # Bad content type\n\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}/bad_content_type\",\n content_type=\"http/text\",\n )\n with pytest.raises(ResponseError):\n await session.request(\"get\", \"/bad_content_type\")\n\n # Client error\n\n with patch.object(\n session.session, \"request\", side_effect=aiohttp.client_exceptions.ClientError\n ), pytest.raises(RequestError):\n await session.request(\"get\", \"/client_error\")\n\n # Raise on error\n\n for error_code, error in ERRORS.items():\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}/{error_code}\",\n content_type=\"application/json\",\n payload={\"error\": {\"type\": error_code, \"address\": HOST, \"description\": \"\"}},\n )\n with pytest.raises(error):\n await session.request(\"get\", f\"/{error_code}\")\n\n # Raise on error - Unknown error\n\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}/unknown\",\n content_type=\"application/json\",\n payload=[{\"error\": {\"type\": 0, \"address\": HOST, \"description\": \"\"}}],\n )\n with pytest.raises(pydeconzException):\n await session.request(\"get\", \"/unknown\")\n\n # Generic exception\n\n with patch.object(session.session, \"request\", side_effect=Exception), pytest.raises(\n Exception\n ):\n await session.request(\"get\", \"\")\n\n await session.session.close()\n\n\nasync def test_session_handler():\n \"\"\"Test session_handler works.\"\"\"\n session = DeconzSession(Mock(), HOST, PORT, API_KEY, connection_status=Mock())\n session.websocket = Mock()\n session.websocket.data = {}\n session.websocket.state = \"running\"\n\n # Event data\n\n with patch.object(session, \"event_handler\", return_value=True) as event_handler:\n await session.session_handler(signal=\"data\")\n event_handler.assert_called()\n\n # Connection status changed\n\n await session.session_handler(signal=\"state\")\n session.async_connection_status_callback.assert_called_with(True)\n\n\nasync def test_event_handler(mock_aioresponse):\n \"\"\"Test event_handler works.\"\"\"\n session = DeconzSession(\n aiohttp.ClientSession(), HOST, PORT, API_KEY, async_add_device=Mock()\n )\n init_response = {\n \"config\": {},\n \"groups\": {},\n \"lights\": {},\n \"sensors\": {},\n }\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}\",\n payload=init_response,\n content_type=\"application/json\",\n status=200,\n )\n\n await session.initialize()\n\n assert not session.event_handler({\"e\": \"deleted\"})\n\n assert not session.event_handler({\"e\": \"added\", \"r\": \"scenes\"})\n\n # Add light\n\n session.event_handler(\n {\n \"e\": \"added\",\n \"id\": \"1\",\n \"r\": \"lights\",\n \"light\": {\n \"type\": \"light\",\n \"state\": {\n \"bri\": 1,\n \"reachable\": True,\n },\n },\n }\n )\n\n assert \"1\" in session.lights\n assert session.lights[\"1\"].brightness == 1\n 
session.async_add_device_callback.assert_called_with(\"lights\", session.lights[\"1\"])\n\n # Add group\n\n session.event_handler(\n {\n \"e\": \"added\",\n \"id\": \"1\",\n \"r\": \"groups\",\n \"group\": {\n \"action\": {\"bri\": 1},\n \"lights\": [\"1\"],\n \"scenes\": [],\n },\n }\n )\n\n assert \"1\" in session.groups\n assert session.groups[\"1\"].brightness == 1\n session.async_add_device_callback.assert_called_with(\"groups\", session.groups[\"1\"])\n\n # Add sensor\n\n session.event_handler(\n {\n \"e\": \"added\",\n \"id\": \"1\",\n \"r\": \"sensors\",\n \"sensor\": {\n \"type\": \"\",\n \"config\": {\n \"reachable\": True,\n },\n },\n }\n )\n\n assert \"1\" in session.sensors\n assert session.sensors[\"1\"].reachable\n session.async_add_device_callback.assert_called_with(\n \"sensors\", session.sensors[\"1\"]\n )\n\n # Update light\n\n mock_light_callback = Mock()\n session.lights[\"1\"].register_callback(mock_light_callback)\n session.event_handler(\n {\"e\": \"changed\", \"id\": \"1\", \"r\": \"lights\", \"state\": {\"bri\": 2}}\n )\n\n mock_light_callback.assert_called()\n assert session.lights[\"1\"].changed_keys == {\"state\", \"bri\", \"e\", \"id\", \"r\"}\n assert session.lights[\"1\"].brightness == 2\n assert (\n session.groups[\"1\"].brightness == 2\n ) # Updating light will also reflect on group brightness\n\n # Update group\n\n mock_group_callback = Mock()\n session.groups[\"1\"].register_callback(mock_group_callback)\n session.event_handler(\n {\"e\": \"changed\", \"id\": \"1\", \"r\": \"groups\", \"action\": {\"bri\": 3}}\n )\n\n mock_group_callback.assert_called()\n assert session.groups[\"1\"].changed_keys == {\"action\", \"bri\", \"e\", \"id\", \"r\"}\n assert session.groups[\"1\"].brightness == 3\n assert (\n session.lights[\"1\"].brightness == 2\n ) # Group update doesn't by itself reflect back on light\n\n # Update sensor\n\n mock_sensor_callback = Mock()\n session.sensors[\"1\"].register_callback(mock_sensor_callback)\n session.event_handler(\n {\"e\": \"changed\", \"id\": \"1\", \"r\": \"sensors\", \"config\": {\"reachable\": False}}\n )\n\n mock_sensor_callback.assert_called()\n assert session.sensors[\"1\"].changed_keys == {\"config\", \"reachable\", \"e\", \"id\", \"r\"}\n assert not session.sensors[\"1\"].reachable\n\n await session.session.close()\n\n\nasync def test_update_group_color(mock_aioresponse):\n \"\"\"Test update_group_color works as expected.\"\"\"\n session = DeconzSession(aiohttp.ClientSession(), HOST, PORT, API_KEY)\n init_response = {\n \"config\": {},\n \"groups\": {\n \"g1\": {\n \"action\": {\n \"bri\": 1,\n \"hue\": 1,\n \"sat\": 1,\n \"xy\": (1, 1),\n \"ct\": 1,\n \"colormode\": \"hs\",\n },\n \"id\": \"gid\",\n \"lights\": [\"l1\"],\n \"scenes\": [],\n }\n },\n \"lights\": {\n \"l1\": {\n \"type\": \"light\",\n \"state\": {\n \"bri\": 2,\n \"hue\": 2,\n \"sat\": 2,\n \"xy\": (0.5, 0.5),\n \"ct\": 2,\n \"colormode\": \"xy\",\n \"reachable\": True,\n },\n },\n \"l2\": {\"type\": \"\"},\n },\n \"sensors\": {},\n }\n mock_aioresponse.get(\n f\"http://{HOST}:{PORT}/api/{API_KEY}\",\n payload=init_response,\n content_type=\"application/json\",\n status=200,\n )\n\n await session.initialize()\n\n assert session.groups[\"g1\"].brightness == 2\n assert session.groups[\"g1\"].hue == 2\n assert session.groups[\"g1\"].sat == 2\n assert session.groups[\"g1\"].xy == (0.5, 0.5)\n assert session.groups[\"g1\"].colormode == \"xy\"\n assert session.groups[\"g1\"].ct == 2\n\n await 
session.session.close()","sub_path":"tests/test_gateway.py","file_name":"test_gateway.py","file_ext":"py","file_size_in_byte":12942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"483474403","text":"import numpy as np\nfrom numpy import genfromtxt\nimport BAquad as BA\nimport setting\nfrom optimisation import *\n\neps = 1e-6\nsolve = True\nu = 20\nv = 20\nd = 2\nptcFileName = 'pc_9.ply'\nxgridFile = 'xgrid_9.csv'\nygridFile = 'ygrid_9.csv'\nPhiFile = 'Phi_9.csv'\n\nsetting.init(solve, u, v, eps, d, xgridFile, ygridFile, PhiFile, ptcFileName)\nPhi = setting.Phi_control_nonuni\npoints = setting.points\nxgrid = setting.xgrid\nygrid = setting.ygrid\nxyrange = setting.xyrange\ncentroid = setting.centroid\nV = setting.V\n\nptex = genfromtxt('../camparams/texturemap', delimiter=',')\nxypoints = ptex[:,0:2]\ntexture = ptex[:,2]\n\nworldpoints = np.c_[xypoints, np.zeros(len(xypoints))]\norigin = [np.dot(np.array([0,0,0]) - centroid, V[0,:]), \n np.dot(np.array([0,0,0]) - centroid, V[1,:]), \n np.dot(np.array([0,0,0]) - centroid, V[2,:])]\n\n#texture_obj(xypoints[0,:], origin, xypoints[0,:], xgrid, ygrid, xyrange, Phi, d)\n#te = texture_coords(Phi, xypoints, origin, xypoints, xgrid, ygrid, xyrange, d)\n#planarpoints = np.array(te[:,0].tolist())\nfrom numpy import genfromtxt\nplanarpoints = genfromtxt('planepoints.csv', delimiter=',')\n\nxlogic = (planarpoints[:,0] >= min(points[:,0])) & (planarpoints[:,0] <= max(points[:,0]))\nylogic = (planarpoints[:,1] >= min(points[:,1])) & (planarpoints[:,1] <= max(points[:,1]))\n\ncleanpoints = planarpoints[xlogic & ylogic]\nZ = BA.vevaluatePoint_Control_nonuni(d, cleanpoints[:,0], cleanpoints[:,1], xgrid, ygrid, xyrange, Phi)\nsurfacepoints = np.c_[cleanpoints, Z]\nworld_coord = np.zeros(np.shape(surfacepoints))\nfor ind,i in enumerate(surfacepoints):\n world_coord[ind] = centroid + V[0,:]*i[0] + V[1,:]*i[1] + V[2,:]*i[2]\n\nnp.savetxt('surface_3d.csv', world_coord, delimiter=',')\n\nif plot:\n tx = points[:,0]\n ty = points[:,1]\n\n X = np.arange(np.ceil(min(tx)),np.floor(max(tx)), 1)\n Y = np.arange(np.ceil(min(ty)),np.floor(max(ty)), 1)\n\n X,Y = np.meshgrid(X,Y)\n\n Z = BA.evaluateSurface_Control_nonuni(d, X, Y, xgrid, ygrid, xyrange, Phi)\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n # Create figure.\n fig = plt.figure()\n ax = fig.gca(projection = '3d')\n surf = ax.plot_surface(X, Y, Z)\n\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n mid_x = (X.max()+X.min()) * 0.5\n mid_y = (Y.max()+Y.min()) * 0.5\n mid_z = (Z.max()+Z.min()) * 0.5\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)\n\n #plt.show(block=True)\n # Set viewpoint.\n ax.azim = 130\n ax.elev = -120\n\n #fig.savefig('face.png')\n plt.show(block=True)\n","sub_path":"Bsplines/Final/quadratic/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"513189726","text":"# Copyright (c) 2018 Ultimaker B.V.\n# Cura is released under the terms of the LGPLv3 or higher.\n\nfrom typing import Any, Dict, TYPE_CHECKING\n\nfrom . 
import VersionUpgrade25to26\n\nif TYPE_CHECKING:\n from UM.Application import Application\n\nupgrade = VersionUpgrade25to26.VersionUpgrade25to26()\n\ndef getMetaData() -> Dict[str, Any]:\n return {\n \"version_upgrade\": {\n # From To Upgrade function\n (\"preferences\", 4000000): (\"preferences\", 4000001, upgrade.upgradePreferences),\n # NOTE: All the instance containers share the same general/version, so we have to update all of them\n # if any is updated.\n (\"quality_changes\", 2000000): (\"quality_changes\", 2000001, upgrade.upgradeInstanceContainer),\n (\"user\", 2000000): (\"user\", 2000001, upgrade.upgradeInstanceContainer),\n (\"definition_changes\", 2000000): (\"definition_changes\", 2000001, upgrade.upgradeInstanceContainer),\n (\"machine_stack\", 3000000): (\"machine_stack\", 3000001, upgrade.upgradeMachineStack),\n },\n \"sources\": {\n \"quality_changes\": {\n \"get_version\": upgrade.getCfgVersion,\n \"location\": {\"./quality\"}\n },\n \"preferences\": {\n \"get_version\": upgrade.getCfgVersion,\n \"location\": {\".\"}\n },\n \"user\": {\n \"get_version\": upgrade.getCfgVersion,\n \"location\": {\"./user\"}\n },\n \"definition_changes\": {\n \"get_version\": upgrade.getCfgVersion,\n \"location\": {\"./machine_instances\"}\n },\n \"machine_stack\": {\n \"get_version\": upgrade.getCfgVersion,\n \"location\": {\"./machine_instances\"}\n }\n }\n }\n\ndef register(app: \"Application\") -> Dict[str, Any]:\n return { \"version_upgrade\": upgrade }\n","sub_path":"Cura/Cura/plugins/VersionUpgrade/VersionUpgrade25to26/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"15967084","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 29 14:30:49 2018\n\n@author: PE_102\n\"\"\"\nfrom player import *\nfrom operator import mod\nfrom trick import *\n\n\n \ndef DefenseNoFirst_play(self,trick, bidder, j,first_player): # fct générale appelée quand une IA en défense doit jouer un pli déjà commencé( j est le nº du pli)\n print('from dnf bidder = ',bidder)\n print('from dnf first_player = ',first_player)\n print('from dnf plie nº = ',j)\n \n print('hello from DefenseNoFirst')\n \n playable_cards=self.playable_cards(trick)\n print('playables_cards= ',playable_cards)\n \n bidder_relatif = mod((bidder - first_player),4)\n print('from dnf bidder_relatif = ',bidder_relatif)\n \n \n presence_excuse=False\n i=0\n for el in playable_cards:\n if el.get_rank()==0: #vérifier que la suite de excuse est bien \"excuse\"\n presence_excuse=True\n position_excuse=i\n i += 1\n if (j==16 and presence_excuse): # jouer l'excuse si c'est l'avant dernier plie et quand possède l'excuse \n return play_DNF(trick,position_excuse,bidder_relatif)\n \n bidder_played= False\n \n if bidder_relatif < len(trick.get_cards()):\n bidder_played = True\n \n \n if bidder_played: \n return after_bidder(self,trick,bidder_relatif)\n else:\n return before_bidder(self,trick,bidder_relatif)\n \n \n \n\n \n# vérifier pertinence de la fct play \ndef play_DNF (self,trick,i,bidder_relatif): #fct en charge de jouer une carte / i correspond à la position de la position de la carte de playable_card à être joué\n playable_cards = self.playable_cards(trick)\n card = playable_cards[i]\n try:\n print(card)\n except:\n print('erreur pour print card à play dnf')\n \n else:\n print('play_DNF va jouer:',card)\n \n \n return card\n \n \n\ndef check_if_iam_winner(self,trick,bidder_relatif): \n print('from 
\n    # returns True if one of my cards beats the attacker's card\n    hypo_trick=[] # list of my playable cards + the attacker's winning card\n    hypo_trick.append(trick.get_cards()[0]) # add the first card of the trick (to know the requested suit)\n    for j,el in enumerate(trick.get_cards()):\n        if j==bidder_relatif:\n            hypo_trick.append(el) # insert the attacker's winning card\n\n    playable_cards=self.playable_cards(trick)\n\n    for el in playable_cards:\n        hypo_trick.append(el) # insert my playable cards\n    print('from check_if_iam_winner hypo_trick =',hypo_trick)\n    position_meilleur_carte= Player.best_card(hypo_trick) - 2 # if the best card of hypo_trick is mine, this variable holds the position of my best card in playable_cards\n    print('from check_if_iam_winner position_meilleur_carte =',position_meilleur_carte)\n    if Player.best_card(hypo_trick)==1: # i.e. I cannot beat the attacker's card\n        return False , position_meilleur_carte\n    else:\n        return True , position_meilleur_carte\n\n\n\"\"\"\n\ndef check_if_iam_winner_before(trick): # returns True if one of my cards beats the cards already played by the defense\n    hypo_trick=[] # list of my playable cards + the attacker's winning card\n\n    l_trick = len(trick)\n    for i in range (l_trick): # add the cards already played by the defense\n        hypo_trick.append((trick[i])\n\n    playable_cards=playable_cards(self, trick)\n    l_playable = len(playable_cards)\n\n    for e in range(l_playable):\n        hypo_trick.append(playable_cards[e]) # insert my playable cards\n\n    a = Player.best_card(hypo_trick)\n\n    if a < l_trick: # none of my cards beats the best card already played by a fellow defender\n        play(trick,worst_card(trick)) # still to optimize\n    else\n\n\"\"\"\n\n\ndef bidder_payable(self,trick,bidder_relatif): # find out whether the attacker can a priori follow the requested suit, and what that suit is\n\n    playable_cards=self.playable_cards(trick)\n    first_card = trick.get_cards()[0]\n\n    if str(first_card) == \"Excuse\": # if the first card played is the Excuse\n        if len(trick.get_cards()) == 1: # if the AI plays second\n            first_card = random.choice(playable_cards)\n        else: # take the suit of the second player\n            first_card = trick.get_cards()[1]\n    else:\n        first_card = trick.get_cards()[0]\n\n    if isinstance(first_card, Trump): # if first_card is a Trump object\n        # to do: estimate whether he can win the trick\n        couleur_demande=\"T\"\n        if basic_game.get_data().coupes[bidder_relatif]['max_trump'] != 0: # if the bidder has trumps according to the database\n            return 'oui' , couleur_demande # the requested suit is trump and the attacker will play trump\n        else:\n            return 'atout_couleur' , couleur_demande # attacker has no trump and plays another suit (attacker loses the trick)\n\n    if isinstance(first_card, Card):\n        couleur_demande=trick.get_cards()[0].get_suit()\n        if basic_game.get_data().coupes[bidder_relatif][couleur_demande] == False : # if the bidder has the requested suit according to the database\n            return 'oui' , couleur_demande\n        else:\n            if basic_game.get_data().coupes[bidder_relatif]['max_trump'] != 0: # the bidder has trumps according to the database\n                return 'couleur_atout' , couleur_demande # attacker lacks the suit and plays trump\n            else:\n                return 'couleur_couleur' , couleur_demande # attacker lacks the suit and plays another suit (attacker loses the trick)\n\ndef avoir_couleur_demande(self,trick,bidder_relatif): # True if I hold the requested suit\n    print('from avoir_couleur_demande' )\n    a , couleur_demande=bidder_payable(self,trick,bidder_relatif)\n    playable_cards=self.playable_cards(trick)
\n    for el in playable_cards:\n        suit=el.get_suit()\n        if suit==couleur_demande :\n            return True\n    return False\n\n\ndef avoir_atout(self,trick): # True if I have trumps among my playable cards\n    print('from avoir_atout' )\n    playable_cards=self.playable_cards(trick)\n    for el in playable_cards:\n        if el.get_oulder() == 1: # to double-check\n            return True\n    return False\n\ndef atout_max(self,trick): # returns the rank of your highest playable trump and its position in playable_cards\n    print('from atout_max' )\n    playable_cards=self.playable_cards(trick)\n    rank_max=0\n    position_atout_max = 10\n    i=-1\n    for el in playable_cards:\n        i += 1\n        if el.get_oulder() == 1 :\n            if el.get_rank() > rank_max:\n                rank_max = el.get_rank()\n                position_atout_max = i\n\n    return rank_max , position_atout_max\n\ndef atout_min(self,trick): # returns the rank of your lowest playable trump and its position in playable_cards\n    print('from atout_min' )\n    playable_cards=self.playable_cards(trick)\n    rank_min=22\n    position_atout_min = 0\n    i = -1\n    for el in playable_cards:\n        i += 1\n        if el.get_oulder() == 1 :\n            if el.get_rank() < rank_min:\n                rank_min = el.get_rank()\n                position_atout_min = i\n    return rank_min , position_atout_min\n\ndef jouer_petit(self,trick):\n    print('from jouer_petit' )\n    playable_cards=self.playable_cards(trick)\n    for el in enumerate(playable_cards):\n        if el[1].get_oulder() == 1 and el[1].get_rank() == 1: # True if I can play the Petit (trump 1)\n            return True , el[0]\n    return False , -1\n\n\ndef jouer_excuse(self,trick): # play the Excuse if the attacker is currently winning the trick and I can only play trumps/the Excuse\n    print('from jouer_excuse' )\n\n    playable_cards=self.playable_cards(trick)\n    i=-1\n    for el in playable_cards:\n        i+=1\n        if el.get_oulder()!= 1: # True if the card is neither a trump nor the Excuse\n            return False, 1 # the value 1 is arbitrary, it will not be used\n        if el.get_oulder() == 1 and el.get_rank() == 0: # this is the Excuse\n            position_excuse=i\n            return True,position_excuse\n\n\ndef proba_apriori(self,trick,couleur_demande,bidder_relatif): # choose a card when the attacker has not played yet and can a priori follow the requested suit\n    print('from proba_apriori' )\n    playable_cards=self.playable_cards(trick)\n    if couleur_demande == 'T':\n        if avoir_atout(self,trick):\n            bidder_atout_max = basic_game.get_data().coupes[bidder_relatif]['max_trump'] # an upper bound on the bidder's highest trump\n            mon_atout_max = atout_max(self,trick)[0]\n            if mon_atout_max > bidder_atout_max: # I am sure to win\n                return play_DNF(self,trick,atout_max(self,trick)[1],bidder_relatif)\n            else: # I do not know whether I win\n                return play_DNF(self,trick,worst_card(self,trick),bidder_relatif) # to do: compute the probability of beating the trump the attacker will play\n\n    elif couleur_demande in ['S','H','D','C']:\n        if avoir_couleur_demande(self,trick,bidder_relatif):\n            i=-1\n            maxi = True\n            rank_max = basic_game.get_data().dico[couleur_demande][-1]\n            for c in playable_cards:\n                i+=1\n                if c.get_suit() == couleur_demande:\n                    if c.get_rank() == rank_max:\n                        return play_DNF(self,trick,i,bidder_relatif)\n                    maxi = False\n            if maxi:\n                return play_DNF(self,trick,worst_card(self,trick),bidder_relatif)\n            # to do: compute the probability of beating the attacker\n        else:\n            if avoir_atout(self,trick):\n                return play_DNF(self,trick,atout_min(self,trick)[1],bidder_relatif)\n            else:\n                return play_DNF(self,trick,worst_card(self,trick),bidder_relatif)\n\n\ndef before_bidder (self,trick,bidder_relatif): # the attacker has not played yet in this trick\n\n    print('hello from before_bidder' )
\n    a , couleur_demande=bidder_payable(self,trick,bidder_relatif) # find out whether the attacker can a priori follow the requested suit\n    if a==\"oui\":\n        return proba_apriori(self,trick,couleur_demande,bidder_relatif) # choose a card when the attacker has not played yet and can a priori follow the requested suit\n\n    elif a==\"atout_couleur\": # the defense wins the trick\n        if jouer_petit(self,trick)[0]:\n            return play_DNF(self,trick,jouer_petit(self,trick)[1],bidder_relatif)\n        else:\n            return play_DNF(self,trick,card_max_point(self,trick),bidder_relatif) # to optimize: score points + keep cards that can win other tricks\n\n    elif a==\"couleur_atout\":\n        if avoir_couleur_demande(self,trick,bidder_relatif):\n            return play_DNF(self,trick,worst_card(self,trick),bidder_relatif)\n        else:\n            if avoir_atout(self,trick):\n                bidder_atout_max = basic_game.get_data().coupes[bidder_relatif]['max_trump'] # an upper bound on the bidder's highest trump\n                mon_atout_max = atout_max(self,trick)[0]\n                if mon_atout_max > bidder_atout_max: # I am sure to win\n                    return play_DNF(self,trick,atout_max(self,trick)[1],bidder_relatif)\n                else: # I do not know whether I win\n                    return play_DNF(self,trick,worst_card(self,trick),bidder_relatif) # to do: compute the probability of beating the trump the attacker will play\n            else:\n                return play_DNF(self,trick,worst_card(self,trick),bidder_relatif)\n\n    elif a==\"couleur_couleur\": # the defense wins the trick\n        if jouer_petit(self,trick)[0]:\n            return play_DNF(self,trick,jouer_petit(self,trick)[1],bidder_relatif)\n        else:\n            return play_DNF(self,trick,card_max_point(self,trick),bidder_relatif) # to optimize: score points + keep cards that can win other tricks\n\ndef worst_card (self,trick): # returns the position in playable_cards of the worst card (lowest-rank suit card, else lowest-rank trump)\n    print('hello from worst_card' )\n    rank_min_couleur = 15\n    rank_min_atout = 22\n    playable_cards=self.playable_cards(trick)\n    presence_carte_couleur= False\n    i=-1\n    for el in playable_cards:\n        i+=1\n        if el.get_oulder() == 0: # it is a suit card\n            presence_carte_couleur= True\n            if el.get_rank() < rank_min_couleur:\n                rank_min_couleur = el.get_rank()\n                position_worst_card = i\n\n    if presence_carte_couleur:\n        return position_worst_card\n    else:\n        i=-1\n        for el in playable_cards:\n            i+=1\n            if el.get_rank() !=0 and el.get_oulder() == 1: # trump card\n                if el.get_rank() < rank_min_atout:\n                    rank_min_atout = el.get_rank()\n                    position_worst_card = i\n        return position_worst_card\n\ndef card_max_point (self,trick): # the trick is already won; play the suit card worth the most points, or the lowest-rank trump if no suit card is playable\n    print('hello from card_max_point' )\n    playable_cards=self.playable_cards(trick)\n    point_max = 0\n    presence_carte_couleur= False\n    rank_min = 21\n    i = -1\n    for el in playable_cards:\n        i +=1\n        if el.get_oulder()== 0: # it is a suit card\n            presence_carte_couleur= True\n            if el.get_point() > point_max:\n                point_max = el.get_point()\n                position_card = i\n    if presence_carte_couleur:\n        return position_card\n    else:\n        i=-1\n        for el in playable_cards:\n            i+=1\n            if el.get_oulder()== 1 and str(el) != \"Excuse\": # it is a trump card\n                if el.get_rank()< rank_min:\n                    rank_min = el.get_rank()\n                    position_card = i\n        return position_card\n\n\ndef after_bidder (self,trick,bidder_relatif): # the attacker has already played in this trick\n    print('hello from after_bidder' )\n\n    if bidder_relatif== Player.best_card(trick.get_cards()): # True if the attacker has played the best card so far
\n        if check_if_iam_winner(self,trick,bidder_relatif)[0]: # True if one of my cards beats the attacker's\n            if jouer_petit(self,trick)[0]: # True if I can play the Petit\n                hypo_trick = []\n                hypo_trick.append(trick.get_cards()[0]) # insert the first card\n                hypo_trick.append(trick.get_cards()[bidder_relatif]) # insert the attacker's winning card (CHECK j)\n                playable_cards=self.playable_cards(trick)\n                hypo_trick.append(playable_cards[jouer_petit(self,trick)[1]]) # insert the Petit\n                if Player.best_card(hypo_trick) == 2: # I play the Petit knowing that I win the trick\n                    return play_DNF(self,trick,jouer_petit(self,trick)[1],bidder_relatif)\n                else:\n                    return play_DNF(self,trick,check_if_iam_winner(self,trick,bidder_relatif)[1],bidder_relatif) # play my best card\n            else:\n                return play_DNF(self,trick,check_if_iam_winner(self,trick,bidder_relatif)[1],bidder_relatif) # play my best card\n                # the played card remains to be optimized\n\n        else:\n            if jouer_excuse(self,trick)[0]:\n                position_excuse=jouer_excuse(self,trick)[1]\n                return play_DNF(self,trick,position_excuse,bidder_relatif)\n            else: # play the lowest card (the played card remains to be optimized)\n                return play_DNF(self,trick,worst_card(self,trick),bidder_relatif)\n\n    else: # the defense wins the trick\n        if jouer_petit(self,trick)[0]:\n            return play_DNF(self,trick,jouer_petit(self,trick)[1],bidder_relatif)\n        else:\n            return play_DNF(self,trick,card_max_point(self,trick),bidder_relatif)\n","sub_path":"code marco/DefenseNoFirst.py","file_name":"DefenseNoFirst.py","file_ext":"py","file_size_in_byte":16593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"588568526","text":"import random\nimport sys\nimport string\nsys.path.append('../qom_questions_transformer')\n\nfrom python_transformer.pt_python_transformer_interface import change_token_all_occurrences\nfrom python_transformer.pt_python_transformer_interface import change_all_occurrences\nfrom text_transformer.tt_text_transformer_interface import clear\nfrom text_transformer.tt_text_transformer_interface import load_text\n\nfrom file_generator.helper import Helper\nfrom generate_random_version.generate_random_version import GenerateRandomVersion\n\n\nclass QuestionTransformer(GenerateRandomVersion):\n\n\t# python_files_list -> name of the files (python) that wants to be changed\n\t# general_files_to_change_list -> name of the files (latex) that wants to be generated\n\t# num_versions -> number of versions that wants to be created\n\t# output_index -> index of the output (1-question, 2-answer)\n\tdef __init__(self, python_files_list, general_files_to_change_list, directory_latex, num_versions, output_index, version_path, repeat_seed=None):\n\n\t\tsuper().__init__(python_files_list, general_files_to_change_list, directory_latex, num_versions, output_index, version_path, repeat_seed)\n\n\t\tself.__list_first = None\n\t\tself.__list_second = None\n\t\tself.__list_third = None\n\t\tself.__repeated_seed = repeat_seed\n\n\n\n\t# makes the changes on the python files and adds to the lists created on the constructor\n\t# adds to the lists to be also known on the latex files the new numbers\n\tdef create_python_program(self, python_files_list, version):\n\n\t\tlist_python_programs = self.get_programs_python(python_files_list)\n\n\n\t\tseed = random.choice(range(10000, 
99999))\n\t\tself.__seed = seed\n\n\t\t# if is to repeat a created exercise\n\t\tif self.__repeated_seed != None:\n\t\t\tself.__seed = self.__repeated_seed\n\t\t\tseed = self.__repeated_seed\n\t\t\n\t\trandom.seed(self.__seed)\n\n\n\t\t[a_var, b_var,x_var] = random.sample(string.ascii_uppercase, 3)\n\n\t\tten_var = random.randint(0,50)\n\t\t\n\t\tself.__list_first = a_var\n\t\tself.__list_second = b_var\n\t\tself.__list_third = a_var.lower()\n\t\n\t\tlist_strings = [\"A\",\"B\",\"x\",\"10\",\"a\",\"b\"]\n\n\t\tself.add_changable_strings(list_strings)\n\n\t\tchange_all_occurrences(\"A\", a_var)\n\t\tchange_all_occurrences(\"B\", b_var)\n\t\tchange_all_occurrences(\"x\", (x_var.lower()))\n\t\tchange_token_all_occurrences(\"10\", str(ten_var))\n\t\tchange_all_occurrences(\"a\", (a_var.lower()))\n\t\tchange_all_occurrences(\"b\", (b_var.lower()))\n\t\tclear()\n\n\t\treturn list_python_programs,seed\n\n\t\t\n\t# makes changes to latex files\n\tdef create_latex_files(self, general_latex_files, version, output):\n\n\t\tlatex_parts = self.get_text_files(general_latex_files)\n\t\t\n\n\t\tlist_strings = [r'\\verb+A+', r'\\verb+B+', r'\\verb+a+']\n\n\t\tself.add_changable_strings(list_strings)\n\n\t\tchange_all_occurrences(r'\\verb+A+', r'\\verb+' + str(self.__list_first) + '+')\n\t\tchange_all_occurrences(r'\\verb+B+', r'\\verb+' + str(self.__list_second) + '+')\n\t\tchange_all_occurrences(r'\\verb+a+', r'\\verb+' + str(self.__list_third) + '+')\n\n\t\tclear()\n\n\t\treturn latex_parts\n\n\t","sub_path":"03_Implementação/questions_data/questions/question_mcg_4_tipos/question_transformer.py","file_name":"question_transformer.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"174513278","text":"class Solution:\n def canTransform(self, start, end):\n \"\"\"\n :type start: str\n :type end: str\n :rtype: bool\n \"\"\"\n def get_nxt_lr(idx, string):\n if idx >= len(string):\n return idx, None\n for i in range(idx, len(string)):\n if string[i] in 'RL':\n return i, string[i]\n return i, None\n\n\n if len(start) != len(end):\n return False\n\n si = ei = 0\n while si < len(start) or ei < len(start):\n si, sc = get_nxt_lr(si, start)\n ei, ec = get_nxt_lr(ei, end)\n if sc != ec:\n return False\n elif sc == 'L':\n if si < ei:\n return False\n elif sc == 'R':\n if si > ei:\n return False\n si, ei = si+1, ei+1\n\n return True\n\n\ndef test(s, e):\n print(s, e)\n r = Solution().canTransform(s, e)\n print(r)\n\ntest('XXL', 'LXX')\ntest('XXR', 'RXX')\ns = 'RXXLRXRXL'\ne = 'XRLXXRRLX'\ntest(s, e)\ns = 'XXRLLXRRXXL'\ne = 'RXXXLLXXRRL'\ntest(s, e)\ns=\"XXRXXLXXXX\"\ne=\"XXXXRXXLXX\"\ntest(s, e)\n","sub_path":"777.Swap_Adjacent_in_LR_String.py","file_name":"777.Swap_Adjacent_in_LR_String.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"420066828","text":"import matplotlib.pyplot as plt\r\nimport torch\r\nimport numpy as np\r\nfrom torch import nn\r\nfrom torch import optim\r\nfrom torchvision import datasets, transforms, models\r\nfrom PIL import Image\r\nimport pandas as pd\r\nimport argparse\r\nfrom torch.autograd import Variable\r\nimport json\r\nimport sys\r\nimport os, random\r\n\r\n# Don't think I'll need it here\r\nfrom collections import OrderedDict\r\nimport seaborn as sb\r\nimport torch.nn.functional as F\r\n\r\n#####\r\n\r\ndef dataparser():\r\n parser = argparse.ArgumentParser(description = 'File 
prediction')\r\n\r\n parser.add_argument('--data_dir', type = str, default = 'flowers', help = 'Dataset Directory')\r\n parser.add_argument('--checkpoint', type = str, default = 'checkpoint.pth', help = 'Checkpoint')\r\n parser.add_argument('--gpu', type = bool, default = 'True', help = 'By default, parser will use CUDA if True, and CPU if False.')\r\n parser.add_argument('--top_k', type = int, default = 5, help = 'Top K most likely classes')\r\n parser.add_argument('--image', type = str, default = 'default', help = 'Path to default random image')\r\n parser.add_argument('--json_file', type = str, default = 'cat_to_name.json', help = 'File for mapping flower names')\r\n\r\n arguments = parser.parse_args()\r\n return arguments\r\n\r\n#####\r\n\r\nargs = dataparser()\r\n\r\nwith open(args.json_file, 'r') as f:\r\n cat_to_name = json.load(f)\r\n\r\ndefault_device = args.gpu\r\nuse_gpu = torch.cuda.is_available()\r\ndevice = torch.device(\"cpu\")\r\nif default_device and use_gpu:\r\n device = torch.device(\"cuda:0\")\r\n print(f\"Device is set to {device}\")\r\nelse:\r\n device = torch.device(\"cpu\")\r\n print(f\"Device is set to {device}\")\r\n\r\n#####\r\n\r\ndef load_checkpoint(filepath):\r\n checkpoint = torch.load(filepath)\r\n\r\n if checkpoint['arch'] == 'pretrained vgg19':\r\n model = models.vgg19(pretrained=True)\r\n for param in model.parameters():\r\n param.requires_grad = False\r\n\r\n\r\n model.class_to_idx = checkpoint['model_index']\r\n\r\n model.classifier = checkpoint['classifier']\r\n\r\n model.optimizer = checkpoint['optimizer']\r\n\r\n model.load_state_dict(checkpoint['state_dict'])\r\n\r\n return model\r\n\r\n#####\r\n\r\ndef process_image(image):\r\n pil_image = Image.open(image)\r\n\r\n if pil_image.width > 256 or pil_image.height > 256:\r\n if pil_image.height < pil_image.width:\r\n factor = 256 / pil_image.height\r\n else:\r\n factor = 256 / pil_image.width\r\n pil_image = pil_image.resize((int(pil_image.width * factor), int(pil_image.height * factor)))\r\n\r\n left = (256 - 224) / 2\r\n top = (256 - 224) / 2\r\n right = (256 + 224) / 2\r\n bottom = (256 + 224) / 2\r\n pil_image = pil_image.crop((left, top, right, bottom))\r\n\r\n np_image = np.array(pil_image) / 255\r\n\r\n mean = np.array([0.485, 0.456, 0.406])\r\n std = np.array([0.229, 0.224, 0.225])\r\n\r\n np_image = (np_image - mean) / std\r\n\r\n np_image = np_image.transpose((2, 0, 1))\r\n\r\n return np_image\r\n\r\n#####\r\n\r\ndef predict(image_path, model, device, cat_to_name, topk):\r\n\r\n model.to(device)\r\n\r\n img = process_image(image_path)\r\n\r\n if torch.cuda.is_available():\r\n image = torch.from_numpy(img).type(torch.cuda.FloatTensor)\r\n else:\r\n image = torch.from_numpy(img).type(torch.FloatTensor)\r\n\r\n\r\n image = image.unsqueeze(0)\r\n\r\n probs = torch.exp(model.forward(image))\r\n prob_arr, top_classes = probs.topk(topk)\r\n\r\n prob_arr = prob_arr.detach().cpu().numpy().tolist()[0]\r\n top_classes = top_classes.detach().cpu().numpy().tolist()[0]\r\n\r\n idx_to_class = {v: k for k, v in\r\n model.class_to_idx.items()}\r\n\r\n pred_labels = [idx_to_class[label] for label in top_classes]\r\n pred_class = [cat_to_name[idx_to_class[label]] for label in top_classes]\r\n\r\n return prob_arr, pred_labels, pred_class\r\n\r\n#####\r\n\r\nmodel = load_checkpoint(args.checkpoint)\r\n\r\nrandom_set = args.data_dir + '/' + random.choice(os.listdir(args.data_dir)) + '/'\r\nrandom_path = random_set + random.choice(os.listdir(random_set)) + '/'\r\n\r\nif args.image == 'default':\r\n loaded_image = 
random_path + random.choice(os.listdir(random_path))\r\nelse:\r\n    loaded_image = args.image\r\n\r\n# predict() processes the image internally, so it is given the image path directly\r\ntopk_probs, topk_labels, topk_classes = predict(loaded_image, model, device, cat_to_name, args.top_k)\r\n\r\nprint('Predicted top classes : ', topk_classes)\r\nprint('Flowers: ', topk_labels)\r\nprint('Probability: ', topk_probs)\r\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"123839399","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom requests_html import HTMLSession\nfrom urllib import request\nimport os\n\nclass Anime:\n\tdef __init__(self, soup_main_page):\n\t\tself.soup_main_page = soup_main_page\n\t\t#self.soup_download_page = soup_download_page\n\t\tself.name = self.__getName__()\n\t\tself.synopsis = self.__getSynopsis__()\n\t\tself.genders = self.__getGenders__()\n\t\tself.num_caps = self.__getNumCaps__()\n\t\tself.url_download_mode = self.__getURLDownloadMode__()\n\t\tself.soup_download_page = self.__getSoupDownloadPage__()\n\t\tself.chapters = self.__generateChapters__()\n\t\n\tdef __getName__(self):\n\t\tresult = self.soup_main_page.find('h1').get_text()\n\t\treturn result\n\t\n\tdef __getSynopsis__(self):\n\t\tresult = self.soup_main_page.find_all('div')[22].get_text()\n\t\treturn result\n\t\n\tdef __getGenders__(self):\n\t\tresult = []\n\t\tgenders_container = self.soup_main_page.find_all('div')[20].find_all('a')\n\t\tfor i in range(genders_container.__len__()):\n\t\t\tresult.append(genders_container[i].get_text())\n\t\treturn result\n\n\tdef __getNumCaps__(self):\n\t\tnumber_caps_container_str = self.soup_main_page.find(id=\"showEpisodes\").find('span').get_text()\n\t\tresult = getNumsOnString(number_caps_container_str)[0]\n\t\treturn result\n\n\tdef __getURLDownloadMode__(self):\n\t\turl_download_mode_container = self.soup_main_page.find_all('a')[4]\n\t\turl_download_mode = \"https://animemovil.com{}\".format(url_download_mode_container.attrs['href'])\n\t\treturn url_download_mode\n\n\tdef __getSoupDownloadPage__(self):\n\t\tresponse = requests.get(self.url_download_mode)\n\t\tsoup_download_page = BeautifulSoup(response.content, \"html.parser\")\n\t\treturn soup_download_page\n\n\tdef __generateChapters__(self):\n\t\tascontainer = self.soup_download_page.find(id=\"showEpisodes\").find_all('a')\n\t\tchapters_reverse = []\t# Capitulos en modo invertido\n\n\t\tfor i in range(ascontainer.__len__()):\n\t\t\ttitle = ascontainer[i].attrs['title']\n\t\t\tepisode = getNumsOnString(ascontainer[i].find('span').get_text())[0]\n\t\t\tcaps_agruped = getNumsOnString(title)\n\t\t\turl_download = ascontainer[i].attrs['href']\n\t\t\t\n\t\t\tchapter = Chapter(episode, title, caps_agruped, url_download)\n\t\t\tchapters_reverse.append(chapter)\n\n\t\tchapters_reverse.reverse()\t# Ordenando capitulos\n\t\treturn chapters_reverse\n\n\tdef downloadChapter(self, num):\n\t\tpass\n\nclass Chapter:\n\tdef __init__(self, episode, title, caps_agruped, url_download):\n\t\tself.episode = episode\n\t\tself.title = title\n\t\tself.caps_agruped = caps_agruped\n\t\tself.url_download = url_download\n\t\tself.num_caps_agruped = len(caps_agruped)\n\n\tdef download(self, Anime):\n\t\truta = \"Downloads/\" + Anime.name\n\t\tif self.num_caps_agruped == 1:\n\t\t\trequest.urlretrieve(\"https:{}\".format(self.url_download), \"{}/{}.mp4\".format(ruta, self.caps_agruped[0]))\n\t\telif self.num_caps_agruped == 
2:\n\t\t\trequest.urlretrieve(\"https:{}\".format(self.url_download), \"{}/{} y {}.mp4\".format(ruta, self.caps_agruped[0], self.caps_agruped[1]))\n\nclass Selector:\n\tdef __init__(self, q):\n\t\tself.q = q\n\t\tself.anime_found_container = self.generateAnimeFoundContainer()\n\t\tself.anime_titles = self.generateAnimeTitles()\n\t\tself.anime_urls = self.generateAnimeURLs()\n\t\tself.size = self.generateSize()\n\n\tdef generateAnimeFoundContainer(self):\n\t\tsession = HTMLSession()\n\t\tdata = {}\n\t\tdata['q'] = self.q\n\t\tselector_response = session.get(\"https://animemovil.com/anime\", params = data)\n\t\tselector_response.html.render()\n\t\tanime_found_container = selector_response.html.find(\".hovers\", first = True).find(\"li\")\n\n\t\treturn anime_found_container\n\n\tdef generateAnimeTitles(self):\n\t\tanime_titles = []\n\t\tfor item in self.anime_found_container:\n\t\t\tanime_title = item.find(\"span\", first=True).text\n\t\t\tanime_titles.append(anime_title)\n\n\t\treturn anime_titles\n\n\tdef generateAnimeURLs(self):\n\t\tanime_urls = []\n\t\tfor item in self.anime_found_container:\n\t\t\tanime_url = \"https://animemovil.com\" + item.find(\"a\", first=True).attrs['href']\n\t\t\tanime_urls.append(anime_url)\n\n\t\treturn anime_urls\n\n\tdef generateSize(self):\n\t\tsize = len(self.anime_found_container)\n\t\treturn size\n\t\t\ndef SelectAnime():\n\tprint()\n\tprint(\"WEB SCRAPING (www.animemovil.com)\")\n\tprint(\"---------------------------------\")\n\tanime_name = input(\"Ingrese nombre del anime: \")\n\tselector = Selector(anime_name)\n\n\tprint()\n\tprint(\"Lista de animes encontrados: \")\n\n\tprint()\n\tfor i in range(selector.size):\n\t\tprint(\"\t<{}> {}\".format(i+1, selector.anime_titles[i]))\n\t\n\tprint()\n\toption = int(input(\"Opcion: \"))\n\n\t# Lo de abajo no va\n\n\tresponse_test = requests.get(selector.anime_urls[option-1])\n\t\n\tsoup = BeautifulSoup(response_test.content, \"html.parser\")\n\tan = Anime(soup)\n\n\treturn an\n\ndef checkDir(an):\n\tos.getcwd()\n\tdir = os.listdir('.')\n\n\tif \"Downloads\" not in dir:\n\t\tos.mkdir(\"Downloads\")\n\n\tos.chdir(\"Downloads\")\n\n\tdir = os.listdir('.')\n\tif an.name not in dir:\n\t\tos.mkdir(an.name)\n\n\tos.chdir('..')\n\ndef execute_option_1(an):\n\tprint()\n\tprint(\"DATOS DEL ANIME:\")\n\tprint()\n\tprint(\"> Nombre: {}\".format(an.name))\n\tprint()\n\tprint(\"> Generos: \", end=\"\")\n\tprint(\", \".join(an.genders))\n\tprint()\n\tprint(\"> Número de capitulos: {}\".format(an.num_caps))\n\tprint()\n\tprint(\"> Sinopsis: {}\".format(an.synopsis))\n\tprint()\n\tinput(\"Presione ENTER para regresar al menu principal...\")\n\ndef execute_option_2(an):\n\tprint()\n\tprint(\"LISTA DE CAPITULOS\")\n\tprint()\n\n\tfor chapter in an.chapters:\n\t\tprint(chapter.title)\n\n\tprint()\n\tinput(\"Presione ENTER para regresar al menu principal...\")\n\ndef execute_option_3(an):\n\tprint()\n\tprint(\"SE EMPEZARAN A DESCARGAR TODOS LOS EPISODIOS\")\n\n\tcheckDir(an)\n\tfor chapter in an.chapters:\n\t\tchapter.download(an)\n\t\tif chapter.num_caps_agruped == 1:\n\t\t\tprint(\"Capitulo {} descargado.\".format(chapter.caps_agruped[0]))\n\t\telif chapter.num_caps_agruped == 2:\n\t\t\tprint(\"Capitulos {} y {} descargados.\".format(chapter.caps_agruped[0], chapter.caps_agruped[1]))\n\n\tprint()\n\tprint(\"Se descargaron todos los episodios.\")\n\tprint()\n\tinput(\"Presione ENTER para regresar al menu principal...\")\n\ndef execute_option_4(an):\n\tprint()\n\tprint(\"DESCARGA DE INTERVALO DE 
EPISODIOS\")\n\tprint()\n\tprint(\"Ingrese los capitulos extremos: (Se incluyen los extremos)\")\n\tnum_chapter_0 = int(input(\"Ingrese el numero del anime inicial: \"))\n\tnum_chapter_f = int(input(\"Ingrese el numero del anime final: \"))\n\tprint()\n\n\tcheckDir(an)\n\n\tfor chapter in an.chapters:\n\t\tif chapter.num_caps_agruped == 1:\n\t\t\tif num_chapter_0 <= chapter.caps_agruped[0] and chapter.caps_agruped[0] <= num_chapter_f:\n\t\t\t\tchapter.download(an)\n\t\t\t\tprint(\"Capitulo {} descargado.\".format(chapter.caps_agruped[0]))\n\t\tif chapter.num_caps_agruped == 2:\n\t\t\tif (num_chapter_0 <= chapter.caps_agruped[0] and chapter.caps_agruped[0] <= num_chapter_f) or (num_chapter_0 <= chapter.caps_agruped[1] and chapter.caps_agruped[1] <= num_chapter_f):\n\t\t\t\tchapter.download(an)\n\t\t\t\tprint(\"Capitulos {} y {} descargados.\".format(chapter.caps_agruped[0], chapter.caps_agruped[1]))\n\n\tprint()\n\tprint(\"Se descargo el rango de episodios indicado.\")\n\tprint()\n\tprint(\"Presione ENTER para regresar al menu principal...\")\n\ndef execute_option_5(an):\n\tprint()\n\tprint(\"DESCARGA DE EPISODIOS\")\n\tprint()\n\tepisodes_str_input = input(\"Ingrese espisodios separados por comas: \")\n\tepisodes_str = episodes_str_input.replace(\" \", \"\")\n\tepisodes_list_str = episodes_str.split(\",\")\n\n\tepisodes_list = []\n\tepisodes_downloaded = []\n\tfor num_str in episodes_list_str:\n\t\tepisodes_list.append(int(num_str))\n\n\tcheckDir(an)\n\n\tfor chapter in an.chapters:\n\t\tfor episode in episodes_list:\n\t\t\tif chapter.num_caps_agruped == 1 and episode not in episodes_downloaded:\n\t\t\t\tif chapter.caps_agruped[0] == episode:\n\t\t\t\t\tchapter.download(an)\n\t\t\t\t\tepisodes_downloaded.append(chapter.caps_agruped[0])\n\t\t\t\t\tprint(\"Capitulo {} descargado.\".format(chapter.caps_agruped[0]))\n\n\t\t\telif chapter.num_caps_agruped == 2 and episode not in episodes_downloaded:\n\t\t\t\tif chapter.caps_agruped[0] == episode or chapter.caps_agruped[1] == episode:\n\t\t\t\t\tchapter.download(an)\n\t\t\t\t\tepisodes_downloaded.append(chapter.caps_agruped[0])\n\t\t\t\t\tepisodes_downloaded.append(chapter.caps_agruped[1])\n\t\t\t\t\tprint(\"Capitulos {} y {} descargados.\".format(chapter.caps_agruped[0], chapter.caps_agruped[1]))\n\n\tprint()\n\tprint(\"Se descargaron los episodios ingresados.\")\n\tprint()\n\tinput(\"Presione ENTER para regresar al menu principal...\")\n\ndef getNumsOnString(text): # Mejorar el codigo\n\tnumeros = []\n\ti = 0\n\tfinal = False\n\twhile i < len(text) and not final:\n\t\twhile not text[i].isdigit():\n\t\t\ti = i + 1\n\t\t\tif i == len(text):\n\t\t\t\tfinal = True\n\t\t\t\tbreak\n\t\tnum = []\n\t\tif i == len(text):\n\t\t\tfinal = True\n\t\t\tbreak\n\t\twhile text[i].isdigit():\n\t\t\tnum.append(text[i])\n\t\t\ti = i + 1\n\t\t\tif i == len(text):\n\t\t\t\tfinal = True\n\t\t\t\tbreak\n\t\tnumeros.append(int(\"\".join(num)))\n\n\treturn numeros\n\ndef test(an):\n\tprint(type(an.chapters))\n\ndef test2():\n\tSelectAnime()\n\ndef main_menu(an):\n\tprint()\n\tprint(\"WEB SCRAPING (www.animemovil.com)\")\n\tprint(\"---------------------------------\")\n\tprint(\"\t<1> Informacion del anime\")\n\tprint(\"\t<2> Mostrar capitulos\")\n\tprint(\"\t<3> Descargar todos los capitulos\")\n\tprint(\"\t<4> Descargar capitulos entre 2 valores\")\n\tprint(\"\t<5> Seleccionar capitulos para descargar\")\n\tprint(\"\t<6> Buscar otro anime\")\n\tprint(\"\t<7> Salir\")\n\toption = input(\"Opcion: \")\n\n\tif option == '1':\n\t\texecute_option_1(an)\n\telif option == 
'2':\n\t\texecute_option_2(an)\n\telif option == '3':\n\t\texecute_option_3(an)\n\telif option == '4':\n\t\texecute_option_4(an)\n\telif option == '5':\n\t\texecute_option_5(an)\n\telif option == '6':\n\t\treturn 1\n\telif option == '7': # Opcion secreta para testear\n\t\texit()\n\telse:\n\t\toption = print(\"No existe esa opcion, se cerrara el programa...\")\n\t\texit()\n\ndef run():\n\tan = SelectAnime()\n\n\tbucle_status = 2\n\n\twhile True:\n\t\tif bucle_status == 1:\n\t\t\tan = SelectAnime()\n\t\t\tbucle_status = 2\n\n\t\twhile bucle_status == 2:\n\t\t\tbucle_status = main_menu(an)\n\t\t\tif bucle_status == None:\n\t\t\t\tbucle_status = 2\n\nif __name__ == '__main__':\n\trun()\n\t#test2()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"620264303","text":"\"\"\"\n### BEGIN NODE INFO\n[info]\nname = AWG Server\nversion = 1.0\ndescription =\ninstancename = AWGServer\n\n[startup]\ncmdline = %PYTHON% %FILE%\ntimeout = 20\n\n[shutdown]\nmessage = 987654321\ntimeout = 5\n### END NODE INFO\n\"\"\"\n\nimport pyvisa\nfrom labrad.server import LabradServer\nfrom labrad.server import setting\nfrom twisted.internet.defer import inlineCallbacks\n\n\nclass AWG_Server(LabradServer):\n \"\"\"\n Basic Server\n \"\"\"\n\n name = 'AWG Server'\n\n def initServer(self):\n # self.password = os.environ['LABRADPASSWORD']\n print(\"I'm here\")\n self.name = 'AWG Server'\n self.connect_to_AWG()\n\n @inlineCallbacks\n def connect_to_AWG(self):\n \"\"\"\n \"\"\"\n rm = pyvisa.ResourceManager()\n self.inst = yield rm.open_resource('TCPIP::10.97.112.68::INSTR')\n try:\n self.inst.query(\"*IDN?\")\n except:\n pass\n #test\n print(self.inst.query(\"*IDN?\"))\n\n @setting(1, returns='s')\n def test(self):\n print(self.inst.query(\"*IDN?\"))\n\n\nif __name__ == \"__main__\":\n from labrad import util\n util.runServer(AWG_Server())\n","sub_path":"servers/AWG_test/AWG_test.py","file_name":"AWG_test.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"250453605","text":"from usaspending_api.accounts.models import TreasuryAppropriationAccount\nfrom usaspending_api.accounts.serializers import TreasuryAppropriationAccountSerializer\nfrom usaspending_api.accounts.models import AppropriationAccountBalances\nfrom usaspending_api.accounts.serializers import AppropriationAccountBalancesSerializer\nfrom usaspending_api.awards.serializers import FinancialAccountsByAwardsSerializer\nfrom usaspending_api.awards.models import FinancialAccountsByAwards\nfrom usaspending_api.common.mixins import FilterQuerysetMixin, ResponseMetadatasetMixin\nfrom usaspending_api.common.views import DetailViewSet\nfrom usaspending_api.common.mixins import SuperLoggingMixin\n\n\nclass TreasuryAppropriationAccountViewSet(SuperLoggingMixin,\n FilterQuerysetMixin,\n ResponseMetadatasetMixin,\n DetailViewSet):\n \"\"\"Handle requests for appropriation account (i.e., TAS) information.\"\"\"\n serializer_class = TreasuryAppropriationAccountSerializer\n\n def get_queryset(self):\n \"\"\"Return the view's queryset.\"\"\"\n queryset = TreasuryAppropriationAccount.objects.all()\n queryset = self.serializer_class.setup_eager_loading(queryset)\n filtered_queryset = self.filter_records(self.request, queryset=queryset)\n ordered_queryset = self.order_records(self.request, queryset=filtered_queryset)\n return 
ordered_queryset\n\n\nclass TreasuryAppropriationAccountBalancesViewSet(SuperLoggingMixin,\n FilterQuerysetMixin,\n ResponseMetadatasetMixin,\n DetailViewSet):\n \"\"\"Handle requests for appropriation account balance information.\"\"\"\n serializer_class = AppropriationAccountBalancesSerializer\n\n def get_queryset(self):\n queryset = AppropriationAccountBalances.objects.all()\n queryset = self.serializer_class.setup_eager_loading(queryset)\n filtered_queryset = self.filter_records(self.request, queryset=queryset)\n ordered_queryset = self.order_records(self.request, queryset=filtered_queryset)\n return ordered_queryset\n\n\nclass FinancialAccountsByAwardListViewSet(\n SuperLoggingMixin,\n FilterQuerysetMixin,\n ResponseMetadatasetMixin,\n DetailViewSet):\n \"\"\"\n Handles requests for financial account data grouped by award.\n \"\"\"\n\n serializer_class = FinancialAccountsByAwardsSerializer\n\n def get_queryset(self):\n \"\"\"Return the view's queryset.\"\"\"\n queryset = FinancialAccountsByAwards.objects.all()\n queryset = self.serializer_class.setup_eager_loading(queryset)\n filtered_queryset = self.filter_records(self.request, queryset=queryset)\n ordered_queryset = self.order_records(self.request, queryset=filtered_queryset)\n return ordered_queryset\n","sub_path":"usaspending_api/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"54295483","text":"def average(input_list):\n return sum(input_list) / len(input_list)\n\nnewlist = []\nmore_numbers = \"y\"\n\nwhile more_numbers == \"y\":\n newnumber = input(\"give me a number! \")\n newlist.append(int(newnumber))\n more_numbers = input(\"more numbers y/n? \")\n\nprint(f\" You entered {newlist}\")\nprint(f\" The average is {average(newlist)}\")","sub_path":"LearnPython/practice_functions.py","file_name":"practice_functions.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"95802580","text":"import os\nimport netCDF4\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom flask import Flask, flash, request, redirect, url_for, render_template\nfrom werkzeug.utils import secure_filename\n# %matplotlib inline\nUPLOAD_FOLDER = '/tmp/scaas'\nALLOWED_EXTENSIONS = set(['txt', 'nc'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nif not os.path.isdir('/tmp/scaas'):\n os.mkdir('/tmp/scaas')\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'GET':\n f = os.listdir('./static')\n for i in f:\n os.remove('./static/' + i)\n print(i + ' removed')\n f = os.listdir('/tmp/scaas')\n for i in f:\n os.remove('/tmp/scaas/' + i)\n print(i + ' removed')\n\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if not allowed_file(file.filename):\n return '''\n

<h1>Selected file is not valid... !!!</h1>\n            <h3>Allowed Extensions are .txt or .nc</h3>
\n            \n            '''\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            if os.path.isfile(\"/tmp/scaas/\" + filename):\n                msg = \"Your file \" + filename + \" Successfully uploaded\"\n            else:\n                msg = \"Something went wrong...!!!\"\n            return redirect(url_for('show', fname = filename, msg = msg))\n\n    return '''\n    <!doctype html>\n    <html>\n    <head>\n    <title>Welcome to Scientific Computing as a Service Beta Version</title>\n    </head>\n    <body>\n    <h1>Welcome to Scientific Computing as a Service Beta Version</h1>\n    <h3>Please upload new netCDF file below</h3>\n    <form method=post enctype=multipart/form-data>\n    <input type=file name=file>\n    <input type=submit value=Upload>\n    </form>\n    <p>\n    One can find all the netCDF files on below links:<br>\n    http://borealscicomp.com/metfiles/Irma-09-2017/wrfout/<br>\n    http://borealscicomp.com/metfiles/Alaska-08-2018/wrfout/\n    </p>\n    <h3>Direction Of Using ScaaS</h3>\n    <p>Step 1: Please upload only a .txt or .nc file, or else it will not proceed further.</p>\n    <p>Step 2: please click on \"load variable\" button</p>\n    <p>Optional step: Press \"show variable\" button in the page to see variable list</p>\n    <p>Step 3: Write a name of the variable in the text box and click on Draw Graph</p>\n    <p>If your variable is 3D then it will ask for the level. Please provide an integer value there.</p>\n    </body>\n    </html>
\n    \n    '''\n# /index?msg=Your+file+minion.jpegSuccessfully+uploaded&fname=minion.jpeg\n@app.route('/index/<fname>,<msg>', methods=['POST', 'GET'])\ndef show(fname, msg):\n    if request.method == 'POST':\n        if os.path.isfile(\"/tmp/scaas/\" + fname):\n            print(\"we are in post method\")\n            return redirect(url_for('image', fname=fname))\n\n    return render_template(\"var_load.html\", msg=msg, fname=fname)\n\n@app.route('/image/<fname>', methods=['POST', 'GET'])\ndef image(fname):\n    try:\n        dset = netCDF4.Dataset(\"/tmp/scaas/\" + fname)\n    except OSError:\n        return '''

<h1>Invalid txt file uploaded</h1>
\n \n '''\n l = []\n unit = []\n des = []\n c = []\n for i in dset.variables.keys():\n l.append(i)\n\n for i in l:\n try:\n sv = dset.variables[i]\n # print(str(sv.units))\n unit.append(str(sv.units))\n except AttributeError:\n unit.append(str(\"No Unit\"))\n\n for i in l:\n try:\n sv = dset.variables[i]\n # print(str(sv.description))\n des.append(str(sv.description))\n except AttributeError:\n des.append(\"No description\")\n\n for i in l:\n try:\n q = dset.variables[i]\n # print(str(q.dimensions))\n c.append(str(q.dimensions))\n except:\n c.append(\"No Dimensions\")\n\n\n if request.method == 'POST':\n\n var_name = request.form['lname']\n print(var_name)\n if not var_name:\n return '''

<h1>Please give the variable name in the text box</h1>
\n \n '''\n\n dset = netCDF4.Dataset(\"/tmp/scaas/\" + fname)\n try:\n q = dset.variables[var_name]\n print(type(q.dimensions))\n print(str(q.dimensions))\n print(len(q.dimensions))\n\n dy = len(q.dimensions)\n except KeyError:\n return '''

<h1>Please give a correct variable name</h1>
\n    '''\n        if dy == 3:\n            # dset = netCDF4.Dataset(\"./static/\" + fname)\n            t2 = dset.variables[var_name]\n            T2 = t2[:]\n            uni = t2.units\n            plt.contourf(T2[0,:,:])\n            plt.colorbar()\n            plt.title(\"Graph of variable \" + str(var_name))\n            plt.savefig('./static/' + var_name + '.png')\n            dset.close()\n            return render_template(\"image.html\", user_image='/static/' + var_name + '.png', var=var_name, unit=uni)\n        elif dy == 4:\n            # print(var_name + \"in dy == 4\")\n            # t2 = dset.variables[var_name]\n            # y = t2.dimensions[1]\n            #\n            # if request.method == 'POST':\n            #     lvl = request.form['yname']\n            #     T2 = t2[:]\n            #     uni = t2.units\n            #     plt.contourf(T2[0, lvl,:, :])\n            #     plt.colorbar()\n            #     plt.title(\"Graph of variable \" + str(var_name))\n            #     plt.savefig('./static/' + var_name + '.png')\n            #     return render_template(\"image.html\", user_image='/static/' + var_name + '.png', var=var_name, unit=uni)\n            # dset.close()\n            # return render_template(\"3Dimage.html\", var= var_name , unit=uni, y=y)\n            return redirect(url_for('tdimage', var_name=var_name, fname=fname))\n    dset.close()\n    return render_template(\"var_show.html\", dset=l, unit=unit, des=des, dim=c, num=len(i), fname=fname)\n\n\n@app.route('/tdimage/<var_name>,<fname>', methods=['POST', 'GET'])\ndef tdimage(var_name, fname):\n    z = netCDF4.Dataset(\"/tmp/scaas/\" + fname)\n    t2 = z.variables[var_name]\n    uni = t2.units\n    y = t2.dimensions[1]\n\n    if request.method == 'POST':\n        lvl = int(request.form['yname'])\n        T2 = t2[:]\n        uni = t2.units\n        plt.contourf(T2[0, lvl,:, :])\n        plt.colorbar()\n        plt.title(\"Graph of variable \" + str(var_name) + \" at \" + '\\n' + y + \" = \" + str(lvl) )\n        plt.savefig('./static/' + var_name + '.png')\n        return render_template(\"tdimage.html\", user_image='/static/' + var_name + '.png', var=var_name, unit=uni)\n\n    z.close()\n    return render_template(\"3Dimage.html\", var= var_name , unit=uni, y=y)","sub_path":"myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":8259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"66643962","text":"# takes num, an integer\n# returns True if num is prime\n# returns False if num is composite\ndef isPrime(num):\n\tfor n in range(2,num):\n\t\tif num % n == 0:\n\t\t\treturn False\n\treturn True\n\n\n# Creates an empty list so that it can print the numbers all at once that are prime\nprimeList = []\n# this is a for loop that will go through the range of numbers and will see if they are prime or not, if they are this will add them to the list of primes.\nfor n in range(2,10000):\n\tx = isPrime(n)\n\tif x == True:\n\t\tprimeList.append(n)\n# This opens a file\nmyFile = open(\"primeList\", \"w\")\n# this writes to the file\nmyFile.write(str(primeList) + \"\\n\")\n# This closes the file\nmyFile.close()\n# prints the numbers\nprint(primeList) \n","sub_path":"5-4WrittingHTML/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"187157284","text":"def get_input(msg, options=None):\n\tif options:\n\t\twhile True:\n\t\t\tuser_input = input(msg + ' Options: ' + ','.join(options) + '\\n>> ')\n\t\t\toptions = list(map(lambda opt: opt.lower(), options))\n\t\t\tif user_input.lower() in options:\n\t\t\t\treturn user_input.lower()\n\t\t\telse:\n\t\t\t\tprint('Invalid option!')\n\telse:\n\t\treturn input(msg + '\\n>> ')\n\n\ndef print_board(board):\n\tprint('\\n'*100)\n\tprint('\\t\\t Tic Tac Toe in Python\\n\\n')\n\tprint('\\t\\t\\t ' + board[1] + ' | ' + board[2] 
+ ' | ' + board[3])\n\tprint('\\t\\t\\t-----------')\n\tprint('\\t\\t\\t ' + board[4] + ' | ' + board[5] + ' | ' + board[6])\n\tprint('\\t\\t\\t-----------')\n\tprint('\\t\\t\\t ' + board[7] + ' | ' + board[8] + ' | ' + board[9])\n\tprint('\\n'*10)\n\ndef validate(user_input, board):\n\tif user_input not in '123456789' or len(user_input) <= 0:\n\t\tprint(f'Inputted value \"{user_input}\" is invalid!')\n\t\treturn False\n\n\tif int(user_input) < 0 or int(user_input) > 9:\n\t\tprint(f'Inputted value \"{user_input}\" is out of range!')\n\t\treturn False\n\n\tif board[int(user_input)] != ' ':\n\t\tprint(f'Place number {user_input} is not available!')\n\t\treturn False\n\n\treturn True\n\ndef is_final_state(board):\n\tresult = (False, board[0])\n\tif board[1] == board[2] == board[3] != ' ':\n\t\treturn (True, board[0])\n\tif board[4] == board[5] == board[6] != ' ':\n\t\treturn (True, board[0])\n\tif board[7] == board[8] == board[9] != ' ':\n\t\treturn (True, board[0])\n\tif board[1] == board[4] == board[7] != ' ':\n\t\treturn (True, board[0])\n\tif board[2] == board[5] == board[8] != ' ':\n\t\treturn (True, board[0])\n\tif board[3] == board[6] == board[9] != ' ':\n\t\treturn (True, board[0])\n\tif board[1] == board[5] == board[9] != ' ':\n\t\treturn (True, board[0])\n\tif board[3] == board[5] == board[7] != ' ':\n\t\treturn (True, board[0])\n\treturn (False, board[0])\n\n\np1_name = 'Player One'\np2_name = 'Player Two'\np1_marker = 'X'\np2_marker = 'O'\np_turn = 'p1'\np_turn_name = ''\nboard = ['#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\nuser_input = ''\n\nwhile True:\n\tp1_name = get_input('Player one, what is your name?')\n\tp1_marker = get_input(f'{p1_name}: Would you like to play with X or O ?', ['X', 'O']).upper()\n\tp2_name = get_input('Player two, what is your name?')\n\tif(p1_marker == 'X'):\n\t\tp2_marker = 'O'\n\telse:\n\t\tp2_marker = 'X'\n\n\tprint(f'>> {p1_name}, you will play with: {p1_marker}!')\n\tprint(f'>> {p2_name}, you will play with: {p2_marker}!')\n\tprint('>> Press a number from 1 to 9 to place your marker on the board!\\n\\n')\n\tprint_board(board)\n\n\twhile True:\n\t\tif p_turn == 'p1':\n\t\t\tp_turn_name = p1_name\n\t\t\tboard[0] = p1_marker\n\t\telse: \n\t\t\tp_turn_name = p2_name\n\t\t\tboard[0] = p2_marker\n\n\t\tuser_input = get_input(f\"{p_turn_name}, it's your turn: \\n>> \")\n\t\tif validate(user_input, board):\n\t\t\tboard[int(user_input)] = board[0]\n\t\t\tprint_board(board)\n\t\t\tended, winner = is_final_state(board)\n\n\t\t\tif ended:\n\t\t\t\tif winner == p1_marker:\n\t\t\t\t\twinner = p1_name\n\t\t\t\telse:\n\t\t\t\t\twinner = p2_name\n\n\t\t\t\tprint(f'The end! {winner} won the game!')\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif p_turn == 'p1':\n\t\t\t\t\tp_turn = 'p2'\n\t\t\t\telse:\n\t\t\t\t\tp_turn = 'p1'\n\n\tplay_again = get_input('Would you like to play again? Y or N\\n>> ', ['Y', 'N']).upper()\n\tif play_again == 'N':\n\t\tbreak\n\tboard = ['#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']  # reset the board for the next game\n","sub_path":"ttc.py","file_name":"ttc.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"241561667","text":"#! 
/usr/bin/python\n\nimport re\nimport os\nimport matplotlib as mpl\n\nimport math\nimport numpy\n\nmpl.use('agg')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef config_matplotlib():\n plt.rc('text', usetex = True)\n plt.rc('font', family = 'serif')\n\n font = {'family' : 'serif',\n 'size' : 28}\n\n mpl.rc('font', **font)\n\ndef autolabel(rects, ax):\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2., height + .03,\n '%.2f' % height,\n ha = 'center', va = 'bottom', rotation = '90')\n\ndef plot_sct(data_x,\n data_y,\n# data_error_y,\n plot_name,\n title,\n xlabel,\n ylabel):\n fig = plt.figure(1, figsize=(10, 8))\n ax = fig.add_subplot(111)\n\n ax.scatter(data_x, data_y)\n# ax.errorbar(data_x, data_y, yerr = data_error_y, linestyle=\"None\")\n\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.tight_layout()\n\n fig.savefig(\"{0}.eps\".format(plot_name), format = 'eps', dpi = 1000)\n\n plt.clf()\n\ndef plot_sct_cmp(data_x,\n data_y,\n labels,\n plot_name,\n title,\n xlabel,\n ylabel):\n\n colors = len(data_x)\n color_map = plt.get_cmap('Spectral')\n\n fig = plt.figure(1, figsize=(10, 8))\n ax = fig.add_subplot(111)\n\n ax.set_color_cycle([color_map(1. * i / colors) for i in range(colors)])\n\n for i in range(len(data_x)):\n ax.plot(data_x[i], data_y[i], '-o', label = labels[i])\n\n legend = plt.legend(loc = 9, bbox_to_anchor = [0.5, -0.1], ncol = 4, shadow = True,\n title = \"\", fancybox = True)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.tight_layout()\n\n fig.savefig(\"{0}.eps\".format(plot_name), format = 'eps', dpi = 1000,\n bbox_extra_artists=(legend,), bbox_inches = 'tight')\n\n plt.clf()\n\ndef plot_bar(data1,\n data1_error,\n data2,\n data2_error,\n xlabel,\n ylabel,\n index_range,\n width,\n tick_labels,\n file_title,\n title,\n ymax,\n line):\n\n indexes = np.arange(index_range)\n fig = plt.figure(1, figsize=(18, 10))\n ax = fig.add_subplot(111)\n\n rects1 = ax.bar(indexes, data1, width, color = 'lightgray', yerr = data1_error)\n rects2 = ax.bar(indexes + width, data2, width, color = 'gray', yerr = data2_error)\n\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_xticks(indexes + (.5 * width))\n ax.set_ylabel(ylabel)\n ax.set_xticklabels(tick_labels, rotation = '0')\n\n ax.set_ylim([0, ymax + (.1 * ymax)])\n\n if line:\n ax.axhline(y=1., color='r')\n\n legend = ax.legend((rects1[0], rects2[0]), ('Default Start', 'Random Start'),\n loc = 9, bbox_to_anchor = [0.5, -0.1], ncol = 4, shadow = True,\n title = \"\", fancybox = True)\n\n #autolabel(rects1, ax)\n #autolabel(rects2, ax)\n\n plt.tight_layout()\n\n fig.savefig(\"{0}.eps\".format(file_title), format = 'eps', dpi = 2000,\n bbox_extra_artists=(legend,), bbox_inches = 'tight')\n\n plt.clf()\n\ndef plot_heatmap(data,\n xlabels,\n ylabels,\n xlabel,\n ylabel,\n file_title,\n title):\n fig = plt.figure(1, figsize=(18, 10))\n ax = fig.add_subplot(111)\n\n aux = data[-1]\n\n data[-1] = data[-2]\n\n data[-2] = aux\n\n heatmap = plt.pcolor(data, cmap = plt.cm.seismic, vmin = 0.0, vmax = 2.0)\n plt.colorbar()\n\n ylabels = [\"\\\\textbf{NSM}\", \"Blocks\", \"Cycles\", \"Regs\", \"Pins\", \"DSP\", \"FMax\", \"LUTs\", \"BRAM\" ]\n ylabels.reverse()\n\n ax.set_yticks(np.arange(len(ylabels)) + 0.5, minor = False)\n ax.set_xticks(np.arange(len(xlabels)) + 0.5, minor = False)\n\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_xticklabels(xlabels, minor=False)\n ax.set_ylabel(ylabel)\n 
ax.set_yticklabels(ylabels, minor=False)\n\n #plt.xticks(rotation = 45)\n #plt.yticks(rotation = 45)\n\n plt.tight_layout()\n\n fig.savefig(\"{0}.eps\".format(file_title), format = 'eps', dpi = 2000, bbox_inches = 'tight')\n\n plt.clf()\n\nif __name__ == '__main__':\n config_matplotlib()\n\n random_experiments = [ 'random_cyloneV' ]\n default_experiments = [ 'default_cycloneV', 'default_stratixV' ]\n\n runs = 5\n\n applications = [\"dfadd_5400_\",\n \"dfdiv_5400_\",\n \"dfmul_5400_\",\n \"dfsin_5400_\",\n \"gsm_5400_\",\n #\"jpeg_5400_\",\n \"mips_5400_\",\n \"motion_5400_\",\n \"sha_5400_\",\n \"adpcm_5400_\",\n \"aes_5400_\",\n \"blowfish_5400_\"\n ]\n\n metrics = [\n { 'name': 'Normalized Sum of Metrics',\n 'source_file': 'best_log.txt',\n 'dest_file': 'nsam'},\n { 'name': 'Logic Utilization',\n 'source_file': 'best_lu_log.txt',\n 'dest_file': 'lu'},\n { 'name': 'Virtual Pins',\n 'source_file': 'best_pins_log.txt',\n 'dest_file': 'pins'},\n { 'name': 'RAM Blocks',\n 'source_file': 'best_ram_log.txt',\n 'dest_file': 'ram'},\n { 'name': 'Registers',\n 'source_file': 'best_regs_log.txt',\n 'dest_file': 'regs'},\n { 'name': 'Block Memory Bits',\n 'source_file': 'best_block_log.txt',\n 'dest_file': 'block'},\n { 'name': 'Cycles',\n 'source_file': 'best_cycles_log.txt',\n 'dest_file': 'cycles'},\n { 'name': 'DSP Blocks',\n 'source_file': 'best_dps_log.txt',\n 'dest_file': 'dsp'},\n { 'name': 'FMax',\n 'source_file': 'best_fmax_log.txt',\n 'dest_file': 'fmax'},\n ]\n\n boards = [[\"default_stratixV_area\", \"random_cycloneV\"], [\"default_stratixV_perf\", \"random_stratixV_perflat\"]]\n\n for current_board, random_board in boards:\n # For each metric, plot how it performed in each application\n # using relative improvements\n default_heatmap = {}\n random_heatmap = {}\n for metric in metrics:\n default_heatmap[metric['name']] = []\n random_heatmap[metric['name']] = []\n\n best_filename = metric['source_file']\n # For all metrics, plot the summary of relative improvements for all\n # applications in a single figure.\n # For the Normalized Sum, plot the relative improvements\n dest_filename = \"rel_comp_\" + metric['dest_file'] + \"_5400_chstone_\" + current_board.split(\"_\")[1]\n name = \"Relative Improvement for \" + metric['name'] + \", after 1.5h of Tuning ({0})\".format(current_board.split(\"_\")[1])\n default_speedups = []\n random_speedups = []\n\n default_error = []\n random_error = []\n\n for i in range(len(applications)):\n application = applications[i]\n\n default_all_best = []\n random_all_best = []\n\n for j in range(1, runs + 1):\n if os.path.isfile(\"{0}/{1}{2}/{3}\".format(current_board,\n application,\n j,\n best_filename)):\n\n default_data_file = open(\"{0}/{1}{2}/{3}\".format(current_board,\n application,\n j,\n best_filename),\n \"r\")\n default_data = default_data_file.readlines()\n default_data_file.close()\n default_best = float(default_data[-1].split()[1])\n\n # Relative improvement already computed\n if metric['name'] == 'Normalized Sum of Metrics':\n if default_best != float('inf'):\n default_all_best.append(default_best)\n else:\n # Compute relative improvements\n index = 0\n default_start = float(default_data[index].split()[1])\n index += 1\n\n while default_start == float('inf') and index < len(default_data):\n default_start = float(default_data[index].split()[1])\n index += 1\n\n if default_best != float('inf') and default_start != float(0) and default_start != float('inf'):\n if metric['name'] != 'FMax':\n default_all_best.append(default_best / default_start)\n 
else:\n                                default_all_best.append(default_start / default_best)\n\n\n                    if os.path.isfile(\"{0}/{1}{2}/{3}\".format(random_board,\n                                              application,\n                                              j,\n                                              best_filename)):\n\n                        random_data_file = open(\"{0}/{1}{2}/{3}\".format(random_board,\n                                                          application,\n                                                          j,\n                                                          best_filename),\n                                        \"r\")\n                        random_data = random_data_file.readlines()\n\n                        random_data_file.close()\n                        random_best = float(random_data[-1].split()[1])\n\n                        # Relative improvement already computed\n                        if metric['name'] == 'Normalized Sum of Metrics':\n                            if random_best != float('inf'):\n                                random_all_best.append(random_best)\n                        else:\n                            # Compute relative improvements\n                            index = 0\n                            random_start = float(random_data[0].split()[1])\n                            index += 1\n\n                            while random_start == float('inf') and index < len(random_data):\n                                random_start = float(random_data[index].split()[1])\n                                index += 1\n\n                            if random_best != float('inf') and random_start != float(0) and random_start != float('inf'):\n                                if metric['name'] != 'FMax':\n                                    random_all_best.append(random_best / random_start)\n                                else:\n                                    random_all_best.append(random_start / random_best)\n\n                if len(default_all_best) > 0:\n                    default_heatmap[metric['name']].append((application.split(\"_\")[0], numpy.mean(default_all_best)))\n                    default_speedups.append((application.split(\"_\")[0], numpy.mean(default_all_best)))\n                    default_error.append((application.split(\"_\")[0], numpy.std(default_all_best)))\n                else:\n                    default_heatmap[metric['name']].append((application.split(\"_\")[0], 1))\n                    default_speedups.append((application.split(\"_\")[0], 1))\n                    default_error.append((application.split(\"_\")[0], 1))\n\n                if len(random_all_best) > 0:\n                    random_heatmap[metric['name']].append((application.split(\"_\")[0], numpy.mean(random_all_best)))\n                    random_speedups.append((application.split(\"_\")[0], numpy.mean(random_all_best)))\n                    random_error.append((application.split(\"_\")[0], numpy.std(random_all_best)))\n                else:\n                    random_heatmap[metric['name']].append((application.split(\"_\")[0], 1))\n                    random_speedups.append((application.split(\"_\")[0], 1))\n                    random_error.append((application.split(\"_\")[0], 1))\n\n            default_ymax = max([s[1] for s in default_speedups])\n            random_ymax = max([s[1] for s in random_speedups])\n\n            print(default_ymax, random_ymax)\n\n            ymax = max(default_ymax, random_ymax)\n\n            # Plot summary of relative improvements\n            plot_bar([s[1] for s in default_speedups],\n                 [s[1] for s in default_error],\n                 [s[1] for s in random_speedups],\n                 [s[1] for s in random_error],\n                 \"CHStone Applications\",\n                 \"Improvement vs. Starting Point\",
Starting Point\",\n len(default_speedups),\n .225,\n [s[0] for s in default_speedups],\n dest_filename,\n name,\n ymax,\n True)\n\n default_heatmap_data = []\n default_heatmap_apps = []\n default_heatmap_metr = []\n\n for name in default_heatmap.keys():\n metric_values = []\n default_heatmap_apps.append(name)\n\n for value in default_heatmap[name]:\n metric_values.append(value[1])\n if value[0] not in default_heatmap_metr:\n default_heatmap_metr.append(value[0])\n\n default_heatmap_data.append(metric_values)\n\n plot_heatmap(default_heatmap_data,\n default_heatmap_metr,\n default_heatmap_apps,\n \"CHStone Applications\",\n \"Quartus Metrics\",\n \"heatmap_\" + current_board,\n \"\")\n #\"Relative value to start ({0})\".format(current_board.split(\"_\")[1].title))\n\n random_heatmap_data = []\n random_heatmap_apps = []\n random_heatmap_metr = []\n\n for name in random_heatmap.keys():\n metric_values = []\n random_heatmap_apps.append(name)\n\n for value in random_heatmap[name]:\n metric_values.append(value[1])\n if value[0] not in random_heatmap_metr:\n random_heatmap_metr.append(value[0])\n\n random_heatmap_data.append(metric_values)\n\n plot_heatmap(random_heatmap_data,\n random_heatmap_metr,\n random_heatmap_apps,\n \"Applications\",\n \"Metrics\",\n \"heatmap_\" + current_board,\n \"\")\n\n # For each metric, plot how it performed in each application\n # using the absolute values\n for metric in metrics:\n # Plot absolute values\n if metric['name'] != 'Normalized Sum of Metrics':\n best_filename = metric['source_file']\n # For all metrics, plot the summary of absolute final values for all\n # applications in a single figure.\n dest_filename = \"abs_comp_\" + metric['dest_file'] + \"_5400_chstone_\" + current_board.split(\"_\")[1]\n name = \"Final Values for \" + metric['name'] + \", after 1.5h of Tuning ({0})\".format(current_board.split(\"_\")[1])\n default_speedups = []\n random_speedups = []\n\n default_error = []\n random_error = []\n\n for i in range(len(applications)):\n application = applications[i]\n\n default_all_best = []\n random_all_best = []\n\n for j in range(1, runs + 1):\n if os.path.isfile(\"{0}/{1}{2}/{3}\".format(current_board,\n application,\n j,\n best_filename)):\n default_data_file = open(\"{0}/{1}{2}/{3}\".format(current_board,\n application,\n j,\n best_filename),\n \"r\")\n default_data = default_data_file.readlines()\n default_data_file.close()\n default_best = float(default_data[-1].split()[1])\n\n if default_best != float('inf'):\n default_all_best.append(default_best)\n\n if os.path.isfile(\"{0}/{1}{2}/{3}\".format(random_board,\n application,\n j,\n best_filename)):\n\n random_data_file = open(\"{0}/{1}{2}/{3}\".format(random_board,\n application,\n j,\n best_filename),\n \"r\")\n random_data = random_data_file.readlines()\n\n random_data_file.close()\n random_best = float(random_data[-1].split()[1])\n\n if random_best != float('inf'):\n random_all_best.append(random_best)\n\n if len(default_all_best) > 0:\n default_speedups.append((application.split(\"_\")[0], numpy.mean(default_all_best)))\n default_error.append((application.split(\"_\")[0], numpy.std(default_all_best)))\n else:\n default_speedups.append((application.split(\"_\")[0], 0))\n default_error.append((application.split(\"_\")[0], 0))\n\n if len(random_all_best) > 0:\n random_speedups.append((application.split(\"_\")[0], numpy.mean(random_all_best)))\n random_error.append((application.split(\"_\")[0], numpy.std(random_all_best)))\n else:\n random_speedups.append((application.split(\"_\")[0], 
0))\n random_error.append((application.split(\"_\")[0], 0))\n\n default_ymax = max([s[1] for s in default_speedups])\n random_ymax = max([s[1] for s in random_speedups])\n\n print(default_ymax, random_ymax)\n\n ymax = max(default_ymax, random_ymax)\n\n if metric['name'] == 'Cycles':\n ymax = 80000\n\n # Plot summary of relative improvements\n plot_bar([s[1] for s in default_speedups],\n [s[1] for s in default_error],\n [s[1] for s in random_speedups],\n [s[1] for s in random_error],\n \"CHStone Applications\",\n \"Improvement vs. Starting Point\",\n len(default_speedups),\n .225,\n [s[0] for s in default_speedups],\n dest_filename,\n name,\n ymax,\n False)\n","sub_path":"post_place_and_route/py/results/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":20810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"591718116","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport asyncio\nfrom telethon import TelegramClient, events\nfrom telethon.tl.types import PeerUser, PeerChat, PeerChannel\nAPI_ID = 5742352 #api id\nAPI_HASH = \"affd0f00f502063c080546a324c15b88\"#api hash\ngroups = []\nspamEnabled = False\nTime = 15\nMessage = None\n\nclient = TelegramClient('opentelegramfiles',API_ID,API_HASH)\nasync def doSpam(client, msg):\n try:\n for group in groups:\n try:\n await client.send_message(group, msg[0], file=msg[1], link_preview=msg[2])\n await asyncio.sleep(0.2)\n except:\n pass\n except:\n pass\n@client.on(events.NewMessage( outgoing=True))\nasync def miei_msg(event):\n global groups, Message, spamEnabled, Time\n if event.text == \".start_spam\":\n if not spamEnabled:\n await event.edit(\"spam avviato!\")\n spamEnabled = True\n if Message != None:\n while spamEnabled:\n await asyncio.wait([doSpam(event.client, Message)])\n for i in range(Time * 60):\n if spamEnabled:\n await asyncio.sleep(1)\n else:\n break\n\n elif event.text == \".stop_spam\":\n await event.edit(\"spam stoppato!\")\n spamEnabled = False\n elif event.text == \".addgroupforspam\":\n if isinstance(event.peer_id, PeerUser):\n await event.edit(\"stai parlando con un utente! 
non è un gruppo!\")\n else:\n if isinstance(event.peer_id, PeerChat):\n groups.append(event.peer_id.chat_id)\n await event.edit(\"gruppo aggiunto per lo spam correttamente!\")\n elif isinstance(event.peer_id, PeerChannel):\n groups.append(event.peer_id.channel_id)\n await event.edit(\"gruppo aggiunto per lo spam correttamente!\")\n else:\n event.edit(\"errore nell'aggiunta.(controllare il log.)\")\n elif event.text == \".addchannelforspam\":\n if isinstance(event.peer_id, PeerChannel):\n groups.append(event.peer_id.channel_id)\n await event.edit(\"canale aggiunto per lo spam correttamente!\")\n elif event.text == \".remove_this_forspam\":\n await event.edit(\"!rimosso per lo spam correttamente!\")\n elif event.text == \".set_this_message_forspam\":\n if event.is_reply:\n new = await event.get_reply_message()\n if new.media != None and type(new.media).__name__ != \"MessageMediaWebPage\" and type(\n new.media).__name__ != \"MessageMediaUnsupported\":\n media = new.media\n else:\n media = None\n if new.web_preview != None:\n lp = True\n else:\n lp = False\n Message = [new.text, media, lp]\n await event.edit(\"messaggio fissato per lo spam\")\n else:\n await event.edit(\"!nessun messaggio fissato per lo spam!\\n(rispondi ad un messaggio per usare questo comando!)\")\n elif event.text == \".set_time_for_spam\":\n if event.is_reply:\n new = await event.get_reply_message()\n if int(new.text) in range(10, 60):\n Time = int(new.text)\n await event.edit(\"tempo VALIDO selezionato!\")\n else:\n await event.edit(\"tempo non VALIDO(rimane il precedente impostato):\\ndefault- 15 minuti\")\n\nclient.start()\nclient.run_until_disconnected()\n","sub_path":"Bhspamo.py","file_name":"Bhspamo.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"624611514","text":"import http.client\nimport json\nimport paho.mqtt.client as mqtt\nimport hashlib\nfrom Driver_Base import Driver\nimport time\n\n\nclass ThingsBoard(Driver):\n def __init__(self, config_path, mode):\n self.now_info = []\n Driver.__init__(self, config_path, mode)\n\n # Get Jwt\n def get_authorization(self):\n conn = http.client.HTTPConnection(self.host + ':' + self.port)\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\"\n }\n\n body = '{\"username\":\"tenant@thingsboard.org\", \"password\":\"tenant\"}'\n\n conn.request(\"POST\", \"/api/auth/login\", body=body, headers=headers)\n response_data = conn.getresponse().read()\n response_json = json.loads(response_data.decode(\"utf-8\"))\n\n return response_json\n\n def connect(self):\n while True:\n try:\n conn = http.client.HTTPConnection(self.host + ':' + self.port)\n authorization = self.get_authorization()\n headers = {\n 'Accept': \"application/json\",\n 'X-Authorization': \"Bearer \" + authorization[\"token\"],\n }\n return [conn, headers]\n except:\n print(\"Error connect to Platform\")\n time.sleep(2)\n continue\n\n # Get list device of user\n def get_list_device_on_customes(self):\n print(\"get list\")\n result = self.connect()\n conn = result[0]\n headers = result[1]\n\n conn.request(\"GET\", \"/api/customer/3f4fd570-4ed4-11e8-a082-9dc4b7fcfa12/devices?limit=111\", headers=headers)\n data = conn.getresponse().read()\n json_data = json.loads(data.decode(\"utf-8\"))\n device_list = json_data['data']\n\n return device_list\n\n # Get Access token of device\n def get_access_token_device(self, thing_local_id):\n print(\"Get Access token device: \")\n result = 
self.connect()\n conn = result[0]\n headers = result[1]\n\n conn.request(\"GET\", \"/api/device/\" + thing_local_id + \"/credentials\", headers=headers)\n data = conn.getresponse().read()\n json_data = json.loads(data.decode(\"utf-8\"))\n\n return json_data['credentialsId']\n\n # Get telemetry key (ex: Temperature, Humidity,...) & concatenation\n def get_telemetry_keys(self, thing_local_id):\n telemetries = \"\"\n\n result = self.connect()\n conn = result[0]\n headers = result[1]\n\n url = \"/api/plugins/telemetry/DEVICE/\" + thing_local_id + \"/keys/timeseries\"\n conn.request(\"GET\", url, headers=headers)\n data = conn.getresponse().read()\n json_data = json.loads(data.decode(\"utf-8\"))\n\n for i, telemetry in enumerate(json_data):\n if i == len(json_data) - 1:\n telemetries = telemetries + telemetry\n else:\n telemetries = telemetries + telemetry + \",\"\n\n return [json_data, telemetries]\n\n # Get id dashboard of user\n def get_dashboard_id_on_customes_id(self, customers_id):\n result = self.connect()\n conn = result[0]\n headers = result[1]\n\n conn.request(\"GET\", \"/api/customer/\" + customers_id + \"/dashboards?ascOrder=false&limit=111\", headers=headers)\n data = conn.getresponse().read()\n json_data = json.loads(data.decode(\"utf-8\"))\n\n return json_data['data'][0]['id']['id']\n\n # Get label of dashboard\n def get_label_on_dashboard_id(self):\n result = self.connect()\n conn = result[0]\n headers = result[1]\n\n dashboard_id = self.get_dashboard_id_on_customes_id(\"3f4fd570-4ed4-11e8-a082-9dc4b7fcfa12\")\n conn.request(\"GET\", \"/api/dashboard/\" + dashboard_id, headers=headers)\n data = conn.getresponse().read()\n json_data = json.loads(data.decode(\"utf-8\"))\n\n return json_data[\"configuration\"][\"widgets\"][\"0c6413aa-8860-50e4-6eb8-935d21a1eacc\"][\"config\"][\"settings\"][\n \"gpioList\"]\n\n # Sort object for compare\n def ordered(self, obj):\n if isinstance(obj, dict):\n return sorted((k, self.ordered(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return sorted(self.ordered(x) for x in obj)\n else:\n return obj\n\n def get_states(self):\n print(\"get states\")\n list_thing = {\n 'platform_id': str(self.platform_id),\n 'things': []\n }\n\n states = []\n device_list = self.get_list_device_on_customes()\n\n result = self.connect()\n conn = result[0]\n headers = result[1]\n\n for device in device_list:\n result_telemetry_keys = self.get_telemetry_keys(device[\"id\"][\"id\"])\n keys_telemetry_list = result_telemetry_keys[0]\n telemetries = result_telemetry_keys[1]\n\n url = \"/api/plugins/telemetry/DEVICE/\" + device[\"id\"][\"id\"] + \"/values/timeseries?keys=\" + telemetries\n\n conn.request(\"GET\", url, headers=headers)\n response_data = conn.getresponse().read()\n\n # Json contain telemetry value\n response_json = json.loads(response_data.decode(\"utf-8\"))\n\n state = {\n 'thing_type': device[\"type\"],\n 'thing_name': device[\"name\"],\n 'thing_global_id': self.platform_id + '-' + device[\"id\"][\"id\"],\n 'thing_local_id': device[\"id\"][\"id\"],\n 'location': \"null\",\n 'items': []\n }\n\n for telemetry in keys_telemetry_list:\n item_state = response_json[telemetry][0][\"value\"]\n if device[\"id\"][\"id\"] == \"bb12cda0-4f80-11e8-a082-9dc4b7fcfa12\":\n device[\"type\"] = \"sensor\"\n item_state = int(response_json[telemetry][0][\"value\"])\n item_name = telemetry\n elif device[\"id\"][\"id\"] == \"65c55ed0-601a-11e8-a3b8-6d591acbfd77\":\n result_label_light = self.get_label_on_dashboard_id()\n if telemetry == \"motion\":\n device[\"type\"] = 
\"binary_sensor\"\n if response_json[telemetry][0][\"value\"] == \"1\":\n item_state = \"on\"\n else:\n item_state = \"off\"\n elif telemetry == \"3\": # Green light\n device[\"type\"] = \"light\"\n item_name = result_label_light[0][\"label\"]\n if response_json[telemetry][0][\"value\"] == \"true\":\n item_state = \"on\"\n else:\n item_state = \"off\"\n elif telemetry == \"4\": # Red light\n item_name = result_label_light[1][\"label\"]\n device[\"type\"] = \"light\"\n if response_json[telemetry][0][\"value\"] == \"true\":\n item_state = \"on\"\n else:\n item_state = \"off\"\n elif telemetry == \"5\": # Yellow light\n item_name = result_label_light[2][\"label\"]\n device[\"type\"] = \"light\"\n if response_json[telemetry][0][\"value\"] == \"true\":\n item_state = \"on\"\n else:\n item_state = \"off\"\n else:\n item_state = response_json[telemetry][0][\"value\"]\n item_name = telemetry\n\n item = {\n 'item_type': device[\"type\"],\n 'item_name': item_name,\n 'item_global_id': self.platform_id + '-' + device[\"id\"][\"id\"] + '-' + telemetry,\n 'item_local_id': device[\"id\"][\"id\"] + '-' + telemetry,\n 'item_state': item_state,\n 'can_set_state': self.check_can_set_state(device[\"type\"])\n }\n state['items'].append(item)\n\n states.append(state)\n\n list_thing['things'] = states\n print(list_thing)\n return list_thing\n\n def check_configuration_changes(self):\n new_info = []\n device_list = self.get_list_device_on_customes()\n\n for device in device_list:\n result_telemetry_keys = self.get_telemetry_keys(device[\"id\"][\"id\"])\n keys_telemetry_list = result_telemetry_keys[0]\n\n state = {\n 'thing_type': device[\"type\"],\n 'thing_name': device[\"name\"],\n 'platform_id': str(self.platform_id),\n 'thing_global_id': self.platform_id + '-' + device[\"id\"][\"id\"],\n 'thing_local_id': device[\"id\"][\"id\"],\n 'location': \"null\",\n 'items': []\n }\n\n for telemetry in keys_telemetry_list:\n if device[\"id\"][\"id\"] == \"bb12cda0-4f80-11e8-a082-9dc4b7fcfa12\":\n device[\"type\"] = \"sensor\"\n item_name = telemetry\n elif device[\"id\"][\"id\"] == \"65c55ed0-601a-11e8-a3b8-6d591acbfd77\":\n result_label_light = self.get_label_on_dashboard_id()\n if telemetry == \"motion\":\n device[\"type\"] = \"binary_sensor\"\n item_name = telemetry\n elif telemetry == \"3\":\n device[\"type\"] = \"light\"\n item_name = result_label_light[0][\"label\"]\n elif telemetry == \"4\":\n device[\"type\"] = \"light\"\n item_name = result_label_light[1][\"label\"]\n elif telemetry == \"5\":\n device[\"type\"] = \"light\"\n item_name = result_label_light[2][\"label\"]\n else:\n item_name = telemetry\n\n item = {\n 'item_type': device[\"type\"],\n 'item_name': item_name,\n 'item_global_id': self.platform_id + '-' + device[\"id\"][\"id\"] + '-' + telemetry,\n 'item_local_id': device[\"id\"][\"id\"] + '-' + telemetry,\n 'can_set_state': self.check_can_set_state(device[\"type\"])\n }\n state['items'].append(item)\n\n new_info.append(state)\n\n hash_now = hashlib.md5(str(self.ordered(new_info)).encode())\n hash_pre = hashlib.md5(str(self.ordered(self.now_info)).encode())\n if hash_now.hexdigest() == hash_pre.hexdigest():\n return {\n 'have_change': False,\n 'new_info': new_info,\n 'platform_id': str(self.platform_id)\n }\n else:\n self.now_info = new_info\n return {\n 'have_change': True,\n 'new_info': new_info,\n 'platform_id': str(self.platform_id)\n }\n\n def check_can_set_state(self, thing_type):\n if thing_type == \"light\":\n return \"yes\"\n return \"no\"\n\n def set_state(self, thing_type, thing_local_id, 
location, thing_name,\n item_type, item_local_id, item_name, new_state):\n print(\"Set state {} into {}\".format(thing_local_id, new_state))\n result = self.connect()\n conn = result[0]\n headers = result[1]\n\n if item_type == \"light\":\n pin = item_local_id.rsplit('-', 1)[1]\n if new_state == \"ON\":\n body = '{\"method\":\"setGpioStatus\",\"params\":{\"pin\":' + pin + ',\"enabled\":true}}'\n conn.request(\"POST\", \"/api/plugins/rpc/twoway/\" + thing_local_id, body=body, headers=headers)\n elif new_state == \"OFF\":\n body = '{\"method\":\"setGpioStatus\",\"params\":{\"pin\":' + pin + ',\"enabled\":false}}'\n conn.request(\"POST\", \"/api/plugins/rpc/twoway/\" + thing_local_id, body=body, headers=headers)\n else:\n print(\"Error set state\")\n else:\n print(\"Type not support set state\")\n\n\nif __name__ == '__main__':\n CONFIG_PATH = \"config/thingsboard.ini\"\n MODE = 'PULL'\n things_board = ThingsBoard(CONFIG_PATH, MODE)\n things_board.run()\n","sub_path":"ThingsBoard/tb/ThingsBoard_Driver.py","file_name":"ThingsBoard_Driver.py","file_ext":"py","file_size_in_byte":12259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"133274349","text":"from ..handler import Handler\nfrom ..data import Page\n\nclass WikiHome(Handler):\n def render_page(self, username=\"\"):\n self.render(\"wiki_home.html\",\n username=username);\n\n def get(self):\n if self.user:\n self.render_page(self.user.username)\n else:\n self.render_page();\n\n def post(self):\n topic = self.request.get('topic').split(' ')\n self.redirect('/wiki/' + topic[0])\n\nclass WikiPage(Handler):\n def get(self, page):\n topic = page.split('/')[-1]\n page = Page.get_by_topic(topic)\n\n if page and self.user:\n self.render(\"wiki_page.html\", username=self.user.username,\n topic=topic, content=page.content)\n elif page:\n self.render(\"wiki_page.html\",\n topic=topic, content=page.content)\n elif not page and self.user:\n self.redirect('/wiki/edit/' + topic)\n else:\n self.render(\"wiki_no_page.html\", topic=topic)\n\nclass WikiEdit(Handler):\n def render_page(self, username, topic, content=\"\"):\n self.render(\"wiki_edit.html\", username=username,\n topic=topic, content=content)\n\n def get(self, page):\n topic = page.split('/')[-1]\n\n if not self.user:\n self.redirect('/wiki/' + topic)\n\n page = Page.get_by_topic(topic)\n if page:\n self.render_page(self.user.username, topic, page.content)\n else:\n self.render_page(self.user.username, topic)\n\n def post(self, page):\n topic = page.split('/')[-1]\n content = self.request.get('content')\n\n if not self.user:\n return\n\n page = Page.get_by_topic(topic)\n if page:\n page.content = content\n page.put()\n else:\n page = Page.make_page(topic, content)\n\n self.redirect('/wiki/' + topic)\n","sub_path":"python/wiki/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"451672503","text":"from openpyxl import load_workbook\nfrom duvasheetcalc.sheetcalculator import SheetCalculator\nfrom duvasheetcalc.calcsaver import CalcSaver\nfrom duvasheetcalc.utils import clean_days\n\n\nclass SheetLoader(object):\n\n def __init__(self):\n self.calculator = SheetCalculator()\n self.saver = CalcSaver()\n self.height = 31*31\n self.letters = ['A', 'B', 'C', 'D']\n\n\n def load(self, filename, output):\n days = []\n\n wb = load_workbook(filename)\n ws = wb.active\n\n i = 2\n for row in range(0, self.height):\n day = {}\n for 
char in self.letters:\n coords = '{}{}'.format(char, i)\n cell = ws[coords]\n\n if char == 'A':\n if cell.value is not None:\n day['date'] = cell.value.strftime(\"%d-%B-%Y\")\n elif char == 'B':\n day['interval'] = cell.value\n elif char == 'C':\n day['hours'] = cell.value\n elif char == 'D':\n day['ref'] = cell.value\n\n days.append(day)\n i+=1\n\n days = clean_days(days)\n calculations = self.calculator.calculate(days)\n\n return self.saver.save(calculations, output)\n","sub_path":"build/lib/duvasheetcalc/sheetloader.py","file_name":"sheetloader.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"14805648","text":"\"\"\"\n\n\n\"\"\"\nimport pprint\n\nimport json\nimport logging\nfrom urlparse import urlparse, parse_qs\nfrom datetime import date\nfrom django.shortcuts import render\nfrom django.core.urlresolvers import reverse\nfrom django.forms.models import model_to_dict\nfrom django.views.generic import View, ListView, DetailView, TemplateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.http import (HttpResponse, JsonResponse,\n HttpResponseBadRequest, # 400\n HttpResponseForbidden, # 403\n HttpResponseNotFound, # 404\n HttpResponseNotAllowed, # 405\n HttpResponseNotModified, # 304\n HttpResponseRedirect) # 302\n\nfrom django.conf import settings\nfrom django.template import loader\nimport requests\nfrom hbp_app_python_auth.auth import get_access_token, get_auth_header\n\nfrom .models import (ValidationTestDefinition, \n ValidationTestCode,\n ValidationTestResult, \n ScientificModelInstance, \n ScientificModel, \n ScientificModelInstance,\n ScientificModelImage, \n Comment,\n\n# configview,\n\n CollabParameters,)\n\n\nfrom .forms import (ValidationTestDefinitionForm, \n ValidationTestCodeForm,\n ScientificModelForm,\n ScientificModelImageForm, \n ScientificTestForm, \n ValidationTestResultForm, \n ScientificModelInstanceForm,\n CommentForm,\n# configviewForm, \n )\n\nfrom .serializer import (ValidationTestDefinitionSerializer, \n ScientificModelSerializer, \n ScientificModelInstanceSerializer,\n ScientificModelImageSerializer,\n ValidationTestResultSerializer,\n ValidationTestCodeSerializer,\n ValidationTestDefinitionWithCodesReadSerializer,\n CommentSerializer,\n# configviewSerializer, \n )\n\n\nfrom django.shortcuts import get_object_or_404\n\nfrom django.core import serializers\n\n\n#rest_framework\nfrom rest_framework import (viewsets,\n status,\n mixins,\n generics,\n permissions,)\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\n\n\nCROSSREF_URL = \"http://api.crossref.org/works/\"\nVALID_FILTER_NAMES = ('name', 'age', 'brain_region', 'cell_type',\n 'data_type', 'data_modality', 'test_type',\n 'author', 'species', 'data_location', 'publication')\nVALID_MODEL_FILTER_NAMES = ('brain_region', 'cell_type',\n 'author', 'species')\nVALID_RESULT_FILTERS = {\n 'model': 'model_instance__model__name__icontains',\n 'validation': 'test_definition__test_definition__name__icontains',\n 'project': 'project',\n 'collab_id': 'project',\n 'brain_region': 'test_definition__test_definition__brain_region__icontains',\n}\n\nlogger = logging.getLogger(\"model_validation_api\")\n\n\ndef get_authorization_header(request):\n auth = request.META.get(\"HTTP_AUTHORIZATION\", None)\n if auth is None:\n try:\n# auth = 
get_auth_header(request.user.social_auth.get())\n logger.debug(\"Got authorization from database\")\n except AttributeError:\n pass\n # in case of 401 error, need to trap and redirect to login\n else:\n logger.debug(\"Got authorization from HTTP header\")\n return {'Authorization': auth}\n\n\n# def get_admin_list(request):\n# url = 'https://services.humanbrainproject.eu/idm/v1/api/group/hbp-neuromorphic-platform-admin/members'\n# headers = get_authorization_header(request)\n# res = requests.get(url, headers=headers)\n# logger.debug(headers)\n# if res.status_code != 200:\n# raise Exception(\"Couldn't get list of administrators.\" + res.content + str(headers))\n# data = res.json()\n# assert data['page']['totalPages'] == 1\n# admins = [user['id'] for user in data['_embedded']['users']]\n# return admins\n\n\n# def is_admin(request):\n# try:\n# admins = get_admin_list(request)\n# except Exception as err:\n# logger.warning(err.message)\n# return False\n# try:\n# user_id = get_user(request)[\"id\"]\n# except Exception as err:\n# logger.warning(err.message)\n# return False\n# return user_id in admins\n\n\n\n# to put inside views\n# if not _is_collaborator(request, ctx):\n# return HttpResponseForbidden()\ndef _is_collaborator(request, context):\n '''check access depending on context'''\n svc_url = settings.HBP_COLLAB_SERVICE_URL\n if not context:\n return False\n\n url = '%scollab/context/%s/' % (svc_url, context)\n\n headers = {'Authorization': get_auth_header(request.user.social_auth.get())}\n res = requests.get(url, headers=headers)\n\n if res.status_code != 200:\n return False\n\n collab_id = res.json()['collab']['id']\n url = '%scollab/%s/permissions/' % (svc_url, collab_id)\n res = requests.get(url, headers=headers)\n if res.status_code != 200:\n return False\n return res.json().get('UPDATE', False)\n\n\ndef get_user(request):\n url = \"{}/user/me\".format(settings.HBP_IDENTITY_SERVICE_URL)\n headers = get_authorization_header(request)\n logger.debug(\"Requesting user information for given access token\")\n res = requests.get(url, headers=headers)\n if res.status_code != 200:\n logger.debug(\"Error\" + res.content)\n raise Exception(res.content)\n logger.debug(\"User information retrieved\")\n return res.json()\n\n\n# def notify_coordinators(request, project):\n# coordinators = get_admin_list(request)\n# url = 'https://services.humanbrainproject.eu/stream/v0/api/notification/'\n# #url = 'https://stream.humanbrainproject.eu/api/v0/notifications/'\n# headers = get_authorization_header(request)\n# targets = [{\"type\": \"HBPUser\", \"id\": id} for id in coordinators]\n# payload = {\n# \"summary\": \"New access request for the Neuromorphic Computing Platform: {}\".format(project.title),\n# \"targets\": targets,\n# \"object\": {\n# \"type\": \"HBPCollaboratoryContext\",\n# \"id\": \"346173bb-887c-4a47-a8fb-0da5d5980dfc\"\n# }\n# }\n# res = requests.post(url, json=payload, headers=headers)\n# if res.status_code not in (200, 204):\n# logger.error(\"Unable to notify coordinators. 
{}: {}\".format(res.status_code, res.content))\n# return False\n# return True\n\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass ValidationTestDefinitionResource(View):\n serializer = ValidationTestDefinitionSerializer\n login_url='/login/hbp/'\n\n def _get_test(self, test_id):\n try:\n test = ValidationTestDefinition.objects.get(pk=test_id)\n except ValidationTestDefinition.DoesNotExist:\n test = None\n return test\n\n def get(self, request, *args, **kwargs):\n \"\"\"View a test\"\"\"\n test = self._get_test(kwargs[\"test_id\"])\n if test is None:\n return HttpResponseNotFound(\"No such test\")\n code_version = request.GET.get(\"version\", None)\n content = self.serializer.serialize(test, code_version)\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass ValidationTestDefinitionListResource(View):\n serializer = ValidationTestDefinitionSerializer\n login_url='/login/hbp/'\n\n # NEEDS UPDATING NOW CODE IS A SEPARATE OBJECT\n def post(self, request, *args, **kwargs):\n \"\"\"Add a test\"\"\"\n # if not is_admin(request):\n # return HttpResponseForbidden(\"You do not have permission to add a test.\")\n form = ValidationTestDefinitionForm(json.loads(request.body))\n if form.is_valid():\n test = form.save()\n content = self.serializer.serialize(test)\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=201)\n else:\n print(form.data)\n return HttpResponseBadRequest(str(form.errors)) # todo: plain text\n\n def get(self, request, *args, **kwargs):\n tests = ValidationTestDefinition.objects.all()\n content = self.serializer.serialize(tests)\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass ValidationTestDefinitionCreate(DetailView): \n template_name = \"simple_test_create.html\"\n model = ValidationTestDefinition\n form_class = ValidationTestDefinitionForm\n form_class_code = ValidationTestCodeForm\n serializer = ValidationTestDefinitionSerializer\n login_url='/login/hbp/'\n\n def get(self, request, *args, **kwargs): \n h = ValidationTestDefinition()\n form = self.form_class(instance = h)\n c = ValidationTestCode()\n formcode = self.form_class_code(instance = c)\n return render(request, self.template_name, {'form': form, 'formcode':formcode })\n\n def post(self, request, *args, **kwargs):\n \"\"\"Add a test\"\"\"\n # if not is_admin(request):\n # return HttpResponseForbidden(\"You do not have permission to add a test.\")\n # form = ValidationTestDefinitionForm(json.loads(request.body))\n\n test_creation = ValidationTestDefinition()\n form = self.form_class(request.POST, instance=test_creation)\n test_code_creation = ValidationTestCode()\n\n if form.is_valid():\n #TODO : add some check to verify that repository/path/version are correct. \n #possible to combine 2 models in one form ? 
form = form.save()\n
 test_code_creation.test_definition = ValidationTestDefinition.objects.get(id = form.id)\n
 test_code_creation.repository = request.POST.get(\"repository\", None)\n
 test_code_creation.path = request.POST.get(\"path\", None)\n
 test_code_creation.version = request.POST.get(\"version\", None)\n
 test_code_creation.save()\n
 return self.redirect(request, pk=form.id)\n
 else:\n
 # re-bind the code form so the template can re-render both forms with errors\n
 formcode = self.form_class_code(request.POST, instance=test_code_creation)\n
 return render(request, self.template_name, {'form': form, 'formcode': formcode}) # todo: plain text\n\n
 @classmethod\n
 def redirect(self, request, *args, **kwargs): ### used to go back to the Test detail view\n
 url = reverse('simple-detail-view', kwargs = {'pk': kwargs['pk']})\n\n
 return HttpResponseRedirect(url)\n\n\n
@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\n
class ValidationTestDefinitionSearchResource(View):\n
 serializer = ValidationTestDefinitionSerializer\n
 login_url='/login/hbp/'\n\n
 def get(self, request, *args, **kwargs):\n
 filters = {}\n
 for key, value in request.GET.items():\n
 if key not in VALID_FILTER_NAMES:\n
 return HttpResponseBadRequest(\"{} is not a valid filter\".format(key))\n
 else:\n
 filters[key + \"__contains\"] = value # should handle multiple values\n
 tests = ValidationTestDefinition.objects.filter(**filters)\n
 # raise Exception(str(filters))\n
 content = self.serializer.serialize(tests)\n
 return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n\n
@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\n
class SimpleTestListView(LoginRequiredMixin, ListView):\n
 model = ValidationTestDefinition\n
 template_name = \"simple_test_list.html\"\n
 login_url='/login/hbp/'\n\n
 def get_queryset(self):\n
 # print(\"SimpleTestListView - get_queryset\" + str(self.request.GET.items()))\n
 filters = {}\n
 if self.request.META['QUERY_STRING'].startswith(\"search=\"):\n
 search = \"\"\n
 search_cat = \"\"\n
 for key, value in self.request.GET.items():\n
 if key == 'search':\n
 search = value\n
 if key == 'search_cat':\n
 search_cat = value\n
 # print(search_cat, search)\n
 if search_cat in VALID_FILTER_NAMES:\n
 filters[search_cat + \"__icontains\"] = search\n
 else :\n
 for item in VALID_FILTER_NAMES:\n
 filters[item + \"__icontains\"] = search\n
 name_list = ValidationTestDefinition.objects.filter(name__contains=search)\n
 species_list = ValidationTestDefinition.objects.filter(species__contains=search)\n
 age_list = ValidationTestDefinition.objects.filter(age__contains=search)\n
 brain_region_list = ValidationTestDefinition.objects.filter(brain_region__contains=search)\n
 cell_type_list = ValidationTestDefinition.objects.filter(cell_type__contains=search)\n
 data_location_list = ValidationTestDefinition.objects.filter(data_location__contains=search)\n
 data_type_list = ValidationTestDefinition.objects.filter(data_type__contains=search)\n
 data_modality_list = ValidationTestDefinition.objects.filter(data_modality__contains=search)\n
 test_type_list = ValidationTestDefinition.objects.filter(test_type__contains=search)\n
 author_list = ValidationTestDefinition.objects.filter(author__contains=search)\n
 publication_list = ValidationTestDefinition.objects.filter(publication__contains=search)\n\n
 self.object_list = (name_list|species_list|age_list|brain_region_list|cell_type_list|data_location_list|data_type_list|data_modality_list|test_type_list|author_list|publication_list).distinct()\n
 return self.object_list\n
 else :\n
 for key, value in 
self.request.GET.items():\n search = value\n filters[key + \"__icontains\"] = search\n\n return ValidationTestDefinition.objects.filter(**filters)\n\n\n def get_context_data(self, **kwargs):\n context = super(SimpleTestListView, self).get_context_data(**kwargs)\n context[\"section\"] = \"tests\"\n context[\"filters\"] = {\n \"species\": ValidationTestDefinition.objects.values_list('species', flat=True).distinct(),\n \"brain_region\": ValidationTestDefinition.objects.values_list('brain_region', flat=True).distinct(),\n \"cell_type\": ValidationTestDefinition.objects.values_list('cell_type', flat=True).distinct(),\n }\n\n return context\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleTestDetailView(LoginRequiredMixin, DetailView):\n model = ValidationTestDefinition\n template_name = \"simple_test_detail.html\"\n # template_name = \"test_view.html\"\n \n login_url='/login/hbp/'\n\n def get_context_data(self, **kwargs):\n context = super(SimpleTestDetailView, self).get_context_data(**kwargs)\n context[\"section\"] = \"tests\"\n publication_field = context[\"object\"].publication\n if publication_field.startswith(\"doi:\"):\n crossref_metadata = self._get_crossref_metadata(publication_field)\n context[\"publication_detail\"] = crossref_metadata\n if crossref_metadata:\n context[\"formatted_publication\"] = self._format_publication(crossref_metadata)\n print (context)\n return context\n\n def _get_crossref_metadata(self, publication_field):\n prefix, doi = publication_field.split(\":\")\n try:\n response = requests.get(CROSSREF_URL + doi)\n except requests.ConnectionError:\n logger.warning(\"Unable to retrieve metadata for DOI {}\".format(doi))\n return {}\n if response.ok:\n return response.json()['message']\n else:\n logger.warning(\"Unable to retrieve metadata for DOI {}\".format(doi))\n return {}\n\n def _format_publication(self, pub_data):\n for author in pub_data[\"author\"]:\n author[\"initials\"] = \"\".join([name[0] for name in author[\"given\"].split()])\n authors = [u\"{family} {initials}\".format(**author)\n for author in pub_data[\"author\"]]\n pub_data[\"authors\"] = u\", \".join(authors[:-1]) + u\" and \" + authors[-1]\n pub_data[\"year\"] = pub_data[\"created\"][\"date-parts\"][0][0]\n template = u\"{authors} ({year}) {title[0]}. 
{short-container-title[0]} {volume}:{page} {URL}\"\n return template.format(**pub_data)\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass NewSimpleTestDetailView(LoginRequiredMixin, DetailView):\n model = ValidationTestDefinition\n template_name = \"simple_test_detail.html\"\n form_class = CommentForm\n\n def get(self, request, *args, **kwargs):\n validation_code = ValidationTestCode.objects.filter(test_definition_id = self.kwargs['pk'])\n test = ValidationTestDefinition.objects.get(id = self.kwargs['pk'])\n\n comment = Comment.objects.filter(test = self.kwargs['pk'])\n\n cmt = Comment()\n form = self.form_class(instance = cmt)\n \n return render(request, self.template_name, {'form': form, 'validation_code':validation_code, 'test':test, 'comment':comment})\n\n\n def post(self, request, *args, **kwargs):\n validation_code = ValidationTestCode.objects.filter(test_definition_id = self.kwargs['pk'])\n test = ValidationTestDefinition.objects.get(id = self.kwargs['pk'])\n comment = Comment.objects.filter(test = self.kwargs['pk'])\n\n if request.POST.get('action', None) == 'edit_comment':\n form=self.edit_comment(request)\n else: \n comment_creation = Comment()\n comment_creation.test = get_object_or_404(ValidationTestDefinition, pk=self.kwargs['pk']) \n \n if request.method == 'POST':\n form = CommentForm(request.POST, instance=comment_creation)\n\n if form.is_valid(): \n form = form.save(commit=False)\n form.author = request.user\n form.save()\n \n cmt = Comment()\n form = self.form_class(instance = cmt)\n \n return render(request, self.template_name, {'form': form, 'validation_code':validation_code, 'test':test, 'comment':comment})\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleTestEditView(DetailView):\n model = ValidationTestDefinition\n form_class = ValidationTestDefinitionForm\n template_name = \"simple_test_edit.html\"\n login_url='/login/hbp/'\n\n def get_context_data(self, **kwargs):\n context = super(SimpleTestEditView, self).get_context_data(**kwargs)\n context[\"section\"] = \"models\"\n context[\"build_info\"] = settings.BUILD_INFO\n return context\n\n def get(self, request, *args, **kwargs):\n print(self.get_object().id)\n h = ValidationTestDefinition.objects.get(id = self.get_object().id)\n form = self.form_class(instance = h)\n # print(str(form))\n return render(request, self.template_name, {'form': form, 'object':h})\n \n def post(self, request, *args, **kwargs):\n m = self.get_object()\n form = self.form_class(request.POST, instance=m)\n if form.is_valid():\n form = form.save(commit=False)\n form.save()\n return self.redirect(request, pk=m.id)\n # return render(request, \"simple_test_detail.html\", {'form': form, \"object\": m})\n return render(request, self.template_name, {'form': form, \"object\": m})\n\n @classmethod \n def redirect(self, request, *args, **kwargs): \n url = reverse(\"simple-detail-view\", kwargs = { 'pk':kwargs['pk']})\n return HttpResponseRedirect(url)\n\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\n# class ScientificModelResource(View):\n# serializer = ScientificModelSerializer\n# login_url='/login/hbp/'\n\n# def _get_model(self, model_id):\n# try:\n# model = ScientificModel.objects.get(pk=model_id)\n# except ScientificModel.DoesNotExist:\n# model = None\n# return model\n\n# def get(self, request, *args, **kwargs):\n# \"\"\"View a model\"\"\"\n# model = self._get_model(kwargs[\"model_id\"])\n# if model is None:\n# return 
HttpResponseNotFound(\"No such result\")\n# content = self.serializer.serialize(model)\n# return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass AddImage(View):\n model = ScientificModelImage\n template_name = \"modal.html\"\n login_url='/login/hbp/'\n form_class = ScientificModelImageForm\n\n def get(self, request, *args, **kwargs):\n h = ScientificModelImage()\n form = self.form_class(instance = h)\n return render(request, self.template_name, {'form': form})\n\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass ScientificModelListResource(View):\n serializer = ScientificModelSerializer\n login_url='/login/hbp/'\n\n def post(self, request, *args, **kwargs):\n \"\"\"Add a model\"\"\"\n print (\"ScientificModelListResource POST\")\n # if not is_admin(request):\n # return HttpResponseForbidden(\"You do not have permission to add a result.\")\n form = ScientificModelForm(json.loads(request.body))\n if form.is_valid():\n model = form.save()\n content = self.serializer.serialize(model)\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=201)\n else:\n print(form.data)\n return HttpResponseBadRequest(str(form.errors)) # todo: plain text\n\n def get(self, request, *args, **kwargs):\n print (\"ScientificModelListResource GET\")\n \n models = ScientificModel.objects.all()\n content = self.serializer.serialize(models)\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleModelListView(LoginRequiredMixin, ListView):\n model = ScientificModel\n template_name = \"simple_model_list.html\"\n login_url='/login/hbp/'\n \n def get_queryset(self):\n filters = {}\n\n for key, value in self.request.GET.items():\n if key in VALID_MODEL_FILTER_NAMES:\n filters[key + \"__icontains\"] = value\n\n return ScientificModel.objects.filter(**filters)\n\n def get(self, request, *args, **kwargs):\n if request.META['QUERY_STRING'].startswith(\"search=\"):\n name_list = ScientificModel.objects.filter(name__contains=request.META['QUERY_STRING'][7:])\n species_list = ScientificModel.objects.filter(species__contains=request.META['QUERY_STRING'][7:])\n brain_region_list = ScientificModel.objects.filter(brain_region__contains=request.META['QUERY_STRING'][7:])\n cell_type_list = ScientificModel.objects.filter(cell_type__contains=request.META['QUERY_STRING'][7:])\n author_list = ScientificModel.objects.filter(author__contains=request.META['QUERY_STRING'][7:])\n self.object_list = (name_list|species_list|brain_region_list|cell_type_list|author_list).distinct()\n\n else:\n self.object_list = self.get_queryset()\n context = self.get_context_data()\n return self.render_to_response(context)\n\n\n def get_context_data(self, **kwargs):\n context = super(SimpleModelListView, self).get_context_data(**kwargs)\n context[\"section\"] = \"models\"\n context[\"build_info\"] = settings.BUILD_INFO\n context[\"filters\"] = {\n \"species\": ScientificModel.objects.values_list('species', flat=True).distinct(),\n \"brain_region\": ScientificModel.objects.values_list('brain_region', flat=True).distinct(),\n \"cell_type\": ScientificModel.objects.values_list('cell_type', flat=True).distinct(),\n }\n return context\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass 
SimpleModelDetailView(LoginRequiredMixin, DetailView):\n model = ScientificModel\n template_name = \"simple_model_detail.html\"\n login_url='/login/hbp/'\n\n def get_context_data(self, **kwargs):\n context = super(SimpleModelDetailView, self).get_context_data(**kwargs)\n context[\"section\"] = \"models\"\n context[\"build_info\"] = settings.BUILD_INFO\n return context\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleModelEditView(DetailView):\n model = ScientificModel\n form_class = ScientificModelForm\n template_name = \"simple_model_edit.html\"\n login_url='/login/hbp/'\n\n def get_context_data(self, **kwargs):\n context = super(SimpleModelEditView, self).get_context_data(**kwargs)\n context[\"section\"] = \"models\"\n context[\"build_info\"] = settings.BUILD_INFO\n return context\n\n def get(self, request, *args, **kwargs):\n print(self.get_object().id)\n h = ScientificModel.objects.get(id = self.get_object().id)\n form = self.form_class(instance = h)\n # print(str(form))\n return render(request, self.template_name, {'form': form, 'object':h})\n \n def post(self, request, *args, **kwargs):\n m = self.get_object()\n form = self.form_class(request.POST, instance=m)\n if form.is_valid():\n form = form.save(commit=False)\n form.save()\n return self.redirect(request, pk=m.id)\n return render(request, self.template_name, {'form': form, \"object\": m})\n \n @classmethod \n def redirect(self, request, *args, **kwargs): \n url = reverse(\"simple-model-detail-view\", kwargs = { 'pk':kwargs['pk']})\n return HttpResponseRedirect(url)\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleModelVersionView(DetailView):\n model = ScientificModelInstance\n form_class = ScientificModelInstanceForm\n template_name = \"simple_model_version.html\"\n login_url='/login/hbp/'\n\n def get_context_data(self, **kwargs):\n context = super(SimpleModelVersionView, self).get_context_data(**kwargs)\n context[\"section\"] = \"models\"\n context[\"build_info\"] = settings.BUILD_INFO\n return context\n\n def get(self, request, *args, **kwargs):\n qs = request.META['QUERY_STRING']\n if qs.startswith(\"modelID=\"):\n model_id = qs[8:] \n h = ScientificModel.objects.get(id=model_id)\n else: \n h = ScientificModel()\n instance = ScientificModelInstance()\n instance.model = h\n form = self.form_class(instance = instance)\n return render(request, self.template_name, {'form': form, 'object':h})\n \n def post(self, request, *args, **kwargs):\n\n form = self.form_class(request.POST)\n if form.is_valid():\n form = form.save(commit=False)\n form.save()\n return self.redirect(request, pk=form.model.id)\n return render(request, self.template_name, {'form': form})\n\n @classmethod \n def redirect(self, request, *args, **kwargs): \n url = reverse(\"simple-model-detail-view\", kwargs = { 'pk':kwargs['pk']})\n return HttpResponseRedirect(url)\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleModelCreateView(View):\n model = ScientificModel\n template_name = \"simple_model_create.html\"\n login_url='/login/hbp/'\n form_class = ScientificModelForm\n form_class_instance = ScientificModelInstanceForm\n form_class_image = ScientificModelImageForm\n serializer = ScientificModelSerializer\n def get(self, request, *args, **kwargs):\n h = ScientificModel()\n form = self.form_class(instance = h)\n model_instance = ScientificModelInstance()\n form_instance = self.form_class_instance(instance=model_instance)\n model_image 
= ScientificModelImage()\n form_image = self.form_class_image(instance = model_image)\n return render(request, self.template_name, {'form': form, 'form_instance': form_instance, 'form_image': form_image})\n \n def post(self, request, *args, **kwargs):\n model_creation = ScientificModel()\n form = self.form_class(request.POST, instance=model_creation)\n if form.is_valid():\n form = form.save(commit=False)\n form.access_control = 2180 #self.get_collab_id()\n form.save()\n # content = self.serializer.serialize(form)\n model_instance = ScientificModelInstance(model = ScientificModel.objects.get(id = form.id))\n form_instance = self.form_class_instance(instance = model_instance)\n form_instance = form_instance.save(commit=False)\n form_instance.model = ScientificModel.objects.get(id = form.id)\n form_instance.source = request.POST.get('source', None)\n form_instance.version = request.POST.get('version', None)\n form_instance.parameters = request.POST.get('parameters', None)\n form_instance.save()\n return HttpResponseRedirect(form.id)\n \n return render(request, self.template_name, {'form': form}, status=400) \n def get_collab_id(self):\n social_auth = self.request.user.social_auth.get()\n print(\"social auth\", social_auth.extra_data )\n # import hbp_service_client.document_service.client as doc_service_client\n # access_token = get_access_token(self.request.user.social_auth.get())\n # dsc = doc_service_client.Client.new(access_token)\n\n headers = {\n 'Authorization': get_auth_header(self.request.user.social_auth.get())\n }\n\n #to get collab_id\n svc_url = settings.HBP_COLLAB_SERVICE_URL\n context = self.request.session[\"next\"][6:]\n url = '%scollab/context/%s/' % (svc_url, context)\n res = requests.get(url, headers=headers)\n collab_id = res.json()['collab']['id']\n return collab_id\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass ValidationTestResultResource(View):\n serializer = ValidationTestResultSerializer\n login_url='/login/hbp/'\n\n def _get_result(self, result_id):\n try:\n result = ValidationTestResult.objects.get(pk=result_id)\n except ValidationTestResult.DoesNotExist:\n result = None\n return result\n\n def get(self, request, *args, **kwargs):\n \"\"\"View a result\"\"\"\n result = self._get_result(kwargs[\"result_id\"])\n if result is None:\n return HttpResponseNotFound(\"No such result\")\n content = self.serializer.serialize(result)\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass ValidationTestResultView(View): \n template_name = \"simple_result_new_create.html\"\n model = ValidationTestResult \n form_class = ValidationTestResultForm\n\n serializer = ValidationTestResultSerializer\n login_url='/login/hbp/'\n\n\n def get(self, request, *args, **kwargs):\n\n h = ValidationTestResult()\n form = self.form_class(instance = h)\n\n return render(request, self.template_name, {'form': form, })\n\n\n def post(self, request, *args, **kwargs):\n \"\"\"Add a test\"\"\"\n \n #result_creation = ValidationTestResult()\n test_creation = ValidationTestResult() \n #form = self.form_class(request.POST, instance=result_creation)\n form = self.form_class(request.POST, instance=test_creation)\n\n if form.is_valid():\n form = form.save(commit=False)\n form.save()\n return HttpResponseRedirect(form.id)\n return render(request, self.template_name, {'form': form})\n\n\n@method_decorator(login_required(login_url='/login/hbp'), 
name='dispatch' )\nclass ValidationTestResultListResource(View):\n serializer = ValidationTestResultSerializer\n login_url='/login/hbp/'\n\n def post(self, request, *args, **kwargs):\n \"\"\"Add a result\"\"\"\n # if not is_admin(request):\n # return HttpResponseForbidden(\"You do not have permission to add a result.\")\n\n data = json.loads(request.body)\n\n sci_model = ScientificModel.objects.get(pk=data[\"model_instance\"][\"model_id\"])\n model_instance, created = ScientificModelInstance.objects.get_or_create(model=sci_model,\n version=data[\"model_instance\"][\"version\"],\n parameters=data[\"model_instance\"][\"parameters\"])\n test_uri = data[\"test_definition\"]\n parsed_uri = urlparse(test_uri)\n test_id = int(parsed_uri.path.split(\"/\")[-1])\n test_instance_id = int(parse_qs(parsed_uri.query)['version'][0])\n test_instance = ValidationTestCode.objects.get(pk=test_instance_id)\n assert test_instance.test_definition.pk == test_id, \"{} != {}\".format(test_instance.test_definition.pk, test_id) # sanity check\n\n new_test_result = ValidationTestResult(model_instance=model_instance,\n test_definition=test_instance,\n results_storage=data[\"results_storage\"],\n result=float(data[\"result\"]), # should be a Quantity?\n passed=data[\"passed\"],\n platform=json.dumps(data[\"platform\"]),\n project=data.get(\"project\", \"\"))\n new_test_result.save()\n content = self.serializer.serialize(new_test_result)\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=201)\n\n def get(self, request, *args, **kwargs):\n results = ValidationTestResult.objects.all()\n content = self.serializer.serialize(results)\n\n return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleResultListView(LoginRequiredMixin, ListView):\n model = ValidationTestResult\n template_name = \"simple_result_list.html\"\n login_url='/login/hbp/'\n\n def get_queryset(self):\n filters = {}\n for key, value in self.request.GET.items():\n if key in VALID_RESULT_FILTERS:\n filters[VALID_RESULT_FILTERS[key]] = value\n\n return ValidationTestResult.objects.all().filter(**filters).order_by('-timestamp')\n\n def get_context_data(self, **kwargs):\n context = super(SimpleResultListView, self).get_context_data(**kwargs)\n context[\"section\"] = \"results\"\n context[\"build_info\"] = settings.BUILD_INFO\n\n # create list of model and tests filters\n context[\"filters\"] = {\n \"models\": ScientificModel.objects.values_list('name', flat=True),\n \"tests\": ValidationTestDefinition.objects.values_list('name', flat=True)\n }\n return context\n\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleResultDetailView(LoginRequiredMixin, DetailView):\n \n model = ValidationTestResult\n template_name = \"simple_result_detail.html\"\n\n def get_context_data(self, **kwargs):\n context = super(SimpleResultDetailView, self).get_context_data(**kwargs)\n context[\"section\"] = \"results\"\n context[\"build_info\"] = settings.BUILD_INFO\n context[\"related_data\"] = self.get_related_data(self.request.user)\n\n if self.object.project:\n context[\"collab_name\"] = self.get_collab_name()\n return context\n\n def get_collab_name(self):\n # import bbp_services.client as bsc\n # services = bsc.get_services()\n\n import hbp_service_client.document_service.client as doc_service_client\n access_token = get_access_token(self.request.user.social_auth.get())\n dsc = 
doc_service_client.Client.new(access_token)\n\n headers = {\n 'Authorization': get_auth_header(self.request.user.social_auth.get())\n }\n\n #to get collab_id\n svc_url = settings.HBP_COLLAB_SERVICE_URL\n context = self.request.session[\"next\"][6:]\n url = '%scollab/context/%s/' % (svc_url, context)\n res = requests.get(url, headers=headers)\n collab_id = res.json()['collab']['id']\n\n project = dsc.list_projects(collab_id=collab_id)[\"results\"]\n\n # url = services['collab_service']['prod']['url'] + \"collab/{}/\".format(self.object.project)\n # url = services['collab_service']['prod']['url'] + \"collab/{}/\".format(dsc.list_projects(collab_id=2169)[\"results\"][0][\"name\"])\n url = \"https://services.humanbrainproject.eu/collab/v0/collab/{}/\".format(dsc.list_projects(collab_id=collab_id)[\"results\"][0][\"name\"])\n \n response = requests.get(url, headers=headers)\n collab_name = response.json()[\"title\"]\n\n return collab_name\n\n def get_collab_storage_url(self):\n # import bbp_services.client as bsc\n # services = bsc.get_services()\n\n import hbp_service_client.document_service.client as doc_service_client\n access_token = get_access_token(self.request.user.social_auth.get())\n dsc = doc_service_client.Client.new(access_token)\n\n\n headers = {\n 'Authorization': get_auth_header(self.request.user.social_auth.get())\n }\n\n #to get collab_id\n svc_url = settings.HBP_COLLAB_SERVICE_URL\n context = self.request.session[\"next\"][6:]\n url = '%scollab/context/%s/' % (svc_url, context)\n res = requests.get(url, headers=headers)\n collab_id = res.json()['collab']['id']\n\n project = dsc.list_projects(collab_id=collab_id)[\"results\"]\n\n # url = services['collab_service']['prod']['url'] + \"collab/{}/nav/all/\".format(self.object.project)\n url = \"https://services.humanbrainproject.eu/collab/v0/collab/{}/nav/all/\".format(dsc.list_projects(collab_id=collab_id)[\"results\"][0][\"name\"])\n \n\n response = requests.get(url, headers=headers)\n if response.ok:\n nav_items = response.json()\n for item in nav_items: \n if item[\"app_id\"] == \"31\": # Storage app\n\n # return \"https://collab.humanbrainproject.eu/#/collab/{}/nav/{}\".format(self.object.project, item[\"id\"])\n return \"https://collab.humanbrainproject.eu/#/collab/{}/nav/{}\".format(dsc.list_projects(collab_id=collab_id)[\"results\"][0][\"name\"], item[\"id\"])\n \n else:\n return \"\"\n\n def get_related_data(self, user):\n # assume for now that data is in collab\n\n # from bbp_client.oidc.client import BBPOIDCClient\n # from bbp_client.document_service.client import Client as DocClient\n # import bbp_services.client as bsc\n # services = bsc.get_services()\n\n # oidc_client = BBPOIDCClient.bearer_auth(services['oidc_service']['prod']['url'], access_token)\n # doc_client = DocClient(services['document_service']['prod']['url'], oidc_client) # a remplacer : creer instance de nouvelle classe : hbp_service client\n\n import hbp_service_client.document_service.client as doc_service_client\n\n access_token = get_access_token(user.social_auth.get())\n dsc = doc_service_client.Client.new(access_token)\n\n headers = {\n 'Authorization': get_auth_header(user.social_auth.get())\n }\n\n #to get collab_id\n svc_url = settings.HBP_COLLAB_SERVICE_URL\n context = self.request.session[\"next\"][6:]\n url = '%scollab/context/%s/' % (svc_url, context)\n res = requests.get(url, headers=headers)\n collab_id = res.json()['collab']['id']\n\n project_dict = dsc.list_projects(collab_id=collab_id)\n \n try :\n dsc.create_folder(\"folder_test\", 
project_dict[\"results\"][0][\"uuid\"])\n\n except:\n print (\"folder already exist\") \n\n parse_result = urlparse(self.object.results_storage)\n\n # print (\"parse result : \")\n # print (parse_result)\n # print (\"\")\n\n ###reste a voir ici... je ne comprend pas ce qui doit etre dans parse_result\n if parse_result.scheme == \"collab\":\n # if 1 :\n list_folder = dsc.list_project_content(project_dict[\"results\"][0][\"uuid\"])\n # collab_folder = parse_result.path\n collab_folder = list_folder[\"results\"][0]\n \n #return doc_client.listdir(collab_folder)\n # folder_uuid = doc_client.get_standard_attr(collab_folder)['_uuid'] #a remplacer\n folder_uuid = collab_folder[\"uuid\"]\n \n data = {\n \"folder\": {\n \"path\": collab_folder,\n }\n }\n if self.object.project:\n data[\"folder\"][\"url\"] = self.get_collab_storage_url() + \"?state=uuid={}\".format(folder_uuid)\n return data\n else:\n print(\"Storage not yet supported\")\n\n return {}\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass SimpleResultEditView(View):\n model = ValidationTestResult \n template_name = \"simple_result_create.html\"\n login_url='/login/hbp/'\n form_class = ValidationTestResultForm\n serializer = ValidationTestResultSerializer\n\n def get(self, request, *args, **kwargs):\n\n h = ValidationTestResult()\n form = self.form_class(instance = h)\n datas = {}\n datas['models'] = list(ScientificModel.objects.all().distinct())\n datas['tests'] = list(ValidationTestDefinition.objects.all().distinct())\n print(datas)\n return render(request, self.template_name, {'form': form, 'datas':datas})\n\n\n def post(self, request, *args, **kwargs):\n \"\"\"Add a test\"\"\"\n print('result', request.POST.get(\"model_select\", None))\n result_creation = ValidationTestResult()\n #test_creation = ValidationTestResult() \n form = self.form_class(request.POST, instance=result_creation)\n #form = self.form_class(request.POST, instance=test_creation)\n\n if form.is_valid():\n form = form.save(commit=False)\n form.save()\n return HttpResponseRedirect(form.id)\n return render(request, self.template_name, {'form': form})\n\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass HomeValidationView(View):\n # model = ValidationTestDefinition\n # template_name = \"validation_home.html\"\n # login_url='/login/hbp/'\n\n # def get(self, request, *args, **kwargs):\n # template = loader.get_template(self.template_name)\n # return HttpResponse(template.render())\n\n\n # model = ValidationTestDefinition\n template_name = \"validation_home.html\"\n login_url='/login/hbp/'\n\n def get(self, request, *args, **kwargs):\n tests = ValidationTestDefinition.objects.all()\n models = ScientificModel.objects.all()\n tests = serializers.serialize(\"json\", tests)\n models = serializers.serialize(\"json\", models) \n\n return render(request, self.template_name, { 'tests':tests, 'models':models})\n\n\n\n\n#@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\n#class configviewDetailView(LoginRequiredMixin, DetailView): \n# model = configview\n# template_name = \"config_view_detail.html\"\n# login_url='/login/hbp/'\n\n# def get_context_data(self, **kwargs):\n# context = super(configviewDetailView, self).get_context_data(**kwargs)\n# context[\"section\"] = \"models\"\n# context[\"build_info\"] = settings.BUILD_INFO\n# return context\n\n\n\n#class configviewListResource(View):\n# serializer = configviewSerializer\n# login_url='/login/hbp/'\n\n# def post(self, request, *args, 
**kwargs):\n \n# print (\"configviewListResource POST\")\n# form = configviewForm(json.loads(request.body))\n# if form.is_valid():\n# model = form.save()\n# content = self.serializer.serialize(model)\n# return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=201)\n# else:\n# print(form.data)\n# return HttpResponseBadRequest(str(form.errors)) # todo: plain text\n\n# def get(self, request, *args, **kwargs):\n# print (\"configviewListResource GET\")\n \n# models = configview.objects.all()\n# content = self.serializer.serialize(models)\n# return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=200)\n\n\n\n\n#class configviewCreateView(View):\n \n# model = configview\n# template_name = \"config_view.html\"\n# login_url='/login/hbp/'\n# form = configviewForm\n# serializer = configviewSerializer\n# def get(self, request, *args, **kwargs):\n# model_configview = configview()\n# form = self.form(instance = model_configview)\n# return render(request, self.template_name, {'form': form})\n \n\n# def post(self, request, *args, **kwargs):\n# model_creation_configview = configview()\n# form = self.form(request.POST, instance=model_creation_configview)\n# if form.is_valid():\n# form = form.save(commit=False)\n# form.access_control = 3348 #self.get_collab_id()\n# form.save()\n# return HttpResponseRedirect(form.id)\n \n# return render(request, self.template_name, {'form': form}, status=400) \n\n \n# def get_collab_id(self):\n# social_auth = self.request.user.social_auth.get()\n# print(\"social auth\", social_auth.extra_data )\n \n# headers = {\n# 'Authorization': get_auth_header(self.request.user.social_auth.get())\n# }\n\n #to get collab_id\n# svc_url = settings.HBP_COLLAB_SERVICE_URL\n# context = self.request.session[\"next\"][6:]\n# url = '%scollab/context/%s/' % (svc_url, context)\n# res = requests.get(url, headers=headers)\n# collab_id = res.json()['collab']['id']\n# return collab_id\n \n\n\n\n\n\n\n#class configviewEditView(DetailView):\n# model = configview\n# form_class = configviewForm\n #template_name = \"config_view_edit.html\"\n# template_name = \"config_view_detail.tpl.html\"\n# login_url='/login/hbp/'\n\n# def get_context_data(self, **kwargs):\n# context = super(configviewEditView, self).get_context_data(**kwargs)\n# context[\"section\"] = \"models\"\n# context[\"build_info\"] = settings.BUILD_INFO\n# return context\n\n# def get(self, request, *args, **kwargs):\n# print(self.get_object().id)\n# h = configview.objects.get(id = self.get_object().id)\n# form = self.form_class(instance = h)\n# return render(request, self.template_name, {'form': form, 'object':h})\n \n# def post(self, request, *args, **kwargs):\n# m = self.get_object()\n# form = self.form_class(request.POST, instance=m)\n# if form.is_valid():\n# form = form.save(commit=False)\n# form.save()\n# return self.redirect(request, pk=m.id)\n# return render(request, self.template_name, {'form': form, \"object\": m})\n \n# @classmethod \n# def redirect(self, request, *args, **kwargs): \n# url = reverse(\"config-view-detail-view\", kwargs = { 'pk':kwargs['pk']})\n# return HttpResponseRedirect(url)\n\n\n\n\n\n\n\n#class configviewDetail(APIView):\n\n# def get(self, request, format=None, **kwargs):\n# serializer_contextconfigview = {\n# 'request': request,\n# }\n# tests_configview = configview.objects.filter(id = self.kwargs['id'])\n# configview_serializer = configviewSerializer(tests, context=serializer_context, many=True) \n\n# return Response({\n# 'tests_configview': 
configview_serializer.data,\n# })\n\n\n\n\n#class configviewRest(APIView):\n \n# def get(self, request, format=None, **kwargs):\n\n# serializer_contextconfigview = {'request': request,}\n\n# logger.debug(\"get -- s : \" + str(request.GET.items))\n\n\n# for key, value in self.request.GET.items():\n# if key == 'id':\n# tests_configview = configview.objects.filter(id = value)\n# else:\n# tests_configview = configview.objects.all()\n\n# configview_serializer = configviewSerializer(tests_configview, context=serializer_contextconfigview, many=True)\n\n# return Response({\n# 'tests_configview': configview_serializer.data,\n# })\n\n \n# def post(self, request, format=None):\n# serializer_context = {'request': request,}\n#\n# test_serializer = configviewSerializer(data=request.data['test_data'], context=serializer_context)\n# if test_serializer.is_valid():\n# test = test_serializer.save() \n# else:\n# return Response(test_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n#\n# code_serializer = configviewSerializer(data=request.data['code_data'], context=serializer_context)\n# if code_serializer.is_valid():\n# code_serializer.save(test_definition_id=test.id)\n# else:\n# return Response(code_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n# \n# return Response(status=status.HTTP_201_CREATED)\n\n\n\n\n\n\n\n\n#class AllModelAndTest(APIView):\n\n# def get(self, request, format=None, **kwargs):\n# models = ScientificModel.objects.all()\n# tests = ValidationTestDefinition.objects.all()\n\n# serializer_context = {\n# 'request': request,\n# }\n\n\n# model_serializer = ScientificModelSerializer(models, context=serializer_context, many=True )#data=request.data)\n# test_serializer = ValidationTestDefinitionSerializer(tests, context=serializer_context, many=True)\n# configview_serializer = configviewSerializer(models, context=serializer_context, many=True )#data=request.data)\n\n\n #need to transform model_serializer.data :\n # \"resource_uri\": \"/models/{}\".format(model.pk)\n\n #also need to join \"code\" data throught serializer\n\n\n\n# return Response({\n# 'models': model_serializer.data,\n# 'tests': test_serializer.data,\n# })\n\n\n\n\n# class TestDetail(APIView):\n\n# def get(self, request, format=None, **kwargs):\n# serializer_context = {\n# 'request': request,\n# }\n# # print (self.kwargs.__dict__)\n# tests = ValidationTestDefinition.objects.filter(id = self.kwargs['id'])\n# test_serializer = ValidationTestDefinitionSerializer(tests, context=serializer_context, many=True) \n\n# return Response({\n# 'tests': test_serializer.data,\n# })\n\nclass ScientificModelInstanceRest (APIView):\n def post(self, request, format=None):\n serializer_context = {'request': request,}\n model_id = str(len(request.POST.getlist('id')))\n\n serializer = ScientificModelInstanceSerializer(data=request.data, context=serializer_context)\n \n if serializer.is_valid(): \n serializer.save(model_id=model_id) #need to see how to get this value throught kwargs or other ?\n return Response(status=status.HTTP_201_CREATED) #put inside .is_valid\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ScientificModelRest(APIView):\n \n def get(self, request, format=None, **kwargs):\n serializer_context = {\n 'request': request,\n }\n model_id = str(len(request.GET.getlist('id')))\n\n if(model_id == '0'):\n models = ScientificModel.objects.all()\n model_serializer = ScientificModelSerializer(models, context=serializer_context, many=True )\n return Response({\n 'models': model_serializer.data,\n 
})\n else:\n for key, value in self.request.GET.items():\n if key == 'id':\n models = ScientificModel.objects.filter(id=value)\n model_instance = ScientificModelInstance.objects.filter(model_id=value)\n model_images = ScientificModelImage.objects.filter(model_id=value)\n model_serializer = ScientificModelSerializer(models, context=serializer_context, many=True )#data=request.data)\n model_instance_serializer = ScientificModelInstanceSerializer(model_instance, context=serializer_context, many=True )\n model_image_serializer = ScientificModelImageSerializer(model_images, context=serializer_context, many=True )\n #need to transform model_serializer.data :\n # \"resource_uri\": \"/models/{}\".format(model.pk)\n\n return Response({\n 'models': model_serializer.data,\n 'model_instances': model_instance_serializer.data,\n 'models_images': model_image_serializer.data,\n })\n\n def post(self, request, format=None):\n serializer_context = {'request': request,}\n print('fdeqr')\n model_serializer = ScientificModelSerializer(data=request.data['model'], context=serializer_context)\n if model_serializer.is_valid():\n model = model_serializer.save()\n else:\n return Response(model_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n model_instance_serializer = ScientificModelInstanceSerializer(data=request.data['model_instance'], context=serializer_context)\n if model_instance_serializer.is_valid():\n model_instance_serializer.save(model_id=model.id)\n else:\n return Response(model_instance_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n print(request.data['model_image'])\n if request.data['model_image']!={}:\n for i in request.data['model_image']:\n model_image_serializer = ScientificModelImageSerializer(data=i, context=serializer_context)\n if model_image_serializer.is_valid():\n print(\"is valid\")\n model_image_serializer.save(model_id=model.id)\n else:\n print('is not valid')\n return Response(model_image_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_201_CREATED)\n\nclass ValidationTestCodeRest(APIView):\n\n def get(self, request, format=None, **kwargs):\n \n serializer_context = {'request': request,}\n nb_id = str(len(request.GET.getlist('id')))\n nb_td_id = str(len(request.GET.getlist('test_definition_id')))\n\n if nb_id == '0' and nb_td_id == '0':\n tests = ValidationTestCode.objects.all()\n else:\n for key, value in self.request.GET.items():\n if key == 'id':\n tests = ValidationTestCode.objects.filter(id = value)\n if key == 'test_definition_id':\n tests = ValidationTestCode.objects.filter(test_definition_id = value)\n\n test_serializer = ValidationTestCodeSerializer(tests, context=serializer_context, many=True)\n return Response({\n 'tests': test_serializer.data,\n })\n\n\n def post(self, request, format=None):\n serializer_context = {'request': request,}\n test_id = str(len(request.POST.getlist('id')))\n\n serializer = ValidationTestCodeSerializer(data=request.data, context=serializer_context)\n \n if serializer.is_valid(): \n serializer.save(test_definition_id=test_id) #need to see how to get this value throught kwargs or other ?\n return Response(status=status.HTTP_201_CREATED) #put inside .is_valid\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_serializer_class(self):\n print (self.request.method)\n # if self.request.method in ('GET', )\n # return ValidationTestDefinitionWithCodesReadSerializer\n return ValidationTestCodeSerializer\n\n\nclass ValidationTestDefinitionRest(APIView):\n \n 
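# Descriptive note (added, not in the original source): like ValidationTestCodeRest\n # above, GET returns every ValidationTestDefinition or filters by an ?id= query\n # parameter; POST expects {'test_data': ..., 'code_data': ...} and creates the\n # definition together with its associated ValidationTestCode.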
def get(self, request, format=None, **kwargs):\n\n serializer_context = {'request': request,}\n nb_id = str(len(request.GET.getlist('id')))\n\n if(nb_id == '0'):\n tests = ValidationTestDefinition.objects.all()\n else:\n for key, value in self.request.GET.items():\n if key == 'id':\n tests = ValidationTestDefinition.objects.filter(id = value)\n\n test_serializer = ValidationTestDefinitionSerializer(tests, context=serializer_context, many=True)\n\n return Response({\n 'tests': test_serializer.data,\n })\n\n\n def post(self, request, format=None):\n serializer_context = {'request': request,}\n\n test_serializer = ValidationTestDefinitionSerializer(data=request.data['test_data'], context=serializer_context)\n if test_serializer.is_valid():\n test = test_serializer.save() \n else:\n return Response(test_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n code_serializer = ValidationTestCodeSerializer(data=request.data['code_data'], context=serializer_context)\n if code_serializer.is_valid():\n code_serializer.save(test_definition_id=test.id)\n else:\n return Response(code_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n return Response(status=status.HTTP_201_CREATED)\n\n\nclass TestCommentRest(APIView):\n def get(self, request, format=None, **kwargs):\n serializer_context = {'request': request,}\n nb_id = str(len(request.GET.getlist('id')))\n nb_test_id = str(len(request.GET.getlist('test_id')))\n\n if nb_id == '0' and nb_test_id == '0':\n comments = Comment.objects.all()\n else:\n for key, value in self.request.GET.items():\n if key == 'id':\n comments = Comment.objects.filter(id = value)\n if key == 'test_id':\n logger.info(\"value : \" + value)\n comments = Comment.objects.filter(test_id = value)\n\n comment_serializer = CommentSerializer(comments, context=serializer_context, many=True)\n\n return Response({\n 'comments': comment_serializer.data,\n })\n\n# @method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\n# class ValidationTestResultEdit(TemplateView): \n# template_name = \"simple_result_edit.html\"\n# model = ValidationTestResult\n# form_class = ValidationTestResultForm\n\n# serializer = ValidationTestResultSerializer\n# login_url='/login/hbp/'\n\n# def get(self, request, *args, **kwargs):\n\n# h = ValidationTestResult()\n# form = self.form_class(instance = h)\n\n# return render(request, self.template_name, {'form': form, })\n\n\n# def post(self, request, *args, **kwargs):\n# \"\"\"Add a result\"\"\"\n \n# result_creation = ValidationTestResult()\n# form = self.form_class(request.POST, instance=result_creation)\n\n# if form.is_valid():\n# result = form.save()\n# content = self.serializer.serialize(test)\n# return HttpResponse(content, content_type=\"application/json; charset=utf-8\", status=201)\n# else:\n# print(form.data)\n# return HttpResponseBadRequest(str(form.errors)) # todo: plain text\n\n# #############################################################\n###views for model catalog api\n@method_decorator(login_required(login_url='/login/hbp'), name='dispatch' )\nclass ModelCatalogView(View):\n\n template_name = \"model_catalog.html\"\n login_url='/login/hbp/'\n\n def get(self, request, *args, **kwargs):\n models = ScientificModel.objects.all()\n models = serializers.serialize(\"json\", models) \n return render(request, self.template_name, {'models':models})\n\n 
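\n# --- Hedged sketch (added; not part of the original module, and the helper name is\n# hypothetical). The \"resolve collab_id from the session context\" block is repeated\n# verbatim in several methods above; it could be factored out once, using only names\n# this module already relies on (settings, requests, get_auth_header):\ndef _get_collab_id_from_context(request):\n \"\"\"Return the collab id for the app context stored in the session.\"\"\"\n headers = {'Authorization': get_auth_header(request.user.social_auth.get())}\n context = request.session[\"next\"][6:]\n url = '%scollab/context/%s/' % (settings.HBP_COLLAB_SERVICE_URL, context)\n return requests.get(url, headers=headers).json()['collab']['id']\n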
","sub_path":"model_validation_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":61120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"297271793","text":"# -*- coding: utf-8 -*-\r\n# Author:leali\r\n# Description:\r\n# Version:v1.0\r\n# Date:2019-01-01 04:38 PM\r\n\r\nimport scrapy\r\n\r\n\r\nclass QuotesSpider(scrapy.Spider):\r\n name = \"quotes\"\r\n\r\n def start_requests(self):\r\n urls = [\r\n 'http://quotes.toscrape.com/'\r\n ]\r\n for url in urls:\r\n yield scrapy.Request(url=url, callback=self.parse)\r\n\r\n def parse(self, response):\r\n for quote in response.css('div.quote'):\r\n yield {\r\n 'text': quote.css('span.text::text').extract_first(),\r\n 'author': quote.css('small.author::text').extract_first(),\r\n 'tags': quote.css('div.tags > a.tag::text').extract(),\r\n }\r\n # For
<a> elements there is a shortcut: response.follow uses their href attribute automatically\r\n for href in response.css('li.next a'):\r\n yield response.follow(href, callback=self.parse)\r\n # next_page = response.css('li.next > a::attr(href)').extract_first()\r\n # if next_page is not None:\r\n # yield response.follow(next_page, callback=self.parse)\r\n","sub_path":"Spider/tutorial/tutorial/spiders/quotes_spider.py","file_name":"quotes_spider.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"478881535","text":"import adage\nimport adage.backends\nimport os\nimport json\nimport workflow_loader\nimport clihelpers\nimport visualize\nimport serialize\nimport yadageschemas\nimport shutil\nimport logging\nfrom packtivity.statecontexts.posixfs_context import LocalFSProvider, LocalFSState\n\nfrom yadage.controllers import setup_controller_fromstring\nfrom yadage.wflow import YadageWorkflow\n\nlog = logging.getLogger(__name__)\n\nclass YadageSteering():\n def __init__(self,loggername = __name__):\n self.log = logging.getLogger(loggername)\n self.workdir = None\n self.yadagedir = None\n self.controller = None\n self.rootprovider = None\n self.adage_kwargs = {}\n\n @property\n def workflow(self):\n return self.controller.adageobj\n\n def prepare_workdir(self, workdir, accept_existing_workdir = False, contextinit = None):\n self.workdir = workdir\n\n\n writable_context = LocalFSState([workdir])\n self.rootprovider = LocalFSProvider(contextinit,writable_context)\n\n self.yadagedir = '{}/_yadage/'.format(workdir)\n\n if os.path.exists(self.yadagedir):\n if not accept_existing_workdir:\n raise RuntimeError('yadage meta directory exists. explicitly accept')\n self.log.info('yadage meta directory exists.. 
will remove and remake')\n shutil.rmtree(self.yadagedir)\n os.makedirs(self.yadagedir)\n \n def init_workflow(self, workflow, toplevel, initdata, ctrlsetup = 'inmem', initdir = None, search_initdir = True, validate = True, schemadir = yadageschemas.schemadir):\n ##check input data\n if search_initdir and initdir:\n clihelpers.discover_initfiles(initdata,os.path.realpath(initdir))\n\n workflow_json = workflow_loader.workflow(\n workflow,\n toplevel=toplevel,\n schemadir=schemadir,\n validate=validate\n )\n with open('{}/yadage_template.json'.format(self.yadagedir), 'w') as f:\n json.dump(workflow_json, f)\n workflowobj = YadageWorkflow.createFromJSON(workflow_json, self.rootprovider)\n if initdata:\n log.info('initializing workflow with %s',initdata)\n workflowobj.view().init(initdata)\n else:\n log.info('no initialization data')\n\n self.controller = setup_controller_fromstring(workflowobj, ctrlsetup)\n\n def adage_argument(self,**kwargs):\n self.adage_kwargs.update(**kwargs)\n\n def run_adage(self, backend, **adage_kwargs):\n self.controller.backend = backend\n self.adage_argument(**adage_kwargs)\n adage.rundag(controller = self.controller, **self.adage_kwargs)\n\n def serialize(self):\n serialize.snapshot(\n self.workflow,\n '{}/yadage_snapshot_workflow.json'.format(self.yadagedir),\n '{}/yadage_snapshot_backend.json'.format(self.yadagedir)\n )\n\n def visualize(self):\n visualize.write_prov_graph(self.yadagedir, self.workflow, vizformat='png')\n visualize.write_prov_graph(self.yadagedir, self.workflow, vizformat='pdf')\n\n","sub_path":"yadage/steering_object.py","file_name":"steering_object.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"72992717","text":"# Author: F. Alex Wolf (http://falexwolf.de)\n\"\"\"Differential Gene Expression Analysis\n\nThis is a Beta Version of a tool for differential gene expression testing\nbetween sets detected in previous tools. Tools such as dpt, cluster,...\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom math import sqrt, floor\nfrom scipy.sparse import issparse\nfrom scipy.stats import rankdata\nfrom scipy.stats import norm\nfrom .. import utils\nfrom .. import logging as logg\nfrom ..preprocessing import simple\n\ndef rank_genes_groups(\n adata,\n groupby,\n groups='all',\n group_reference=None,\n n_genes=100,\n compute_distribution=False,\n only_positive=True,\n copy=False,\n test_type='t_test'):\n \"\"\"Rank genes according to differential expression [Wolf17]_.\n\n Rank genes by differential expression. By default, a t-test-like ranking is\n used, in which means are normalized with variances. Soon, a Wilcoxon-rank\n test and other alternatives will be provided.\n\n Parameters\n ----------\n adata : `AnnData`\n Annotated data matrix.\n groupby : `str`\n The key of the sample grouping to consider.\n groups : `str`, `list`, optional (default: `'all'`)\n Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall\n be restricted. If not passed, a ranking will be generated for all\n groups.\n group_reference : `str` or `None`, optional (default: `None`)\n If `None`, compare each group to the union of the rest of the group. 
If\n a group identifier, the comparison will be with respect to this group.\n n_genes : `int` (default: 100)\n How many genes to rank by default.\n compute_distribution : `bool`\n If `True`, also computes the distribution for top-ranked genes, which\n can be visualized using `sc.pl.rank_genes_groups_violin(adata)`.\n test_type : 't_test' or 'wilcoxon' (default: 't_test')\n If 't_test', use t_test to calculate test statistics. If 'wilcoxon', use Wilcoxon-Rank-Sum\n to calculate test statistic.\n Returns\n -------\n rank_genes_groups_gene_zscores : np.ndarray of dtype float (adata.add)\n Array of shape (number of comparisons) × (number of genes) storing the\n zscore of the each gene for each test.\n rank_genes_groups_gene_names : np.ndarray of dtype str (adata.add)\n Array of shape (number of comparisons). Stores the labels for each comparison,\n for example \"C1 vs. C2\" when comparing category 'C1' with 'C2'.\n \"\"\"\n logg.info('find differentially expressed genes', r=True)\n adata = adata.copy() if copy else adata\n n_genes_user = n_genes\n utils.check_adata(adata)\n # for clarity, rename variable\n groups_order = groups\n if isinstance(groups_order, list) and isinstance(groups_order[0], int):\n groups_order = [str(n) for n in groups_order]\n if group_reference is not None and group_reference not in set(groups_order):\n groups_order += [group_reference]\n if (group_reference is not None\n and group_reference not in set(adata.add[groupby + '_order'])):\n raise ValueError('group_reference = {} needs to be one of groupby = {}.'\n .format(group_reference, groupby))\n groups_order, groups_masks = utils.select_groups(\n adata, groups_order, groupby)\n adata.add['rank_genes_groups'] = groupby\n adata.add['rank_genes_groups_order'] = groups_order\n X = adata.X\n\n rankings_gene_zscores = []\n rankings_gene_names = []\n n_groups = groups_masks.shape[0]\n n_genes = X.shape[1]\n ns = np.zeros(n_groups, dtype=int)\n for imask, mask in enumerate(groups_masks):\n ns[imask] = np.where(mask)[0].size\n # TODO: Add logging such that test-type is included\n logg.info('... consider \"{}\":'.format(groupby), groups_order,\n 'with sample numbers', ns)\n if group_reference is not None:\n ireference = np.where(groups_order == group_reference)[0][0]\n reference_indices = np.arange(adata.n_vars, dtype=int)\n\n # Here begins the part that is test-specific.\n\n if test_type not in {'t_test', 'wilcoxon'}:\n # TODO: Print Error Message in logging\n logg.warn('Test_type should be either \"wilcoxon\" or \"t_test\". 
T-test is being used as default')\n # For convenience, and to avoid total collapse, set test_type to t_test\n test_type='t_test'\n\n if test_type == 't_test':\n # loop over all masks and compute means, variances and sample numbers\n # Definition of n_groups and n_genes was moved ahead since required for all test-types\n\n means = np.zeros((n_groups, n_genes))\n vars = np.zeros((n_groups, n_genes))\n # Definition of ns Moved ahead\n for imask, mask in enumerate(groups_masks):\n means[imask], vars[imask] = simple._get_mean_var(X[mask])\n # Definition of ns moved ahead\n # The following code parts were moved ahead since required by all test-types: Logging, ireference,\n # rankings_gene_zscores = [] and rankings_gene_names = [] and reference_indices\n\n\n # test each either against the union of all other groups\n # or against a specific group\n\n\n for igroup in range(n_groups):\n if group_reference is None:\n mask_rest = ~groups_masks[igroup]\n else:\n if igroup == ireference: continue\n else: mask_rest = groups_masks[ireference]\n mean_rest, var_rest = simple._get_mean_var(X[mask_rest])\n # Make a more conservative assumption on the variance reduction\n # in the reference. Instead of this\n ns_rest = np.where(mask_rest)[0].size\n # use this\n # ns_rest = ns[igroup]\n denominator = np.sqrt(vars[igroup]/ns[igroup] + var_rest/ns_rest)\n denominator[np.flatnonzero(denominator == 0)] = np.nan\n zscores = (means[igroup] - mean_rest) / denominator\n zscores[np.isnan(zscores)] = 0\n zscores = zscores if only_positive else np.abs(zscores)\n partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]\n partial_indices = np.argsort(zscores[partition])[::-1]\n global_indices = reference_indices[partition][partial_indices]\n rankings_gene_zscores.append(zscores[global_indices])\n rankings_gene_names.append(adata.var_names[global_indices])\n if compute_distribution:\n mask = groups_masks[igroup]\n for gene_counter in range(n_genes_user):\n gene_idx = global_indices[gene_counter]\n X_col = X[mask, gene_idx]\n if issparse(X): X_col = X_col.toarray()[:, 0]\n identifier = _build_identifier(groupby, groups_order[igroup],\n gene_counter, adata.var_names[gene_idx])\n full_col = np.empty(adata.n_smps)\n full_col[:] = np.nan\n full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]\n adata.smp[identifier] = full_col\n elif test_type == 'wilcoxon':\n # Wilcoxon-rank-sum test is usually more powerful in detecting marker genes\n # Limit maximal RAM that is required by the calculation. Currently set fixed to roughly 100 MByte\n CONST_MAX_SIZE = 10000000\n ns_rest = np.zeros(n_groups, dtype=int)\n # initialize space for z-scores\n zscores = np.zeros(n_genes)\n # First loop: Loop over all genes\n if group_reference is not None:\n for imask, mask in enumerate(groups_masks):\n if imask == ireference:\n continue\n else:\n mask_rest = groups_masks[ireference]\n ns_rest[imask] = np.where(mask_rest)[0].size\n if ns_rest[imask] <= 25 or ns[imask] <= 25:\n logg.hint(\"Few observations in a group for normal approximation (<=25). 
Lower test accuracy.\")\n n_active = ns[imask]\n m_active = ns_rest[imask]\n # Now calculate gene expression ranking in batches:\n batch = []\n # Calculate batch frames\n n_genes_max_batch = floor(CONST_MAX_SIZE / (n_active + m_active))\n if n_genes_max_batch < n_genes - 1:\n batch_index = n_genes_max_batch\n while batch_index < n_genes - 1:\n batch.append(batch_index)\n batch_index = batch_index + n_genes_max_batch\n batch.append(n_genes - 1)\n else:\n batch.append(n_genes - 1)\n left = 0\n # Calculate rank sums for each batch for the current mask\n for batch_index, right in enumerate(batch):\n # Check if issparse is true: AnnData objects are currently sparse.csr or ndarray.\n if issparse(X):\n df1 = pd.DataFrame(data=X[mask, left:right].todense())\n df2 = pd.DataFrame(data=X[mask_rest, left:right].todense(),\n index=np.arange(start=n_active, stop=n_active + m_active))\n else:\n df1 = pd.DataFrame(data=X[mask, left:right])\n df2 = pd.DataFrame(data=X[mask_rest, left:right],\n index=np.arange(start=n_active, stop=n_active + m_active))\n df1 = df1.append(df2)\n ranks = df1.rank()\n # sum up adjusted_ranks to calculate W_m,n\n zscores[left:right] = np.sum(ranks.loc[0:n_active, :])\n left = right + 1\n zscores = (zscores - (n_active * (n_active + m_active + 1) / 2)) / sqrt(\n (n_active * m_active * (n_active + m_active + 1) / 12))\n zscores = zscores if only_positive else np.abs(zscores)\n zscores[np.isnan(zscores)] = 0\n partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]\n partial_indices = np.argsort(zscores[partition])[::-1]\n global_indices = reference_indices[partition][partial_indices]\n rankings_gene_zscores.append(zscores[global_indices])\n rankings_gene_names.append(adata.var_names[global_indices])\n if compute_distribution:\n # remove line: current mask already available\n # Add calculation of means, var: (Unnecessary for wilcoxon if compute distribution=False)\n mean, vars = simple._get_mean_var(X[mask])\n mean_rest, var_rest = simple._get_mean_var(X[mask_rest])\n denominator = np.sqrt(vars / ns[imask] + var_rest / ns_rest[imask])\n denominator[np.flatnonzero(denominator == 0)] = np.nan\n for gene_counter in range(n_genes_user):\n gene_idx = global_indices[gene_counter]\n X_col = X[mask, gene_idx]\n if issparse(X): X_col = X_col.toarray()[:, 0]\n identifier = _build_identifier(groupby, groups_order[imask],\n gene_counter, adata.var_names[gene_idx])\n full_col = np.empty(adata.n_smps)\n full_col[:] = np.nan\n full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]\n adata.smp[identifier] = full_col\n\n # If no reference group exists, ranking needs only to be done once (full mask)\n else:\n zscores=np.zeros((n_groups,n_genes))\n batch = []\n n_cells=X.shape[0]\n n_genes_max_batch = floor(CONST_MAX_SIZE / n_cells)\n if n_genes_max_batch < n_genes - 1:\n batch_index = n_genes_max_batch\n while batch_index < n_genes - 1:\n batch.append(batch_index)\n batch_index = batch_index + n_genes_max_batch\n batch.append(n_genes - 1)\n else:\n batch.append(n_genes - 1)\n left = 0\n for batch_index, right in enumerate(batch):\n # Check if issparse is true\n if issparse(X):\n df1 = pd.DataFrame(data=X[:, left:right].todense())\n else:\n df1 = pd.DataFrame(data=X[:, left:right])\n ranks = df1.rank()\n # sum up adjusted_ranks to calculate W_m,n\n for imask, mask in enumerate(groups_masks):\n zscores[imask,left:right] = np.sum(ranks.loc[mask, :])\n left = right + 1\n\n for imask, mask in enumerate(groups_masks):\n\n zscores[imask,:] = (zscores[imask,:] - (ns[imask] * 
(n_cells + 1) / 2)) / sqrt(\n (ns[imask] * (n_cells-ns[imask]) * (n_cells + 1) / 12))\n zscores = zscores if only_positive else np.abs(zscores)\n zscores[np.isnan(zscores)] = 0\n partition = np.argpartition(zscores[imask,:], -n_genes_user)[-n_genes_user:]\n partial_indices = np.argsort(zscores[imask,partition])[::-1]\n global_indices = reference_indices[partition][partial_indices]\n rankings_gene_zscores.append(zscores[imask, global_indices])\n rankings_gene_names.append(adata.var_names[global_indices])\n if compute_distribution:\n mean, vars = simple._get_mean_var(X[mask])\n mean_rest, var_rest = simple._get_mean_var(X[~mask])\n denominator = np.sqrt(vars / ns[imask] + var_rest / (n_cells-ns[imask]))\n denominator[np.flatnonzero(denominator == 0)] = np.nan\n for gene_counter in range(n_genes_user):\n gene_idx = global_indices[gene_counter]\n X_col = X[mask, gene_idx]\n if issparse(X): X_col = X_col.toarray()[:, 0]\n identifier = _build_identifier(groupby, groups_order[imask],\n gene_counter, adata.var_names[gene_idx])\n full_col = np.empty(adata.n_smps)\n full_col[:] = np.nan\n full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]\n adata.smp[identifier] = full_col\n\n\n\n # Here ends the test-specific part, do logging\n\n\n\n groups_order_save = groups_order\n if group_reference is not None:\n groups_order_save = [g for g in groups_order if g != group_reference]\n adata.add['rank_genes_groups_gene_scores'] = np.rec.fromarrays(\n [n for n in rankings_gene_zscores],\n dtype=[(rn, 'float32') for rn in groups_order_save])\n adata.add['rank_genes_groups_gene_names'] = np.rec.fromarrays(\n [n for n in rankings_gene_names],\n dtype=[(rn, 'U50') for rn in groups_order_save])\n logg.m(' finished', t=True, end=' ')\n logg.m('and added\\n'\n ' \"rank_genes_groups_gene_names\", np.recarray to be indexed by the `groups` (adata.add)\\n'\n ' \"rank_genes_groups_gene_zscores\", the scores (adata.add)\\n'\n ' \"rank_genes_...\", distributions of top-ranked genes (adata.smp)')\n return adata if copy else None\n\n\ndef _build_identifier(groupby, name, gene_counter, gene_name):\n return 'rank_genes_{}_{}_{}_{}'.format(\n groupby, name, gene_counter, gene_name)\n","sub_path":"scanpy/tools/rank_genes_groups.py","file_name":"rank_genes_groups.py","file_ext":"py","file_size_in_byte":15761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"373934768","text":"#!/usr/bin/env python3\n# -*- utf-8 -*-\n\n\"\"\"\ncreate a template to jekyll\nusage: jekyll_post title\n\n\"\"\"\n\n__version__ = \"0.01\"\n__author__ = \"wwq0327@gmail.com\"\n\nimport os\nimport sys\n\nfrom datetime import datetime\n\nSAVE_DIR = \"_posts\"\nHEADER = \"\"\"---\nlayout: post\ntitle: \ndescription:\nkeywords:\n---\"\"\"\n\ndef gen_title(title):\n date_string = datetime.today().strftime(\"%Y-%m-%d\")\n return date_string + \"-\" + title + \".markdown\"\n\n\ndef save(filename):\n save_path = os.path.join(SAVE_DIR, filename)\n if os.path.exists(save_path):\n print(\"%s is exists.\" % filename)\n sys.exit(1)\n \n print(save_path)\n try:\n f = open(save_path, \"w\")\n f.write(HEADER)\n except IOError as e:\n print(e.strerror)\n sys.exit(0)\n\n f.close()\n\ndef main():\n if len(sys.argv) == 2:\n filename = gen_title(sys.argv[1])\n \n save(filename)\n else:\n print(__doc__)\n\nif __name__ == '__main__':\n\n 
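# Usage note (added): e.g. \"python3 jekyll_post.py my-first-post\" writes\n # _posts/YYYY-MM-DD-my-first-post.markdown pre-filled with HEADER.\n 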
main()\n","sub_path":"jekyll_post.py","file_name":"jekyll_post.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"272398260","text":"class Node:\n \"\"\"Node used with doubly-linked list.\"\"\"\n\n def __init__(self, item):\n self.item = item\n self.next = None # Link to next node.\n self.prev = None # Link to previous node.\n\n\nclass OrderedList:\n \"\"\"A doubly-linked ordered list of items, from lowest (head of list) to highest (tail of list).\"\"\"\n\n def __init__(self):\n \"\"\"Use one dummy node as described in class.\n ***No other attributes***\n Do not have an attribute to keep track of size.\"\"\"\n\n self.sentinel = Node(\n None\n ) # Dummy node that plays the role of both the head and the tail!\n # It does not contain data.\n self.sentinel.next = (\n self.sentinel\n ) # The sentinel points to itself as the list is a loop.\n self.sentinel.prev = self.sentinel\n\n def is_empty(self):\n \"\"\"Returns back True if OrderedList is empty.\n MUST have O(1) performance.\"\"\"\n return self.sentinel.next == self.sentinel\n\n def add(self, item):\n \"\"\"Adds an item to OrderedList, in the proper location based on ordering of items\n from lowest (at head of list) to highest (at tail of list).\n If the item is already in the list, do not add it again .\n MUST have O(n) average-case performance.\"\"\"\n\n new_node = Node(item)\n if self.is_empty(): # Adds first node.\n new_node.prev = self.sentinel\n new_node.next = self.sentinel\n\n self.sentinel.next = new_node\n new_node.next.prev = new_node\n else:\n # Fix new node links before you fix sentinel node links, or you will lose information!\n # Add new node to start.\n if self.sentinel.next.item > new_node.item:\n new_node.prev = self.sentinel\n new_node.next = (\n self.sentinel.next\n ) # 'self.sentinel.next' was pointing to first node.\n self.sentinel.next = (\n new_node # 'self.sentinel.next' points to first node.\n )\n new_node.next.prev = new_node # Fix 'previous link' of previous first node to point to new node.\n else:\n current_node = self.sentinel.next\n while current_node.next != self.sentinel:\n if current_node.item == new_node.item:\n return\n if (\n current_node.next.item > new_node.item\n ): # Then insert after current node!\n new_node.prev = current_node\n new_node.next = current_node.next\n\n current_node.next = new_node\n new_node.next.prev = new_node\n return # Don't want to run any of the code below if this is statement is executed!\n current_node = current_node.next\n\n # Place new node at the end of the list. New node is the max node.\n if current_node.item == new_node.item:\n return\n last_node = self.sentinel.prev\n # New node links.\n new_node.prev = last_node\n new_node.next = last_node.next\n last_node.next = new_node\n new_node.next.prev = new_node\n\n def remove(self, item):\n \"\"\"Removes an item from OrderedList. 
If item is removed (i.e., was in the list) return True.\n If item was not removed (was not in the list) return False.\n MUST have O(n) average-case performance.\"\"\"\n\n current_node = self.sentinel.next\n if self.is_empty():\n return False\n\n # For list with 1 node!\n if (\n current_node.next == self.sentinel\n and current_node.prev == self.sentinel\n and current_node.item == item\n ):\n self.sentinel.next = (\n self.sentinel\n ) # Back to 'base case' (i.e., empty list/default).\n self.sentinel.prev = self.sentinel\n return True\n if (\n current_node.next == self.sentinel\n and current_node.prev == self.sentinel\n and current_node.item != item\n ):\n return False\n # For list with more than 1 node.\n while current_node.next != self.sentinel:\n if current_node.item == item: # Then remove current node.\n # Skips links over current node.\n current_node.next.prev = current_node.prev\n current_node.prev.next = current_node.next\n return True\n current_node = current_node.next\n # For last node in list with more than 1 node.\n if current_node.next == self.sentinel and current_node.item == item:\n current_node.next.prev = current_node.prev\n current_node.prev.next = current_node.next\n return True\n return False\n\n def index(self, item):\n \"\"\"Returns index of an item in OrderedList (assuming head of list is index 0).\n If item is not in list, return None.\n Must have O(n) average-case performance.\"\"\"\n\n if self.is_empty():\n return None\n\n current_node = self.sentinel.next\n # If first node contains item.\n if current_node.item == item:\n return 0\n # If list has 1 node.\n if (\n current_node.next == self.sentinel\n and current_node.prev == self.sentinel\n and current_node.item != item\n ):\n return None\n\n # For list with more than 1 node.\n index = 1\n while current_node.next != self.sentinel:\n if current_node.next.item == item:\n return index\n current_node = current_node.next\n index = index + 1\n return None\n\n def pop(self, index):\n \"\"\"Removes and returns item at an index (assuming head of list is index 0).\n If index is negative or >= size of list, raises IndexError.\n Must have O(n) average-case performance.\"\"\"\n\n if index < 0:\n raise IndexError\n if self.is_empty():\n raise IndexError\n current_node = self.sentinel.next\n # For list with 1 node!\n if (\n current_node.next == self.sentinel\n and current_node.prev == self.sentinel\n and index == 0\n ):\n self.sentinel.next = (\n self.sentinel\n ) # Back to 'base case' (i.e., empty list/default).\n self.sentinel.prev = self.sentinel\n return current_node.item\n # If list has 1 node.\n if (\n current_node.next == self.sentinel\n and current_node.prev == self.sentinel\n and index != 0\n ):\n raise IndexError\n\n # For list with more than 1 node.\n # Loop through a set (index) number of times.\n my_index = 0\n while my_index < index:\n current_node = current_node.next\n if current_node == self.sentinel:\n raise IndexError\n my_index = my_index + 1\n\n current_node.next.prev = current_node.prev\n current_node.prev.next = current_node.next\n return current_node.item\n\n def search_helper(self, item, node):\n \"\"\"Recursive helper function to search.\"\"\"\n\n if node == self.sentinel:\n return False\n if node.item == item:\n return True\n return self.search_helper(item, node.next)\n\n def search(self, item):\n \"\"\"Searches OrderedList for item, returns True if item is in list, False otherwise.\n To practice recursion, this method must call a recursive method that will search the list.\n MUST have O(n) average-case 
performance.\"\"\"\n\n return self.search_helper(item, self.sentinel.next)\n\n def python_list(self):\n \"\"\"Return a Python list representation of OrderedList, from head to tail.\n For example, list with integers 1, 2, and 3 would return [1, 2, 3].\n MUST have O(n) performance.\"\"\"\n\n current_node = self.sentinel.next\n if self.is_empty():\n return []\n if current_node.next == self.sentinel and current_node.prev == self.sentinel:\n return [current_node.item]\n save = [current_node.item]\n while current_node.next != self.sentinel:\n save.append(current_node.next.item)\n current_node = current_node.next\n return save\n\n def python_list_reversed(self):\n \"\"\"Return a Python list representation of OrderedList, from tail to head, using recursion.\n For example, list with integers 1, 2, and 3 would return [3, 2, 1]\n To practice recursion, this method must call a RECURSIVE method that will return a reversed list.\n MUST have O(n) performance.\"\"\"\n\n return self.python_list_reversed_helper(self.sentinel.next)\n\n def python_list_reversed_helper(self, node):\n \"\"\"Recursive helper function to python_list_reversed.\"\"\"\n\n if node == self.sentinel:\n return []\n else:\n return self.python_list_reversed_helper(node.next) + [node.item]\n\n def size(self):\n \"\"\"Returns the number of items in the OrderedList.\n To practice recursion, this method must call a RECURSIVE method that will count and return the number of\n items in the list.\n MUST have O(n) performance.\"\"\"\n\n return self.size_helper(self.sentinel.next)\n\n def size_helper(self, node):\n \"\"\"Recursive helper function to python_list_reversed.\"\"\"\n\n if node == self.sentinel:\n return 0\n else:\n return self.size_helper(node.next) + 1\n\n\nOL = OrderedList()\nOL.add(2)\nOL.add(3)\nOL.add(1)\nOL.add(4)\nprint(OL.search(1))\n\nOL = OrderedList()\nOL.add(2)\n\nOL.add(3)\nOL.add(1)\nOL.add(4)\nprint(OL.remove(5))\n","sub_path":"Labs/Lab4/ordered_list.py","file_name":"ordered_list.py","file_ext":"py","file_size_in_byte":9931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"490861205","text":"import re\nimport copy\nimport logging\nfrom urllib2 import urlparse\nfrom datetime import datetime,timedelta\nfrom cgi import parse_qsl\nfrom urllib import urlencode\n\nfrom tgimport import tg\nfrom baseconnector import BaseConnector\nfrom utils.utils import stripHtml, get_hash\nfrom utils.decorators import logit\nfrom utils.sessioninfomanager import checkSessionInfo, updateSessionInfo\n\nlog = logging.getLogger('CardRatingsConnector')\nclass CardRatingsConnector(BaseConnector):\n @logit(log , 'fetch')\n def fetch(self):\n '''This is a fetch method which fetches the data \n sample url:http://www.cardratings.com/forum/search.php?st=0&sk=t&sd=d&sr=posts&keywords=american+express\n '''\n try:\n self.genre = \"Review\"\n self.__baseuri = 'http://www.cardratings.com/forum'\n self.__task_elements_dict = {\n 'priority':self.task.priority,\n 'level': self.task.level,\n 'last_updated_time':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'pickup_date':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'connector_instance_log_id': self.task.connector_instance_log_id,\n 'connector_instance_id':self.task.connector_instance_id,\n 'workspace_id':self.task.workspace_id,\n 'client_id':self.task.client_id,\n 'client_name':self.task.client_name,\n 'versioned':False,\n 'category':self.task.instance_data.get('category',''),\n 'task_log_id':self.task.id }\n 
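# Descriptive note (added): the loop below pages through the forum search results --\n # parse the current page, then follow the 'Next' link (with the session 'sid'\n # query parameter stripped) until no further page exists.\n 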
self.__setSoupForCurrentUri()\n #c = 0\n main_page_soup = copy.copy(self.soup)\n while self.__processForumUrl():\n try:\n next_page_uri = main_page_soup.find('a','right-box right',text = 'Next').\\\n parent['href'].replace('.','')\n data_dict = dict(parse_qsl(next_page_uri.split('?')[-1]))\n if 'sid' in data_dict.keys():\n data_dict.pop('sid')\n self.currenturi = self.__baseuri + '/search.php?'+ \\\n urlencode(data_dict) \n self.__setSoupForCurrentUri()\n main_page_soup = copy.copy(self.soup)\n## c += 1\n## if c > 100:\n## break\n except:\n log.exception(self.log_msg('Next Page link not found for url \\\n %s'%self.currenturi))\n break \n return True \n except:\n log.info(self.log_msg('Exception while fetchin review url %s'\\\n %self.currenturi)) \n return True\n \n @logit(log, '__processForumUrl')\n def __processForumUrl(self):\n \"\"\"\n It will fetch each thread and its associate infomarmation\n and add the tasks\n \"\"\"\n try:\n threads = self.soup.find('div',id = 'page-body').findAll('div','inner')\n for thread in threads:\n try:\n post_uri = thread.find('div','postbody').find('h3').\\\n find('a')['href'].replace('.','')\n data_dict = dict(parse_qsl(post_uri.split('?')[-1]))\n post_id = 'p' + data_dict['p']\n if 'sid' in data_dict:\n data_dict.pop('sid')\n if 'hilit' in data_dict:\n data_dict.pop('hilit') \n review_page_link = self.__baseuri + '/viewtopic.php?' + \\\n urlencode(data_dict)\n self.__addPost(review_page_link, post_id)\n except: \n log.exception(self.log_msg('uri not found in the url\\\n %s'%self.currenturi)) \n continue \n return True \n except:\n log.exception(self.log_msg('Exception in fetch for the url %s'\\\n %self.currenturi))\n return True\n \n @logit(log,'__addPost') \n def __addPost(self, review_page_link, post_id): \n \"\"\"\n This will take the post tag , and fetch data and meta data and add it to \n self.pages\n \"\"\"\n try:\n self.currenturi = review_page_link\n self.__setSoupForCurrentUri()\n page = self.__getData(post_id)\n if not page:\n return True \n unique_key = get_hash({'data' : page['data']})\n log.info(unique_key)\n if checkSessionInfo(self.genre, self.session_info_out, unique_key,\\\n self.task.instance_data.get('update'),parent_list\\\n = [self.task.instance_data['uri']]):\n log.info(self.log_msg('Session info returns True'))\n return False\n \n except:\n log.exception(self.log_msg('Cannot add the post for the url %s'%\\\n self.currenturi))\n return False\n try:\n page['uri'] = self.currenturi \n except:\n log.info(self.log_msg('Cannot find the uri'))\n page['uri'] = self.currenturi\n try:\n result=updateSessionInfo(self.genre, self.session_info_out, unique_key, \\\n get_hash( page ),'Review', self.task.instance_data.get('update'),\\\n parent_list=[self.task.instance_data['uri']])\n if not result['updated']:\n log.exception(self.log_msg('Update session info returns False'))\n return True\n page['parent_path'] = []\n page['path'] = [self.task.instance_data['uri'], unique_key]\n page['uri_domain'] = urlparse.urlparse(page['uri'])[1]\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n log.info(page)\n log.info(self.log_msg('Post Added'))\n return True\n except:\n log.exception(self.log_msg('Error while adding session info'))\n return True \n \n \n def __getData(self, post_id):\n post = self.soup.find('div',id=post_id)\n page = {}\n try:\n page['title']= stripHtml(post.find('div','postbody').find('h3').\\\n find('a').renderContents())\n if page['title'].startswith('Re:'):\n page['entity'] = 'question'\n else:\n page['entity'] = 
'answer' \n except:\n log.exception(self.log_msg('data title not found'))\n page['title'] = ''\n \n try:\n page['data'] = stripHtml(post.find('div','content').renderContents())\n except:\n log.exception(self.log_msg('Data not found for the url %s'%self.currenturi))\n page['data'] = '' \n \n if not page['title'] and not page['data']:\n log.info(self.log_msg(\"Data and title not found for %s,\"\\\n \" discarding this review\"%self.currenturi))\n return False \n try:\n auth_link = post.find('p','author').find('a')\n if auth_link:\n auth_link.extract()\n author = post.find('p','author').find('strong')\n if author:\n author.extract()\n try:\n page['et_author_name'] = stripHtml(author.renderContents()) \n except:\n log.exception(self.log_msg('author name not found'))\n try:\n date_str = stripHtml(post.find('p','author').renderContents()).\\\n split('by')[-1].strip()\n page['posted_date'] = datetime.strptime(date_str,'%a %b %d, %Y %I:%M %p').\\\n strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n except:\n log.exception(self.log_msg('Posted date not found'))\n page['posted_date'] = datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%SZ\")\n \n except:\n log.exception(self.log_msg('author not found'))\n \n try:\n lists =[stripHtml(each.renderContents())for each in post.\\\n find('dl','postprofile').findAll('dd',recursive = False)]\n for each in lists:\n if each.startswith('Posts:'):\n page['et_author_posts_count'] = int(each.split(':')[-1].\\\n replace(',',''))\n if each.startswith('Joined:'):\n date_str = each.split('Joined:')[-1].strip()\n try:\n page['edate_author_joined_date'] = datetime.strptime(date_str,'%a %b %d, %Y %I:%M %p').\\\n strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n except:\n log.exception(self.log_msg('joined date not found'))\n if each.startswith('Location:'):\n page['et_author_location'] = each.split('Location:')[-1]\n \n except:\n log.exception(self.log_msg('author info not found'))\n \n \n return page \n @logit(log,'__setSoupForCurrentUri')\n def __setSoupForCurrentUri(self, data=None, headers={}):\n \"\"\"It will set the soup object for the current URI\n \"\"\"\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n raise Exception('Page content not fetched for the url %s'%self.currenturi)\n self._setCurrentPage() \n ","sub_path":"crawler/connectors/cardratingsconnector.py","file_name":"cardratingsconnector.py","file_ext":"py","file_size_in_byte":10472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"653807694","text":"# python built-in library\nimport os\nimport argparse\nimport time\nfrom multiprocessing import Manager\n# 3rd party library\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import RandomSampler, WeightedRandomSampler\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n# own code\nfrom model import build_model\nfrom dataset import KaggleDataset, Compose\nfrom helper import config, AverageMeter, iou_mean, save_ckpt, load_ckpt\nfrom loss import contour_criterion, focal_criterion, mse_criterion, regularizer, focal_pixel_criterion\nfrom valid import inference, unpack_data, get_iou\n\ndef main(resume=True, n_epoch=None, learn_rate=None):\n model_name = config['param']['model'] # helper.py line26~32 with configparser (https://docs.python.org/3/library/configparser.html) Q: meaning of ['param']['model']\n if learn_rate is None:\n learn_rate = config['param'].getfloat('learn_rate')\n width = 
config.getint(model_name, 'width')\n weight_map = config['param'].getboolean('weight_map')\n c = config['train']\n log_name = c.get('log_name')\n n_batch = c.getint('n_batch')\n n_worker = c.getint('n_worker')\n n_cv_epoch = c.getint('n_cv_epoch')\n if model_name == 'da_unet' or model_name == 'ynet' or model_name == 'camynet':\n domain_adaptation = True\n else:\n domain_adaptation = False\n if domain_adaptation:\n target_data = config[model_name]['target_data']\n if model_name == 'ynet' or model_name == 'camynet':\n mode = config[model_name]['mode']\n if mode == 'pretrain':\n print('pretrain mode')\n elif mode == 'train':\n print('train mode')\n elif mode == 'combine':\n print('combined training mode')\n if n_epoch is None:\n n_epoch = c.getint('n_epoch')\n balance_group = c.getboolean('balance_group')\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device\n\n model = build_model(model_name) # model.py line654\n model = model.to(device) # class UNet(nn.Module) to()function: https://pytorch.org/docs/stable/nn.html Q: what does to() do\n models = []\n models.append(model)\n # define optimizer\n optimizer = torch.optim.Adam( # https://pytorch.org/docs/stable/optim.html\n filter(lambda p: p.requires_grad, model.parameters()), # Q: what is filter() and p\n lr=args.learn_rate,\n weight_decay=1e-6\n )\n\n # dataloader workers are forked process thus we need a IPC manager to keep cache in same memory space\n manager = Manager() # https://docs.python.org/3/library/multiprocessing.html#multiprocessing-managers\n cache = manager.dict()\n compose = Compose() # dataset.py line125\n # prepare dataset\n if os.path.exists('data/valid'):\n # advance mode: use valid folder as CV\n source_dataset = KaggleDataset('data/train', 'csv_file_s', transform=compose, cache=cache) # dataset.py line28\n if domain_adaptation:\n target_dataset = KaggleDataset('data/'+target_data, 'csv_file_t', transform=compose, cache=cache)\n else:\n valid_dataset = KaggleDataset('data/valid', 'csv_file_v', transform=compose, cache=cache)\n else:\n # auto mode: split part of train dataset as CV\n source_dataset = KaggleDataset('data/train', 'csv_file_s', transform=compose, cache=cache, use_filter=True)\n if domain_adaptation:\n source_dataset, target_dataset = source_dataset.split()\n else:\n source_dataset, valid_dataset = source_dataset.split()\n # add stage1 and stage2 testing set dataset\n resize = not config['valid'].getboolean('pred_orig_size')\n compose = Compose(augment=False, resize=resize)\n s1test_dir = os.path.join('data', 'test')\n s2test_dir = os.path.join('data', 'valid')\n if os.path.exists(s1test_dir):\n datas1test = KaggleDataset(s1test_dir, 'csv_file_s', transform=compose)\n if os.path.exists(s2test_dir):\n datas2test = KaggleDataset(s2test_dir, 'csv_file_s', transform=compose)\n # decide whether to balance training set\n if balance_group: # Q: what is the meaning\n weights, ratio = source_dataset.class_weight() # dataset.py line116\n # Len of weights is number of original epoch samples. 
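# (Added note:) torch's WeightedRandomSampler(weights, num_samples) draws num_samples\n # dataset indices with replacement, picking index i with probability\n # weights[i] / sum(weights) -- which is what re-balances the groups here.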
\n # After oversample balance, majority class will be under-sampled (least sampled)\n # Multiplying by ratio gives each sample a chance to be visited at least once in each epoch \n sampler = WeightedRandomSampler(weights, int(len(weights) * ratio)) # https://pytorch.org/docs/stable/data.html\n if domain_adaptation:\n weights_target, ratio_target = source_dataset.class_weight()\n sampler_target = WeightedRandomSampler(weights_target, int(len(weights_target) * ratio_target))\n else:\n sampler = RandomSampler(source_dataset)\n if domain_adaptation:\n sampler_target = RandomSampler(target_dataset)\n # data loader\n source_loader = DataLoader( # https://pytorch.org/docs/stable/data.html\n source_dataset,\n sampler=sampler,\n batch_size=n_batch,\n num_workers=n_worker,\n pin_memory=torch.cuda.is_available())\n if domain_adaptation:\n target_loader = DataLoader(\n target_dataset,\n sampler=sampler_target,\n batch_size=n_batch,\n num_workers=n_worker,\n pin_memory=torch.cuda.is_available())\n else:\n valid_loader = DataLoader(\n valid_dataset,\n shuffle=False,\n batch_size=n_batch,\n num_workers=n_worker)\n\n # resume checkpoint\n start_epoch = iou_s = iou_t = iou_cv = 0\n if resume:\n start_epoch = load_ckpt(model, optimizer) # helper.py line230\n if start_epoch == 0:\n print('Brand new training ...')\n\n # put model to GPU\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model) # https://pytorch.org/docs/stable/nn.html\n\n # decide log directory name\n log_dir = os.path.join(\n 'logs', log_name, '{}-{}'.format(model_name, width),\n 'ep_{},{}-lr_{}'.format(\n start_epoch,\n n_epoch + start_epoch,\n learn_rate,\n )\n )\n\n with SummaryWriter(log_dir) as writer: # https://tensorboardx.readthedocs.io/en/latest/tutorial.html#create-a-summary-writer\n if start_epoch == 0 and False:\n # dump graph only for very first training, disable by default\n dump_graph(model, writer, n_batch, width) # line116\n print('Training started...')\n for epoch in range(start_epoch + 1, n_epoch + start_epoch + 1): # 1 base\n if domain_adaptation:\n # copied from https://github.com/jvanvugt/pytorch-domain-adaptation\n batch_iterator = zip(loop_iterable(source_loader), loop_iterable(target_loader))\n iou_s = train(batch_iterator, model, optimizer, epoch, writer, max(len(source_loader),len(target_loader)))\n else:\n iou_s = train(source_loader, model, optimizer, epoch, writer, len(source_loader)) # line146\n if domain_adaptation:\n if len(target_dataset) > 0 and epoch % n_cv_epoch == 0:\n with torch.no_grad(): # https://pytorch.org/docs/stable/_modules/torch/autograd/grad_mode.html\n iou_cv = valid(target_loader, model, epoch, writer, len(target_loader))\n if os.path.exists(s1test_dir):\n train_inference(datas1test, models, resize, compose, epoch, writer, tbprefix = 'Stage1')\n if os.path.exists(s2test_dir):\n train_inference(datas2test, models, resize, compose, epoch, writer, tbprefix = 'Stage2')\n else:\n if len(valid_dataset) > 0 and epoch % n_cv_epoch == 0:\n with torch.no_grad():\n iou_cv = valid(valid_loader, model, epoch, writer, len(source_loader)) # line220\n if os.path.exists(s1test_dir):\n train_inference(datas1test, models, resize, compose, epoch, writer, tbprefix = 'Stage1')\n if os.path.exists(s2test_dir):\n train_inference(datas2test, models, resize, compose, epoch, writer, tbprefix = 'Stage2')\n\n save_ckpt(model, optimizer, epoch, iou_s, iou_cv)\n print('Training finished...')\n\ndef dump_graph(model, writer, n_batch, width):\n # 
Prerequisite\n # $ sudo apt-get install libprotobuf-dev protobuf-compiler\n # $ pip3 install onnx\n print('Dump model graph...')\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n dummy_input = torch.rand(n_batch, 3, width, width, device=device)\n torch.onnx.export(model, dummy_input, \"checkpoint/model.pb\", verbose=False)\n writer.add_graph_onnx(\"checkpoint/model.pb\")\n\ndef train_inference(dataset, models, resize, compose, epoch ,writer, tbprefix):\n iou_test = AverageMeter()\n for data in tqdm(dataset): # https://tqdm.github.io/docs/tqdm/\n uid, y, y_c, y_m = inference(data, models, resize)\n x, gt, gt_s, gt_c, gt_m = unpack_data(data, compose, resize)\n iou = get_iou(y, y_c, y_m, gt)\n iou_test.update(iou, 1)\n writer.add_scalar('testing/' + tbprefix + '_instance_iou', iou_test.avg, epoch)\n\ndef train(loader, model, optimizer, epoch, writer, n_step):\n batch_time = AverageMeter() # helper.py line35\n data_time = AverageMeter()\n losses = AverageMeter()\n iou = AverageMeter() # semantic IoU\n iou_c = AverageMeter() # contour IoU\n iou_m = AverageMeter() # marker IoU\n print_freq = config['train'].getfloat('print_freq')\n only_contour = config['contour'].getboolean('exclusive')\n weight_map = config['param'].getboolean('weight_map')\n model_name = config['param']['model']\n with_contour = config.getboolean(model_name, 'branch_contour')\n with_marker = config.getboolean(model_name, 'branch_marker')\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n if model_name == 'da_unet' or model_name == 'ynet' or model_name == 'camynet':\n domain_adaptation = True\n reg = config[model_name]['regularizer']\n if model_name == 'ynet' or model_name == 'camynet':\n mode = config[model_name]['mode']\n lamb = config[model_name]['lamb'].split(',')\n else:\n domain_adaptation = False\n\n # Sets the module in training mode.\n model.train() # https://pytorch.org/docs/stable/nn.html\n end = time.time()\n for i in range(n_step):\n if domain_adaptation:\n data_s, data_t = next(loader)\n else:\n data_s = next(iter(loader))\n # measure data loading time\n data_time.update(time.time() - end)\n # split sample data\n inputs_s = data_s['image'].to(device)\n labels_s = data_s['label'].to(device)\n labels_c_s = data_s['label_c'].to(device)\n labels_m_s = data_s['label_m'].to(device)\n if domain_adaptation:\n inputs_t = data_t['image'].to(device)\n # get loss weight\n weights = None\n if weight_map and 'weight' in data_s:\n weights = data_s['weight'].to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n # forward step\n if model_name == 'da_unet':\n outputs, outputs_t, feature_map_s, feature_map_t = model([inputs_s, inputs_t], 'train')\n # add here if there is anything like 'da_camunet' or something\n elif model_name == 'ynet':\n if mode == 'pretrain':\n outputs, feature_map_s, feature_map_t = model([inputs_s, inputs_t], mode)\n elif mode == 'combine':\n outputs, rec_s, rec_t, feature_map_s, feature_map_t = model([inputs_s, inputs_t], mode)\n else:\n outputs, rec_s, rec_t = model([inputs_s, inputs_t], mode)\n elif model_name == 'camynet':\n if mode == 'pretrain':\n outputs, outputs_c, outputs_m, feature_map_s, feature_map_t = model([inputs_s, inputs_t], mode)\n elif mode == 'combine':\n outputs, outputs_c, outputs_m, rec_s, rec_t, feature_map_s, feature_map_t = model([inputs_s, inputs_t], mode)\n else:\n outputs, outputs_c, outputs_m, rec_s, rec_t = model([inputs_s, inputs_t], mode)\n else:\n outputs = model(inputs_s)\n if with_contour and 
with_marker:\n outputs, outputs_c, outputs_m = outputs\n elif with_contour:\n outputs, outputs_c = outputs\n # compute loss\n if only_contour:\n loss = contour_criterion(outputs, labels_c_s) # loss.py line86\n else:\n # weight_criterion equals to segment_criterion if weights is none\n loss = focal_criterion(outputs, labels_s, weights) # loss.py line93\n if with_contour:\n loss += focal_criterion(outputs_c, labels_c_s, weights)\n if with_marker:\n loss += focal_criterion(outputs_m, labels_m_s, weights)\n if model_name == 'da_unet':\n loss += regularizer(feature_map_s, feature_map_t, reg)\n elif model_name == 'ynet':\n if mode == 'pretrain' or mode == 'combine':\n for j in range(len(lamb)):\n if lamb[j] != '0':\n loss += float(lamb[j])*regularizer(feature_map_s[j], feature_map_s[j], reg)\n if mode == 'train' or mode == 'combine':\n loss += 0.001*(mse_criterion(rec_s, inputs_s) + mse_criterion(rec_t, inputs_t))\n # compute gradient and do backward step\n loss.backward() # Q: cannot find backward()\n optimizer.step()\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n # measure accuracy and record loss\n # NOT instance-level IoU in training phase, for better speed & instance separation handled in post-processing\n losses.update(loss.item(), inputs_s.size(0))\n if only_contour:\n batch_iou = iou_mean(outputs, labels_c_s) # helper.py line104\n else:\n batch_iou = iou_mean(outputs, labels_s)\n iou.update(batch_iou, inputs_s.size(0))\n if with_contour:\n batch_iou_c = iou_mean(outputs_c, labels_c_s)\n iou_c.update(batch_iou_c, inputs_s.size(0))\n if with_marker:\n batch_iou_m = iou_mean(outputs_m, labels_m_s)\n iou_m.update(batch_iou_m, inputs_s.size(0))\n # log to summary\n #step = i + epoch * n_step\n #writer.add_scalar('training/loss', loss.item(), step)\n #writer.add_scalar('training/batch_elapse', batch_time.val, step)\n #writer.add_scalar('training/batch_iou', iou.val, step)\n #writer.add_scalar('training/batch_iou_c', iou_c.val, step)\n #writer.add_scalar('training/batch_iou_m', iou_m.val, step)\n if (i + 1) % print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}]\\t'\n 'Time: {batch_time.avg:.2f} (io: {data_time.avg:.2f})\\t'\n 'Loss: {loss.val:.4f} (avg: {loss.avg:.4f})\\t'\n 'IoU: {iou.avg:.3f} (Coutour: {iou_c.avg:.3f}, Marker: {iou_m.avg:.3f})\\t'\n .format(\n epoch, i, n_step, batch_time=batch_time,\n data_time=data_time, loss=losses, iou=iou, iou_c=iou_c, iou_m=iou_m\n )\n )\n # end of loop, dump epoch summary\n writer.add_scalar('training/epoch_loss', losses.avg, epoch)\n writer.add_scalar('training/epoch_iou', iou.avg, epoch)\n writer.add_scalar('training/epoch_iou_c', iou_c.avg, epoch)\n writer.add_scalar('training/epoch_iou_m', iou_m.avg, epoch)\n return iou.avg # return epoch average iou\n\ndef valid(loader, model, epoch, writer, n_step):\n iou = AverageMeter() # semantic IoU\n iou_c = AverageMeter() # contour IoU\n iou_m = AverageMeter() # marker IoU\n losses = AverageMeter()\n only_contour = config['contour'].getboolean('exclusive')\n weight_map = config['param'].getboolean('weight_map')\n model_name = config['param']['model']\n with_contour = config.getboolean(model_name, 'branch_contour')\n with_marker = config.getboolean(model_name, 'branch_marker')\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n if model_name == 'da_unet' or model_name == 'ynet' or model_name == 'camynet':\n domain_adaptation = True\n else:\n domain_adaptation = False\n\n # Sets the model in evaluation mode.\n model.eval() # 
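eval() disables dropout and freezes batch-norm running stats; docs:\n    # NOTE (editor): focal_criterion is defined in loss.py (not shown); a common\n    # focal-BCE form it may follow -- gamma and the mean reduction are\n    # assumptions, not the repo's exact definition:\n    #\n    #   import torch\n    #   import torch.nn.functional as F\n    #   def focal_loss(logits, target, gamma=2.0):\n    #       bce = F.binary_cross_entropy_with_logits(logits, target, reduction='none')\n    #       p_t = torch.exp(-bce)                       # prob assigned to the true label\n    #       return ((1.0 - p_t) ** gamma * bce).mean()  # down-weights easy pixels\n    #\n    # see 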
https://pytorch.org/docs/stable/nn.html\n for i, data in enumerate(loader):\n # get the inputs\n inputs = data['image'].to(device)\n labels = data['label'].to(device)\n labels_c = data['label_c'].to(device)\n labels_m = data['label_m'].to(device)\n # get loss weight\n weights = None\n if weight_map and 'weight' in data:\n weights = data['weight'].to(device)\n # forward step\n if domain_adaptation:\n outputs = model(inputs, 'valid')\n else:\n outputs = model(inputs)\n if with_contour and with_marker:\n outputs, outputs_c, outputs_m = outputs\n elif with_contour:\n outputs, outputs_c = outputs\n # compute loss\n if only_contour:\n loss = contour_criterion(outputs, labels_c)\n else:\n # weight_criterion equals to segment_criterion if weights is none\n loss = focal_criterion(outputs, labels, weights)\n if with_contour:\n loss += focal_criterion(outputs_c, labels_c, weights)\n if with_marker:\n loss += focal_criterion(outputs_m, labels_m, weights)\n # measure accuracy and record loss (Non-instance level IoU)\n losses.update(loss.item(), inputs.size(0))\n if only_contour:\n batch_iou = iou_mean(outputs, labels_c)\n else:\n batch_iou = iou_mean(outputs, labels)\n iou.update(batch_iou, inputs.size(0))\n if with_contour:\n batch_iou_c = iou_mean(outputs_c, labels_c)\n iou_c.update(batch_iou_c, inputs.size(0))\n if with_marker:\n batch_iou_m = iou_mean(outputs_m, labels_m)\n iou_m.update(batch_iou_m, inputs.size(0))\n # end of loop, dump epoch summary\n writer.add_scalar('CV/epoch_loss', losses.avg, epoch)\n writer.add_scalar('CV/epoch_iou', iou.avg, epoch)\n writer.add_scalar('CV/epoch_iou_c', iou_c.avg, epoch)\n writer.add_scalar('CV/epoch_iou_m', iou_m.avg, epoch)\n print(\n 'Epoch: [{0}]\\t\\tcross-validation\\t'\n 'Loss: N/A (avg: {loss.avg:.4f})\\t'\n 'IoU: {iou.avg:.3f} (Coutour: {iou_c.avg:.3f}, Marker: {iou_m.avg:.3f})\\t'\n .format(\n epoch, loss=losses, iou=iou, iou_c=iou_c, iou_m=iou_m\n )\n )\n return iou.avg # return epoch average iou\n\ndef loop_iterable(iterable): # copied from https://github.com/jvanvugt/pytorch-domain-adaptation\n while True:\n yield from iterable\n\nif __name__ == '__main__':\n learn_rate = config['param'].getfloat('learn_rate')\n n_epoch = config['train'].getint('n_epoch')\n parser = argparse.ArgumentParser()\n parser.add_argument('--resume', dest='resume', action='store_true')\n parser.add_argument('--no-resume', dest='resume', action='store_false')\n parser.add_argument('--epoch', type=int, help='run number of epoch')\n parser.add_argument('--lr', type=float, dest='learn_rate', help='learning rate')\n parser.set_defaults(resume=True, epoch=n_epoch, learn_rate=learn_rate)\n args = parser.parse_args()\n\n main(args.resume, args.epoch, args.learn_rate)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":20006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"219764557","text":"import random\nimport pgzrun\nfrom pgzo import *\n\nWIDTH = 560 # screen width\nHEIGHT = 460 # screen height\nTITLE = \"Crab\" # window title\n\n# \"Schaltzentrale\" für die Schwierigkeit des Spiels:\nSTART_WITH_LOBSTERS = 1\nSTART_LOBSTER_STRENGTH = 0.2\nSTART_WITH_WORMS = 20\n\n\nclass Button(GameObj):\n def __init__(self, image_normal, image_hover, pos, action):\n self.pos = pos\n self.image = image_normal\n self.image_normal = image_normal\n self.image_hover = image_hover\n self.action = action\n self.hover = False\n\n def on_mouse_move(self, pos):\n self.hover = self.collidepoint(mouse_state.pos)\n if 
self.hover:\n self.image = self.image_hover\n else:\n self.image = self.image_normal\n\n def on_mouse_down(self, pos, button):\n if button == mouse.LEFT and self.hover:\n self.action()\n\n\nclass Start(Stage):\n\n def __init__(self):\n self.background_image = \"start\"\n self.reset()\n\n def reset(self):\n self.leave_all()\n self.start_button = Button(\n \"start_button_normal\",\n \"start_button_hover\",\n (WIDTH * 0.5, HEIGHT * 0.5),\n self.start_game)\n self.quit_button = Button(\n \"quit_button_normal\",\n \"quit_button_hover\",\n (WIDTH * 0.1, HEIGHT * 0.9),\n sys.exit)\n self.start_button.appear_on_stage(self)\n self.quit_button.appear_on_stage(self)\n\n def draw(self):\n screen.draw.text(\n \"The Crab Game\",\n center=(WIDTH // 2, HEIGHT * 0.25),\n color=\"brown\",\n fontsize=60,\n fontname=\"zachary\")\n\n def on_key_up(self, key):\n if key == keys.SPACE:\n self.start_game()\n\n def start_game(self):\n beach_stage.reset_game()\n beach_stage.show()\n\n\nclass Beach(Stage):\n def __init__(self):\n self.background_image = \"sand\"\n\n def reset_game(self):\n self._level = 1\n self._score = 0\n self.prepare_beach()\n\n def prepare_beach(self):\n self._defeated = False\n self._victorious = False\n\n self.leave_all()\n\n crab = Crab((WIDTH / 2, HEIGHT * 0.7))\n crab.appear_on_stage(self)\n\n for dummy in range(START_WITH_WORMS):\n w = Beach._create_random_worm()\n w.appear_on_stage(self)\n\n num_lobsters = START_WITH_LOBSTERS + self._level - 1\n for dummy in range(num_lobsters):\n lobster = Beach._create_random_lobster(START_LOBSTER_STRENGTH)\n lobster.appear_on_stage(self)\n\n def take_out_neighbouring_worms(self, crab):\n for w in self.get_game_objects(Worm):\n if w.overlaps(crab):\n w.leave_stage()\n sounds.blop.play()\n self._score += 1\n if not self._victorious and self.count_game_objects(Worm) == 0:\n self._victorious = True\n self.leave_all(Lobster)\n self.schedule_gameover()\n\n def take_out_neighbouring_crab(self, lobster):\n for c in self.get_game_objects(Crab):\n if not c.shielded and c.overlaps(lobster):\n c.leave_stage()\n sounds.au.play()\n # There is only one crab on the beach, so we set\n # _defeated=True if we found at least one overlapping crab:\n self._defeated = True\n self.schedule_gameover()\n\n def restart(self):\n if self._victorious:\n self._level += 1\n self.prepare_beach()\n elif self._defeated:\n start_stage.reset()\n start_stage.show()\n else:\n raise Exception(\"restart on unfinished game detected!\")\n\n def schedule_gameover(self):\n clock.schedule_unique(self.restart, 4.0)\n\n @staticmethod\n def _create_random_lobster(strength):\n result = Lobster(\n (random.randrange(WIDTH), random.randrange(HEIGHT * 0.4)),\n 1 + strength * 4,\n 1 + strength * 9,\n 1 + strength * 24)\n result.turn(random.randrange(360))\n return result\n\n @staticmethod\n def _create_random_worm():\n return Worm((random.randrange(WIDTH), random.randrange(HEIGHT)))\n\n def draw(self):\n screen.draw.text(\"Score: \" + str(self._score), topleft=(10, 10),\n color=\"black\", fontsize=20, fontname=\"zachary\")\n screen.draw.text(\"Level: \" + str(self._level), topleft=(10, 35),\n color=\"black\", fontsize=20, fontname=\"zachary\")\n if self._defeated:\n screen.draw.text(\n \"You lose!\",\n center=(WIDTH // 2, HEIGHT * 0.25),\n color=\"brown\",\n fontsize=60,\n fontname=\"zachary\")\n if self._victorious:\n screen.draw.text(\n \"You win!\",\n center=(WIDTH // 2, HEIGHT * 0.25),\n color=\"brown\",\n fontsize=60,\n fontname=\"zachary\")\n\n\nclass Crab(GameObj):\n\n IMAGE_PREFIX = \"crab\"\n 
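# NOTE (editor): sprite-sheet animation state -- the crab cycles frames\n    # crab0..crab5, advancing one frame per TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS\n    # pixels travelled (see switch_image below).\n    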
IMAGE_COUNT = 6\n TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS = 5\n\n def __init__(self, pos):\n self._set_image_index(0)\n self.pos = pos\n self.speed = 0\n self.traveled = 0\n self.shielded = False\n self.shield_energy = 10\n\n def _set_image_index(self, image_index):\n self.image_index = image_index\n self.image = Crab.IMAGE_PREFIX + str(image_index)\n\n def draw(self):\n if self.shielded:\n shield = Actor(\"shield\", self.pos)\n shield.draw()\n\n energy_block = Actor(\"energy_block\")\n x = WIDTH - 20\n y = 20\n for dummy in range(self.shield_energy):\n energy_block.topright = (x, y)\n energy_block.draw()\n x -= 10\n\n def act(self):\n self.switch_image()\n if self.can_move(self.speed):\n self.move(self.speed)\n self.traveled += abs(self.speed)\n else:\n self.speed = 0\n self.stage.take_out_neighbouring_worms(self)\n if keyboard.left:\n self.turn(10)\n if keyboard.right:\n self.turn(-10)\n if keyboard.up:\n self.speed_up()\n if keyboard.down:\n self.slow_down()\n if keyboard.space and not self.shielded and self.shield_energy > 0:\n self.shielded = True\n self.shield_energy -= 1\n clock.schedule_unique(self.unshield, 3)\n\n def unshield(self):\n self.shielded = False\n\n def switch_image(self):\n if self.traveled > Crab.TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS:\n self._set_image_index((self.image_index + 1) % Crab.IMAGE_COUNT)\n self.traveled -= Crab.TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS\n\n\nclass Lobster(GameObj):\n\n IMAGE_PREFIX = \"lobster\"\n IMAGE_COUNT = 2\n TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS = 7\n\n def __init__(self, pos, speed, drunkenness, jumpiness):\n self._set_image_index(0)\n self.pos = pos\n self.speed = speed\n self.traveled = 0\n self.drunkenness = drunkenness\n self.jumpiness = jumpiness\n\n def _set_image_index(self, image_index):\n self.image_index = image_index\n self.image = Lobster.IMAGE_PREFIX + str(image_index)\n\n def act(self):\n self.switch_image()\n if self.can_move(self.speed):\n self.move(self.speed)\n self.traveled += abs(self.speed)\n else:\n self.turn(25)\n if random.randrange(10) < self.drunkenness:\n self.turn(random.uniform(-self.jumpiness, self.jumpiness))\n self.stage.take_out_neighbouring_crab(self)\n\n def switch_image(self):\n if self.traveled > Lobster.TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS:\n self._set_image_index((self.image_index + 1) % Lobster.IMAGE_COUNT)\n self.traveled -= Lobster.TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS\n\n\nclass Worm(GameObj):\n\n IMAGE_PREFIX = \"worm\"\n IMAGE_COUNT = 2\n TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS = 3\n\n def __init__(self, pos):\n self._set_image_index(0)\n self.pos = pos\n self.speed = 0.5\n if random.randrange(2) == 0:\n self.speed = -self.speed\n self.traveled = 0\n\n def _set_image_index(self, image_index):\n self.image_index = image_index\n self.image = Worm.IMAGE_PREFIX + str(image_index)\n\n def act(self):\n self.switch_image()\n if self.can_move(self.speed):\n self.move(self.speed)\n self.traveled += abs(self.speed)\n else:\n self.speed = -self.speed\n if random.randrange(100) < 10:\n self.speed += random.uniform(-1, 1) / 50\n\n def switch_image(self):\n if self.traveled > Worm.TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS:\n self._set_image_index((self.image_index + 1) % Worm.IMAGE_COUNT)\n self.traveled -= Worm.TRAVEL_DISTANCE_BETWEEN_IMAGE_FLIPS\n\n\nstart_stage = Start()\nbeach_stage = Beach()\nstart_stage.show()\n\n\npgzrun.go()\n","sub_path":"anhang/kapitel13/crab.py","file_name":"crab.py","file_ext":"py","file_size_in_byte":9053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
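# --- editor note -----------------------------------------------------------
# The next record (update-cas-mgr-settings.py) collapses one-row-per-setting
# storage into a single JSON blob, coercing each value by its declared type.
# A minimal runnable sketch of that coercion step; the dispatch-table style is
# an editorial alternative to the script's if/elif chain, and the sample keys
# below are only illustrative:
import json

COERCERS = {
    "number": int,
    "float": lambda v: int(float(v)),  # note: the script truncates floats to int
    "string": str,
    "bool": lambda v: int(v) == 1,
    "list": lambda v: str(v).split(","),
}

def coerce(key_type, raw):
    # Unknown types fall through unchanged instead of being dropped.
    return COERCERS.get(key_type, lambda v: v)(raw)

print(json.dumps({"max_channels": coerce("number", "8"),
                  "enable_auto_loader": coerce("bool", "1"),
                  "device_list": coerce("list", "cam1,cam2")}, sort_keys=True))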
+{"seq_id":"180245683","text":"#!/usr/bin/python\n\"\"\"\nget records from settings table\nforeach record\n\tif key name in approved list\n\t\tadd key and value to object, set value to appropriate data type\n\telse\n\t\tprint key name being ignored\nnext\nencode as JSON\ndelete all records from settings table\nvacuum\ninsert record with key_name = \"all_settings\" and json\n\"\"\"\n\nimport sqlite3\nimport json\nimport shutil\n\ndbPath = '/var/cas-mgr/cas-mgr.sqlite'\ndbBackup = '/var/cas-mgr/cas-mgr.sqlite.bak'\nkeyList = {\"allow_movie_change\":\"bool\",\n\t\t\t\"arcade_subnet\":\"string\",\n\t\t\t\"autoloader_dvd_drive_mount\":\"string\",\n\t\t\t\"autoloader_dvd_drive_status_file\":\"string\",\n\t\t\t\"autoloader_prog_file\":\"string\",\n\t\t\t\"autoloader_response_file\":\"string\",\n\t\t\t\"booth_status_interval\":\"number\",\n\t\t\t\"collection_report_recipients\":\"string\",\n\t\t\t\"daily_meters_interval\":\"number\",\n\t\t\t\"daily_meters_report_recipients\":\"string\",\n\t\t\t\"daily_meters_time\":\"string\",\n\t\t\t\"data_path\":\"string\",\n\t\t\t\"device_list\":\"list\",\n\t\t\t\"dvd_copier_log_file\":\"string\",\n\t\t\t\"dvd_copier_proc_name\":\"string\",\n\t\t\t\"dvd_copier_timeout\":\"number\",\n\t\t\t\"dvd_copy_prog\":\"string\",\n\t\t\t\"dvd_drive_mount\":\"string\",\n\t\t\t\"dvd_mount_timeout\":\"number\",\n\t\t\t\"enable_auto_loader\":\"bool\",\n\t\t\t\"enable_collection_report\":\"bool\",\n\t\t\t\"enable_daily_meters\":\"bool\",\n\t\t\t\"enable_no_back_cover\":\"bool\",\n\t\t\t\"file_server_log_file\":\"string\",\n\t\t\t\"file_server_prog_file\":\"string\",\n\t\t\t\"last_daily_meters_date\":\"string\",\n\t\t\t\"last_restart_device_date\":\"string\",\n\t\t\t\"max_address\":\"string\",\n\t\t\t\"max_channels\":\"number\",\n\t\t\t\"max_transcoded_video_file_size\":\"number\",\n\t\t\t\"min_address\":\"string\",\n\t\t\t\"min_free_diskspace\":\"float\",\n\t\t\t\"movie_change_status_interval\":\"number\",\n\t\t\t\"restart_device_interval\":\"number\",\n\t\t\t\"show_extra_add_video_fields\":\"bool\",\n\t\t\t\"show_no_backcover_option\":\"bool\",\n\t\t\t\"software_update_interval\":\"number\",\n\t\t\t\"store_name\":\"string\",\n\t\t\t\"transcoder_log_file\":\"string\",\n\t\t\t\"upc_lookup_interval\":\"number\",\n\t\t\t\"upload_movie_metadata\":\"bool\",\n\t\t\t\"upload_view_times\":\"bool\",\n\t\t\t\"video_metadata_path\":\"string\",\n\t\t\t\"video_path\":\"string\",\n\t\t\t\"wine_autoloader_dvd_drive_letter\":\"string\",\n\t\t\t\"wine_dvd_copy_dest_drive_letter\":\"string\",\n\t\t\t\"wine_dvd_drive_letter\":\"string\"}\n\n# Backup database before modifying\nshutil.copy2(dbPath, dbBackup)\n\ncon = sqlite3.connect(dbPath)\ncur = con.cursor()\n\n# If the all_settings key exists then do not continue the table update\ncur.execute(\"SELECT key_name FROM settings WHERE key_name = ?\", ('all_settings',))\nrow = cur.fetchone()\n\nif row is None:\n\tcur.execute(\"SELECT key_name, data FROM settings ORDER BY key_name\")\n\n\tsettings = {}\n\n\trow = cur.fetchone()\n\twhile row is not None:\n\t\tif row[0] in keyList.keys():\n\t\t\tif keyList[row[0]] == \"number\":\n\t\t\t\tsettings[row[0]] = int(row[1])\n\t\t\telif keyList[row[0]] == \"float\":\n\t\t\t\tsettings[row[0]] = int(float(row[1]))\n\t\t\telif keyList[row[0]] == \"string\":\n\t\t\t\tsettings[row[0]] = str(row[1])\n\t\t\telif keyList[row[0]] == \"bool\":\n\t\t\t\tsettings[row[0]] = (int(row[1]) == 1)\n\t\t\telif keyList[row[0]] == \"list\":\n\t\t\t\tsettings[row[0]] = str(row[1]).split(\",\")\n\t\t\telse:\n\t\t\t\tprint(\"Unknown 
data type\")\n\t\telse:\n\t\t\tprint(\"Ignoring '%s'\" % row[0])\n\n\t\trow = cur.fetchone()\n\n\t# Since we are just converting settings to the new format, set flag that this\n\t# is not the first run so user doesn't get confused\n\tsettings['first_run'] = False\n\n\tjsonSettings = json.JSONEncoder(sort_keys=True).encode(settings)\n\n\tcur.execute('DELETE FROM settings')\n\tcur.execute('VACUUM')\n\tcur.execute('INSERT INTO settings (key_name, data) VALUES (?, ?)', ('all_settings', jsonSettings))\n\n\tcon.commit()\nelse:\n\tprint('Database does not need to be updated.')\n\ncon.close()\n","sub_path":"installer/update-cas-mgr-settings.py","file_name":"update-cas-mgr-settings.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"240060745","text":"import math\n\nT = int(input())\nfor t in range(T):\n n, k = map(int,input().split())\n \n p = []\n \n for i in range(n):\n r, h = map(int,input().split())\n side = 2*math.pi*r*h\n base = math.pi*pow(r, 2)\n \n p.append([r, h, side, base])\n\n p.sort(key=lambda x: x[0])\n\n maxarea = 0\n \n for i in range(k-1, n):\n base = p[i][3]\n\n part = p[:i]\n part.sort(key=lambda x: x[2])\n\n area = base + p[i][2]\n if k > 1:\n for j in range(i-1, i - k, -1):\n area += part[j][2]\n\n if area > maxarea:\n maxarea = area\n \n \n \n print(\"Case #\"+str(t+1)+\":\",maxarea)\n \n","sub_path":"Google Code Jam/2017/Round 1C/Ample_Syrup.py","file_name":"Ample_Syrup.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"594406308","text":"\"\"\"Details about the built-in battery.\"\"\"\nfrom __future__ import annotations\nimport logging\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.components.sensor import SensorEntity\nimport subprocess\n\nfrom homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE\nimport homeassistant.helpers.config_validation as cv\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_CAPACITY_LEVEL = \"capacity_level\"\nATTR_STATUS = \"status\"\n\nBATTERY_LIFE_COMMAND = \"sysctl hw.acpi.battery.life | cut -d' ' -f2\"\nBATTERY_STATUS_COMMAND = \"sysctl hw.acpi.battery.state | cut -d' ' -f2\"\n\n\"\"\"Platform for sensor integration.\"\"\"\n\n\ndef setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None\n) -> None:\n \"\"\"Set up the sensor platform.\"\"\"\n add_entities([FreeBSDBattery()])\n _LOGGER.info('Created FreeBSDBattery entity')\n return True\n\n\nclass FreeBSDBattery(SensorEntity):\n \"\"\"Representation of a Sensor.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the sensor.\"\"\"\n self._battery_life = None\n self._battery_status = None\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of the sensor.\"\"\"\n return 'FreeBSD Battery'\n\n @property\n def native_value(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._battery_life\n\n @property\n def unit_of_measurement(self) -> str:\n \"\"\"Return the unit of measurement.\"\"\"\n return PERCENTAGE\n\n @property\n def device_class(self):\n return DEVICE_CLASS_BATTERY\n\n @property\n def extra_state_attributes(self):\n return {\n \"life\": self._battery_life,\n \"status\": self._battery_status\n }\n\n def update(self):\n 
\"\"\"Get the latest data and updates the states.\"\"\"\n self._battery_life = int(subprocess.check_output(\n BATTERY_LIFE_COMMAND, shell=True))\n self._battery_status = int(subprocess.check_output(\n BATTERY_STATUS_COMMAND, shell=True))\n _LOGGER.info(self._battery_life)\n _LOGGER.info(self._battery_status)\n","sub_path":"custom_components/freebsd_battery/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"598841537","text":"import os\nimport numpy as np\n\ncurr_dir_path = os.path.dirname(os.path.realpath(__file__))\nINP_DIR = os.path.join(curr_dir_path, '../Data/ProgramData_INPUT')\n\nprog_id = '58' # if input is a valid variable name\n\nif not os.path.exists(os.path.join(INP_DIR, prog_id)):\n os.mkdir(os.path.join(INP_DIR, prog_id))\n\nfor i in range(1, 100+1):\n inp_path = os.path.join(INP_DIR, prog_id, '{}.txt'.format(i))\n with open(inp_path, 'w') as f:\n inp_n = np.random.randint(1, 5, size=1)[0]\n f.write(f'{inp_n}\\n')\n for _ in range(inp_n):\n inp_s = np.random.choice([ord('_')]*50+list(range(ord('a'), ord('z')+1))+list(range(ord('A'), ord('Z')+1))+list(range(ord('0'), ord('9')+1)), size=np.random.randint(1, 25, size=1)[0])\n f.write('{}\\n'.format(''.join(list(map(chr, inp_s)))))\n","sub_path":"generate_input/gen_input_58.py","file_name":"gen_input_58.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"495963988","text":"import cv2\nimport numpy as np\nimport os\nimport random\nfrom os import listdir\nfrom os.path import isfile, join\nfrom scipy.ndimage import rotate\nimport shutil\n\n\ndef flip(path, path_save, axis): # axis = 0 (горизонтально), axis = 1 (вертикально), axis = -1 (оба?)\n if axis != 0 | axis != 1 | axis != -1:\n print(\"axis должен быть равен 0, 1 или -1!\\n\")\n return 0\n files = [f for f in listdir(path) if isfile(join(path, f))]\n files.sort()\n for name in files:\n print(path + name)\n img = cv2.imread(path + name, cv2.IMREAD_UNCHANGED)\n out = cv2.flip(img, axis)\n\n name, format = name.split(\".\", 1)\n cv2.imwrite(path_save + name + 'flip' + str(axis) + '.' + format, out)\n\n\ndef rot(path, path_save, angle):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n files.sort()\n i = 0\n for name in files:\n # print(path + name)\n image = cv2.imread(path + name, cv2.IMREAD_UNCHANGED)\n im_rot = rotate(image, angle, reshape=True)\n\n name, format = name.split(\".\", 1)\n print(\"img: \", path + name, np.shape(im_rot))\n cv2.imwrite(path_save + name + 'rot' + str(i) + '.' + format, im_rot)\n i += 1\n\n\ndef image_illumination(path, path_save, path_mask, path_save_mask, level):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n files.sort()\n i = 0\n for file in files:\n # print(path + name)\n name, format = file.split(\".\", 1)\n\n in_img = cv2.imread(path + file, cv2.IMREAD_UNCHANGED)\n a = np.double(in_img)\n b = a + level\n out = b\n\n print(\"img: \", path + name, np.shape(out))\n cv2.imwrite(path_save + name + 'illumination' + str(level) + str(i) + '.' + format, out)\n\n # теперь надо скопировать маски, неизменнённые!\n # причём они должны называться также\n\n img_seg = cv2.imread(path_mask + name + '.png')\n cv2.imwrite(path_save_mask + name + 'illumination' + str(level) + str(i) + '.' 
+ format, img_seg)\n\n i += 1\n\n\ndef zoom (path, path_save, zoom_factor):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n files.sort()\n for name in files:\n img = cv2.imread(path + name, cv2.IMREAD_UNCHANGED)\n # print(path + name)\n\n height, width = img.shape[:2] # It's also the final desired shape\n new_height, new_width = int(height * zoom_factor), int(width * zoom_factor)\n\n y1, x1 = max(0, new_height - height) // 2, max(0, new_width - width) // 2\n y2, x2 = y1 + height, x1 + width\n bbox = np.array([y1,x1,y2,x2])\n\n bbox = (bbox / zoom_factor).astype(np.int)\n y1, x1, y2, x2 = bbox\n cropped_img = img[y1:y2, x1:x2]\n\n resize_height, resize_width = min(new_height, height), min(new_width, width)\n pad_height1, pad_width1 = (height - resize_height) // 2, (width - resize_width) // 2\n pad_height2, pad_width2 = (height - resize_height) - pad_height1, (width - resize_width) - pad_width1\n pad_spec = [(pad_height1, pad_height2), (pad_width1, pad_width2)] + [(0,0)] * (img.ndim - 2)\n\n result = cv2.resize(cropped_img, (resize_width, resize_height))\n result = np.pad(result, pad_spec, mode='constant')\n assert result.shape[0] == height and result.shape[1] == width\n\n name, format = name.split(\".\", 1)\n\n print(\"img: \", path + name, np.shape(result))\n cv2.imwrite(path_save + name + 'zoom' + str(zoom_factor) + '.' + format, result)\n\n\nif __name__ == '__main__':\n os.system(\"find /Users/kate/PycharmProjects/make_data -name '.DS_Store' -delete\")\n\n path_orig_img = 'Airports/!MINEhelsinki/img/'\n path_orig_mask = 'Airports/!MINEhelsinki/mask/'\n\n path_save_aug_img = 'Airports/!MINEhelsinki/b&w_aug/img/'\n path_save_aug_mask = 'Airports/!MINEhelsinki/b&w_aug/mask/'\n\n files_img = [f for f in listdir(path_save_aug_img) if isfile(join(path_save_aug_img, f))]\n files_mask = [f for f in listdir(path_save_aug_mask) if isfile(join(path_save_aug_mask, f))]\n\n assert len(files_img) == len(files_mask)\n\n # работает, не трогать\n # for i in np.arange(-100, 100, 20):\n # image_illumination(path_orig_img, path_save_aug_img, path_orig_mask, path_save_aug_mask, level=i)\n\n # работает, не трогать\n for i in np.arange(1.5, 8.5, 2.5):\n zoom(path_orig_img, path_save_aug_img, zoom_factor=i)\n zoom(path_orig_mask, path_save_aug_mask, zoom_factor=i)\n\n # работает, не трогать\n for i in np.arange(-1, 1, 1):\n flip(path_orig_img, path_save_aug_img, axis=i)\n flip(path_orig_mask, path_save_aug_mask, axis=i)","sub_path":"data_aug.py","file_name":"data_aug.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"133466351","text":"import numpy as np\n\nfrom DREAM_and_DeepCFR.TrainingProfile import TrainingProfile\nfrom DREAM_and_DeepCFR.workers.driver.Driver import Driver\nfrom PokerRL.game.games import StandardLeduc # or any other game\n\nif __name__ == '__main__':\n ctrl = Driver(t_prof=TrainingProfile(\n name=\"SD-CFR_LEDUC_LB_2700trav_095_SEED\" + str(np.random.randint(1000000)),\n\n n_traversals_per_iter=2700,\n\n n_batches_adv_training=3000,\n sampler=\"learned_baseline\",\n os_eps=0.5,\n game_cls=StandardLeduc,\n ),\n eval_methods={\n \"br\": 3,\n })\n ctrl.run()\n","sub_path":"Leduc_DREAM_T2700.py","file_name":"Leduc_DREAM_T2700.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"635526154","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 11 22:28:53 2015\n\n@author: 
Owner\n\"\"\"\n\n\nimport MapReduce\nimport sys\n\n\"\"\"\n A and B in a sparse matrix format\n algorithm to compute the matrix multiplication A x B\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n # key: row of A or col of B\n # value: [matrix, row, col, value]\n value = record \n if record[0] == 'a':\n for x in range(5):\n key = [record[1],x] #record[0]\n mr.emit_intermediate(tuple(key), value) #(key, value)\n else:\n for x in range(5):\n key = [x,record[2]]\n mr.emit_intermediate(tuple(key), value) #(key, value)\n\ndef reducer(key, list_of_values):\n # key: index of resulting matrix\n # value: row vector and column vector\n aa = {}\n bb = {}\n result = 0\n for value in list_of_values:\n if value[0] == 'a':\n aa[value[2]] = value[3]\n else:\n bb[value[1]] = value[3]\n for x in range(5):\n try:\n result += aa[x]*bb[x]\n except KeyError:\n pass\n key = list(key)\n key.append(result)\n key = tuple(key)\n mr.emit(key)\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)","sub_path":"MapReduce/MapReduce_Framework/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"212889965","text":"# Описать метод RectPS(x1,y1,x2,y2) вычисляющий периметр P и площадь S\n# прямоугольника со сторонами параллельными осям координат (x1,y1),(x2,y2)\n# его противоположных вершин. x1,y1,x2,y2 - входные параметры P и S - \n# выходные параметры вещественного типа. С помощью этого метода найти\n# периметры и площади трех прямоугольников с данными противоположными вершинами.\n\n# Examples coordinates\n# 4,4,6,6\n# 3.5,5.9,6,9\n# 3,5,-3,-6\n\n\ndef draw_rectangle(coordinates):\n\tcoordinates = coordinates.split(',')\n\tx1 = float(coordinates[0])\n\ty1 = float(coordinates[1])\n\tx2 = float(coordinates[2])\n\ty2 = float(coordinates[3])\n\n\tp = (abs(x1 - x2) + abs(y1 - y2)) * 2\n\ts = abs(x1 - x2) * abs(y1 - y2)\n\n\tprint('coordinates (x1=%s, y1=%s) (x2=%s, y2=%s)' % (x1, y1, x2, y2))\n\tprint('perimeter P = ', p)\n\tprint('square S = ', s)\n\tprint('\\n Looks like')\n\n\theight = int(abs(x1 - x2))\n\twidth = int(abs(y1 - y2))\n\tfor x in range(width):\n\t\tfor x in range(height):\n\t\t\tprint(' * ', end='')\n\t\tprint()\n\n\ni = 3\nwhile i > 0:\n\tcoordinates = input('Enter coordinates x1,y1,x2,y2 Example 4,4,6,6 or 2.5,8,6,-2.5\\n: ')\n\tdraw_rectangle(coordinates)\n\ti -= 1\n","sub_path":"lesson5/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"109488877","text":"import zeep, pika, json, sys\n\ndef main(ssn, loan_a, loan_d):\n data = {\n \"ssn\":ssn,\n \"loanAmount\": loan_a,\n \"loanDuration\": loan_d\n }\n send_to_queue(json.loads(json.dumps(data)))\n\ndef get_credit_score (ssn):\n\n wsdl = 'http://datdb.cphbusiness.dk:8080/CreditScoreService/CreditScoreService?wsdl'\n client = zeep.Client(wsdl=wsdl)\n return client.service.creditScore(ssn)\n\ndef send_to_queue (data):\n\n #Connect to rabbit MQ server, specify host inside of \"ConnectionParameters()\"\n connection = pika.BlockingConnection(pika.ConnectionParameters('datdb.cphbusiness.dk'))\n channel = connection.channel()\n\n exchange_name = 'g1.exchange'\n #Create a queue\n 
channel.exchange_declare(exchange=exchange_name,\n exchange_type='direct')\n data['creditScore'] = get_credit_score(data['ssn'])\n data['ssn'] = data['ssn'].replace(\"-\",\"\")\n routing_key = 'g1.creditscore'\n #Send a message, \"routing_key\" specifies what queue we are sending to\n channel.basic_publish(exchange=exchange_name,\n routing_key=routing_key,\n body=json.dumps(data))\n\n print(\" [x] Sent %r\" % (data))\n connection.close()\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1], sys.argv[2], sys.argv[3])\n","sub_path":"LB_CreditScore/get_credit_score.py","file_name":"get_credit_score.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"28812200","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: kshit\n\"\"\"\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport glob\n\nos.chdir(\"C:\\\\Users\\\\kshit\\\\Desktop\")\nfor file in glob.glob(\"*.png\"):\n img = cv2.imread(file)\n mask = np.zeros(img.shape[:2],np.uint8)\n bgdModel = np.zeros((1,65),np.float64)\n fgdModel = np.zeros((1,65),np.float64)\n rect = (10,10,20,20)\n cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)\n mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')\n img = img*mask2[:,:,np.newaxis]\n filename = \"foreground\"+file\n cv2.imwrite(filename,img)\n","sub_path":"Supplementary Materials/ExtractForegroundOfAllImages.py","file_name":"ExtractForegroundOfAllImages.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"217993370","text":"# Copyright (C) Ivan Kravets \n# See LICENSE for details.\n\nimport atexit\nimport re\nfrom os import getenv, listdir, remove, sep, walk\nfrom os.path import basename, dirname, isdir, isfile, join, normpath\n\nfrom SCons.Script import Exit, SConscript, SConscriptChdir\nfrom SCons.Util import case_sensitive_suffixes\n\nfrom platformio.util import pioversion_to_intstr\n\n\ndef BuildFirmware(env):\n\n # fix ASM handling under non-casitive OS\n if not case_sensitive_suffixes(\".s\", \".S\"):\n env.Replace(\n AS=\"$CC\",\n ASCOM=\"$ASPPCOM\"\n )\n\n env.ProcessFlags()\n env.BuildFramework()\n\n firmenv = env.Clone()\n vdirs = firmenv.VariantDirRecursive(\n join(\"$BUILD_DIR\", \"src\"), \"$PROJECTSRC_DIR\")\n\n # build dependent libs\n deplibs = firmenv.BuildDependentLibraries(\"$PROJECTSRC_DIR\")\n\n # append specified LD_SCRIPT\n if \"LDSCRIPT_PATH\" in firmenv:\n firmenv.Append(\n LINKFLAGS=[\"-T\", \"$LDSCRIPT_PATH\"]\n )\n\n # enable \"cyclic reference\" for linker\n firmenv.Prepend(\n _LIBFLAGS=\"-Wl,--start-group \"\n )\n firmenv.Append(\n _LIBFLAGS=\" -Wl,--end-group\"\n )\n\n _srcbuild_flags = getenv(\"PLATFORMIO_SRCBUILD_FLAGS\",\n env.subst(\"$SRCBUILD_FLAGS\"))\n if _srcbuild_flags:\n firmenv.MergeFlags(_srcbuild_flags)\n\n firmenv.Append(\n CPPDEFINES=[\"PLATFORMIO={0:02d}{1:02d}{2:02d}\".format(\n *pioversion_to_intstr())]\n )\n\n return firmenv.Program(\n join(\"$BUILD_DIR\", \"firmware\"),\n [firmenv.GlobCXXFiles(vdir) for vdir in vdirs],\n LIBS=list(env.get(\"LIBS\", []) + deplibs)[::-1],\n LIBPATH=env.get(\"LIBPATH\", []) + [\"$BUILD_DIR\"],\n PROGSUFFIX=\".elf\"\n )\n\n\ndef ProcessFlags(env):\n if \"extra_flags\" in env.get(\"BOARD_OPTIONS\", {}).get(\"build\", {}):\n env.MergeFlags(env.subst(\"${BOARD_OPTIONS['build']['extra_flags']}\"))\n\n if \"BUILD_FLAGS\" in env:\n env.MergeFlags(env['BUILD_FLAGS'])\n\n # Cancel any 
previous definition of name, either built in or\n # provided with a -D option // Issue #191\n undefines = [f for f in env.get(\"CCFLAGS\", []) if f.startswith(\"-U\")]\n if undefines:\n for undef in undefines:\n env['CCFLAGS'].remove(undef)\n env.Append(_CPPDEFFLAGS=\" %s\" % \" \".join(undefines))\n\n\ndef GlobCXXFiles(env, path):\n files = []\n for suff in [\"*.c\", \"*.cpp\", \"*.S\"]:\n _list = env.Glob(join(path, suff))\n if _list:\n files += _list\n return files\n\n\ndef VariantDirRecursive(env, variant_dir, src_dir, duplicate=True,\n ignore_pattern=None):\n if not ignore_pattern:\n ignore_pattern = (\".git\", \".svn\")\n variants = []\n src_dir = env.subst(src_dir)\n for root, _, _ in walk(src_dir, followlinks=True):\n _src_dir = root\n _var_dir = variant_dir + root.replace(src_dir, \"\")\n if any([s in _var_dir.lower() for s in ignore_pattern]):\n continue\n env.VariantDir(_var_dir, _src_dir, duplicate)\n variants.append(_var_dir)\n return variants\n\n\ndef BuildFramework(env):\n if \"FRAMEWORK\" not in env:\n return\n\n if env['FRAMEWORK'].lower() in (\"arduino\", \"energia\"):\n env.ConvertInoToCpp()\n\n for f in env['FRAMEWORK'].split(\",\"):\n framework = f.strip().lower()\n if framework in env.get(\"BOARD_OPTIONS\", {}).get(\"frameworks\"):\n SConscriptChdir(0)\n SConscript(\n env.subst(join(\"$PIOBUILDER_DIR\", \"scripts\", \"frameworks\",\n \"%s.py\" % framework))\n )\n else:\n Exit(\"Error: This board doesn't support %s framework!\" %\n framework)\n\n\ndef BuildLibrary(env, variant_dir, library_dir, ignore_files=None):\n lib = env.Clone()\n vdirs = lib.VariantDirRecursive(\n variant_dir, library_dir, ignore_pattern=(\".git\", \".svn\", \"examples\"))\n srcfiles = []\n for vdir in vdirs:\n for item in lib.GlobCXXFiles(vdir):\n if not ignore_files or item.name not in ignore_files:\n srcfiles.append(item)\n return lib.Library(\n lib.subst(variant_dir),\n srcfiles\n )\n\n\ndef BuildDependentLibraries(env, src_dir): # pylint: disable=R0914\n\n INCLUDES_RE = re.compile(r\"^\\s*#include\\s+(\\<|\\\")([^\\>\\\"\\']+)(?:\\>|\\\")\",\n re.M)\n LIBSOURCE_DIRS = [env.subst(d) for d in env.get(\"LIBSOURCE_DIRS\", [])]\n\n # start internal prototypes\n\n class IncludeFinder(object):\n\n def __init__(self, base_dir, name, is_system=False):\n self.base_dir = base_dir\n self.name = name\n self.is_system = is_system\n\n self._inc_path = None\n self._lib_dir = None\n self._lib_name = None\n\n def getIncPath(self):\n return self._inc_path\n\n def getLibDir(self):\n return self._lib_dir\n\n def getLibName(self):\n return self._lib_name\n\n def run(self):\n if not self.is_system and self._find_in_local():\n return True\n return self._find_in_system()\n\n def _find_in_local(self):\n if isfile(join(self.base_dir, self.name)):\n self._inc_path = join(self.base_dir, self.name)\n return True\n else:\n return False\n\n def _find_in_system(self):\n for lsd_dir in LIBSOURCE_DIRS:\n if not isdir(lsd_dir):\n continue\n\n for ld in listdir(lsd_dir):\n inc_path = normpath(join(lsd_dir, ld, self.name))\n try:\n lib_dir = inc_path[:inc_path.index(\n sep, len(lsd_dir) + 1)]\n except ValueError:\n continue\n lib_name = basename(lib_dir)\n\n # ignore user's specified libs\n if \"IGNORE_LIBS\" in env and lib_name in env['IGNORE_LIBS']:\n continue\n\n if not isfile(inc_path):\n # if source code is in \"src\" dir\n lib_dir = join(lsd_dir, lib_name, \"src\")\n inc_path = join(lib_dir, self.name)\n\n if isfile(inc_path):\n self._lib_dir = lib_dir\n self._lib_name = lib_name\n self._inc_path = inc_path\n return True\n 
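# NOTE (editor): reaching this line means every lookup missed -- (1) the\n            # including file's own directory, (2) <libsource>/<lib>/<name>, and\n            # (3) the Arduino-style <libsource>/<lib>/src/<name> fallback.\n            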
return False\n\n def _get_dep_libs(src_dir):\n state = {\n \"paths\": set(),\n \"libs\": set(),\n \"ordered\": set()\n }\n state = _process_src_dir(state, env.subst(src_dir))\n\n result = []\n for item in sorted(state['ordered'], key=lambda s: s[0]):\n result.append((item[1], item[2]))\n return result\n\n def _process_src_dir(state, src_dir):\n for root, _, _ in walk(src_dir, followlinks=True):\n for node in (env.GlobCXXFiles(root) +\n env.Glob(join(root, \"*.h\"))):\n state = _parse_includes(state, node)\n return state\n\n def _parse_includes(state, node):\n skip_includes = (\"arduino.h\", \"energia.h\")\n matches = INCLUDES_RE.findall(node.get_text_contents())\n for (inc_type, inc_name) in matches:\n base_dir = dirname(node.get_abspath())\n if inc_name.lower() in skip_includes:\n continue\n if join(base_dir, inc_name) in state['paths']:\n continue\n else:\n state['paths'].add(join(base_dir, inc_name))\n\n finder = IncludeFinder(base_dir, inc_name, inc_type == \"<\")\n if finder.run():\n _parse_includes(state, env.File(finder.getIncPath()))\n\n _lib_dir = finder.getLibDir()\n if _lib_dir and _lib_dir not in state['libs']:\n state['ordered'].add((\n len(state['ordered']) + 1, finder.getLibName(),\n _lib_dir))\n state['libs'].add(_lib_dir)\n state = _process_src_dir(state, _lib_dir)\n return state\n\n # end internal prototypes\n\n deplibs = _get_dep_libs(src_dir)\n env.Prepend(\n CPPPATH=[join(\"$BUILD_DIR\", l) for (l, _) in deplibs]\n )\n\n # add automatically \"utility\" dir from the lib (Arduino issue)\n env.Prepend(\n CPPPATH=[\n join(\"$BUILD_DIR\", l, \"utility\") for (l, ld) in deplibs\n if isdir(join(ld, \"utility\"))\n ]\n )\n\n libs = []\n for (libname, inc_dir) in deplibs:\n lib = env.BuildLibrary(\n join(\"$BUILD_DIR\", libname), inc_dir)\n env.Clean(libname, lib)\n libs.append(lib)\n return libs\n\n\nclass InoToCPPConverter(object):\n\n PROTOTYPE_RE = re.compile(\n r\"\"\"^(\n (?:\\s*[a-z_\\d]+){1,2} # return type\n \\s+[a-z_\\d]+\\s* # name of prototype\n \\([a-z_,\\.\\*\\&\\[\\]\\s\\d]*\\) # arguments\n )\\s*\\{ # must end with {\n \"\"\",\n re.X | re.M | re.I\n )\n\n DETECTMAIN_RE = re.compile(r\"void\\s+(setup|loop)\\s*\\(\", re.M | re.I)\n\n STRIPCOMMENTS_RE = re.compile(r\"(/\\*.*?\\*/|//[^\\r\\n]*$)\", re.M | re.S)\n\n def __init__(self, nodes):\n self.nodes = nodes\n\n def is_main_node(self, contents):\n return self.DETECTMAIN_RE.search(contents)\n\n @staticmethod\n def _replace_comments_callback(match):\n if \"\\n\" in match.group(1):\n return \"\\n\" * match.group(1).count(\"\\n\")\n else:\n return \" \"\n\n def append_prototypes(self, fname, contents, prototypes):\n contents = self.STRIPCOMMENTS_RE.sub(self._replace_comments_callback,\n contents)\n result = []\n is_appended = False\n linenum = 0\n for line in contents.splitlines():\n linenum += 1\n line = line.strip()\n\n if not is_appended and line and not line.startswith(\"#\"):\n is_appended = True\n result.append(\"%s;\" % \";\\n\".join(prototypes))\n result.append('#line %d \"%s\"' % (linenum, fname))\n\n result.append(line)\n\n return result\n\n def convert(self):\n prototypes = []\n data = []\n for node in self.nodes:\n ino_contents = node.get_text_contents()\n prototypes += self.PROTOTYPE_RE.findall(ino_contents)\n\n item = (basename(node.get_path()), ino_contents)\n if self.is_main_node(ino_contents):\n data = [item] + data\n else:\n data.append(item)\n\n if not data:\n return None\n\n result = [\"#include \"]\n is_first = True\n\n for name, contents in data:\n if is_first and prototypes:\n result += 
self.append_prototypes(name, contents, prototypes)\n else:\n result.append('#line 1 \"%s\"' % name)\n result.append(contents)\n is_first = False\n\n return \"\\n\".join(result)\n\n\ndef ConvertInoToCpp(env):\n\n def delete_tmpcpp_file(file_):\n remove(file_)\n\n ino_nodes = (env.Glob(join(\"$PROJECTSRC_DIR\", \"*.ino\")) +\n env.Glob(join(\"$PROJECTSRC_DIR\", \"*.pde\")))\n\n c = InoToCPPConverter(ino_nodes)\n data = c.convert()\n\n if not data:\n return\n\n tmpcpp_file = join(env.subst(\"$PROJECTSRC_DIR\"), \"piomain.cpp\")\n with open(tmpcpp_file, \"w\") as f:\n f.write(data)\n\n atexit.register(delete_tmpcpp_file, tmpcpp_file)\n\n\ndef exists(_):\n return True\n\n\ndef generate(env):\n env.AddMethod(BuildFirmware)\n env.AddMethod(ProcessFlags)\n env.AddMethod(GlobCXXFiles)\n env.AddMethod(VariantDirRecursive)\n env.AddMethod(BuildFramework)\n env.AddMethod(BuildLibrary)\n env.AddMethod(BuildDependentLibraries)\n env.AddMethod(ConvertInoToCpp)\n return env\n","sub_path":"platformio/builder/tools/platformio.py","file_name":"platformio.py","file_ext":"py","file_size_in_byte":11979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"612401322","text":"import cv2\nimport numpy as np\n\n# video = cv2.VideoCapture('ring_pretzle_test.avi')\nvideo = cv2.VideoCapture(0)\n\ni = 0 # video file name incrementer\nframe = np.array([])\nprev_frame = np.array([])\nwrite = False\ninit = False\nfourCC = cv2.VideoWriter_fourcc(*'MPEG')\nwhile 1:\n prev_frame = frame\n ret, frame = video.read()\n if not ret:\n break\n\n if not init:\n prev_frame = frame\n init = True\n\n diff = cv2.absdiff(frame, prev_frame)\n cv2.imshow('Window', diff)\n\n layer = diff[:,:,0] # Take top layer of diff (RGB values are equal, so only need one layer)\n thresh_intensity = 80 # threshold intensity value (0-255)\n thresh_size = 30 # number of pixels needed to be above thresh_intensity\n row = 20 # row number (from top) to determine if object is fully in frame\n\n if layer[layer>thresh_intensity].size > thresh_size \\\n and np.all(layer[row] stx1 will find the allele files for stx1A and stx1\n allelefiles = glob('{}/{}*.fa'.format(self.allelepath, vtx[:4]))\n for allelefile in allelefiles:\n allele = os.path.basename(allelefile).split('.')[0]\n # Populate the dictionaries\n sample[self.analysistype].targetfiles[allele] = allelefile\n alleles.append(allele)\n sample[self.analysistype].targets[vtx] = alleles\n sample[self.analysistype].targetpath = self.allelepath\n # Run the target baiting method\n self.targets()\n\n def targets(self):\n \"\"\"Hash the allele files as required\"\"\"\n printtime('Performing analysis with {} targets folder'.format(self.analysistype), self.start)\n for sample in self.metadata:\n # Initialise dictionaries\n sample[self.analysistype].vtxhashes = dict()\n sample[self.analysistype].hashcalls = dict()\n try:\n # Iterate through all the vtx genes found for each strain\n for vtx, allelefile in sample[self.analysistype].targetfiles.items():\n # Find the base name/path of the allele file\n targetbase = allelefile.split('.')[0]\n hashfile = '{}.mhs.gz'.format(targetbase)\n # Define the hash call\n hashcall = 'cd {} && mirabait -b {} -k 19 -K {}'.format(self.allelepath, allelefile, hashfile)\n # Add the hash call to the dictionary\n sample[self.analysistype].hashcalls[vtx] = hashcall\n # Run the system call as required\n if not os.path.isfile(hashfile):\n call(hashcall, shell=True, stdout=self.devnull, stderr=self.devnull)\n # Ensure that the hash file was 
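written by mirabait.\n                # NOTE (editor): as invoked above, mirabait digests the allele FASTA into\n                # a gzip-compressed k-mer hash (k=19, '<allele>.mhs.gz') that the later\n                # baiting step reuses; the assert re-checks that the file was 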
successfully created\n assert os.path.isfile(\n hashfile), u'Hashfile could not be created for the combined target file {0!r:s}' \\\n .format(allelefile)\n # Add the hash filename/path to the dictionary\n sample[self.analysistype].vtxhashes[vtx] = hashfile\n except KeyError:\n pass\n # Bait the fastq files\n self.mirabaiting()\n\n def mirabaiting(self):\n \"\"\"Perform baiting of fastq files with the appropriate hashed allele file\"\"\"\n printtime('Performing kmer baiting of fastq files with {} targets'.format(self.analysistype), self.start)\n # Create and start threads for each fasta file in the list\n for i in range(self.cpus):\n # Send the threads to the bait method\n threads = Thread(target=self.mirabait, args=())\n # Set the daemon to true - something to do with thread management\n threads.setDaemon(True)\n # Start the threading\n threads.start()\n for sample in self.metadata:\n try:\n # Create dictionaries to store data for each strain/vtx gene combination\n sample[self.analysistype].outputdir = dict()\n sample[self.analysistype].baitedfastq = dict()\n sample[self.analysistype].baitcall = dict()\n for vtx, vtxhash in sample[self.analysistype].vtxhashes.items():\n # Set the output directory for each vtx gene\n sample[self.analysistype].outputdir[vtx] = os.path.join(sample.general.outputdirectory,\n self.analysistype, vtx)\n # Set attribute values\n sample[self.analysistype].baitedfastq[vtx] = \\\n '{}/{}_targetMatches.fastq'.format(sample[self.analysistype].outputdir[vtx], vtx)\n # Create the folder (if necessary)\n make_path(sample[self.analysistype].outputdir[vtx])\n # Create the mirabait call\n if len(sample.general.fastqfiles) == 2:\n syscall = 'mirabait -c -B {} -t 4 -o {} -p {} {}'\\\n .format(vtxhash, sample[self.analysistype].baitedfastq[vtx],\n sample.general.fastqfiles[0], sample.general.fastqfiles[1])\n else:\n syscall = 'mirabait -c -B {} -t 4 -o {} {}' \\\n .format(vtxhash, sample[self.analysistype].baitedfastq[vtx],\n sample.general.fastqfiles[0])\n sample[self.analysistype].baitcall[vtx] = syscall\n # Add the variables to the queue\n self.baitqueue.put((sample, vtx))\n except KeyError:\n pass\n self.baitqueue.join()\n # Perform reference mapping\n self.mapping()\n\n def mirabait(self):\n while True:\n sample, vtx = self.baitqueue.get()\n # Run the system call (if necessary)\n if not os.path.isfile(sample[self.analysistype].baitedfastq[vtx]):\n call(sample[self.analysistype].baitcall[vtx], shell=True, stdout=self.devnull, stderr=self.devnull)\n self.baitqueue.task_done()\n\n def mapping(self):\n \"\"\"Perform target indexing, reference mapping, SAM header editing, and BAM sorting using bowtie2 and samtools\"\"\"\n printtime('Performing reference mapping', self.start)\n for i in range(self.cpus):\n # Send the threads to\n threads = Thread(target=self.map, args=())\n # Set the daemon to True - something to do with thread management\n threads.setDaemon(True)\n # Start the threading\n threads.start()\n for sample in self.metadata:\n try:\n # Create dictionaries to store data for each strain/vtx gene combination\n sample[self.analysistype].sortedbam = dict()\n sample[self.analysistype].baitfilenoext = dict()\n sample[self.analysistype].faifile = dict()\n sample[self.analysistype].bowtie2align = dict()\n sample[self.analysistype].bowtie2build = dict()\n sample[self.analysistype].samindex = dict()\n for vtx, vtxfastq in sample[self.analysistype].baitedfastq.items():\n # Set the path/name for the sorted bam file to be created\n sample[self.analysistype].sortedbam[vtx] = \\\n 
'{}/{}_sorted.bam'.format(sample[self.analysistype].outputdir[vtx], vtx)\n # Remove the file extension of the bait file for use in the indexing command\n sample[self.analysistype].baitfilenoext[vtx] = \\\n sample[self.analysistype].targetfiles[vtx].split('.')[0]\n # Use bowtie2 wrapper to create index the target file\n bowtie2build = Bowtie2BuildCommandLine(reference=sample[self.analysistype].targetfiles[vtx],\n bt2=sample[self.analysistype].baitfilenoext[vtx],\n **self.builddict)\n # Use samtools wrapper to set up the bam sorting command\n samsort = SamtoolsSortCommandline(input_bam=sample[self.analysistype].sortedbam[vtx],\n o=True,\n out_prefix=\"-\")\n # Create a list of programs to which data are piped as part of the reference mapping\n samtools = [\n # When bowtie2 maps reads to all possible locations rather than choosing a 'best' placement, the\n # SAM header for that read is set to 'secondary alignment', or 256. Please see:\n # http://davetang.org/muse/2014/03/06/understanding-bam-flags/ The script below reads stdin\n # and subtracts 256 from headers which include 256\n 'python {}/editsamheaders.py'.format(self.homepath),\n # # Use samtools wrapper to set up the samtools view\n SamtoolsViewCommandline(b=True,\n S=True,\n h=True,\n input_file=\"-\"),\n samsort]\n # Add custom parameters to a dictionary to be used in the bowtie2 alignment wrapper\n indict = {'--very-sensitive-local': True,\n # For short targets, the match bonus can be increased\n '--ma': self.matchbonus,\n '-U': sample[self.analysistype].baitedfastq[vtx],\n '-a': True,\n '--threads': self.cpus,\n '--local': True}\n # Create the bowtie2 reference mapping command\n bowtie2align = Bowtie2CommandLine(bt2=sample[self.analysistype].baitfilenoext[vtx],\n threads=self.cpus,\n samtools=samtools,\n **indict)\n # Create the command to faidx index the bait file\n sample[self.analysistype].faifile[vtx] = sample[self.analysistype].targetfiles[vtx] + '.fai'\n samindex = SamtoolsFaidxCommandline(reference=sample[self.analysistype].targetfiles[vtx])\n # Add the commands (as strings) to the metadata\n sample[self.analysistype].bowtie2align[vtx] = str(bowtie2align)\n sample[self.analysistype].bowtie2build[vtx] = str(bowtie2build)\n sample[self.analysistype].samindex[vtx] = str(samindex)\n # Index the allele files (if necessary)\n if not os.path.isfile('{}.1{}'.format(sample[self.analysistype].baitfilenoext[vtx],\n self.bowtiebuildextension)):\n stdoutbowtieindex, stderrbowtieindex = \\\n map(StringIO, bowtie2build(cwd=sample[self.analysistype].targetpath))\n # Write any error to a log file\n if stderrbowtieindex:\n # Write the standard error to log, bowtie2 puts alignment summary here\n with open(os.path.join(sample[self.analysistype].targetpath,\n '{}_bowtie_index.log'.format(vtx)), 'ab+') as log:\n log.writelines(logstr(bowtie2build, stderrbowtieindex.getvalue(),\n stdoutbowtieindex.getvalue()))\n # Close the stdout and stderr streams\n stdoutbowtieindex.close()\n stderrbowtieindex.close()\n # Add the commands to the queue. 
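\n                    # NOTE (editor): the 'editsamheaders.py' stage above clears the 0x100\n                    # (secondary alignment) bit so multi-mapped reads survive downstream; a\n                    # hedged one-line equivalent on a split SAM line (flag is column 2):\n                    #\n                    #   fields[1] = str(int(fields[1]) & ~0x100)\n                    #\n                    # 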
Note that the commands would usually be attributes of the sample\n # but there was an issue with their serialization when printing out the metadata\n self.mapqueue.put((sample, bowtie2build, bowtie2align, samindex, vtx))\n except KeyError:\n pass\n self.mapqueue.join()\n # Use samtools to index the sorted bam file\n self.indexing()\n\n def map(self):\n while True:\n sample, bowtie2build, bowtie2align, samindex, vtx = self.mapqueue.get()\n # Use samtools faidx to index the bait file - this will be used in the sample parsing\n if not os.path.isfile(sample[self.analysistype].faifile[vtx]):\n stdoutindex, stderrindex = map(StringIO, samindex(cwd=sample[self.analysistype].targetpath))\n # Write any error to a log file\n if stderrindex:\n # Write the standard error to log, bowtie2 puts alignment summary here\n with open(os.path.join(sample[self.analysistype].targetpath,\n '{}_samtools_index.log'.format(vtx)), 'ab+') as log:\n log.writelines(logstr(samindex, stderrindex.getvalue(), stdoutindex.getvalue()))\n # Close the stdout and stderr streams\n stdoutindex.close()\n stderrindex.close()\n # Only run the functions if the sorted bam files and the indexed bait file do not exist\n if not os.path.isfile(sample[self.analysistype].sortedbam[vtx]):\n # Set stdout to a stringIO stream\n stdout, stderr = map(StringIO, bowtie2align(cwd=sample[self.analysistype].outputdir[vtx]))\n if stderr:\n # Write the standard error to log, bowtie2 puts alignment summary here\n with open(os.path.join(sample[self.analysistype].outputdir[vtx],\n '{}_bowtie_samtools.log'.format(vtx)), 'ab+') as log:\n log.writelines(logstr(bowtie2align, stderr.getvalue(), stdout.getvalue()))\n stdout.close()\n stderr.close()\n self.mapqueue.task_done()\n\n def indexing(self):\n \"\"\"Use samtools to index BAM files\"\"\"\n printtime('Indexing sorted bam files', self.start)\n for i in range(self.cpus):\n # Send the threads to\n threads = Thread(target=self.index, args=())\n # Set the daemon to true - something to do with thread management\n threads.setDaemon(True)\n # Start the threading\n threads.start()\n for sample in self.metadata:\n try:\n # Create dictionaries to store data for each strain/vtx gene combination\n sample[self.analysistype].bamindex = dict()\n sample[self.analysistype].sortedbai = dict()\n for vtx, sortedbam in sample[self.analysistype].sortedbam.items():\n # Define the indexing call\n bamindex = SamtoolsIndexCommandline(input=sortedbam)\n # Update the metadata\n sample[self.analysistype].sortedbai[vtx] = sortedbam + '.bai'\n sample[self.analysistype].bamindex[vtx] = str(bamindex)\n self.indexqueue.put((sample, bamindex, vtx))\n except KeyError:\n pass\n self.indexqueue.join()\n # Parse the results\n self.parsing()\n\n def index(self):\n while True:\n sample, bamindex, vtx = self.indexqueue.get()\n # Only make the call if the .bai file doesn't already exist\n if not os.path.isfile(sample[self.analysistype].sortedbai[vtx]):\n # Use cStringIO streams to handle bowtie output\n stdout, stderr = map(StringIO, bamindex(cwd=sample[self.analysistype].outputdir[vtx]))\n if stderr:\n # Write the standard error to log\n with open(os.path.join(sample[self.analysistype].outputdir[vtx],\n '{}_samtools_bam_index.log'.format(vtx)), 'ab+') as log:\n log.writelines(logstr(bamindex, stderr.getvalue(), stdout.getvalue()))\n stderr.close()\n self.indexqueue.task_done()\n\n def parsing(self):\n printtime('Parsing sorted bam files', self.start)\n for i in range(self.cpus):\n # Send the threads to\n threads = Thread(target=self.parse, 
args=())\n # Set the daemon to true - something to do with thread management\n threads.setDaemon(True)\n # Start the threading\n threads.start()\n for sample in self.metadata:\n # Initialise dictionary-containing attributes that can be updated\n sample[self.analysistype].newsequences = dict()\n sample[self.analysistype].newseqclosestmatch = dict()\n sample[self.analysistype].newseqclosestseq = dict()\n sample[self.analysistype].allelematches = dict()\n try:\n for vtx, sortedbam in sample[self.analysistype].sortedbam.items():\n self.parsequeue.put((sample, vtx))\n except KeyError:\n pass\n self.parsequeue.join()\n # Process the new alleles\n self.newalleles()\n\n def parse(self):\n import pysamstats\n import operator\n while True:\n sample, vtx = self.parsequeue.get()\n # Initialise dictionaries to store parsed data\n matchdict = dict()\n depthdict = dict()\n seqdict = dict()\n resultsdict = dict()\n snpdict = dict()\n gapdict = dict()\n faidict = dict()\n uniqueresults = dict()\n refdict = dict()\n # Variable to store the expected position in gene/allele\n pos = 0\n # Get the fai file into a dictionary to be used in parsing results\n with open(sample[self.analysistype].faifile[vtx], 'rb') as faifile:\n for line in faifile:\n data = line.split('\\t')\n faidict[data[0]] = int(data[1])\n try:\n # Use the stat_variation function of pysam stats to return records parsed from sorted bam files\n # Values of interest can be retrieved using the appropriate keys\n correction = 0\n for rec in pysamstats.stat_variation(alignmentfile=sample[self.analysistype].sortedbam[vtx],\n fafile=sample[self.analysistype].targetfiles[vtx],\n max_depth=1000000):\n\n # Add the reference sequence to the dictionary\n if rec['chrom'] not in refdict:\n refdict[rec['chrom']] = str()\n refdict[rec['chrom']] += rec['ref']\n # Initialise seqdict with the current gene/allele if necessary with an empty string\n if rec['chrom'] not in seqdict:\n seqdict[rec['chrom']] = str()\n # Since this is the first position in a \"new\" gene/allele, reset the pos variable to 0\n pos = 0\n # There seems to be a bug in pysamstats with how gaps at the start of the sequence are treated.\n # Although the position is correct, the whole reference sequence is still included, rather than\n # starting at where the gap ends\n if rec['pos'] > pos:\n # If there is a gap of 173 bases at the beginning of the match, the reference sequence\n # still should start at 0, but it starts at 173, therefore, the match actually starts at\n # 2 * 173 = 346\n correction = 2 * rec['pos']\n # The number of gaps is equal to the starting position\n gapdict[rec['chrom']] = rec['pos']\n # The actual position will be rec['pos']\n pos = rec['pos']\n # Allow the position to reach the calculated correction factor\n if rec['pos'] >= correction:\n # Initialise gap dict with 0 gaps\n if rec['chrom'] not in gapdict:\n gapdict[rec['chrom']] = 0\n # If there is a gap in the alignment, record the size of the gap in gapdict\n if int(rec['pos']) > pos:\n # Add the gap size to gap dict\n gapdict[rec['chrom']] += rec['pos'] - pos\n # Add dashes to the sequence to indicate the gap\n seqdict[rec['chrom']] += 'N' * (int(rec['pos'] - pos))\n # Set the expected position to the current position\n pos = int(rec['pos'])\n # Increment pos in preparation for the next iteration\n pos += 1\n # Initialise snpdict if necessary\n if rec['chrom'] not in snpdict:\n snpdict[rec['chrom']] = 0\n # Initialise the current gene/allele in depthdict with the depth (reads_all) if necessary,\n # otherwise add the 
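current depth to the\n                    # running total.\n                    # NOTE (editor): the consensus base below is the argmax of the A/C/G/T\n                    # counts via max(bases.iteritems(), key=operator.itemgetter(1)); ties\n                    # resolve arbitrarily (Python 2 dict ordering). Average depth is later\n                    # computed from this accumulator, which simply adds the 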
current depth to the running total\n if rec['chrom'] not in depthdict:\n depthdict[rec['chrom']] = int(rec['reads_all'])\n else:\n depthdict[rec['chrom']] += int(rec['reads_all'])\n # Dictionary of bases and the number of times each base was observed per position\n bases = {'A': rec['A'], 'C': rec['C'], 'G': rec['G'], 'T': rec['T']}\n # Track any deletions prior to the sequence\n if rec['deletions'] > rec['matches']:\n seqdict[rec['chrom']] += 'N'\n # Increment the running total of the number of SNPs\n snpdict[rec['chrom']] += 1\n else:\n if rec['matches'] > 0 or rec['mismatches'] > 0:\n # If the most prevalent base (calculated with max() and operator.itemgetter())\n # doesn't match the reference base, add this prevalent base to seqdict\n if max(bases.iteritems(), key=operator.itemgetter(1))[0] != rec['ref']:\n seqdict[rec['chrom']] += max(bases.iteritems(), key=operator.itemgetter(1))[0]\n # Increment the running total of the number of SNPs\n snpdict[rec['chrom']] += 1\n else:\n # If the bases match, add the reference base to seqdict\n seqdict[rec['chrom']] += (rec['ref'])\n # Initialise posdict if necessary, otherwise, increment the running total of matches\n if rec['chrom'] not in matchdict:\n matchdict[rec['chrom']] = 1\n else:\n matchdict[rec['chrom']] += 1\n # If there are no results in the bam file, then pass over the strain\n except ValueError:\n pass\n # Iterate through all the genes/alleles with results above\n for allele in sorted(matchdict):\n # If the length of the match is greater or equal to the length of the gene/allele (multiplied by the\n # cutoff value) as determined using faidx indexing, then proceed\n # Calculate the average depth by dividing the total number of reads observed by the length of the gene\n averagedepth = float(depthdict[allele]) / float(matchdict[allele])\n percentidentity = float(matchdict[allele]) / float(faidict[allele]) * 100\n if percentidentity == self.cutoff * 100:\n # Only report a positive result if this average depth is greater than 4X\n if averagedepth > 4:\n # Populate resultsdict with the gene/allele name, the percent identity, and the average depth\n resultsdict.update({allele: {'{:.2f}'.format(percentidentity): '{:.2f}'.format(averagedepth)}})\n # Add the results to the object\n sample[self.analysistype].allelematches[vtx] = resultsdict\n # Determine if there are alleles without a 100% match\n if not resultsdict:\n for allele in sorted(matchdict):\n # Filter the alleles to only include the vtx subunit\n if vtx in allele:\n percentidentity = float(matchdict[allele]) / float(faidict[allele]) * 100\n # Use a more relaxed cutoff to find the closest alleles\n if percentidentity >= self.cutoff * 50:\n uniqueresults.update({allele: percentidentity})\n try:\n # Find the best match (highest percent identity)\n closestallele = max(uniqueresults.iteritems(), key=operator.itemgetter(1))[0]\n percentidentity = max(uniqueresults.iteritems(), key=operator.itemgetter(1))[1]\n averagedepth = float(depthdict[closestallele]) / float(matchdict[closestallele])\n # Populate the metadata with the results\n sample[self.analysistype].newsequences[vtx] = seqdict[closestallele]\n sample[self.analysistype].newseqclosestmatch[vtx] = \\\n {closestallele: {'{:.2f}'.format(percentidentity): '{:.2f}'.format(averagedepth)}}\n sample[self.analysistype].newseqclosestseq[vtx] = {closestallele: refdict[closestallele]}\n except ValueError:\n pass\n self.parsequeue.task_done()\n\n def newalleles(self):\n \"\"\"Determine whether 'new' alleles have been previously stored in the 
custom database\"\"\"\n from Bio import SeqIO\n from Bio.Alphabet import generic_dna\n printtime('Updating metadata with new allele information', self.start)\n # Iterate through all the samples\n for sample in self.metadata:\n # Create a dictionary to store brand new alleles\n sample[self.analysistype].allelesnew = dict()\n sample[self.analysistype].allelesolc = dict()\n # Find the vtx gene and sequence for new alleles\n for vtx, sequence in sample[self.analysistype].newsequences.items():\n # Extract the subtype for the vtx gene from the metadata\n subtype = [stx[-1] for stx in sample.general.vtxset if vtx[:4] == stx[:4]][0]\n # Populate a dictionary with the sequence as the key - duplicates will automatically be discarded\n self.alleledict[sequence] = {vtx: subtype}\n # Set the file to in which the new allele is to be stored\n allelefile = '{}/OLC{}.tfa'.format(sample[self.analysistype].targetpath, vtx)\n # Create the file if it doesn't exist\n open(allelefile, 'ab').close()\n self.allelefiledict[vtx] = allelefile\n # Iterate through all the unique new alleles to find alleles not already in the custom database\n for sequence, vtxtype in self.alleledict.items():\n vtx, subtype = vtxtype.items()[0]\n # Load the allele file into a list\n allelelist = list(SeqIO.parse(self.allelefiledict[vtx], 'fasta'))\n # Put the most recent allele number (+1) for each gene into a dictionary\n try:\n self.allelelist.update({vtx: int(allelelist[-1].id.split(':')[0]) + 1})\n # If there are no previous entries, start at 1000000\n except IndexError:\n self.allelelist.update({vtx: 1000000})\n # List comprehension to store all the sequence of all the alleles\n alleles = [allele.seq for allele in allelelist]\n # If this sequence has not previously been found add it to the dictionary\n if sequence not in alleles:\n self.newallelelist[sequence] = {vtx: subtype}\n # If it has been found before, update the metadata to reflect this\n else:\n for sample in self.metadata:\n currentid = allelelist[-1].id\n # The .newsequences attribute is to be updated\n for stx, seq in sample[self.analysistype].newsequences.items():\n # If the current sequence is the same as the sequence store in the metadata object\n if sequence == seq:\n # Pull the depth of coverage value from the .newseqclosestmatch attribute\n depth = sample[self.analysistype].newseqclosestmatch[vtx].items()[0][1].items()[0][1]\n # Update the metadata\n sample[self.analysistype].allelematches[stx] = {currentid: {100.00: depth}}\n # Add metadata with any custom alleles\n sample[self.analysistype].allelesolc[vtx] = {'nt': {currentid: sequence}}\n # Perform translations to quickly find any alleles with mis/nonsense mutations\n # Add an appropriate number of N's to pad out any partial codons at the end of the sequence\n remainder = 3 - len(sequence) % 3\n sequence += ('N' * remainder)\n protein = Seq.translate(Seq(sequence))\n sample[self.analysistype].allelesolc[vtx].update({'aa': {currentid: str(protein)}})\n # Add the alleles to the appropriate file\n for sequence, vtxtype in self.newallelelist.items():\n # Split the tuple\n vtx, subtype = vtxtype.items()[0]\n try:\n # The header will be the allele number plus the predicted subtype\n currentid = '{}:{}'.format(str(self.allelelist[vtx]), subtype)\n allelesequence = Seq(sequence, generic_dna)\n # Create a sequence record using BioPython\n fasta = SeqRecord(allelesequence,\n # Without this, the header will be improperly formatted\n description='',\n # Use >:currentid as the header\n id=currentid)\n # Open the allele file 
to append\n with open(self.allelefiledict[vtx], 'ab+') as supplemental:\n # Use the SeqIO module to properly format the new sequence record\n SeqIO.write(fasta, supplemental, \"fasta\")\n # Populate the metadata with the new allele information\n for sample in self.metadata:\n for stx, seq in sample[self.analysistype].newsequences.items():\n if sequence == seq:\n # Pull the depth of coverage value from the .newseqclosestmatch attribute\n depth = sample[self.analysistype].newseqclosestmatch[vtx].items()[0][1].items()[0][1]\n # Update the metadata\n sample[self.analysistype].allelematches[stx] = {currentid: {100.00: depth}}\n sample[self.analysistype].allelesnew[vtx] = currentid\n # Add metadata with any custom alleles\n sample[self.analysistype].allelesolc[vtx] = {'nt': {currentid: sequence}}\n # Perform translations to quickly find any alleles with mis/nonsense mutations\n # Add an appropriate number of N's to pad out any partial codons at the end of the sequence\n remainder = 3 - len(sequence) % 3\n sequence += ('N' * remainder)\n protein = Seq.translate(Seq(sequence))\n sample[self.analysistype].allelesolc[vtx].update({'aa': {currentid: str(protein)}})\n # Increment the current id\n self.allelelist[vtx] += 1\n except KeyError:\n pass\n # Create a report of all the new alleles\n self.reports()\n\n def reports(self):\n \"\"\"Create a report with all the new alleles\"\"\"\n import xlsxwriter\n printtime('Creating report', self.start)\n # If, for some reason, analyses are performed more than once on this dataset, alleles will no longer be\n # considered 'new' on subsequent analyses. Don't create/overwrite new allele reports if there are no new alleles\n if self.newallelelist:\n # Create a workbook to store the report. Using xlsxwriter rather than a simple csv format, as I want to be\n # able to have appropriately sized, multi-line cells\n workbook = xlsxwriter.Workbook('{}/newalleles.xlsx'.format(self.reportpath))\n # New worksheet to store the data\n worksheet = workbook.add_worksheet()\n # Add a bold format for header cells. Using a monotype font size 8\n bold = workbook.add_format({'bold': True, 'font_name': 'Courier New', 'font_size': 8})\n # Format for data cells. 
Monotype, size 8, top vertically justified\n courier = workbook.add_format({'font_name': 'Courier New', 'font_size': 8})\n courier.set_align('top')\n # Set the custom width for columns 3 and 4 to be 50, and column 5 to be 60 characters of the default font\n worksheet.set_column(3, 4, 50)\n worksheet.set_column(5, 5, 60)\n # Initialise the position within the worksheet to be (0,0)\n row = 0\n col = 0\n # List of the headers to use\n headers = ['Strain', 'Subunit', 'Closest', 'NucleicAcidSequence', 'ProteinSequence', 'BLASTalignment']\n # Populate the headers\n for category in headers:\n # Write the data in the specified cell (row, col) using the bold format\n worksheet.write(row, col, category, bold)\n # Move to the next column to write the next category\n col += 1\n # Data starts in row 1\n row = 1\n # Initialise variables to hold the longest names; used in setting the column width\n longestname = 0\n longestrefname = 0\n for sample in self.metadata:\n if sample[self.analysistype].allelesnew.items():\n # Every record starts at column 0\n col = 0\n # List to store data; will be used for populating the spreadsheet\n results = list()\n for vtx, allele in sample[self.analysistype].allelesnew.items():\n # Extract the protein sequence of the gene\n protein = sample[self.analysistype].allelesolc[vtx]['aa'][allele]\n # Format the protein as fasta\n aastring = self.fastacarriagereturn(allele, protein, '\\n')\n # Extract the nucleotide sequence, and format it to fasta\n ntsequence = sample[self.analysistype].allelesolc[vtx]['nt'][allele]\n ntstring = self.fastacarriagereturn(allele, ntsequence, '\\n')\n # Write the new alleles to a fasta file\n fastafile = '{}/newalleles.fa'.format(self.reportpath)\n with open(fastafile, 'wb') as newalleles:\n newalleles.write(ntstring)\n # Extract the name of the closest reference allele, and its sequence\n refallele, refsequence = sample[self.analysistype].newseqclosestseq[vtx].items()[0]\n # Get the percent identity of this closest reference allele\n percentidentity = sample[self.analysistype].newseqclosestmatch[vtx].items()[0][1].items()[0][0]\n # Create a pseudo-BLAST alignment of the query and reference sequences\n formattedblast = self.interleaveblastresults(ntsequence, refsequence)\n # Determine the longest name of all the strains, and use it to set the width of column 0\n if len(sample.name) > longestname:\n longestname = len(sample.name)\n worksheet.set_column(0, 0, len(sample.name))\n # Do the same for the reference allele names\n if len(refallele) > longestrefname:\n longestrefname = len(refallele)\n worksheet.set_column(2, 2, longestrefname)\n # Set the width of the row to be the number of lines (number of newline characters) * 11\n worksheet.set_row(row, formattedblast.count('\\n') * 11)\n # Store the variables in a list\n results = [sample.name, vtx, '{}\\n\\n{}%'.format(refallele, percentidentity), ntstring, aastring,\n formattedblast]\n # Write out the data to the spreadsheet\n for data in results:\n worksheet.write(row, col, data, courier)\n col += 1\n # Increase the row counter for the next strain's data\n row += 1\n # Close the workbook\n workbook.close()\n\n @staticmethod\n def interleaveblastresults(query, subject):\n \"\"\"\n Creates an interleaved string that resembles BLAST sequence comparisons\n :param query: Query sequence\n :param subject: Subject sequence\n :return: Properly formatted BLAST-like sequence comparison\n \"\"\"\n # Initialise strings to hold the matches, and the final BLAST-formatted string\n matchstring = ''\n blaststring = 
''\n # Iterate through the query\n for i, bp in enumerate(query):\n # If the current base in the query is identical to the corresponding base in the reference, append a '|'\n # to the match string, otherwise, append a ' '\n if bp == subject[i]:\n matchstring += '|'\n else:\n matchstring += ' '\n # Set a variable to store the progress through the sequence\n prev = 0\n # Iterate through the query, from start to finish in steps of 60 bp\n for j in range(0, len(query), 60):\n # BLAST results string. The components are: current position (padded to four characters), 'OLC', query\n # sequence, \\n, matches, \\n, 'ref', subject sequence. Repeated until all the sequence data are present.\n \"\"\"\n 0000 OLC ATGAAGAAGATATTTGTAGCGGCTTTATTTGCTTTTGTTTCTGTTAATGCAATGGCAGCT\n ||||||||||| ||| | |||| ||||||||| || ||||||||||||||||||||||||\n ref ATGAAGAAGATGTTTATGGCGGTTTTATTTGCATTAGTTTCTGTTAATGCAATGGCAGCT\n 0060 OLC GATTGTGCAAAAGGTAAAATTGAGTTCTCTAAGTATAATGAGAATGATACATTCACAGTA\n ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n ref GATTGTGCAAAAGGTAAAATTGAGTTCTCTAAGTATAATGAGAATGATACATTCACAGTA\n \"\"\"\n blaststring += '{} OLC {}\\n {}\\n ref {}\\n'\\\n .format('{:04d}'.format(j), query[prev:j + 60], matchstring[prev:j + 60], subject[prev:j + 60])\n # Update the progress variable\n prev = j + 60\n # Return the properly formatted string\n return blaststring\n\n @staticmethod\n def fastacarriagereturn(seqname, seq, delim='\\015'):\n \"\"\"\n Format sequences to be fasta with carriage returns ('\\015') instead of newlines\n :param seqname: Name of sequence\n :param seq: Sequence string\n :param delim: Delimiter used to split fasta sequence. Defaults to carriage return\n :return: string of properly formatted sequence\n \"\"\"\n count = 0\n # Standard fasta header\n seqstring = '>{}{}'.format(seqname, delim)\n # Iterate through the sequence, adding a delimiter every 60 characters\n for char in seq:\n if count < 60:\n seqstring += char\n count += 1\n else:\n seqstring += char + delim\n count = 0\n # Add a final delimiter at the end of the string\n seqstring += delim\n # Return the properly formatted string\n return seqstring\n\n def __init__(self, inputobject, analysistype):\n # Create a custom object using a cutoff of 100%\n Custom.__init__(self, inputobject, analysistype, 1.00)\n # Initialise variables\n self.allelepath = os.path.join(self.targetpath, 'vtxalleles')\n self.allelefiles = glob('{}/*.fa'.format(self.allelepath))\n self.alleledict = dict()\n self.allelefiledict = dict()\n self.allelelist = dict()\n self.newallelelist = dict()\n self.reportpath = os.path.join(self.path, 'reports')\n","sub_path":"objectOriented/allelefind.py","file_name":"allelefind.py","file_ext":"py","file_size_in_byte":42336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"56202809","text":"'''\nCreated on 2013/09/01\n\n@author: so\n'''\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\ndef main():\n path = '/Users/so/work/python/TimeSeriesAnalysis/data/'\n name = 'economicdata.csv'\n fileName = path + name\n data = pd.read_csv(fileName)\n\n plt.plot(data[\"saunemp\"])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"TimeSeriesAnalysis/test_readCsv.py","file_name":"test_readCsv.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"349820014","text":"import math\r\nimport pygame\r\nimport 
time\r\npygame.init()\r\npygame.font.init()\r\npixelFont = pygame.font.Font('Pixeled.ttf',5)\r\nwidth = 800\r\nheight = 600\r\nwindow = pygame.display.set_mode((width,height))\r\npygame.display.set_caption('Broke College Student Simulator: An Introduction To Financial Literacy')\r\nclock = pygame.time.Clock()\r\ndTime = time.time()\r\nelapsedTime=0\r\nplayerCycleTime=0\r\n\r\nmovementSpeed=5\r\n\r\nlost=False\r\nselectedDue=0\r\ntutorial1=False\r\ntutorial2=False\r\ntutorial3=False\r\ntutorial4=False\r\nstartCountdown=False\r\ntimeStarted=time.time()\r\ntimeRemaining=300\r\nexited = False\r\ncanMove=False\r\nmovingLeft=False\r\nmovingRight=False\r\ninDuesMenu=False\r\ninPaymentMenu=False\r\ninAtmMenu=False\r\ntoolTipOn=False\r\ninteractionObj={}\r\nroom=0\r\nbalance=0\r\ndebitBalance=0\r\ncreditDebt=0\r\nbottles=0\r\nbottleChart=[1,1,1]\r\ndueChart=[300,500,2000]\r\ndue1Paid=False\r\ndue2Paid=False\r\ndue3Paid=False\r\ncurrentTimeLimit=300\r\n\r\nblack = (0,0,0)\r\nwhite = (255,255,255)\r\nbackground = (175,175,175)\r\n\r\n#Images\r\nplr1=pygame.image.load('Images/idle1.png')\r\nplr2=pygame.image.load('Images/idle2.png')\r\nplr3=pygame.image.load('Images/run1.png')\r\nplr4=pygame.image.load('Images/run2.png')\r\nplrImg=plr2\r\n\r\ndormImg=pygame.image.load('Images/dorm.png')\r\nwall1Img=pygame.image.load('Images/wall1.png')\r\nwall2Img=pygame.image.load('Images/wall2.png')\r\ndoorImg=pygame.image.load('Images/door.png')\r\nelevatorImg=pygame.image.load('Images/elevator.png')\r\nblueBinImg=pygame.image.load('Images/blueBin.png')\r\natmImg=pygame.image.load('Images/atm.png')\r\nvendingMachineImg=pygame.image.load('Images/vendingMachine.png')\r\nrecyclingMachineImg=pygame.image.load('Images/recyclingMachine.png')\r\nstoreFrontImg=pygame.image.load('Images/storeFront.png')\r\n\r\ndorm={'img':wall1Img,'offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':0.5}\r\nwall1={'img':wall1Img,'offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':0.5}\r\nwall2={'img':wall2Img,'offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':0.5}\r\ndoor={'img':doorImg,'offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':1}\r\nelevator={'img':elevatorImg,'msg':'Elevator','offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':1}\r\nblueBin={'img':blueBinImg,'msg':'Blue Bin','offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':1}\r\natm={'img':atmImg,'msg':'ATM','offsetX':0,'offsetY':0,'offsetScaleX':0,'offsetScaleY':0}\r\nvendingMachine={'img':vendingMachineImg,'msg':'Vending Machine','offsetX':0,'offsetY':0,'offsetScaleX':0,'offsetScaleY':0}\r\nrecyclingMachine={'img':recyclingMachineImg,'msg':'Recycling Machine','offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':1}\r\nstoreFront={'img':storeFrontImg,'msg':'Work','offsetX':0,'offsetY':0,'offsetScaleX':0.5,'offsetScaleY':1}\r\n\r\nx = 700\r\ny = 300\r\nscroll=0\r\nminScroll=0#-350\r\nmaxScroll=0#350\r\nminX=0\r\nmaxX=800\r\n\r\ndef updateMoney(amt):\r\n global balance,moneyText,tutorial4,tipText\r\n balance+=amt\r\n if balance>200:\r\n balance=200\r\n if tutorial4==False:\r\n tutorial4=True\r\n tipText = generateText('$200 MAX CASH, IT\\'S BETTER TO SAVE YOUR MONEY IN A BANK ACCOUNT',2,(246, 247, 94))\r\n if balance<0:\r\n moneyText=generateText(\"$\"+str(balance),5,(255,100,100))\r\n else:\r\n moneyText=generateText(\"$\"+str(balance),5,(255,255,255))\r\n\r\ndef updateTime():\r\n global timerText,timeRemaining,timeStarted,startCountdown,lost\r\n if startCountdown==False:\r\n return\r\n 
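# Note: the countdown is derived from the wall clock (currentTimeLimit minus seconds since timeStarted) rather than from accumulated per-frame deltas, so a stalled or slow frame cannot desynchronize the timer.\r\n    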
timeRemaining=currentTimeLimit-int(time.time()-timeStarted)\r\n #print(timeStarted)\r\n if timeRemaining>=60:\r\n timerText=generateText(str(timeRemaining),10,(255,255,255))\r\n else:\r\n timerText=generateText(str(timeRemaining),10,(255,100,100))\r\n if timeRemaining<=0:\r\n lost=True\r\n\r\ndef generateText(txt,size,color):\r\n tempText = pixelFont.render(txt,0,color,None)\r\n tempText = pygame.transform.scale(tempText,(tempText.get_rect().size[0]*size,tempText.get_rect().size[1]*size))\r\n return tempText\r\n\r\ndef generateRect(w,h,color):\r\n tempRect=pygame.draw.rect(window,(255,255,255),(500,0,0,0))\r\n tempBox=pygame.Surface((tempRect.width,tempRect.height))\r\n tempBox.fill(color)\r\n tempBox=pygame.transform.scale(tempBox,(w,h))\r\n return tempBox\r\n\r\ndef newObj(obj,x,y):\r\n obj['offsetX']=x\r\n obj['offsetY']=y\r\n return obj\r\n\r\ndef drawObjects(l):\r\n for i in l:\r\n window.blit(i['img'],(i['offsetX']-i['img'].get_rect().size[0]*i['offsetScaleX']-scroll,i['offsetY']-i['img'].get_rect().size[1]*i['offsetScaleY']))\r\n #print(i['img'].get_rect().size)\r\n\r\ndef getInteraction(l):\r\n global toolTipOn,interactionObj\r\n for i in l:\r\n if i['img'].get_rect().move(i['offsetX']-i['img'].get_rect().size[0]*i['offsetScaleX']-scroll,i['offsetY']-i['img'].get_rect().size[1]*i['offsetScaleY']).collidepoint(pygame.mouse.get_pos()):\r\n if 'msg' in i:\r\n toolTipOn=True\r\n interactionObj=i\r\n newInteraction(i)\r\n return\r\n toolTipOn=False\r\n\r\ndef newInteraction(obj):\r\n global toolTipBox, toolTipText\r\n toolTipText = generateText(obj['msg'],2,(255,255,255))\r\n toolTipBox = generateRect(toolTipText.get_rect().size[0]+10,30,(242, 178, 60))\r\n\r\ndef work(obj):\r\n global timeStarted\r\n updateMoney(75)\r\n timeStarted-=30\r\n\r\ndef openATM(obj):\r\n global inAtmMenu,balanceText,debtText,balance,creditDebt\r\n inAtmMenu=True\r\n balanceText=generateText('BALANCE: $'+str(debitBalance),4,(0, 0, 0))\r\n debtText=generateText('DEBT: $'+str(creditDebt),4,(255, 100, 100))\r\n\r\ndef debitPayment():\r\n global balanceText,debtText,currentTimeLimit,timeStarted,selectedDue,dueChart,debitBalance,due1Paid,due2Paid,due3Paid,inPaymentMenu,duePay1Box,duePay2Box,duePay3Box\r\n if debitBalance>=dueChart[selectedDue]:\r\n debitBalance-=dueChart[selectedDue]\r\n if selectedDue==0:\r\n due1Paid=True\r\n duePay1Box=generateRect(100,60,(100,100,100))\r\n currentTimeLimit=500\r\n timeStarted=time.time()\r\n elif selectedDue==1:\r\n due2Paid=True\r\n duePay2Box=generateRect(100,60,(100,100,100))\r\n currentTimeLimit=1000\r\n timeStarted=time.time()\r\n else:\r\n due3Paid=True\r\n duePay3Box=generateRect(100,60,(100,100,100))\r\n inPaymentMenu=False\r\n balanceText=generateText('BALANCE: $'+str(debitBalance),4,(0, 0, 0))\r\n debtText=generateText('DEBT: $'+str(creditDebt),4,(255, 100, 100))\r\ndef creditPayment():\r\n global tipText,balanceText,debtText,currentTimeLimit,timeStarted,selectedDue,dueChart,debitBalance,due1Paid,due2Paid,due3Paid,inPaymentMenu,creditDebt,duePay1Box,duePay2Box,duePay3Box\r\n if selectedDue==0:\r\n due1Paid=True\r\n duePay1Box=generateRect(100,60,(100,100,100))\r\n currentTimeLimit=500\r\n timeStarted=time.time()\r\n elif selectedDue==1:\r\n due2Paid=True\r\n duePay2Box=generateRect(100,60,(100,100,100))\r\n currentTimeLimit=1000\r\n timeStarted=time.time()\r\n else:\r\n due3Paid=True\r\n duePay3Box=generateRect(100,60,(100,100,100))\r\n creditDebt+=dueChart[selectedDue]\r\n inPaymentMenu=False\r\n balanceText=generateText('BALANCE: $'+str(debitBalance),4,(0, 0, 
0))\r\n    debtText=generateText('DEBT: $'+str(creditDebt),4,(255, 100, 100))\r\n    tipText = generateText('UNPAID DEBT CAN HURT YOUR CREDIT SCORE, MAKE YOUR PAYMENTS ON TIME',2,(246, 247, 94))\r\ndef payDue(due):\r\n    global selectedDue,inPaymentMenu,inDuesMenu\r\n    if due==0:\r\n        if due1Paid==False:\r\n            selectedDue=0\r\n            inPaymentMenu=True\r\n            inDuesMenu=False\r\n    elif due==1:\r\n        if due2Paid==False:\r\n            selectedDue=1\r\n            inPaymentMenu=True\r\n            inDuesMenu=False\r\n    elif due==2:\r\n        if due3Paid==False:\r\n            selectedDue=2\r\n            inPaymentMenu=True\r\n            inDuesMenu=False\r\ndef entrance(obj):\r\n    global room,minScroll,maxScroll,scroll,x,startCountdown,timeStarted,tutorial3,tipText\r\n    if room==0:\r\n        room=1\r\n        minScroll=-350\r\n        maxScroll=350\r\n        scroll=350\r\n        x=750\r\n        timeStarted=time.time()\r\n        startCountdown=True\r\n        if tutorial3==False:\r\n            tutorial3=True\r\n            tipText = generateText('EARN MONEY TO PAY YOUR FIRST DUE',5,(246, 247, 94))\r\n    elif room==1:\r\n        room=2\r\n    else:\r\n        room=1\r\n\r\ndef renderFrame(delta):\r\n    window.fill(background)\r\n    global x,y,scroll\r\n    if movingLeft:\r\n        if scroll>minScroll:\r\n            if x>400:\r\n                x-=movementSpeed\r\n                if x<minX:\r\n                    x=minX\r\n            else:\r\n                scroll-=movementSpeed\r\n        else:\r\n            x-=movementSpeed\r\n            scroll=minScroll\r\n    if movingRight:\r\n        if scroll<maxScroll:\r\n            if x<400:\r\n                x+=movementSpeed\r\n                if x>maxX:\r\n                    x=maxX\r\n            else:\r\n                scroll+=movementSpeed\r\n        else:\r\n            x+=movementSpeed\r\n            scroll=maxScroll\r\n    #print(scroll)\r\n    #window.blit(carImg, (x,y))\r\n    drawObjects(game[room])\r\n    getInteraction(game[room])\r\n    window.blit(plrImg,(x-plrImg.get_rect().size[0]/2,y))\r\n    window.blit(moneyText,(20,0))\r\n\r\n    window.blit(duesBox,((660,15)))\r\n    window.blit(duesText,(675,0))\r\n\r\n    window.blit(timerText,(400-timerText.get_rect().size[0]/2,0))\r\n\r\n    window.blit(tipText,(400-tipText.get_rect().size[0]/2,520))\r\n\r\n    if toolTipOn:\r\n        window.blit(toolTipBox,(pygame.mouse.get_pos()[0],pygame.mouse.get_pos()[1]-30))\r\n        window.blit(toolTipText,(pygame.mouse.get_pos()[0]+5,pygame.mouse.get_pos()[1]-30))\r\n    if inDuesMenu:\r\n        window.blit(duesMenu,(100,100))\r\n        window.blit(due1,(110,100))\r\n        window.blit(due2,(110,200))\r\n        window.blit(due3,(110,300))\r\n\r\n        window.blit(duePay1Box,(590,110))\r\n        window.blit(duePay2Box,(590,210))\r\n        window.blit(duePay3Box,(590,310))\r\n        window.blit(duePayText,(600,100))\r\n        window.blit(duePayText,(600,200))\r\n        window.blit(duePayText,(600,300))\r\n    if inPaymentMenu:\r\n        window.blit(paymentMenu,(200,200))\r\n        window.blit(debitButton,(225,300))\r\n        window.blit(creditButton,(425,300))\r\n        window.blit(debitText,(250,300))\r\n        window.blit(creditText,(435,300))\r\n    if inAtmMenu:\r\n        window.blit(atmMenu,(200,200))\r\n        window.blit(depositButton,(225,210))\r\n        window.blit(withdrawButton,(415,210))\r\n        window.blit(depositText,(250,210))\r\n        window.blit(withdrawText,(430,210))\r\n        window.blit(balanceText,(225,275))\r\n        window.blit(debtText,(225,325))\r\n    if lost:\r\n        window.fill((255,0,0))\r\n\r\n#print(blueBin)\r\n#print(newObj(blueBin,0.5,0.5))\r\ndorm=[{'img':dormImg,'offsetX':400,'offsetY':300,'offsetScaleX':0.5,'offsetScaleY':0.5},{'img':doorImg,'msg':'LEAVE DORM','func':'entrance','offsetX':60,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1}]\r\nhallway=[{'img':wall1Img,'offsetX':400,'offsetY':300,'offsetScaleX':0.5,'offsetScaleY':0.5},{'img':elevatorImg,'msg':'FIRST 
FLOOR','func':'entrance','offsetX':-115,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':doorImg,'offsetX':200,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':doorImg,'offsetX':500,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':doorImg,'offsetX':800,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':doorImg,'offsetX':1100,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':blueBinImg,'offsetX':350,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':blueBinImg,'offsetX':950,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1}]\r\nlobby=[{'img':wall2Img,'offsetX':400,'offsetY':300,'offsetScaleX':0.5,'offsetScaleY':0.5},{'img':elevatorImg,'msg':'SECOND FLOOR','func':'entrance','offsetX':-115,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':blueBinImg,'offsetX':75,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':vendingMachineImg,'offsetX':250,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':recyclingMachineImg,'offsetX':400,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':atmImg,'msg':'USE ATM','func':'openATM','offsetX':550,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1},{'img':storeFrontImg,'msg':'WORK ($75)','func':'work','offsetX':875,'offsetY':430,'offsetScaleX':0.5,'offsetScaleY':1}]\r\ngame=[dorm,hallway,lobby]\r\n\r\nmoneyText=generateText(\"$\"+str(balance),5,(255,255,255))\r\nduesText=generateText(\"DUES\",5,(255,255,255))\r\n\r\nduesBox = generateRect(130,50,(255,150,150))\r\nduesMenu = generateRect(600,400,(255,255,255))\r\n\r\ntoolTipBox = generateRect(100,25,(242, 178, 60))\r\ntoolTipText = generateText('',2,(255,255,255))\r\n\r\ntimerText = generateText('',10,(255,255,255))\r\n\r\ntipText = generateText('CLICK THE \\'DUES\\' BUTTON',5,(246, 247, 94))\r\ndue1=generateText('TEXTBOOKS: $300',5,(255, 100, 100))\r\ndue2=generateText('INSURANCE: $900',5,(255, 100, 100))\r\ndue3=generateText('STUDENT DEBT: $2000',5,(255, 100, 100))\r\nduePay1Box=generateRect(100,60,(100,255,100))\r\nduePay2Box=generateRect(100,60,(100,255,100))\r\nduePay3Box=generateRect(100,60,(100,255,100))\r\nduePayText=generateText('PAY',5,(255,255,255))\r\n\r\npaymentMenu = generateRect(400,200,(255,255,255))\r\ndebitButton = generateRect(150,60,(100,100,255))\r\ncreditButton = generateRect(150,60,(100,255,100))\r\ndebitText = generateText('DEBIT',5,(255,255,255))\r\ncreditText = generateText('CREDIT',5,(255,255,255))\r\n\r\natmMenu = generateRect(400,200,(255,255,255))\r\ndepositButton = generateRect(150,60,(100,255,100))\r\nwithdrawButton = generateRect(150,60,(100,100,255))\r\ndepositText = generateText('DEPOSIT',3,(255,255,255))\r\nwithdrawText = generateText('WITHDRAW',3,(255,255,255))\r\n\r\nbalanceText=generateText('BALANCE: $',4,(0, 0, 0))\r\ndebtText=generateText('DEBT: $',4,(255, 100, 100))\r\n\r\nwhile not exited:\r\n dTime=time.time()-dTime\r\n elapsedTime+=dTime\r\n playerCycleTime+=dTime\r\n updateTime()\r\n #print(int(elapsedTime%1*4))\r\n\r\n for event in pygame.event.get():\r\n #print(event)\r\n if event.type == pygame.QUIT:\r\n exited = True\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if toolTipOn:\r\n if abs(interactionObj['offsetX']-(scroll+x)) <200:\r\n globals()[interactionObj['func']](interactionObj)\r\n elif duesBox.get_rect().move(660,15).collidepoint(pygame.mouse.get_pos()):\r\n if inAtmMenu:\r\n inAtmMenu=False\r\n if tutorial1==False:\r\n tutorial1=True\r\n tipText = generateText('PAY DUES BEFORE DEADLINES',5,(246, 247, 94))\r\n if inDuesMenu==False:\r\n inDuesMenu=True\r\n if 
inPaymentMenu:\r\n inPaymentMenu=False\r\n inDuesMenu=False\r\n inAtmMenu=False\r\n else:\r\n inDuesMenu=False\r\n if tutorial2==False:\r\n tutorial2=True\r\n canMove=True\r\n tipText = generateText('MOVE WITH A/D, INTERACT WITH MOUSE',5,(246, 247, 94))\r\n if inDuesMenu:\r\n if duePay1Box.get_rect().move(590,110).collidepoint(pygame.mouse.get_pos()):\r\n payDue(0)\r\n elif duePay2Box.get_rect().move(590,210).collidepoint(pygame.mouse.get_pos()):\r\n if due1Paid:\r\n payDue(1)\r\n elif duePay2Box.get_rect().move(590,310).collidepoint(pygame.mouse.get_pos()):\r\n if due2Paid:\r\n payDue(2)\r\n elif inPaymentMenu:\r\n if debitButton.get_rect().move(225,300).collidepoint(pygame.mouse.get_pos()):\r\n debitPayment()\r\n elif creditButton.get_rect().move(425,300).collidepoint(pygame.mouse.get_pos()):\r\n creditPayment()\r\n elif inAtmMenu:\r\n if depositButton.get_rect().move(225,210).collidepoint(pygame.mouse.get_pos()):\r\n debitBalance+=balance\r\n updateMoney(balance*-1)\r\n elif withdrawButton.get_rect().move(415,210).collidepoint(pygame.mouse.get_pos()):\r\n updateMoney(debitBalance)\r\n debitBalance=0\r\n balanceText=generateText('BALANCE: $'+str(debitBalance),4,(0, 0, 0))\r\n debtText=generateText('DEBT: $'+str(creditDebt),4,(255, 100, 100))\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_a:\r\n if canMove:\r\n movingLeft=True\r\n playerCycleTime-=playerCycleTime%0.5\r\n elif event.key == pygame.K_d:\r\n if canMove:\r\n movingRight=True\r\n playerCycleTime-=playerCycleTime%0.5\r\n elif event.key == pygame.K_ESCAPE:\r\n inAtmMenu=False\r\n inDuesMenu=False\r\n inPaymentMenu=False\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_a:\r\n if canMove:\r\n movingLeft=False\r\n if not movingRight:\r\n plrImg=plr2\r\n elif event.key == pygame.K_d:\r\n if canMove:\r\n movingRight=False\r\n if not movingLeft:\r\n plrImg=plr1\r\n if movingLeft and not movingRight:\r\n if playerCycleTime%0.33<0.165:\r\n plrImg = plr4\r\n else:\r\n plrImg = plr2\r\n if movingRight and not movingLeft:\r\n if playerCycleTime%0.33<0.165:\r\n plrImg = plr3\r\n else:\r\n plrImg = plr1\r\n renderFrame(dTime)\r\n\r\n pygame.display.update()\r\n dTime = time.time()\r\n clock.tick(60)\r\npygame.quit()\r\nquit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"203621527","text":"__author__ = 'Alex Macniven'\n\n\nimport time\n\n\ndef countdown(count):\n for t in range(count, - 1, -1):\n # get minutes and seconds\n minutes = count // 60\n seconds = count % 60\n # output the time left\n print(str(minutes).zfill(2), ':', str(seconds).zfill(2)) # zfill to add leading zero's\n count -= 1\n time.sleep(1)\n\n\ndef session(pomolen, breaklen):\n pomodone = 0\n for pomodone in range(0, 5):\n countdown(pomolen)\n pomodone += 1\n print('Good work thats ', pomodone, ' complete')\n wait = input('Press any key to commence break')\n if pomodone != 5:\n countdown(breaklen)\n print('Break times over, get back to work')\n wait = input('Press any key to commence work')\n else:\n countdown(pomolen)\n print('Thats a full session done')","sub_path":"pymodoro.py","file_name":"pymodoro.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"527811722","text":"\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nfrom matplotlib import pyplot as 
plt\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Takes directory to trained model')\nparser.add_argument('--model', type=str, nargs = 1, help ='path to trained model', required = False)\nargs = parser.parse_args()\nmodelPath = args.model[0]\n\nresults = pd.read_json(modelPath+'/stat.json')\n\nloss = results[['epoch_num','cross_entropy_loss']]\n\nepoch = loss[['epoch_num']].values.flatten()\ncross_entropy_loss = loss[['cross_entropy_loss']].values.flatten()\nprint(epoch)\n\nfig = plt.figure()\nloss.plot(x='epoch_num', y='cross_entropy_loss')\n\nplt.show()\nfig.savefig('../output/epoch-400d1-9d2-3d3-1_randcrp.png')\n","sub_path":"LyCNN/plotResults.py","file_name":"plotResults.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"197202996","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom .forms import New_post\n\n\nfrom .models import Post\nfrom .models import Menu\nfrom .models import Main_page\nfrom .models import News\n\n# Create your views here.\ndef redir(request):\n return HttpResponseRedirect('/blog')\n\ndef blog(request):\n posts_main = Main_page.objects.all()\n menus = Menu.objects.all()\n context = {'posts_main': posts_main, 'menus': menus}\n return render(request, 'blogapp/blog.html', context)\n\ndef posts(request):\n posts = Post.objects.all()\n menus = Menu.objects.all()\n context = {'posts': posts, 'menus': menus}\n return render(request, 'blogapp/posts.html', context)\n\ndef posts_detail(request, slug):\n post = Post.objects.get(slug__iexact=slug)\n menus = Menu.objects.all()\n return render(request, 'blogapp/post_detail.html', context={'post':post, 'menus': menus})\n\ndef news(request):\n posts_news = News.objects.all()\n menus = Menu.objects.all()\n context = {'posts_news': posts_news, 'menus': menus}\n return render(request, 'blogapp/news.html', context)\n\ndef news_detail(request, slug):\n post = News.objects.get(slug__iexact=slug)\n menus = Menu.objects.all()\n return render(request, 'blogapp/news_detail.html', context={'post':post, 'menus': menus})\n\ndef panel(request):\n post = Post.objects.all()\n news_post = News.objects.all()\n menus = Menu.objects.all()\n page_name = 'Панель администратора'\n return render(request, 'blogapp/panel.html', context={'page_name': page_name, 'posts': post, 'news_posts': news_post, 'menus': menus})\n\ndef edit(request, id):\n menus = Menu.objects.all()\n post = Post.objects.get(id=id)\n if request.method == 'POST':\n form = New_post(request.POST, instance=post)\n if form.is_valid():\n post.save()\n return HttpResponseRedirect(reverse('panel'))\n else:\n form = New_post(instance=post)\n\n return render(request, 'blogapp/edit.html', context={'post': post, 'menus': menus, 'form': form})\n\ndef add_post(request):\n menus = Menu.objects.all()\n if request.method == 'POST':\n form = New_post(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('panel'))\n else:\n form = New_post()\n context = {'menus': menus, 'form': form}\n return render(request,'blogapp/add_post.html', context)\n\ndef delete_post(request, id):\n del_post = Post.objects.get(id=id)\n del_post.delete()\n return HttpResponseRedirect(reverse('panel'))\n\n","sub_path":"blogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} 
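The plotting record above hinges on one pattern: pd.read_json turns a trainer-emitted stat.json (a JSON array of per-epoch records carrying epoch_num and cross_entropy_loss fields) into a DataFrame that plots directly. A minimal self-contained sketch of that pattern follows; the stat records here are hypothetical stand-ins, not output from any real run:

import json
import pandas as pd
import matplotlib
matplotlib.use('agg')  # headless backend, as in the record above
from matplotlib import pyplot as plt

# Hypothetical stand-in for a trainer-produced stat.json: a JSON array of per-epoch records.
stats = [{'epoch_num': e, 'cross_entropy_loss': 2.0 / (e + 1)} for e in range(10)]
with open('stat.json', 'w') as fh:
    json.dump(stats, fh)

results = pd.read_json('stat.json')
ax = results.plot(x='epoch_num', y='cross_entropy_loss')  # DataFrame.plot returns the Axes it drew on
ax.figure.savefig('loss_curve.png')

Saving through the axes returned by DataFrame.plot sidesteps a pitfall visible in the record above: plt.figure() followed by loss.plot(...) creates two separate figures, so fig.savefig(...) writes out the empty one.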
+{"seq_id":"291046563","text":"# TensorFlow and tf.keras\nimport matplotlib.pyplot as plt\n# Helper libraries\nimport numpy as np\nimport tensorflow as tf\n# Default Python Libraries\nimport time\nfrom tensorflow import keras\n\nprint(tf.__version__)\n\n# read fashion mnist data\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n# name label\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', \n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n# rgb 2 grey\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# build layer\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(64, activation=tf.nn.relu),\n keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\n# build model\nmodel.compile(optimizer=tf.train.AdamOptimizer(), \n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n# train model\nmodel.fit(train_images, train_labels, epochs=5)\n\n# evaluate accu\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('Test accuracy:', test_acc)\n\n# predict \npredictions = model.predict(test_images)\n\n# Plot the first 25 test images, their predicted label, and the true label\n# Color correct predictions in green, incorrect predictions in red\nplt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid('off')\n plt.imshow(test_images[i], cmap=plt.cm.binary)\n predicted_label = np.argmax(predictions[i])\n correct_label = test_labels[i]\n if predicted_label == correct_label:\n color = 'green'\n else:\n color = 'red'\n plt.xlabel(\"{} ({})\".format(class_names[predicted_label], \n class_names[correct_label]),\n color=color)\n\nplt.show()","sub_path":"xml2xy/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"168042251","text":"import maya.cmds as cmds\nfrom functools import partial\n\ndef getAllConstraintFromScene(fake_input = True):\n meshTransList = []\n \n for eachCons in cmds.ls(type = \"joint\"):\n #print eachTrans\n cmds.setAttr(eachCons+\".drawStyle\", 2)\n \n\n\ndef win(fake_input = True):\n winName = \"Change Constraint Interp type\"\n versionNumber = 0.1\n \n if cmds.window(winName, exists = True):\n cmds.deleteUI(winName)\n\n cmds.window(winName, sizeable = True,\n titleBar = True, resizeToFitChildren = False,\n menuBar = True, widthHeight = (600, 500),\n title = winName)\n cmds.columnLayout(columnWidth = 600, rowSpacing = 20)\n cmds.rowLayout(numberOfColumns=1, columnAttach = [1, \"both\", 0])\n cmds.button(label = \"shortest\", command = partial(getChosenTransFromScene))\n cmds.setParent(\"..\")\n cmds.showWindow()\n","sub_path":"jointDisplay.py","file_name":"jointDisplay.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"333443082","text":"#!/usr/bin/python\n\nfrom libcosmo.track_halos import *\nfrom libcosmo.utils import *\nfrom libcosmo.units import *\nfrom libcosmo.halos import *\nfrom libcosmo.find_halo import *\nfrom libcosmo.particles import *\nfrom libcosmo.lg_plot import *\nfrom libio.read_ascii import *\nfrom libio.find_files import *\nfrom pygadgetreader import *\nfrom libcosmo.grid import *\nfrom config import *\nimport time\nimport pickle\nimport os.path\n\nini_num=0\nend_num=0\n\n# Should we 
read the gadget file and export the particles, or just read the already exported particles?\n#doReadSlab = True\ndoReadSlab = False\n\n# When reading and storing particle data, reduce by this factor\n#f_rescale = 1.0 \nf_rescale = 4.0 \n#f_rescale = 16.0 \n\n#all_lg_base = simu_runs()\n#all_lg_base=['00_06']\n#all_lg_base=['17_10']\n#all_lg_base=['34_13']\n#all_lg_base=['55_02']\nall_lg_base=['09_18']\n#all_lg_base=['01_12']\n#all_lg_base=['17_13']\n\n#subruns = ['00']\nsubruns = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09']\n\nmax_range=0\n\nfor isub in range(0, max_range):\n sub_str = '%02d' % isub\n subruns.append(sub_str)\n\n#box = '500'; resolution = '512'; n_files = 8; withGas = False\n#box = '100'; resolution = '4096'; n_files = 1; withGas = False\nbox = '100'; resolution = '2048'; n_files = 1; withGas = False\n#box = '100'; resolution = '1024'; n_files = 1; withGas = True\n\n#box_size = 100.0e+3; plot_side = 10.0e+3; thickn = 5000.0; units = 'kpc'\nbox_size = 100.0; plot_side = 0.35; thickn = 1.5; units = 'Mpc'\n\nsnap_name='snapshot_054'\n\n#fn_lg = 'saved/lgs_00.pkl'\nfn_lg = 'saved/lgs_'+resolution+'_'+all_lg_base[0]+'_00.pkl'\nf_lg = open(fn_lg, 'rb')\nthis_lg = pickle.load(f_lg)\n\n#print(this_lg[0].geo_com())\n#box_center = [46.6, 50.7, 47.8]\n#box_center= this_lg[0].geo_com()\nbox_center= this_lg.geo_com()\n\nfor ip in range(0, 3): \n box_center[ip] = box_center[ip] / 1000.0\n\nprint('Rescaling the plot around the LG position: ', box_center)\n\nfor subrun in subruns:\n base_path = '/home/eduardo/CLUES/DATA/'+resolution+'/'+all_lg_base[0]+'/' + subrun + '/'\n #base_path = '/home/oem/CLUES/DATA/'+resolution+'/'+all_lg_base[0]+'/' + subrun + '/'\n\n # File Names for the output slabs\n fn_0 = base_path + 'slab_xy0_' + str(f_rescale) + '.pkl'\n fn_1 = base_path + 'slab_xy1_' + str(f_rescale) + '.pkl'\n fn_4 = base_path + 'slab_xy4.pkl'\n\n if doReadSlab == True:\n # Double the side of the slab just in case\n plot_side = plot_side * 2\n print(base_path + snap_name)\n\n if withGas:\n [x0, y0] = return_slab(base_path + snap_name, 2, box_center, plot_side, thickn, n_files, f_rescale, units, 0)\n # Never rescale star particles\n [x4, y4] = return_slab(base_path + snap_name, 2, box_center, plot_side, thickn, n_files, 1.0, units, 4)\n f_0 = open(fn_0, 'wb')\n pickle.dump([x0, y0], f_0)\n f_4 = open(fn_4, 'wb')\n pickle.dump([x4, y4], f_4)\n print('Dumping to files: ', fn_0, fn_4)\n\n [x1, y1] = return_slab(base_path + snap_name, 2, box_center, plot_side, thickn, n_files, f_rescale, units, 1)\n f_1 = open(fn_1, 'wb')\n pickle.dump([x1, y1], f_1)\n\n print('Dumping to files: ', fn_0, fn_1, fn_4)\n\n f_1.close()\n\n if withGas:\n f_0.close()\n f_4.close()\n\n else:\n # DM\n slab = [fn_1]; ptype = 1\n bw_smooth = 0.25; nbins = 750\n f_out = 'ProjectsPlots/lg_rhos_' + resolution + '_' + all_lg_base[0] + '_' + subrun + '_grid' + str(nbins)\n simple_plot_rho(box_center, plot_side, f_out, nbins, f_rescale, thickn, units, slab, bw_smooth, ptype)\n\n # gas and stars\n if withGas:\n slab = [fn_0, fn_4]; ptype = 0\n #bw_smooth = 0.025; nbins = 256\n bw_smooth = 0.1; nbins = 64\n simple_plot_rho(box_center, plot_side, f_out, nbins, f_rescale, thickn, units, slab, bw_smooth, ptype)\n","sub_path":"old_code/plot_lgs_rhos.py","file_name":"plot_lgs_rhos.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"395328621","text":"# -- coding: utf-8 --\nimport pandas as pd\nimport numpy as np\nimport 
matplotlib.pyplot as plt\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\nfrom h2o.estimators.random_forest import H2ORandomForestEstimator\nfrom h2o.grid.grid_search import H2OGridSearch\n\n\ndef create_descriptive_stat(df, var_list):\n    \"\"\"\n    Creates descriptive statistics on a pandas DataFrame.\n\n    :param df: pandas DataFrame\n    :param var_list: list of variables to include\n    \"\"\"\n    df = df[var_list]\n    desc_df = df.describe(percentiles=np.arange(0.1, 1, 0.1).round(2))\n    desc_df = desc_df.append(df.agg({np.median\n                                     , lambda x: x.mode()[0]})).rename(index={'<lambda>': 'mode'}).transpose()\n    desc_df['missing nbr'] = df.isnull().sum()\n    desc_df['missing %'] = ((df.isnull().sum()) / df.shape[0] * 100)\n    desc_df = desc_df[['min', 'max', 'mean', 'mode', 'median', '10%', '20%',\n                       '30%', '40%', '50%', '60%', '70%', '80%', '90%', 'missing nbr', 'missing %']].transpose()\n\n    return desc_df.round(2)\n\ndef plot_full_stacked_density(df, target_var, var, kind):\n    \"\"\"\n    Plots a full stacked density plot of the given variable.\n\n    :param df: pandas DataFrame\n    :param target_var: target variable\n    :param var: variable\n    :param kind: the kind of the plot, e.g. 'bar', 'area', 'density'\n    \"\"\"\n    pivot_df = df.groupby([target_var, var]).size().reset_index().pivot(columns=target_var, index=var, values=0)\n    pivot_df = pivot_df.div(pivot_df.sum(1), axis=0)\n    pivot_df.plot(kind=kind, stacked=True, rot=45, title='Density of churn')\n    plt.ylabel('Density of churn (%)')\n\n    return plt.show()\n\n\ndef calc_pearson_correlation(df, var_list):\n    \"\"\"\n    Calculates Pearson correlation between variables.\n\n    :param df: pandas DataFrame\n    :param var_list: list of variables\n    \"\"\"\n    pearson_corr = df[var_list].apply(lambda x: pd.factorize(x)[0]).corr(method='pearson').abs()\n    return pearson_corr\n\n\ndef select_by_correlation_threshold(df, threshold):\n    \"\"\"\n    Lists all variable pairs whose correlation is higher than or equal to the threshold.\n\n    :param df: pandas DataFrame with the correlating variable pairs\n    :param threshold: threshold value\n    \"\"\"\n    corr_sorted = df.unstack().sort_values(ascending=False).drop_duplicates()\n    corr_df = pd.DataFrame(corr_sorted)\n    corr_df = corr_df.reset_index().rename(columns={'level_0': 'variable_1', \n                                                    'level_1': 'variable_2', \n                                                    0: 'correlation'})\n    corr_df = corr_df[corr_df['correlation'] != 1]\n    corr_df = corr_df[corr_df.correlation >= threshold]\n    return corr_df\n\n\ndef variable_selection(selected, target, train, nfolds, n, model_type):\n    \"\"\"\n    Performs variable selection based on the model type selected by the user. \n    Returns a variable importance plot and a list with the n most important variables. 
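GBM and DRF rank variables by the model's native variable importance; the GLM path fits a lasso (alpha=1.0) and ranks variables by the magnitude of their standardized coefficients.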
\n\n :param selected: list of selected variables\n :param target: target variable\n :param train: h2o frame with train data\n :param nfolds: the number of folds to use for cross-validation\n :param n: number of important variables to be selected at the end of the training process\n :param model_type: the name of the model the variable selection should be calculated on (can be 'GBM', 'GLM', 'DRF')\n \"\"\"\n if model_type == 'GBM':\n model = H2OGradientBoostingEstimator(balance_classes=True,\n seed=1234,\n model_id='GBM_var_sel',\n nfolds=nfolds,\n stopping_rounds=3,\n stopping_tolerance=0.01,\n stopping_metric=\"lift_top_group\")\n\n model.train(x=selected, y=target, training_frame=train)\n\n varimp = model.varimp(use_pandas=True)[0:n + 1]\n result = list(varimp['variable'])\n\n if model_type == 'GLM':\n model = H2OGeneralizedLinearEstimator(family='binomial',\n alpha=1.0, # lasso regularization\n lambda_search=True,\n nfolds=nfolds,\n standardize=True,\n seed=1234,\n model_id='GLM_var_sel')\n\n model.train(x=selected, y=target, training_frame=train)\n\n varimp = model.coef_norm()\n glm_df = pd.DataFrame(varimp.items(), columns=['variable', 'scaled_importance'])\n glm_df = glm_df[glm_df.scaled_importance > 0.0]\n glm_df = glm_df.sort_values(by=['scaled_importance'], ascending=False)\n glm_df = glm_df[glm_df.variable != 'Intercept']\n result = glm_df['variable'][0:n]\n for i in range(len(result)):\n result.iloc[i] = result.iloc[i].split('.')[0]\n result = list(set(result))\n\n if model_type == 'DRF':\n model = H2ORandomForestEstimator(balance_classes=True,\n seed=1234,\n ntrees=10,\n model_id=\"DRF_var_sel\",\n nfolds=nfolds,\n stopping_tolerance=0.01,\n stopping_metric=\"lift_top_group\")\n\n model.train(x=selected, y=target, training_frame=train)\n\n varimp = model.varimp(use_pandas=True)[0:n]\n result = list(varimp['variable'])\n\n return model, result\n\n\ndef modelling_for_testing_variables(target, var_list, train, nfolds):\n \"\"\"\n Trains 3 models (GBM, GLM, Random Forest) for testing a list of variables.\n\n :param selected: list of selected variables\n :param target: target variable\n :param train: h2o frame with train data\n :param nfolds: the number of folds to use for cross-validation\n\n \"\"\"\n model_gbm = H2OGradientBoostingEstimator(balance_classes=True,\n seed=1234,\n nfolds=nfolds,\n stopping_tolerance=0.01,\n stopping_metric=\"lift_top_group\")\n\n model_glm = H2OGeneralizedLinearEstimator(family='binomial',\n seed=1234,\n nfolds=nfolds,\n alpha=0.7,\n lambda_search=True)\n\n model_drf = H2ORandomForestEstimator(balance_classes=True,\n seed=1234,\n nfolds=nfolds,\n stopping_tolerance=0.01,\n stopping_metric=\"lift_top_group\",\n ntrees=15)\n\n model_gbm.train(x=var_list, y=target, training_frame=train)\n model_glm.train(x=var_list, y=target, training_frame=train)\n model_drf.train(x=var_list, y=target, training_frame=train)\n\n model_gbm.name = 'GBM'\n model_glm.name = 'GLM'\n model_drf.name = 'DRF'\n\n return model_gbm, model_glm, model_drf\n\n\ndef result_comparison(model_list, test, target):\n \"\"\"\n Compare results of different models on the test set.\n\n :param model_list: list of trained h2o models\n :param test: h2o frame with test data\n :param target: target variable\n \"\"\"\n model_name = []\n cum_resp_rate_val = []\n cum_cap_rate_val = []\n cum_resp_rate_val_20 = []\n cum_cap_rate_val_20 = []\n auc_vars = []\n nbr = []\n target_nbr = []\n\n for i in model_list:\n auc = i.model_performance(test_data=test).auc()\n\n lift_df = 
i.model_performance(test_data=test).gains_lift().as_data_frame().iloc[5:6, :]\n lift_df_20 = i.model_performance(test_data=test).gains_lift().as_data_frame().iloc[7:8, :]\n\n cum_resp_rate_val.append(lift_df.iloc[0]['cumulative_response_rate'])\n cum_cap_rate_val.append(lift_df.iloc[0]['cumulative_capture_rate'])\n\n cum_resp_rate_val_20.append(lift_df_20.iloc[0]['cumulative_response_rate'])\n cum_cap_rate_val_20.append(lift_df_20.iloc[0]['cumulative_capture_rate'])\n\n model_name.append(i.name)\n nbr.append(test.shape[0])\n target_nbr.append(test[target].sum())\n auc_vars.append(auc)\n\n res_df = pd.DataFrame({'Model': model_name, 'Customer Nbr': nbr, 'Target Nbr': target_nbr, 'AUC': auc_vars,\n 'Cum Response Rate TOP 10%': cum_resp_rate_val,'Cum Response Rate TOP 20%': cum_resp_rate_val_20,\n 'Cum Capture Rate TOP 10%': cum_cap_rate_val, 'Cum Capture Rate TOP 20%': cum_cap_rate_val_20\n })\n\n res_df = res_df.round(2)\n res_df['Target Nbr'] = res_df['Target Nbr'].astype(int)\n res_df['Target Found TOP 10%'] = (res_df['Target Nbr'] * res_df['Cum Capture Rate TOP 10%']).astype(int)\n res_df['Target Found TOP 20%'] = (res_df['Target Nbr'] * res_df['Cum Capture Rate TOP 20%']).astype(int)\n\n fin_df = res_df[['Model', 'AUC', 'Customer Nbr', 'Target Nbr',\n 'Cum Capture Rate TOP 10%', 'Cum Response Rate TOP 10%', 'Target Found TOP 10%',\n 'Cum Capture Rate TOP 20%', 'Cum Response Rate TOP 20%', 'Target Found TOP 20%']]\\\n .sort_values(by='AUC', ascending=False)\n\n return fin_df\n\n\ndef gbm_grid_search_max_depth(selected, target, train, test, nfolds):\n \"\"\"\n Performs grid search on a GBM model to find the optimal max_depth parameter that maximizes the AUC on the test frame.\n Returns the best model and the grid. \n\n :param selected: list of selected variables\n :param target: target variable\n :param train: h2o frame with train data\n :param test: h2o frame with test data\n :param nfolds: the number of folds to use for cross-validation\n \"\"\"\n hyperparameters = {'max_depth': [3, 4, 5, 6, 7, 8]}\n search_criteria = {'strategy': \"Cartesian\"}\n gbm_grid = H2OGridSearch(H2OGradientBoostingEstimator(seed=1234,\n balance_classes=True,\n nfolds=nfolds),\n hyperparameters,\n search_criteria=search_criteria)\n\n gbm_grid.train(x=selected, y=target, training_frame=train, validation_frame=test)\n\n gbm_grid_table = gbm_grid.get_grid(sort_by='auc', decreasing=True)\n gbm_best_model = gbm_grid.models[0]\n gbm_best_model.name = 'best GBM - max_detph'\n gbm_grid_table = gbm_grid_table.sorted_metric_table().drop('model_ids', axis=1)\n\n return gbm_best_model, gbm_grid_table\n\n\ndef gbm_grid_search(selected, target, train, test, nfolds, max_depth, ntrees_list, min_rows_list, show_top):\n \"\"\"\n Performs grid search on a GBM model to find the optimal parameters that maximize the AUC on the test frame.\n Returns the best model and the grid. 
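The grid is a Cartesian sweep over max_depth values 3 through 8, with the candidate models ranked by AUC on the validation frame.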
\n\n :param selected: list of selected variables\n :param target: target variable\n :param train: h2o frame with train data\n :param test: h2o frame with test data\n :param nfolds: the number of folds to use for cross-validation\n :param max_depth: specifies the maximum depth to which each tree will be built\n :param ntrees_list: list of number of trees to build in the model\n :param min_rows_list: list of the minimum number of observations for a leaf in order to split\n \"\"\"\n hyperparameters = {'ntrees': ntrees_list,\n 'min_rows': min_rows_list,\n 'min_split_improvement': [1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8],\n 'learn_rate': [0.05, 0.08, 0.1, 0.25, 0.35, 0.5, 1],\n 'learn_rate_annealing': [0.9, 0.93, 0.95, 0.99, 0.1],\n 'col_sample_rate': [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],\n 'sample_rate': [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],\n 'col_sample_rate_change_per_level': [0.3, 0.4, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 1,\n 1.5, 1.8, 2],\n 'col_sample_rate_per_tree': [0.3, 0.4, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 1],\n 'histogram_type': [\"UniformAdaptive\", \"QuantilesGlobal\", \"Random\"]\n }\n\n search_criteria = {'strategy': \"RandomDiscrete\",\n 'stopping_metric': \"lift_top_group\",\n 'stopping_tolerance': 0.01,\n 'stopping_rounds': 3,\n 'max_runtime_secs': 800,\n 'max_models': 50}\n\n gbm_grid = H2OGridSearch(H2OGradientBoostingEstimator(seed=1234,\n balance_classes=True,\n max_depth=max_depth,\n nfolds=nfolds),\n hyperparameters,\n search_criteria=search_criteria)\n\n gbm_grid.train(x=selected, y=target, training_frame=train, validation_frame=test)\n\n gbm_grid_table = gbm_grid.get_grid(sort_by='auc', decreasing=True)\n gbm_best_model = gbm_grid.models[0]\n gbm_best_model.name = 'best GBM - grid search'\n gbm_grid_table = gbm_grid_table.sorted_metric_table().drop('model_ids', axis=1)[0:show_top]\n\n return gbm_best_model, gbm_grid_table\n\n\ndef glm_grid_search(selected, target, train, test, nfolds, show_top):\n \"\"\"\n Performs grid search on a GLM model to find the optimal parameters that maximize the AUC on the test frame.\n Returns the best model and the grid. 
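The search uses a RandomDiscrete strategy over the sampling, learning-rate, and histogram hyperparameters, capped at 50 models or 800 seconds of runtime.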
\n\n :param selected: list of selected variables\n :param target: target variable\n :param train: h2o frame with train data\n :param test: h2o frame with test data\n :param nfolds: the number of folds to use for cross-validation\n \"\"\"\n hyperparameters = {'alpha': [0.01, 0.1, 0.3, 0.5, 0.7, 0.8, 0.9, 1],\n 'lambda': [0, 1, 0.5, 0.1, 0.001, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]}\n\n search_criteria = {'strategy': \"Cartesian\"}\n\n glm_grid = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial',\n seed=1234,\n balance_classes=True,\n nfolds=nfolds,\n standardize=True),\n hyperparameters,\n search_criteria=search_criteria)\n\n glm_grid.train(x=selected, y=target, training_frame=train, validation_frame=test)\n\n glm_grid_table = glm_grid.get_grid(sort_by='auc', decreasing=True)\n glm_best_model = glm_grid.models[0]\n glm_best_model.name = 'best GLM - grid search'\n glm_grid_table = glm_grid_table.sorted_metric_table().drop('model_ids', axis=1)[0:show_top]\n\n return glm_best_model, glm_grid_table\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"123195015","text":"import os\nimport subprocess\nimport tarfile\nfrom distutils.spawn import find_executable\n\nfrom .exceptions import GitExecutionError\nfrom .logging import LOGGER\n\n\nPKG_PATH = os.path.dirname(os.path.realpath(__file__))\nVENDOR_PATH = os.path.realpath('{}/vendor'.format(PKG_PATH))\nGIT_VERSION = '2.4.3'\nGIT_TAR_FILE = '{}/git-{}.tar'.format(VENDOR_PATH, GIT_VERSION)\nTMP_PATH = '/tmp'\nBIN_PATH = os.path.join(TMP_PATH, 'usr/bin')\nGIT_TEMPLATE_DIR = os.path.join(TMP_PATH, 'usr/share/git-core/templates')\nGIT_EXEC_PATH = os.path.join(TMP_PATH, 'usr/libexec/git-core')\nLD_LIBRARY_PATH = os.path.join(TMP_PATH, 'usr/lib64')\nGIT_BINARY = '{}/usr/bin/git'.format(TMP_PATH)\n\n\nif not find_executable('git'):\n LOGGER.info('git not found installing using local copy')\n if not os.path.isfile(GIT_BINARY):\n LOGGER.info('extracting git tarball')\n tar = tarfile.open(GIT_TAR_FILE)\n tar.extractall(path=TMP_PATH)\n tar.close()\n\n LOGGER.info('setting up environment variables')\n os.environ['PATH'] += ':{}'.format(BIN_PATH)\n os.environ['GIT_TEMPLATE_DIR'] = GIT_TEMPLATE_DIR\n os.environ['GIT_EXEC_PATH'] = GIT_EXEC_PATH\n os.environ['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH\n\n\n# def exec_command(*args, **kwargs):\n# ''' For standard commands without any command line input '''\n# options = dict({'cwd': '/tmp', 'env': os.environ}, **kwargs)\n# command = ['git'] + list(args)\n# LOGGER.info('executing git command: \"{}\"'.format(' '.join(command)))\n# p = subprocess.Popen(command, stdout=subprocess.PIPE,\n# stderr=subprocess.PIPE, cwd=options['cwd'],\n# env=options['env'])\n# stdout, stderr = p.communicate()\n# if p.returncode != 0:\n# LOGGER.error('git failed with {} returncode'.format(p.returncode))\n# raise GitExecutionError(\n# 'command={} returncode={} stdout=\"{}\" '\n# 'stderr=\"{}\"'.format(command, p.returncode, stdout, stderr)\n# )\n# return stdout, stderr\n\ndef exec_command(*args, **kwargs):\n ''' \n For commands when command line input is needed add a keyworded arg named 'clinput'\n which is a list of the things you need to input in order of how they are asked. \n For example, if your repository is private, and requires a username/password.\n\n Example 1:\n You want to clone a private repo. 
You can clone via \n git clone [user]:[password]@host.xz/path/to/repo.git\n However this will leave the password in your git/bash history, and it also \n is not allowed if you use an access token as your password (per GitHub standards).\n So instead you can \n git clone host.xz/path/to/repo.git\n Then your keyword arg should include\n git.exec_command('clone', 'pathtorepo', ..., clinput=['username', 'password'])\n \n Or you could\n git clone [user@]host.xz/path/to/repo.git\n In which case your keyword arg should include\n git.exec_command('clone', 'pathtorepo', ..., clinput=['password'])\n Example 2:\n Clone using an access token. \n git clone [user]@host.xz:path/to/repo.git\n In which case it will prompt for a password, which is passed in.\n git.exec_command('clone', 'pathtorepo', ..., clinput=['accesstoken'])\n '''\n options = dict({'cwd': '/tmp', 'env': os.environ}, **kwargs)\n command = ['git'] + list(args)\n LOGGER.info('executing git command: \"{}\"'.format(' '.join(command)))\n\n p, stdout, stderr = None, None, None\n\n if 'clinput' in options:\n LOGGER.info('Inputting command line arguments')\n newline = os.linesep\n # capture stderr and honour cwd/env here as well, so both branches behave the same\n p = subprocess.run(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n input=newline.join(options['clinput']),\n encoding='utf-8',\n cwd=options['cwd'],\n env=options['env'])\n # subprocess.run already waits; expose its captured output like the Popen branch does\n stdout, stderr = p.stdout, p.stderr\n # p = subprocess.Popen(command, \n # stdin = subprocess.PIPE, \n # stdout = subprocess.PIPE,\n # stderr = subprocess.PIPE,\n # cwd=options['cwd'],\n # env=options['env'], \n # universal_newlines=True)\n\n # stdout, stderr = p.communicate( newline.join(options['clinput']) )\n else:\n p = subprocess.Popen(command, \n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, \n cwd=options['cwd'],\n env=options['env'])\n stdout, stderr = p.communicate()\n \n if p.returncode != 0:\n LOGGER.error('git failed with {} returncode'.format(p.returncode))\n raise GitExecutionError(\n 'command={} returncode={} stdout=\"{}\" '\n 'stderr=\"{}\"'.format(command, p.returncode, stdout, stderr)\n )\n return stdout, stderr\n \n","sub_path":"git/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"409310211","text":"import codecs, csvkit\n\nDATA = '../data/'\nwith open(DATA+'explicacoes.csv', 'rb') as csvfile:\n\tarquivo = csvkit.reader(csvfile, delimiter=',', encoding='utf-8')\n\texplicacoes = []\n\tfor linha in arquivo:\n\t\texplicacao = {\n\t\t\t'_id' : linha[0],\n\t\t\t'sigla' : linha[0],\n\t\t\t'nome' : linha[1],\n\t\t\t'descricao' : linha[2].strip()\n\t\t}\n\t\texplicacoes.append(explicacao)\n\ndef mongo_save(explicacoes, clear=False):\n from pymongo import MongoClient\n client = MongoClient()\n db = client.monitorlegislativo\n collection = db.explicacoes\n if (clear):\n collection.drop()\n for e in explicacoes:\n collection.update({'_id' : e['_id']}, e, upsert=True)\n\nmongo_save(explicacoes)","sub_path":"scripts/explicacoes.py","file_name":"explicacoes.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"115960533","text":"from crane import app\nfrom hosts import Host\nfrom flask import jsonify, Response, request\nimport json\nimport concurrent.futures\nimport paramiko\n\ndef get_info_from_container(container, host):\n result = {}\n result['id'] = container['Id']\n result['name'] = container['Name']\n result['image'] = container['Config']['Image']\n result['cmd'] = \" \".join(container['Config']['Cmd'])\n if container['State']['Running']:\n 
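# 'State' comes from the docker inspect JSON; anything not actively running is reported as Stopped\n 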
result['state'] = 'Running'\n else:\n result['state'] = 'Stopped'\n result['hostid'] = host.id\n result['hostname'] = host.name\n return result\n\ndef get_container_from_host(host):\n ssh = host.get_connection()\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker ps -a -q\")\n result = ssh_stdout.read()\n if result == \"\":\n return []\n containers = result.split(\"\\n\")\n container_params = \" \".join(containers)\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker inspect {0}\".format(container_params))\n result = ssh_stdout.read()\n container_list = map(lambda x: get_info_from_container(x, host), json.loads(result))\n return container_list\n\n@app.route('/container', methods=['GET'])\ndef get_containers():\n result = []\n hosts = Host.query.all()\n futures = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n for host in hosts:\n futures.append(executor.submit(get_container_from_host, host))\n for f in concurrent.futures.as_completed(futures):\n result = result + f.result()\n result.sort(key=lambda x:x['name'])\n return jsonify(result=result)\n\n@app.route('/host/<host_id>/container/<container_id>', methods=['DELETE'])\ndef remove_container(host_id, container_id):\n host = Host.query.filter_by(id=host_id).first()\n ssh = host.get_connection()\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker rm {0}\".format(container_id))\n return \"\"\n\n@app.route('/host/<host_id>/container/<container_id>', methods=['GET'])\ndef inspect_container(host_id, container_id):\n host = Host.query.filter_by(id=host_id).first()\n ssh = host.get_connection()\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker inspect {0}\".format(container_id))\n data = ssh_stdout.read()\n return jsonify(result=json.loads(data)[0])\n\n@app.route('/host/<host_id>/container/<container_id>/start', methods=['POST'])\ndef start_container(host_id, container_id):\n host = Host.query.filter_by(id=host_id).first()\n ssh = host.get_connection()\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker start {0}\".format(container_id))\n return \"\"\n\n@app.route('/host/<host_id>/container/<container_id>/stop', methods=['POST'])\ndef stop_container(host_id, container_id):\n host = Host.query.filter_by(id=host_id).first()\n ssh = host.get_connection()\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker stop {0}\".format(container_id))\n return \"\"\n\ndef get_container_logs(host_id, container_id, tail):\n host = Host.query.filter_by(id=host_id).first()\n ssh = host.get_connection()\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker logs --tail={1} {0}\".format(container_id, tail))\n data = ssh_stdout.read()\n return data\n\n@app.route('/host/<host_id>/container/<container_id>/lastlog', methods=['GET'])\ndef get_container_lastlog(host_id, container_id):\n data = get_container_logs(host_id, container_id, 200)\n return jsonify(result=data)\n\n@app.route('/host/<host_id>/container/<container_id>/fulllog', methods=['GET'])\ndef get_container_fulllog(host_id, container_id):\n data = get_container_logs(host_id, container_id, \"all\")\n return Response(data, content_type='text/plain')\n\ndef generate_environment_params(envs):\n envs = envs.split()\n result = \"\"\n for env in envs:\n result = result + \"-e {0} \".format(env)\n return result\n\ndef generate_portmapping_params(ports):\n ports = ports.split()\n result = \"\"\n for port in ports:\n result = result + \"-p {0} \".format(port)\n return result\n\ndef generate_volume_params(volumes):\n volumes = volumes.split()\n result = \"\"\n for volume in volumes:\n result = result + \"-v {0} \".format(volume)\n return 
result\n\ndef generate_capabilities_params(caps):\n caps = caps.split()\n result = \"\"\n for cap in caps:\n result = result + \"--cap-add {0} \".format(cap)\n return result\n\ndef interpolate_string(string, params):\n has_work = True\n result = \"\"\n while (has_work):\n start = string.find(\"%(\")\n if start == -1:\n result += string\n has_work = False\n else:\n end = string.find(\")%\", start)\n if end == -1:\n result += string\n has_work = False\n else:\n param = string[start+2:end]\n result += string[0:start]\n value = params.get(param, \"\")\n result += value\n string = string[end+2:]\n return result\n\ndef interpolate_array(array, params):\n result = []\n for item in array:\n result.append(interpolate_string(item, params))\n return result\n\ndef interpolate_variables(deploy, parameters):\n container = {}\n container['environment'] = generate_environment_params(interpolate_string(deploy['environment'], parameters)) if deploy.has_key('environment') else \"\"\n container['portmapping'] = generate_portmapping_params(interpolate_string(deploy['portmapping'], parameters)) if deploy.has_key('portmapping') else \"\"\n container['volumes'] = generate_volume_params(interpolate_string(deploy['volumes'], parameters)) if deploy.has_key('volumes') else \"\"\n container['capabilities'] = generate_capabilities_params(interpolate_string(deploy['capabilities'], parameters)) if deploy.has_key('capabilities') else \"\"\n container['restart'] = \"--restart={0}\".format(deploy['restart']) if deploy.has_key('restart') else \"\"\n container['command'] = interpolate_string(deploy['command'], parameters) if deploy.has_key('command') else \"\"\n container['name'] = interpolate_string(deploy['name'], parameters)\n container['image'] = interpolate_string(deploy['image'], parameters)\n container['hostname'] = \"--hostname={0}\".format(interpolate_string(deploy['hostname'], parameters)) if deploy.has_key('hostname') else \"\"\n container['predeploy'] = interpolate_string(deploy['predeploy'], parameters) if deploy.has_key('predeploy') else \"\"\n container['postdeploy'] = interpolate_string(deploy['postdeploy'], parameters) if deploy.has_key('postdeploy') else \"\"\n return container\n\ndef run_deploy_hook(ssh, container, hook):\n # use the hook argument ('predeploy'/'postdeploy') instead of hard-coding 'predeploy',\n # and return a successful no-op result so callers can always check 'exit_code'\n if not container.get(hook):\n return {'stdout': '', 'stderr': '', 'exit_code': 0}\n transport = ssh.get_transport()\n sftp = paramiko.sftp_client.SFTPClient.from_transport(transport)\n script = sftp.file(\"/tmp/script\", \"w\")\n script.write(container[hook])\n script.close()\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"/bin/bash /tmp/script\")\n stdout = ssh_stdout.read()\n stderr = ssh_stderr.read()\n exit_code = ssh_stdout.channel.recv_exit_status()\n return { 'stdout': stdout, 'stderr' : stderr, 'exit_code': exit_code }\n\n@app.route('/host/<host_id>/container', methods=['POST'])\ndef deploy_container(host_id):\n payload = request.get_json()  # avoid shadowing the json module imported above\n host = Host.query.filter_by(id=host_id).first()\n ssh = host.get_connection()\n container = {}\n if payload['deploy'] == 'raw':\n container = interpolate_variables(payload['container'], {})\n else:\n container = interpolate_variables(payload['template']['deploy'], payload['parameters'])\n predeploy = run_deploy_hook(ssh, container, \"predeploy\")\n if predeploy['exit_code'] != 0:\n return jsonify(predeploy=predeploy, status=\"error\", message=\"Predeploy script failed!\")\n ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(\"docker run -d --name {0} {1} {2} {3} {4} {5} {6} {7} {8}\".format(\n container['name'],\n container['volumes'],\n container['capabilities'],\n container['hostname'],\n 
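# argument order mirrors the docker CLI: option flags first, then the image, then its command\n 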
container['environment'],\n container['portmapping'],\n container['restart'],\n container['image'],\n container['command']\n ))\n deploy = {}\n deploy['stdout'] = ssh_stdout.read()\n deploy['stderr'] = ssh_stderr.read()\n deploy['exit_code'] = ssh_stdout.channel.recv_exit_status()\n if deploy['exit_code'] != 0:\n return jsonify(predeploy=predeploy, deploy=deploy, status=\"error\", message=\"Starting container failed!\")\n postdeploy = run_deploy_hook(ssh, container, \"postdeploy\")\n if postdeploy['exit_code'] != 0:\n return jsonify(predeploy=predeploy, deploy=deploy, postdeploy=postdeploy, status=\"error\", message=\"Postdeploy script failed!\")\n return jsonify(status=\"success\", container=deploy['stdout'].strip())\n","sub_path":"crane/containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":8778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"495842140","text":"menu = ['','Americano', 'Latte', 'Espresso', 'Mocha', 'Sikhye', 'Sujeonggwa']\nprice = [0,1500, 2000, 1700, 2500, 2000, 1900]\nbill = [0, 10000, 5000, 1000]\n\n#############################\n# Display the menu\ndef menu_print():\n i = 1\n while i< len(menu):\n print(i, menu[i], price[i])\n #print(\"%d. %-10s %5d\"% (i, menu[i], price[i]))\n i = i+1\n#############################\n\n#############################\n# Select a beverage\ndef menu_select():\n n = int(input(\"Select a beverage : \"))\n price_sum = price[n] \n print(menu[n], price[n],'won ', 'total ', price_sum, 'won')\n\n # Add more beverages\n\n while n != 0:\n print()\n n = int(input(\"Enter a beverage number to keep ordering, or 0 to pay : \"))\n if n > 0 and n < len(menu):\n price_sum = price_sum + price[n]\n print(menu[n], price[n],'won ', 'total ', price_sum, 'won')\n else:\n if n == 0:\n print(\"Your order is complete.\")\n else:\n print(\"That item is not on the menu.\")\n return price_sum\n##############################\n\n##############################\n# Payment\ndef menu_pay(total_price):\n # Print the payment options\n for i in range (1, len(bill)):\n print( i,'.',bill[i],'won',end=' ')\n print()\n\n # Take the payment\n pay = 0\n while pay < total_price:\n n = int(input(\"Enter the payment amount : \"))\n if n>0 and n detected [{len(boxes)}] faces\");\n\n for box in boxes:\n cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (255, 255, 0), 4)\n\n cv2.imwrite(os.path.join(path, file), image)\n\nprint(\"Done.\")","sub_path":"python/examples/FaceDetection/FaceDetection.py","file_name":"FaceDetection.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"475266526","text":"from bs4 import BeautifulSoup,NavigableString\nimport urllib3\nfrom urllib.parse import urljoin\nimport csv\nimport time\n\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nhttp = urllib3.PoolManager()\n\narquivo = 'ceepr-par.csv'\nconselho = 'ceepr'\n\n#########################################################################################################################\n########### Create the output document\n# CSV header\nwith open(arquivo, 'a+', encoding='utf-8', newline = '') as csvfile:\n c = csv.writer(csvfile, delimiter=';', quotechar='\"', quoting=csv.QUOTE_ALL)\n c.writerow(['Id','Url','Tipo','Numero','Data','Processo','Relator','Interessado','Ementa','Assunto','Documento','Titulo'])\n\n\n#############\n\nurls = [\n ('http://www.cee.pr.gov.br/modules/conteudo/conteudo.php?conteudo=253', 'CEIF'),\n ('http://www.cee.pr.gov.br/modules/conteudo/conteudo.php?conteudo=254', 'CEMEP'),\n ('http://www.cee.pr.gov.br/modules/conteudo/conteudo.php?conteudo=44', 
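# each listing URL is paired with the acronym of the CEE/PR chamber whose documents it lists\n 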
'CES'),\n ('http://www.cee.pr.gov.br/modules/conteudo/conteudo.php?conteudo=43', 'CEB')\n ]\n\ni = 1\nfor url, env in urls:\n\n page = http.request('GET', url)\n soup = BeautifulSoup(page.data, 'html5lib')\n tipo = ''\n numero = ''\n data = ''\n processo = ''\n relator = ''\n interessado = ''\n ementa = ''\n documento = ''\n titulo = ''\n \n divPage = soup.find('div', {\"id\": \"page\"})\n assunto = divPage.find('h1').text\n linksAno = divPage.find_all('a')\n\n \n time.sleep(1)\n for link in linksAno:\n tipo = 'PAR'\n ano = link.text.strip()\n url1 = link.get('href')\n \n time.sleep(0.5)\n if ano and int(ano) != 9999:\n print(ano)\n try:\n page1 = http.request('GET', url1)\n soup1 = BeautifulSoup(page1.data, 'html5lib')\n \n divPageMeses = soup1.find('div', {\"id\": \"page\"})\n linksMes = divPageMeses.find_all('a')\n except:\n print(url1)\n print('--------------')\n \n linksMes = (a for a in linksMes if 'Voltar' not in a.text and a.text.strip() and not a.text.strip().endswith('.pdf'))\n time.sleep(0.5)\n for linkMes in linksMes:\n mes = linkMes.text.strip()\n urlMes = linkMes.get('href')\n \n if ano:\n try:\n print(ano + ' - ' + mes + ' - ' + urlMes)\n\n pageFinal = http.request('GET', urlMes)\n soupFinal = BeautifulSoup(pageFinal.data, 'html5lib')\n\n divAtos = soupFinal.find('div', {\"id\": \"page\"})\n linksAtos = divAtos.find_all('a') \n time.sleep(0.5)\n for aAto in linksAtos:\n if aAto.text and aAto.text.strip() not in ['Voltar', 'Anterior', 'Próximo','Voltar CEIF','Voltar CEMEP','Voltar ao CEMEP','Voltar ao CEIF','Próxima']:\n titulo = aAto.text\n documento = aAto.get('href')\n url = urlMes\n if 'nº.' in titulo.lower():\n numero = titulo.lower().split('nº.')[1].strip().split(',')[0]\n if ',' in titulo:\n data = (titulo.lower().split(',')[1].replace('aprovado em','').strip().replace('-',''))\n elif '-' in titulo:\n data = (titulo.lower().split('-')[1].replace('aprovado em','').strip().replace('-',''))\n \n if aAto.next_sibling and str(aAto.next_sibling) and str(aAto.next_sibling).strip() and str(aAto.next_sibling).strip() != '-':\n ementa = str(aAto.next_sibling)\n elif aAto.next_sibling and aAto.next_sibling.next_sibling and str(aAto.next_sibling.next_sibling).strip() and str(aAto.next_sibling.next_sibling).strip() != '-' :\n ementa = str(aAto.next_sibling.next_sibling)\n elif aAto.parent and aAto.parent.next_sibling and str(aAto.parent.next_sibling).strip() and str(aAto.parent.next_sibling).strip() != '-' :\n ementa = str(aAto.parent.next_sibling)\n else:\n ementa = (titulo)\n \n ementa = ementa.strip()\n if ementa.startswith('- ') or ementa.startswith(', '):\n ementa = ementa[2:]\n\n print(titulo + ' - ' + ementa)\n id = conselho + '-' + env + '-' + tipo + '-' + ano +'-'+ str(i)\n i = i + 1\n\n time.sleep(0.50)\n with open(arquivo, 'a', encoding='utf-8', newline = '') as csvfile:\n c = csv.writer(csvfile, delimiter=';', quotechar='\"', quoting=csv.QUOTE_ALL)\n c.writerow([id, url, tipo, numero, data, processo, relator, interessado, ementa, assunto, documento,titulo])\n except:\n print('ERRROOO:::'+ano + ' - ' + mes + ' - ' + urlMes)\n \n\n \n\n#########################################################################################################################\n##### ","sub_path":"crawlers/ceepr/crawler_ceepr-ceif.py","file_name":"crawler_ceepr-ceif.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"} +{"seq_id":"457606986","text":"from selenium import webdriver\nimport time\n\ndriver = 
webdriver.Chrome(executable_path=\"C:\\Drivers\\Chrome\\chromedriver.exe\")\n\ndriver.get(\"https://phptravels.com/\")\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ncurrent_win_id=driver.current_window_handle\n#print(current_win_id)\ntime.sleep(5)\ndriver.find_element_by_xpath('//*[@id=\"main-menu\"]/ul/li[8]/a/span').click()\nmul_win_id=driver.window_handles\n#print(mul_win_id)\n#print(type(mul_win_id))\n\n# loop over the window handles to find and act on the child window\nfor id in mul_win_id:\n if current_win_id != id:\n driver.switch_to.window(id)\n driver.find_element_by_id(\"inputEmail\").send_keys(\"Hi There \")\n print(\"Test case passed: entered data in the child window\")\n\ntime.sleep(5)\ndriver.switch_to.window(mul_win_id[0])\ndriver.find_element_by_xpath('//*[@id=\"main-menu\"]/ul/li[1]/span').click()\nprint(\"Clicked Demo on the parent window\")\n\ndriver.quit()\n\n\n","sub_path":"WindowActionParentW.py","file_name":"WindowActionParentW.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"181561604","text":"# Author.............: Park, Pablo Chanwoo\n# File name..........: weekly.py\n# Written Date.......: 2018-12-28\n# Program Description: program automation for crawling and processing weekly report data for Malaysia\n\nfrom selenium import webdriver\nimport pandas as pd\nimport numpy as np\nimport re\nimport time\nimport datetime\nimport os\n\n\n## Define variables\n#### Define chromedriver\ndriver = webdriver.Chrome(\"c:/git/chromedriver.exe\")\ntime.sleep(1)\n\n#### Define stores to crawl data from\nstores = [\"arcoris\", \"ipc\", \"setia-city\", \"mytown\", \"genting\", \"pearl\", \"penang\", \"sunway-pyramid\", \"pavilion\", \"1-utama\", \"ioi-puchong\"]\n\n#### Define date variables\ndate_from = []\ndate_from.append(input('date from(dd): '))\ndate_from.append(input('month from(mm): '))\ndate_from.append('2019')\n#date_from.append(input('year from(yyyy): '))\ndate_from = datetime.datetime.strptime(''.join(date_from),'%d%m%Y')\nrange_from = '{}%2F{}%2F{}+00%3A00%3A00'.format(str(date_from.day).zfill(2), str(date_from.month).zfill(2), date_from.year)\n\ndate_to = date_from + datetime.timedelta(days=7)\nrange_to = '{}%2F{}%2F{}+00%3A00%3A00'.format(str(date_to.day).zfill(2), str(date_to.month).zfill(2), date_to.year)\n\n#### Define html links to crawl data from\nsales_summary = 'https://kyochon.revelup.com/brand/reports/sales_summary/json/?dining_option=&employee=&online_app=&online_app_type=&online_app_platform=&show_unpaid=1&show_irregular=1&range_from={}&range_to={}&format=csv'.format(range_from, range_to)\nhourly_sales = 'https://kyochon.revelup.com/reports/hourly_sales/export/csv/?aggregate_format=hours&employee=&online_app=&online_app_type=&online_app_platform=&dining_option=&show_unpaid=1&show_irregular=1&no-filter=0&day_of_week=&range_from={}&range_to={}'.format(range_from, range_to)\nproduct_mix = 'https://kyochon.revelup.com/reports/product_mix/data/?sort_by=&sort_reverse=&combo_expand=&employee=&online_app=&online_app_type=&online_app_platform=&dining_option=&show_unpaid=1&show_irregular=1&sort_view=1&show_class=1&quantity_settings=0&no-filter=0&day_of_week=&range_from={}&range_to={}&format=csv'.format(range_from, range_to)\n#### NOTE that the report can be downloaded via URL API, per store you are logged onto (28 Dec. 
2018)\n\n#### Define directory\nuser_name = \"박찬우\"\ndirectory = 'c:/Users/{}/Downloads/'.format(user_name) ## adjustment maybe needed accordingly\nsave_direct = 'c:/Users/{}/Desktop/'.format(user_name)\n\n#### Define password for Revel system\npassword = input(\"password for revel system: \")\n\nprint(\"Initiating program...\")\nbeginning = time.time()\n\n\n\n\n## Define functions\n#### function to sort sales by menu\ndef pd_mix_func(file):\n global tmp_pd, sales_by_menu\n \n try:\n tmp_pd = pd.merge(pd.read_csv(directory+file, engine='python'),\n menu,\n left_on=\"Class\",\n right_on = \"asis\")\n tmp_idx = tmp_pd.apply(lambda x: bool(re.search(pattern = '([A-Z]+C)', string=x[3])), axis=1)\n tmp_pd.tobe[tmp_idx]=\"Combo\"\n tmp_pd = tmp_pd.groupby('tobe').sum()['Total Sales']\n\n except:\n for i in range(len(tmp_pd)):\n tmp_pd[i] = np.NaN\n\n if store == stores[0]:\n sales_by_menu = pd.DataFrame(tmp_pd)\n else:\n sales_by_menu = pd.concat([sales_by_menu, pd.DataFrame(tmp_pd)], axis = 1, sort = False)\n sales_by_menu = sales_by_menu.drop(\"Delete\")\n \n#### function to sort sales by time\ndef hourly_func(file):\n global sales_by_time\n \n temp = pd.read_csv(directory+file, engine='python')[['Time', 'Sales']].iloc[9:24]\n \n if store == stores[0]:\n sales_by_time = pd.DataFrame(temp)\n else:\n sales_by_time = pd.concat([sales_by_time, pd.DataFrame(temp)[\"Sales\"]], axis = 1, sort = False)\n\n#### function to sort sales by day, and sales by order type\ndef sales_func(file):\n global daily_gross_sales, daily_net_sales, sales_by_order\n \n \n temp = pd.read_csv(directory+file, engine='python')#[['Gross Sales', 'Net Sales','Togo Sales', 'Eatin Sales', 'Delivery Sales',\n # 'Catering Sales', 'WebOrder', 'Online Sales', 'Shipping Sales', 'Takeaway']]\n temp1 = pd.DataFrame([temp[['Eatin Sales', 'Catering Sales']].sum().sum()/2,\n temp[['Togo Sales', 'Takeaway']].sum().sum()/2 ,\n temp[['Delivery Sales', 'Online Sales', 'Shipping Sales']].sum().sum()/2],\n index = ['Dining', 'Take-out', 'Delivery'])\n \n if store == stores[0]:\n daily_gross_sales = pd.DataFrame(temp['Gross Sales'])\n daily_net_sales = pd.DataFrame(temp['Net Sales'])\n sales_by_order = temp1\n else:\n daily_gross_sales = pd.concat([daily_gross_sales,temp['Gross Sales']], axis = 1, sort = False)\n daily_net_sales = pd.concat([daily_net_sales,temp['Net Sales']], axis = 1, sort = False)\n sales_by_order = pd.concat([sales_by_order, temp1], axis = 1, sort = False)\n \n\n\n## Access web-site //line 14: driver = webdriver.Chrome(\"d:/git/chromedriver.exe\")//\n#### Open the web-site\ndriver.get(\"https://kyochon.revelup.com/login/?next=/dashboard/\") # POS web-site\ntime.sleep(3)\n\n#### Log-in\ndriver.find_element_by_xpath('//*[@id=\"id_username\"]').send_keys(\"JayceKim\")\ndriver.find_element_by_xpath('//*[@id=\"id_password\"]').send_keys(password)\ndriver.find_element_by_xpath('//*[@id=\"form-login\"]/fieldset/div[3]/input').click()\ntime.sleep(3)\n\n#### Open store lists\ndriver.find_element_by_xpath('//*[@id=\"header\"]/div[2]/div[3]/div[2]').click()\ntime.sleep(5)\n\n#### Retrieve total number of stores\ntmp = driver.find_element_by_class_name('fancytree-title').text\nn_store = int(re.findall(string=tmp, pattern = '[0-9]+')[0])\n\n#### Fetch data from the web-site \nfor i in range(1, n_store+1):\n print('data fetching on store #{}...'.format(str(i)))\n #### Access Store\n if i > 1:\n try:\n driver.find_element_by_xpath('//*[@id=\"header\"]/div[2]/div[3]/div[2]').click()\n except:\n print('error on store #{}'.format(i))\n\t ### 
update errors occur here from time to time; exception_class(message, screen, stacktrace). Once the exact failing case is identified, this fallback can be removed\n time.sleep(5)\n driver.find_element_by_xpath('//*[@id=\"establishments-tree\"]/div/div[3]/ul/li[1]/ul/li[{}]/span[2]/span[3]'.format(i)).click()\n time.sleep(5)\n \n #### Run URL APIs\n driver.get(sales_summary)\n driver.implicitly_wait(3)\n driver.get(hourly_sales)\n driver.implicitly_wait(3)\n driver.get(product_mix)\n driver.implicitly_wait(3)\n\n\n\n## Sort menus\n#### Read fetched data\nfor i in stores:\n\t#### Read files\n product_mix = \"Product_Mix_{}_{}_00-00_{}_00-00.csv\".format(i,\n (date_from.strftime('%Y-%m-%d')),\n (date_to.strftime('%Y-%m-%d')))\n #### Group by classes\n if i == 'arcoris':\n tmp = pd.read_csv(directory+product_mix, engine='python').groupby('Class', as_index = False).sum()\n else:\n \t#### In case the file is empty, skip it\n try:\n tmp = pd.concat([tmp, pd.read_csv(directory+product_mix, engine='python').groupby('Class', as_index = False).sum()])\n except:\n pass\n\n#### Retrieve unique menus\ntmp = tmp.Class.unique()\ntmp.sort()\n\n#### Re-group categories for report (\"Beverage\", \"Chicken\", \"Side\", \"Salad\", \"A-La-Carte\")\n#### If a previous classification is available, read it\nif pd.Series(os.listdir(save_direct + 'weekly/')).isin([\"menu.csv\"]).sum() == 1:\n\tmenu = pd.read_csv(save_direct + 'weekly/menu.csv', engine='python')\n#### If not, make the classification.\n#### The below \"tobe\" may need rearrangement\nelse:\n\ttobe = [\"A-La-Carte\", \"Beverage\", \"Beverage\", \"Beverage\", \"Chicken\",\n\t\t\t\"Delete\", \"Delete\", \"Chicken\", \"Beverage\", \"Chicken\",\n \t\t\t\"Beverage\", \"Chicken\", \"Beverage\", \"Side\", \"Chicken\", \n \t\t\t\"Chicken\", \"Salad\", \"Delete\", \"Side\", \"Beverage\"]\n\tmenu = pd.DataFrame([tmp, tobe]).T\n\tmenu.columns = ['asis', 'tobe']\n\t#### final menu to be saved as csv\n\tmenu.to_csv(save_direct + \"weekly/menu.csv\")\n\n\n\n## Read and process data per report\nfor store in stores:\n\t#### call the reports per store and run the user-defined functions\n product_mix = \"Product_Mix_{}_{}_00-00_{}_00-00.csv\".format(store,\n (date_from.strftime('%Y-%m-%d')),\n (date_to.strftime('%Y-%m-%d')))\n hourly_sales = \"Hourly_Sales_{}_{}_00-00_{}_00-00.csv\".format(store,\n (date_from.strftime('%Y-%m-%d')),\n (date_to.strftime('%Y-%m-%d')))\n sales_summary = \"Sales_Summary_{}_{}_00-00_{}_00-00.csv\".format(store,\n (date_from.strftime('%Y-%m-%d')),\n (date_to.strftime('%Y-%m-%d')))\n pd_mix_func(product_mix)\n hourly_func(hourly_sales)\n sales_func(sales_summary)\n \n #### at last store, assign column names to each report DataFrame\n if store == stores[-1]:\n sales_by_time = sales_by_time.set_index(\"Time\")\n sales_by_time.columns = sales_by_menu.columns = daily_gross_sales.columns = daily_net_sales.columns = sales_by_order.columns = stores\n daily_gross_sales = daily_gross_sales[:-1]\n daily_net_sales = daily_net_sales[:-1]\n daily_gross_sales.index = daily_net_sales.index = [(date_from+datetime.timedelta(days=i)).strftime(\"%y-%m-%d\") for i in range(7)]\n daily_gross_sales.index.name = \"Gross\"\n daily_net_sales.index.name = \"Net\"\n \n\n\n\n## Merge reports onto one excel sheet\n# engine belongs to ExcelWriter, not str.format (str.format silently ignores unknown kwargs)\nwriter = pd.ExcelWriter(save_direct + 'Weekly/weekly_report({}-{}).xlsx'.format(date_from.strftime('%m%d'),\n date_to.strftime('%m%d')),\n engine='xlsxwriter')\n\ndaily_net_sales.to_excel(writer, sheet_name=\"raw data\")\ndaily_gross_sales.to_excel(writer, sheet_name=\"raw data\", startrow=9)\nsales_by_order.to_excel(writer, sheet_name=\"raw data\", 
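# layout note: the startrow offsets appear to stack each table below the previous one with a blank separator row\n 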
startrow=9+9)\nsales_by_menu.to_excel(writer, sheet_name=\"raw data\", startrow=9+9+5)\nsales_by_time.to_excel(writer, sheet_name=\"raw data\", startrow=9+9+5+8)\n\nwriter.save()\n\n\n\n# Open the directory with the output\nos.startfile(save_direct + 'Weekly/')\n\n\n## Return time elapsed\nelap_min, elap_sec = np.divmod(time.time()-beginning, 60)\nprint(\"Total process finished in {} min {} sec\".format(int(elap_min), int(elap_sec)))\n","sub_path":"Web_Crawling/Project_KC/weekly_customized.py","file_name":"weekly_customized.py","file_ext":"py","file_size_in_byte":10713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"425726655","text":"import sys\nimport os\nimport time\n\nimport numpy as np\nfrom scipy.fftpack import rfft, irfft, rfftfreq\nfrom scipy.signal import hilbert\nfrom scipy.stats import zscore\nfrom scipy.io import loadmat, savemat\n\ntry:\n import pyfftw\n pyfftw.interfaces.cache.enable()\nexcept ImportError:\n sys.stderr.write(\"pyfftw unavailable\\n\")\n\ntry:\n from stfio import plot as stfio_plot\nexcept ImportError:\n sys.stderr.write(\"stfio unavailable\\n\")\n\n\ndef fgaussColqu(x, f_c):\n \"\"\"\n Eq. 5 from Colquhoun & Sigworth, p. 486 of the blue book\n np.log(2.0)/2.0 = 0.34657359028\n\n Parameters\n ----------\n x : numpy.ndarray\n Frequencies\n f_c : float\n Cutoff frequency (-3 dB)\n\n Returns\n -------\n gauss : numpy.ndarray\n Transfer function to achieve -3dB at f_c\n \"\"\"\n return np.exp(-0.34657359028*(x/f_c)*(x/f_c))\n\n\ndef convolve(x, transfer, arglist, verbose=True):\n \"\"\"\n Convolves an array with a transfer function in the frequency domain\n\n Parameters\n ----------\n x : stfio_plot.Timeseries\n Input data\n transfer : function\n Transfer function\n arglist : list\n Additional arguments to transfer\n verbose : bool, optional\n Verbose output. Default: True\n\n Returns\n -------\n filtered : stfio_plot.Timeseries\n Filtered data\n \"\"\"\n\n t0 = time.time()\n\n inputa = x.data.copy()\n outsize = int(len(inputa)/2.0 + 1)\n outputa = np.empty(outsize, dtype=complex)  # np.complex was removed from NumPy; use the builtin\n\n fft = pyfftw.FFTW(inputa, outputa, direction='FFTW_FORWARD',\n flags=('FFTW_ESTIMATE',), threads=8)\n ifft = pyfftw.FFTW(outputa, inputa, direction='FFTW_BACKWARD',\n flags=('FFTW_ESTIMATE',), threads=8)\n\n if verbose:\n sys.stdout.write(\"Computing frequencies... \")\n sys.stdout.flush()\n f = np.arange(0, len(outputa), dtype=float) / (len(inputa) * x.dt)\n try:\n assert(len(f) == len(outputa))\n except AssertionError:\n sys.stderr.write(\"\\nError in array lengths: %d != %d\\n\" % (\n len(f), len(outputa)))\n sys.exit(0)\n\n if verbose:\n sys.stdout.write(\"done\\nForward fft (convolve)... \")\n sys.stdout.flush()\n fft()\n\n outputa *= transfer(f, *arglist)\n\n if verbose:\n sys.stdout.write(\"done\\nReverse fft (convolve)... \")\n sys.stdout.flush()\n\n ifft(normalise_idft=False)\n\n # Scale\n inputa /= len(x.data)\n\n if verbose:\n sys.stdout.write(\"done (%.2f ms)\\n\" % ((time.time()-t0)*1e3))\n sys.stdout.flush()\n\n return stfio_plot.Timeseries(inputa, x.dt)\n\n\ndef gaussian_filter(x, f_c, verbose=True):\n \"\"\"\n Gaussian filter\n\n Parameters\n ----------\n x : stfio_plot.Timeseries\n Input data\n f_c : float\n Cutoff frequency in kHz (-3 dB)\n verbose : bool, optional\n Verbose output. 
Default: True\n\n Returns\n -------\n x convolved with a Gaussian filter kernel.\n \"\"\"\n\n return convolve(x, fgaussColqu, [f_c, ], verbose=verbose)\n\n\ndef lowpass(x, f_c, verbose=True):\n \"\"\"\n Lowpass filter\n\n Parameters\n ----------\n x : stfio_plot.Timeseries\n Input data\n f_c : float\n Cutoff frequency in kHz (-3 dB)\n verbose : bool, optional\n Verbose output. Default: True\n\n Returns\n -------\n x convolved with a Gaussian filter kernel.\n \"\"\"\n return gaussian_filter(x, f_c, verbose=verbose)\n\n\ndef highpass(x, f_c, verbose=True):\n \"\"\"\n Highpass filter\n\n Parameters\n ----------\n x : stfio_plot.Timeseries\n Input data\n f_c : float\n Cutoff frequency in kHz (-3 dB)\n verbose : bool, optional\n Verbose output. Default: True\n\n Returns\n -------\n x convolved with a complementary Gaussian kernel (1 - Gaussian), i.e. high-pass filtered.\n \"\"\"\n return convolve(\n x, lambda f, f_c: 1.0 - fgaussColqu(f, f_c), [f_c, ],\n verbose=verbose)\n\n\ndef remove_hum(data_raw, dt, humband=(36, 52), referenceband=(20, 30)):\n signal_mean = data_raw.mean()\n signal = data_raw-signal_mean\n W = rfftfreq(signal.size, d=dt)\n f_signal = rfft(signal)\n imax0 = np.where(W > humband[0])[0][0]\n imax1 = np.where(W > humband[1])[0][0]\n f_signal_new = f_signal.copy()\n iref0 = np.where(W > referenceband[0])[0][0]\n iref1 = np.where(W > referenceband[1])[0][0]\n refstd = np.std(f_signal[iref0:iref1])\n maxstd = np.std(f_signal[imax0:imax1])\n f_signal_new[imax0:imax1] /= ((np.abs(f_signal_new[imax0:imax1]/(1.0*refstd)))**1 + 1)\n signal_filtered = irfft(f_signal_new)+signal_mean\n\n return signal_filtered, W, f_signal, f_signal_new\n\ndef fhilbert(signal):\n padding = np.zeros(int(2 ** np.ceil(np.log2(len(signal)))) - len(signal))\n tohilbert = np.hstack((signal, padding))\n \n result = hilbert(tohilbert)\n \n result = result[0:len(signal)]\n\n return result\n\ndef findRipples(signal_bp, signal_noise_bp, std_thresholds=(2, 10), durations=(30, 100), fn_hilbert=None):\n lowThresholdFactor, highThresholdFactor = std_thresholds\n minInterRippleInterval, maxRippleDuration = durations\n \n if fn_hilbert is not None and os.path.exists(fn_hilbert):\n f_hilbert = loadmat(fn_hilbert)\n signal_analytic = f_hilbert['signal'][0]\n noise_analytic = f_hilbert['noise'][0]\n else:\n sys.stdout.write(\"Computing Hilbert transform...\")\n sys.stdout.flush()\n signal_analytic = fhilbert(signal_bp.data)\n noise_analytic = fhilbert(signal_noise_bp.data)\n\n if fn_hilbert is not None and not os.path.exists(fn_hilbert):\n savemat(fn_hilbert, {\n 'signal': signal_analytic,\n 'noise': noise_analytic\n })\n sys.stdout.write(\" done\\n\")\n signal_envelope = np.abs(signal_analytic)\n noise_envelope = 3.0*np.abs(noise_analytic)\n \n zsignal = signal_envelope - noise_envelope\n zsignal[signal_envelope > noise_envelope] = zscore(zsignal[signal_envelope > noise_envelope])\n zsignal[signal_envelope <= noise_envelope] = 0\n \n thresholded = (zsignal > lowThresholdFactor).astype(int)\n start = np.where(np.diff(thresholded) > 0)[0]\n stop = np.where(np.diff(thresholded) < 0)[0]\n if len(stop) == len(start)-1:\n start = start[:-1]\n if len(stop)-1 == len(start):\n stop = stop[1:]\n # guard against empty arrays before indexing\n if len(start) and len(stop) and start[0] > stop[0]:\n start = start[:-1]\n stop = stop[1:]\n\n if not len(start):\n sys.stderr.write(\"No ripples detected\\n\")\n return\n\n minInterRippleSamples = int(np.round(minInterRippleInterval/signal_bp.dt))\n\n merged = True\n ripples = np.array([start, stop])\n while merged:\n merged = False\n tmpripples = [ripples[:, 0].tolist()]\n for ir, (r1, r2) in 
enumerate(zip(ripples[0, 1:], ripples[1, :-1])):\n if r1-r2 > minInterRippleSamples:\n tmpripples[-1][1] = r2\n tmpripples.append([r1, ripples[1, ir+1]])\n else:\n # extend the current ripple over the next one; they are closer than minInterRippleInterval\n tmpripples[-1][1] = ripples[1, ir+1]\n merged = True\n ripples = np.array(tmpripples).T.copy()\n durations = (ripples[1, :]-ripples[0, :]) * signal_bp.dt\n assert(np.all(durations > 0))\n ripples = ripples[:, durations < maxRippleDuration]\n \n ripplemaxs = np.array([np.max(zsignal[ripple[0]:ripple[1]]) for ripple in ripples.T])\n ripples = ripples[:, ripplemaxs > highThresholdFactor]\n rippleargmaxs = np.array([np.argmax(zsignal[ripple[0]:ripple[1]])+ripple[0] for ripple in ripples.T])\n\n return ripples, rippleargmaxs\n","sub_path":"haussmeister/spectral.py","file_name":"spectral.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}
+{"seq_id":"576802404","text":"class Config(object):\r\n \r\n #predefined size of the image passed to the neural network \r\n IMG_SIZE = (224,224)\r\n\r\n #, \"Database/Masks/00001_mask.jpg\",\"Database/Backgrounds/bg1.jpg\"\r\n\r\n # directory with samples of the same object (in the sense of the same class, but not the specific object we want to detect)\r\n SAMPLE_DIR = \"Database/cars_train\"\r\n \r\n #directory of foreground images = photos of the object we want to spot in images\r\n #FG_DIR = 'Database/Photos/Mycar'\r\n FG_DIR = \"Database/VW/tmp3\"\r\n \r\n #directory of previously generated masks for the photos in FG_DIR\r\n #MASK_DIR = 'Database/Masks'\r\n MASK_DIR = \"Database/VW_Masks2\"\r\n \r\n #directory of background images = random photos to diversify the database\r\n #BG_DIR = 'Database/Backgrounds'\r\n BG_DIR = \"Database/Backgrounds\"\r\n\r\n #size of a batch used when training the network on custom objects \r\n BATCH_SIZE = 32\r\n \r\n NUM_BATCH_IN_EPOCH = 5\r\n \r\n #fraction of the training set that will be split into validation and test datasets \r\n VALIDATION_SPLIT = 0.2\r\n\r\n #random seed used in shuffling the database \r\n SEED = 1234\r\n\r\n INITIAL_EPOCHS = 3\r\n\r\n LR = 0.0001\r\n\r\n FINE_TUNE_EPOCHS = 5\r\n\r\n FINE_TUNE_AT = 100\r\n\r\n FINE_TUNE_LR = LR/10\r\n\r\n #SCALE = 1./127.5 \r\n \r\n #OFFSET = -1 ","sub_path":"Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"23"}