diff --git "a/5099.jsonl" "b/5099.jsonl" new file mode 100644--- /dev/null +++ "b/5099.jsonl" @@ -0,0 +1,88 @@ +{"seq_id":"13347024824","text":"class Solution(object):\n def findSubstring(self, s, words):\n \"\"\"\n :type s: str\n :type words: List[str]\n :rtype: List[int]\n \"\"\"\n dict = collections.defaultdict(int)\n for w in words: dict[w] += 1\n l,res = len(words[0]),[]\n for i in xrange(l):\n dict1 = dict.copy()\n j = i\n while j < len(s) - l + 1:\n dict1[s[j:j+l]] -= 1\n while dict1[s[j:j+l]] < 0:\n dict1[s[i:i+l]] += 1\n i += l\n j += l\n if (j-i) / l == len(words) : res.append(i)\n return res\n","repo_name":"RealHacker/leetcode-solutions","sub_path":"030_substring_with_cancatenation_all_words/substring_concat_all_words.py","file_name":"substring_concat_all_words.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":648,"dataset":"github-code","pt":"27"} +{"seq_id":"23849567519","text":"import os\nfrom pathlib import Path\nfrom .readSGLX import readMeta\n\n#########################################################################\n#########################################################################\n# Functions below are copied from Allen Institute's ecephys_spike_sorting\n# package\n#########################################################################\n#########################################################################\n\n\ndef EphysParams(ap_band_file):\n # assume metadata file is in same directory as binary, Constuct metadata path\n\n # read metadata\n\n metaName, binExt = os.path.splitext(ap_band_file)\n metaFullPath = Path(metaName + \".meta\")\n meta = readMeta(metaFullPath)\n\n if \"imDatPrb_type\" in meta:\n pType = meta[\"imDatPrb_type\"]\n if pType == \"0\":\n probe_type = \"NP1\"\n else:\n probe_type = \"NP\" + pType\n else:\n probe_type = \"3A\" # 3A probe\n\n sample_rate = float(meta[\"imSampRate\"])\n\n num_channels = int(meta[\"nSavedChans\"])\n\n uVPerBit = Chan0_uVPerBit(meta)\n\n return (probe_type, sample_rate, num_channels, uVPerBit)\n\n\n# Return gain for imec channels.\n# Index into these with the original (acquired) channel IDs.\n#\ndef Chan0_uVPerBit(meta):\n # Returns uVPerBit conversion factor for channel 0\n # If all channels have the same gain (usually set that way for\n # 3A and NP1 probes; always true for NP2 probes), can use\n # this value for all channels.\n\n imroList = meta[\"imroTbl\"].split(sep=\")\")\n # One entry for each channel plus header entry,\n # plus a final empty entry following the last ')'\n # channel zero is the 2nd element in the list\n\n if \"imDatPrb_dock\" in meta:\n # NP 2.0; APGain = 80 for all channels\n # voltage range = 1V\n # 14 bit ADC\n uVPerBit = (1e6) * (1.0 / 80) / pow(2, 14)\n else:\n # 3A, 3B1, 3B2 (NP 1.0)\n # voltage range = 1.2V\n # 10 bit ADC\n currList = imroList[1].split(sep=\" \") # 2nd element in list, skipping header\n APgain = float(currList[3])\n uVPerBit = (1e6) * (1.2 / APgain) / pow(2, 10)\n\n return uVPerBit","repo_name":"CSC-UW/ecephys","sub_path":"ecephys/sglx/external/aibs_ecephys_spike_sorting.py","file_name":"aibs_ecephys_spike_sorting.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"40557563627","text":"\"\"\"在一个由 0 和 1 组成的二维矩阵内,找到只包含 1 的最大正方形,并返回其面积。\n示例: 输入:\n1 0 1 0 0\n1 0 1 1 1\n1 1 1 1 1\n1 0 0 1 0\n输出: 4\n\"\"\"\n\n\nclass Solution:\n def maximalSquare(self, matrix):\n length = 0\n if len(matrix) == 0 or len(matrix[0]) 
== 0:\n return 0\n dp = [[0] * len(matrix[0]) for _ in range(len(matrix))] # dp[i][j]表示以(i,j)处作为正方形右下角,由1组成的最大正方形的边长\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == \"1\": # 元素为\"1\"时\n if i == 0 or j == 0: # 如果在边界处,dp[i][j] = 1表示以该处作为右下角的最大边长为1\n dp[i][j] = 1\n else:\n dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1 # 取三者中的较小值 + 1\n length = max(length, dp[i][j]) # 记录过程中的最大边长\n return length * length\n","repo_name":"cp4011/Algorithms","sub_path":"热题/221_最大正方形.py","file_name":"221_最大正方形.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"34261720929","text":"class Solution(object):\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n leng = len(nums)\n counter = collections.defaultdict(int)\n\n def devide(array):\n if len(array)==1:\n return array[0]\n mid = len(array)//2\n\n L = devide(array[:mid])\n \n R = devide(array[mid:])\n \n counter[L]+=1\n counter[R]+=1\n\n return [L,R][array.count(R)>mid]\n \n result = devide(nums)\n \n return result","repo_name":"Seochangh2/algorithm_python","sub_path":"leetcode/0169-majority-element/0169-majority-element.py","file_name":"0169-majority-element.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"26048481115","text":"#! /usr/bin/env python3\n\ndef get_smallest_multiple(interval = [1,20]):\n \"\"\"\n Project Euler Problem 5\n\n 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n -----------\n Smallest positive number evenly divisible by all numbers in [a,b] is the multiple of the highest number\n of prime factors for all prime factors in the interval.\n\n e.g.,\n\n For [1,5] the smallest multiple is: 2 * 3 * 2 * 5 = 60\n prime factors:\n 2 = 2\n 3 = 3\n 4 = 2 * 2\n 5 = 5\n\n Therefore, 2 * 3 * 2 * 5 = 2 * 3 * 2 * 5, not 2 * 3 * 2 * 2 * 5\n \"\"\"\n low = interval[0]\n high = interval[1]\n\n factors = {}\n for i in range(low, high+1):\n prime_factors = get_prime_factors(i)\n prime_dict = {}\n for prime in prime_factors:\n if prime_dict.get(prime):\n prime_dict[prime] += 1\n else:\n prime_dict[prime] = 1\n\n primes = prime_dict.keys()\n for prime in primes:\n if factors.get(prime):\n if factors[prime] < prime_dict[prime]:\n factors[prime] = prime_dict[prime]\n else:\n factors[prime] = prime_dict[prime]\n\n smallest_mult = 1\n keys = factors.keys()\n for prime in keys:\n smallest_mult *= (prime ** factors[prime])\n \n return smallest_mult\n\n\ndef get_prime_factors(num, prime_set_size = 1):\n \"\"\"\n Returns the prime factors of a number.\n\n Input:\n Integer > 1\n Output:\n List representing the prime factors of the number.\n \"\"\"\n prime_factors = []\n prime_begin = 2\n while num > 1:\n primes = get_primes(prime_begin, prime_set_size)\n index = 0\n while(index < len(primes)):\n if (num % primes[index]) == 0:\n prime_factors.append(primes[index])\n num = num / primes[index]\n else:\n index += 1\n prime_begin = primes[index - 1] + 1\n \n return prime_factors\n\n\ndef get_primes(prime_begin, prime_set_size):\n \"\"\"\n Returns a list of primes of length prime_set_size, starting at prime_begin.\n \"\"\"\n primes = []\n if is_prime(prime_begin) and prime_set_size > 0:\n primes.append(prime_begin)\n \n 
while len(primes) < prime_set_size:\n prime_begin = find_next_prime(prime_begin)\n primes.append(prime_begin)\n \n return primes\n\n\ndef find_next_prime(begin = 1):\n \"\"\"\n Returns the next prime after begin.\n \"\"\"\n begin += 1\n if begin == 2:\n return begin\n \n if (begin % 2) == 0:\n begin += 1\n \n while True:\n if is_prime(begin):\n return begin\n else:\n begin += 2\n\n\ndef is_prime(num):\n \"\"\"\n Returns True if num is prime, false otherwise.\n \"\"\"\n if num < 2:\n return False\n \n i = 2\n while i <= int(num ** 0.5):\n if (num % i) == 0:\n return False\n else:\n i += 1\n \n return True\n\n\nPROBLEM_STATEMENT = \"Project Euler Problem 5\\n\\n2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\\n-----------\\nSmallest positive number evenly divisible by all numbers in [a,b] is the multiple of the highest number\\nof prime factors for all prime factors in the interval.\\n\\ne.g.,\\n\\nFor [1,5] the smallest multiple is: 2 * 3 * 2 * 5 = 60\\nprime factors:\\n2 = 2\\n3 = 3\\n4 = 2 * 2\\n5 = 5\\n\\nTherefore, 2 * 3 * 2 * 5 = 2 * 3 * 2 * 5, not 2 * 3 * 2 * 2 * 5\"\n\n\nprint(PROBLEM_STATEMENT)\nprint(\"\\n\\n\" + str(get_smallest_multiple()))\n","repo_name":"vannevar-morgan/project_euler","sub_path":"problem_5/problem_5.py","file_name":"problem_5.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22702314900","text":"'''\n\n Online Python Compiler.\n Code, Compile, Run and Debug python program online.\nWrite your code in this editor and press \"Run\" button to execute it.\n\n'''\n\nn=input()\nc=0\nmax1=0\nfor i in range(len(n)-1):\n if n[i]==n[i+1]:\n c+=1\n if c>max1:\n max1=c\n else:\n c=0\nprint(max1+1)","repo_name":"Arjun2469/CP-CSES-Problem-Set-Solutions-","sub_path":"Introductory problems/Repetition.py","file_name":"Repetition.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35880587240","text":"import os\nfrom PIL import Image\nimport numpy as np\nimport json\nimport matplotlib.pyplot as plt\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.engine.predictor_glip import GLIPDemo\n\ndef load_img(file_name):\n pil_image = Image.open(file_name).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image\n\ndef load_model(config_file, weight_file):\n cfg.local_rank = 0\n cfg.num_gpus = 1\n cfg.merge_from_file(config_file)\n cfg.merge_from_list([\"MODEL.WEIGHT\", weight_file])\n cfg.merge_from_list([\"MODEL.DEVICE\", \"cuda\"])\n\n glip_demo = GLIPDemo(\n cfg,\n min_image_size=800,\n confidence_threshold=0.7,\n show_mask_heatmaps=False\n )\n return glip_demo\n\ndef draw_rectangle(img, x0, y0, x1, y1):\n color = np.random.rand(3) * 255\n img = img.astype(np.float64)\n img[y0:y1, x0-1:x0+2, :3] = color\n img[y0:y1, x1-1:x1+2, :3] = color\n img[y0-1:y0+2, x0:x1, :3] = color\n img[y1-1:y1+2, x0:x1, :3] = color\n img[y0:y1, x0:x1, :3] /= 2\n img[y0:y1, x0:x1, :3] += color * 0.5\n img = img.astype(np.uint8)\n return img\n\ndef save_individual_img(image, bbox, labels, n_cat, pred_dir, view_id):\n n = len(labels)\n result_list = [np.copy(image) for i in range(n_cat)]\n for i in range(n):\n l = labels[i] - 1\n x0, y0, x1, y1 = bbox[i]\n x0, y0, x1, y1 = int(x0), 
int(y0), int(x1), int(y1)\n result_list[l] = draw_rectangle(result_list[l], x0, y0, x1, y1)\n for i in range(n_cat):\n plt.imsave(\"%s/%d_%d.png\" % (pred_dir, view_id, i), result_list[i][:, :, [2, 1, 0]])\n\ndef glip_inference(glip_demo, save_dir, part_names, num_views=10,\n save_pred_img=True, save_individual_img=False, save_pred_json=False):\n pred_dir = os.path.join(save_dir, \"glip_pred\")\n os.makedirs(pred_dir, exist_ok = True)\n predictions = []\n for i in range(num_views):\n image = load_img(\"%s/rendered_img/%d.png\" % (save_dir, i))\n result, top_predictions = glip_demo.run_on_web_image(image, part_names, 0.5) \n if save_pred_img: \n plt.imsave(\"%s/%d.png\" % (pred_dir, i), result[:, :, [2, 1, 0]])\n bbox = top_predictions.bbox.cpu().numpy()\n score = top_predictions.get_field(\"scores\").cpu().numpy()\n labels = top_predictions.get_field(\"labels\").cpu().numpy()\n if save_individual_img:\n save_individual_img(image, bbox, labels, len(part_names), pred_dir, i)\n for j in range(len(bbox)):\n x1, y1, x2, y2 = bbox[j].tolist()\n predictions.append({\"image_id\" : i,\n \"category_id\" : labels[j].item(),\n \"bbox\" : [x1,y1, x2-x1, y2-y1],\n \"score\" : score[j].item()})\n if save_pred_json:\n with open(\"%s/pred.json\" % pred_dir, \"w\") as outfile:\n json.dump(predictions, outfile)\n return predictions\n","repo_name":"ricklentz/PartSLIP","sub_path":"src/glip_inference.py","file_name":"glip_inference.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"15068172803","text":"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\ndef prepro_mask(mask: np.ndarray, area_threshold: int=32) -> np.ndarray:\r\n mask_shape = mask.shape\r\n if len(mask_shape) != 2:\r\n mask = mask[..., 0]\r\n mask = mask.astype(\"uint8\")\r\n mask = _del_small_connection(mask, area_threshold)\r\n class_num = len(np.unique(mask))\r\n if class_num != 2:\r\n _, mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY |\r\n cv2.THRESH_OTSU)\r\n mask = np.clip(mask, 0, 1).astype(\"uint8\") # 0-255 / 0-1 -> 0-1\r\n return mask\r\n\r\n\r\ndef calc_distance(p1: np.ndarray, p2: np.ndarray) -> float:\r\n return float(np.sqrt(np.sum(np.power((p1[0] - p2[0]), 2))))\r\n\r\n\r\ndef _del_small_connection(pred: np.ndarray, threshold: int=32) -> np.ndarray:\r\n result = np.zeros_like(pred)\r\n contours, reals = cv2.findContours(pred, cv2.RETR_TREE,\r\n cv2.CHAIN_APPROX_NONE)\r\n for contour, real in zip(contours, reals[0]):\r\n if real[-1] == -1:\r\n if cv2.contourArea(contour) > threshold:\r\n cv2.fillPoly(result, [contour], (1))\r\n else:\r\n cv2.fillPoly(result, [contour], (0))\r\n return 
result.astype(\"uint8\")\r\n","repo_name":"feiva/PaddleRS","sub_path":"paddlers/utils/postprocs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"74627789191","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.forms import Form\nfrom django.forms import fields\nfrom django.forms import widgets\nfrom app01 import models\n\nclass TestForm(Form):\n user = fields.CharField(\n required=True,\n max_length=12,\n min_length=3,\n error_messages={}, # 错误提示\n widget=widgets.TextInput(attrs={'name':123}), #定制生成html插件\n label='用户名', # 前端的label {{obj.user.label}}\n initial='alex', #初始值\n help_text='提示信息', # 提示信息\n show_hidden_initial='True', # 再生成一个框\n # validators=[], # 自定义验证\n localize=True, # 本地化\n disabled=False, # 是否可编辑\n label_suffix='aaa', # label后缀\n\n )\n age = fields.IntegerField(\n label='年龄',\n max_value=12,\n min_value=5,\n error_messages={\n \"max_value\": '太大了',\n \"min_value\": '太小了',\n\n }\n )\n email = fields.EmailField(\n label='邮箱',\n )\n img = fields.FileField(\n label='文件'\n )\n city = fields.ChoiceField(\n label='城市',\n choices=[\n (1, \"北京\"),\n (2, \"上海\"),\n (3, \"广州\"),\n ], # 后台拿到的是字符串1,2,3\n initial=2, # 默认值也可以在函数中传字典\n )\n # 字符串转换为数字\n # city = fields.TypedChoiceField(\n # coerce=lambda x: int(x), # 这个做字符串转换的\n # label='城市',\n # choices=[\n # (1, \"北京\"),\n # (2, \"上海\"),\n # (3, \"广州\"),\n # ], # 后台拿到的是字符串1,2,3\n # initial=2, # 默认值也可以在函数中传字典\n # )\n hobby = fields.MultipleChoiceField(\n choices=[\n (1, '篮球'),\n (2, '足球'),\n (3, '乒乓球'),\n ], # 后台拿到的是字符串1,2,3,如果需要使用数字,需使用TypedChoiceField\n initial=[1,2],\n )\n\n xdb = fields.ChoiceField(\n widget=widgets.Select(choices=([\n (1, '篮球'),\n (2, '足球'),\n (3, '乒乓球'),\n ])),\n )\n\n # 多选\n xdb1 = fields.MultipleChoiceField(\n widget=widgets.SelectMultiple(attrs={'class': 'c1'}),\n choices=[\n (1, '篮球'),\n (2, '足球'),\n (3, '乒乓球'),\n ],\n )\n\n # checkbox 单选\n xbd2 = fields.CharField(\n widget=widgets.CheckboxInput()\n )\n\n # checkbox 多选\n xbd3 = fields.MultipleChoiceField(\n initial=[2,],\n choices=((1,'北京'),(2,'上海'),(3,'深圳')),\n widget=widgets.CheckboxSelectMultiple()\n )\n\n # radio\n xdb4 = fields.ChoiceField(\n initial=2,\n choices=((1, '北京'), (2, '上海'), (3, '深圳')),\n widget=widgets.RadioSelect(),\n\n )\n\ndef test(request):\n if request.method == 'GET':\n obj = TestForm()\n return render(request, 'test.html', {\"obj\": obj})\n else:\n obj = TestForm(request.POST, request.FILES)\n obj.is_valid()\n return render(request, 'test.html', {\"obj\": obj})\nfrom django.forms.models import ModelChoiceField\nclass XqForm(Form):\n price = fields.IntegerField()\n user_id = fields.IntegerField(\n widget=widgets.Select(\n # values_list返回的是元组\n # choices=models.UserInfo.objects.values_list('id', 'username'),\n )\n )\n\n # 不推荐\n user_id2 = ModelChoiceField(\n queryset=models.UserInfo.objects.all(),\n to_field_name='username'\n )\n\n def __init__(self, *args, **kwargs):\n # 实时更新,super会拷贝所有字段来赋值\n super(XqForm, self).__init__(*args, **kwargs)\n # 下面这行必须写在super下面\n self.fields[\"user_id\"].widget.choices = models.UserInfo.objects.values_list('id', 'username')\n\n\ndef xq(request):\n obj = XqForm()\n return render(request, 'xq.html', {'obj':obj})\n\nfrom django.core.exceptions import ValidationError, NON_FIELD_ERRORS\nclass AjaxForm(Form):\n username = fields.CharField()\n user_id = fields.IntegerField(\n widget=widgets.Select(choices=[(0,'alex'),(1,'root'),(2,'qwer')])\n )\n\n # 自定义方法 
clean_字段名\n # 必须有返回值 self.cleaned_data['username']\n # 如果出错,raise ValidationError('fieldname')\n def clean_username(self):\n v = self.cleaned_data['username']\n if models.UserInfo.objects.filter(username=v).count():\n # 这里是看源码得到的\n raise ValidationError('用户名已存在')\n\n # return self.cleaned_data['price']\n return v\n\n def clean(self):\n # 联合错误\n valuedict = self.cleaned_data\n v1 = valuedict.get('username')\n v2 = valuedict.get('user_id')\n if v1 == 'root' and v2 == 1:\n raise ValidationError('整体错误信息')\n return self.cleaned_data\n\nimport json\ndef ajax(request):\n if request.method == 'GET':\n obj = AjaxForm()\n return render(request, 'ajax.html', {'obj':obj})\n else:\n ret = {'status': '没钱','msg':None}\n obj = AjaxForm(request.POST)\n if obj.is_valid():\n data = obj.cleaned_data\n ret[\"status\"] = '钱'\n return HttpResponse(json.dumps(ret))\n else:\n \"\"\"\n {\n __all__: [], 整体错误信息,NON_FIELD_ERRORS就是这个错误\n username: [], 个体错误信息\n ...\n }\n \"\"\"\n ret['msg'] = obj.errors\n # from django.forms.utils import ErrorDict\n # print(obj.errors.as_json())\n # print(obj.errors.as_ul())\n # print(obj.errors.as_data())\n return HttpResponse(json.dumps(ret))\n\n\n\n\n\n\n","repo_name":"kangle0224/forms","sub_path":"app02/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13545611206","text":"import sys, os\nimport torch\nimport argparse\nimport importlib\nimport setproctitle, socket, uuid\nimport datetime\n\nfrom datasets import get_dataset\nfrom models import get_model\nfrom utils.train import train\nfrom utils.conf import *\nfrom utils.args import *\nfrom utils.checkpoint import create_load_ckpt\n\nconf_path = os.getcwd() + \".\"\nsys.path.append(conf_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Reasoning Shortcut', allow_abbrev=False)\n parser.add_argument('--model', type=str,default='cext', help='Model for inference.', choices=get_all_models())\n parser.add_argument('--load_best_args', action='store_true', help='Loads the best arguments for each method, '\n 'dataset and memory buffer.') \n \n torch.set_num_threads(4)\n\n add_management_args(parser)\n args = parser.parse_known_args()[0]\n mod = importlib.import_module('models.' 
+ args.model)\n\n # LOAD THE PARSER SPECIFIC OF THE MODEL, WITH ITS SPECIFICS\n get_parser = getattr(mod, 'get_parser') \n parser = get_parser()\n parser.add_argument('--project', type=str, default=\"Reasoning-Shortcuts\", help='wandb project')\n\n args = parser.parse_args() # this is the return\n\n # load args related to seed etc.\n set_random_seed(args.seed) if args.seed is not None else set_random_seed(42)\n \n return args\n\ndef main(args):\n \n # Add uuid, timestamp and hostname for logging\n args.conf_jobnum = str(uuid.uuid4())\n args.conf_timestamp = str(datetime.datetime.now())\n args.conf_host = socket.gethostname()\n dataset = get_dataset(args)\n\n # Load dataset, model, loss, and optimizer\n encoder, decoder = dataset.get_backbone()\n n_images, c_split = dataset.get_split()\n model = get_model(args, encoder, decoder, n_images, c_split) \n loss = model.get_loss(args)\n model.start_optim(args)\n\n # SAVE A BASE MODEL OR LOAD IT, LOAD A CHECKPOINT IF PROVIDED\n model = create_load_ckpt(model, args)\n\n # set job name\n setproctitle.setproctitle('{}_{}_{}'.format( args.model, args.buffer_size if 'buffer_size' in args else 0, args.dataset))\n\n # perform posthoc evaluation/ cl training/ joint training\n print(' Chosen device:', model.device)\n if args.posthoc: pass\n else: train(model, dataset, loss, args)\n\n print('\\n ### Closing ###')\n\nif __name__ == '__main__':\n args = parse_args()\n \n print(args)\n \n main(args)","repo_name":"ema-marconato/reasoning-shortcuts","sub_path":"XOR_MNIST/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"7971065020","text":"import streamlit as st\nimport cv2 \nimport face_recognition\nimport os\n\n##new\n\n# from deepface import DeepFace\nfrom fer import FER\n\n##new\n\nclass FaceDetector:\n def __init__(self):\n # self.WINDOW = st.image([])\n pass\n\n def face_detector(self, img,WINDOW):\n \n RGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n boxes = face_recognition.face_locations(RGB, model='hog') \n flag = False\n for (y1, x2, y2, x1) in boxes:\n # cropped_face = RGB[y1:y2, x1:x2]\n # analyze = DeepFace.analyze(img,actions=['emotions'])\n # emotion = analyze[\"dominanmt_emotion\"]\n emo_detector = FER(mtcnn=True)\n captured_emotions = emo_detector.detect_emotions(RGB)\n dominant_emotion, emotion_score = emo_detector.top_emotion(RGB)\n\n flag = True\n cv2.rectangle(RGB, (x1, y1), (x2, y2), (255, 0, 0), 2)\n label_position = (x1-3,y1-3)\n cv2.putText(RGB,dominant_emotion,label_position,cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,0),3)\n # face = cv2.resize(RGB, (200, 200))\n WINDOW.image(RGB)\n if flag:\n return RGB,boxes\n else:\n return None,None\n","repo_name":"kkkumar2/Face_unlock","sub_path":"STREAMLIT/FACE_VERIFICATION/face_detector.py","file_name":"face_detector.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39839473499","text":"#! /usr/bin/env python\n# Author: Kapil Thadani (kapil@cs.columbia.edu)\n\nfrom __future__ import division, with_statement\n\n\ndef avg(values):\n \"\"\"Return the mean of a list or generator. 
The numpy methods break when\n generators are supplied.\n \"\"\"\n total, n = 0.0, 0\n for x in values:\n total += x\n n += 1\n return float(total) / n\n","repo_name":"outerproduct/dissertation","sub_path":"utils/avg.py","file_name":"avg.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"41862375106","text":"from typing import Optional\nfrom services.Structures import ListNode\n\n\nclass Solution:\n\n @staticmethod\n def list_node_to_int(list_node: Optional[ListNode]) -> int:\n if not list_node:\n return 0\n result = [str(list_node.val)]\n while list_node.next:\n result.append(str(list_node.next.val))\n list_node = list_node.next\n\n return \"\".join(reversed(result))\n\n def add_two_numbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n\n res = int(self.list_node_to_int(l1))+int(self.list_node_to_int(l2))\n\n node = None\n next_element = None\n for i in [i for i in str(res)]:\n node = ListNode(int(i), next_element)\n next_element = node\n\n return res\n return node # for leet code\n\n\ndef test():\n assert Solution().add_two_numbers(ListNode.create_new_list([2, 4, 3]), ListNode.create_new_list([5, 6, 4])) == 807\n assert Solution().add_two_numbers(ListNode.create_new_list([9, 9, 9, 9, 9, 9, 9]),\n ListNode.create_new_list([9, 9, 9, 9])) == 10009998\n\n\n\n","repo_name":"vanpipka/leetCode","sub_path":"questions/AddTwoNumbers.py","file_name":"AddTwoNumbers.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74065281671","text":"from typing import List\n\ndef find_pivot(nums: List[int]) -> int:\n\t\"\"\"In a sorted list rotated clockwise by pivot, return index of pivot\"\"\"\n\tif not nums:\n\t\treturn -1\n\n\t# pivot is 0 or length of list is 1\n\tif nums[0] <= nums[-1]:\n\t\treturn 0\n\n\t# binary search for the pivot condition is met\n\tbeg, end = 0, len(nums)\n\twhile end > beg:\n\t\tmid = (beg + end) // 2\n\n\t\tif nums[mid - 1] > nums[mid]:\n\t\t\treturn mid\n\t\telif nums[mid] > nums[0]: # look right\n\t\t\tbeg = mid\n\t\telif nums[mid] < nums[0]: # look left\n\t\t\tend = mid\n\t\telse:\n\t\t\tprint('list contained a duplicate when it shouldn\\'t have. Exiting...')\n\t\t\treturn -2\n\n\tprint('list was not actually pivoted. 
Exiting...')\n\treturn -3\n\n\ndef rotated_binary_search(nums: List[int], target: int, pivot: int) -> int:\n\t\"\"\"\n\tSearch a sorted list rotated clockwise by pivot, return index of target\n\n\tpivot -- number of indices rotated, (int in range [0, len(nums)))\n\t\"\"\"\n\tlength = len(nums)\n\tbeg, end = pivot, length + pivot\n\n\twhile end > beg:\n\t\tmid = (beg + end) // 2\n\t\tmid_rot = mid % length # rotated index of mid\n\n\t\tif nums[mid_rot] > target:\n\t\t\tend = mid\n\t\telif nums[mid_rot] < target:\n\t\t\tbeg = mid + 1\n\t\telse:\n\t\t\treturn mid_rot\n\n\treturn -1\n\n\ndef search(nums: List[int], target: int):\n\treturn rotated_binary_search(nums, target, find_pivot(nums))\n\n\ndef main():\n\ttest_cases = [\n\t\t[[10, 2, 4, 6, 8], 8],\n\t\t[[10, 2, 4, 6, 8], 9],\n\t\t[[], 0],\n\t\t[[0], 0],\n\t\t[[1, 2, 3, 4, 5], 4],\n\t\t[[3, 4, 5, 1, 2], 4],\n\t\t[[3, 1], 0],\n\t\t[[3, 1], 1],\n\t\t[[3, 5, 1], 0],\n\t\t[[1, 3], 3],\n\t]\n\n\tfor tc in test_cases:\n\t\tprint('Input: ' + str(tc))\n\t\tprint('Output: ' + str(search(tc[0], tc[1])))\n\t\tprint()\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"Alee4738/leetcode","sub_path":"src/search_rotated_array.py","file_name":"search_rotated_array.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25549093373","text":"# 8.1 создать словарь дней недели\n\nweek = {1:'Monday', 2: 'Thuesday', 3: 'Wendnesday', 4:'Thursday', 5:'Friday'}\nweek[6] ='Saturday'\nweek[7] = 'Sunday'\nprint(week)\n\n# 8.2 Описание кота\ncats = {\n 'cat_1' :{\n 'name' : \"Mouse\",\n 'sex' : 'female',\n 'age' : 5,\n 'weight' : 2.5\n },\n 'cat_2' :{\n 'name' : \"Majesties\",\n 'sex' : 'male',\n 'age' : 3,\n 'weight' : 4\n }\n}\nprint(cats)\nprint(cats['cat_1'])\n#print(f'cat 1, name : {cats['cat_1']['name']}, cat 2 name: {cats['cat_2']['name']}')\n\n# 8.3.1 Считать строку и посчитать сколько каких букв в ней содержится\ns = input('enter string:')\nres ={}\nfor item in s:\n if res.get(item):\n res[item] += 1\n else:\n res[item] = 1\nprint(res)\n\n# 8.3.2 Считать строку и посчитать сколько каких букв в ней содержится\ns = input('enter string:')\nres ={}\nfor item in s:\n if not res.get(item):\n res[item] = s.count(item)\nprint(res)\n\n\n\n\n","repo_name":"OlgaAseieva/PYTHON","sub_path":"start_python_hometask/start_ht/task_8.py","file_name":"task_8.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72386692873","text":"#!/usr/bin/env python\n\n# This script should be run only if you need to update all of the bundles\n# It was used after editing foo-bundle-v0.1.0/manifests/foo.csv.yaml in 9b74c989471f146296d55c49faa10eba9feebfb1\n# It was specifically designed to propogate those changes to all other bundles in a8389232dcface7555afdf8bbd2c95dd0bc9f95d\n# You may need to change what gets popped off of the source template at some point if you need to update other bundle data en masse\n\nimport os\nimport yaml\nimport logging\nfrom base64 import b64encode\nfrom pathlib import Path\nfrom typing import Any\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)\n\noperators = ('foo', 'bar', 'baz')\nSCRIPT_DIR = Path(os.path.dirname(os.path.abspath(__file__)))\nbundle_dir = SCRIPT_DIR.joinpath('bundles')\ntemplate_bundle = bundle_dir.joinpath('foo').joinpath('foo-bundle-v0.1.0')\ntemplate_csv = 
template_bundle.joinpath('manifests').joinpath('foo.csv.yaml')\nwith open(template_csv) as f:\n template_data = yaml.safe_load(f)\ntemplate_data['metadata'].pop('name')\ntemplate_data['spec'].pop('customresourcedefinitions')\ntemplate_data['spec'].pop('version')\ntry:\n template_data['spec'].pop('replaces')\nexcept KeyError:\n pass\ntry:\n template_data['spec'].pop('skips')\nexcept KeyError:\n pass\ntemplate_data['spec'].pop('relatedImages')\ntemplate_data['spec'].pop('icon')\nlogging.info(f'Template: {template_data}')\n\n\ndef merge(a, b, path=None):\n \"merges b into a\"\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n else:\n a[key] = b[key]\n return a\n\n\ndef convert_bundle(src: Path) -> str:\n def convert_refs(src: Any) -> Any:\n if isinstance(src, dict):\n ret = {}\n for k, v in src.items():\n ret[k] = convert_refs(v)\n elif isinstance(src, list):\n ret = []\n for v in src:\n ret.append(convert_refs(v))\n elif isinstance(src, str):\n return src.replace('0.1.0', version).replace('foo', name).replace('Foo', name.title())\n elif isinstance(src, bool) or isinstance(src, int):\n return src\n else:\n raise NotImplementedError(f'no case for object of type {type(src)}')\n return ret\n\n csv_path = list(src.joinpath('manifests').glob('*.csv.yaml'))[0]\n with open(csv_path) as f:\n bundle_data = yaml.safe_load(f)\n version = bundle_data['spec']['version']\n name = bundle_data['metadata']['name'].split('.')[0]\n\n if src == template_bundle:\n new_bundle_data = bundle_data\n else:\n # Temporarily inherit these from the template\n bundle_data['spec'].pop('installModes')\n new_bundle_data = merge(bundle_data, convert_refs(template_data))\n icon_path = src.parent.joinpath(f'{name}.svg')\n with open(icon_path) as f:\n icon_data = f.read()\n b64_icon_data = b64encode(icon_data.encode()).decode()\n new_bundle_data['spec']['icon'][0]['base64data'] = b64_icon_data\n\n with open(csv_path, 'w') as f:\n f.write(yaml.dump(new_bundle_data, explicit_start=True))\n\n\nfor operator in operators:\n operator_dir = bundle_dir.joinpath(operator)\n for bundle in operator_dir.glob(f'{operator}-bundle*'):\n convert_bundle(bundle)\n","repo_name":"openshift/oc-mirror","sub_path":"test/operator/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"27"} +{"seq_id":"23427014248","text":"import tensorflow as tf \n\n\ndef linear_dense_function(features, labels, mode, params):\n '''\n assumes features are N x 75 x 75 x 2\n 2 channels: band 1 and band 2\n '''\n # ADAM hyper parameters\n learning_rate = params['learning_rate']\n moment_decay = params['moment_decay']\n moment2_decay = params['moment2_decay']\n\n '''\n single-layer preceptron\n '''\n # data\n X = tf.cast(features['x'], dtype=tf.float32)\n '''\n fully-connected layer\n [N, 75, 75, 2] -> [N, 2]\n '''\n X = tf.reshape(X, [-1, 75 * 75 * 2])\n norm = tf.layers.batch_normalization(\n name='norm',\n inputs=X,\n axis=1,\n training=(mode==tf.estimator.ModeKeys.TRAIN))\n W1 = tf.get_variable(name='W1',\n shape=[75 * 75 * 2, 2],\n initializer=tf.contrib.layers.xavier_initializer())\n b1 = tf.get_variable(name='b1',\n shape=[2, ],\n initializer=tf.contrib.layers.xavier_initializer())\n logits = tf.matmul(name='logits', 
a=norm, b=W1) + b1\n predictions = tf.nn.softmax(logits)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n one_hot_labels = tf.one_hot(indices=labels, depth=2)\n loss = tf.losses.softmax_cross_entropy(one_hot_labels, logits)\n adam = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=moment_decay,\n beta2=moment2_decay)\n norm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(norm_update_ops):\n update_op = adam.minimize(loss=loss,\n global_step=tf.train.get_global_step())\n return(tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=update_op))\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n return(tf.estimator.EstimatorSpec(mode=mode,\n predictions=predictions))\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n one_hot_labels = tf.one_hot(indices=labels, depth=2)\n loss = tf.losses.log_loss(one_hot_labels, predictions)\n predicted_labels = tf.argmax(input=predictions, axis=1)\n metrics = {'false_negatives': tf.metrics.false_negatives(\n labels=labels,\n predictions=predicted_labels),\n 'true_positives': tf.metrics.true_positives(\n labels=labels,\n predictions=predicted_labels),\n 'false_positives': tf.metrics.false_positives(\n labels=labels,\n predictions=predicted_labels),\n 'precision': tf.metrics.precision(\n labels=labels,\n predictions=predicted_labels),\n 'recall': tf.metrics.recall(\n labels=labels,\n predictions=predicted_labels),\n 'AUC': tf.metrics.auc(\n labels=labels,\n predictions=predicted_labels),\n 'mean_per_class_acc': tf.metrics.mean_per_class_accuracy(\n labels=labels,\n predictions=predicted_labels,\n num_classes=2),\n 'accuracy': tf.metrics.accuracy(\n labels=labels,\n predictions=predicted_labels)}\n return(tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=metrics))\n","repo_name":"mrkzwng/alexnet-squeezenet-kaggle-comp","sub_path":"linear_dense.py","file_name":"linear_dense.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35133953527","text":"# Para agregar lista de comandos con descripcion al chat\nimport telebot\nfrom decouple import config # Para leer las variables del archivo .env\nfrom src.botones_inline import *\nfrom src.botones_respuesta import *\nfrom src.eliminar_modificar import *\nfrom src.nekos import *\nfrom src.rand_nums import *\n\n\nBOT_TOKEN = config('BOT_TOKEN')\nbot = telebot.TeleBot(BOT_TOKEN)\n\n\n@bot.message_handler(commands=[\"start\"])\ndef cmd_start(message):\n bot.reply_to(\n message, \"Hola soy un bot de prueba para el taller de Creación de Bots con Python de PROTECO y soy tu asistente de telegram.\")\n\n\n@bot.message_handler(commands=['ayuda', 'help'])\ndef cmd_help(message):\n bot.reply_to(message, \"Dejame ayudarte a interactuar conmigo:\\n/start: Inicia la interacción con el bot.\\n/help: Muestra un menu de ayuda con comandos y su función\")\n\n\n# Envío de archivos multimedia\n@bot.message_handler(commands=['neko'])\ndef call_cmd_neko(message):\n cmd_neko(bot, message)\n\n\n# Edición/Eliminacion de mensajes\n@bot.message_handler(commands=['eliminar'])\ndef call_cmd_delete_msg(message):\n cmd_delete_msg(bot, message)\n\n\n# Botones de respuesta\n@bot.message_handler(commands=['registro'])\ndef cmd_register(message):\n markup = ForceReply() # Instanciando un objeto de la clase ForceReplay\n # guardamos el mensaje devuelto por el usuario en una variable\n ans = bot.send_message(\n message.chat.id, \"¿Cúal es tu nombre?\", reply_markup=markup)\n\n # Recibe 
como primer argumento un mensaje y como segundo argumento la funcion que va a gestionar dicho mensaje\n bot.register_next_step_handler(ans, get_age)\n\n\ndef get_age(message):\n usuarios[message.chat.id] = {}\n # message.text devuelve el nombre del usuario\n usuarios[message.chat.id][\"nombre\"] = message.text\n markup = ForceReply()\n ans = bot.send_message(\n message.chat.id, f'Hola {usuarios[message.chat.id][\"nombre\"]} ¿Cuál es tu edad?', reply_markup=markup)\n bot.register_next_step_handler(ans, get_gender)\n\n\ndef get_gender(message):\n if not message.text.isdigit():\n markup = ForceReply()\n ans = bot.send_message(\n message.chat.id, \"ERROR. Debes ingresar una edad valida.\\n¿Cúal es tu edad?\", reply_markup=markup)\n bot.register_next_step_handler(ans, get_gender)\n else:\n usuarios[message.chat.id][\"edad\"] = int(message.text)\n # Configuracion de la botonera\n markup = ReplyKeyboardMarkup(\n one_time_keyboard=True,\n input_field_placeholder=\"Selecciona una opción\",\n resize_keyboard=True)\n markup.add(\"Hombre\", \"Mujer\", \"Otro\", \"Prefiero no especificar\")\n ans = bot.send_message(\n message.chat.id, \"¿Cúal es tu género?\", reply_markup=markup)\n bot.register_next_step_handler(ans, save_data)\n\n\ndef save_data(message):\n if not message.text in [\"Hombre\", \"Mujer\", \"Otro\", \"Prefiero no especificar\"]:\n ans = bot.send_message(\n message.chat.id, \"ERROR. Debes seleccionar una opción valida.\\n¿Cúal es tu geénero?\")\n else:\n usuarios[message.chat.id][\"genero\"] = message.text\n data = \"Datos ingresados:\\n\"\n data += f\"Nombre: {usuarios[message.chat.id]['nombre']}\\n\"\n data += f\"Edad : {usuarios[message.chat.id]['edad']}\\n\"\n data += f\"Género: {usuarios[message.chat.id]['genero']}\\n\"\n\n markup = ReplyKeyboardRemove()\n # parse_mode indica el tipo de sintaxis con el que va a interpretar las cadenas de texto, acepta markdown y html\n bot.send_message(message.chat.id, data,\n parse_mode=\"html\", reply_markup=markup)\n\n # Aquí va el código para almacenar los datos en tu base de datos\n\n # Eliminar los datos en menoría\n del usuarios[message.chat.id]\n\n# Adivina el numero\n\n\n@bot.message_handler(commands=['jugar'])\ndef cmd_game(message):\n num = randint(1, 10) # generamos un numero aleatorio entre 1 y 10\n cid = message.chat.id # guardamos el chat id\n usuarios[cid] = num\n\n buttons = ReplyKeyboardMarkup(\n input_field_placeholder=\"Selecciona una opcion...\",\n row_width=5)\n buttons.add('1', '2', '3', '4', '5', '6', '7', '8', '9', '10')\n ans = bot.send_message(\n cid, \"Adivina el número entre 1 y 10\", reply_markup=buttons)\n bot.register_next_step_handler(ans, verify_num)\n\n\ndef verify_num(message):\n cid = message.chat.id\n if not message.text.isdigit():\n ans = bot.send_message(cid, \"ERROR. Introduce un número\")\n bot.register_next_step_handler(ans, verify_num)\n else:\n n = int(message.text)\n if n < 0 or n > 10:\n ans = bot.send_message(cid, \"ERROR. Número fuera del rango\")\n bot.register_next_step_handler(ans, verify_num)\n else:\n if n == usuarios[cid]:\n markup = ReplyKeyboardRemove()\n bot.reply_to(message, \"Felicidades! 
Has ganado!\",\n reply_markup=markup)\n del usuarios[cid]\n return\n elif n > usuarios[cid]:\n ans = bot.reply_to(\n message, \"Pista: el numero que buscas es menor.\")\n bot.register_next_step_handler(ans, verify_num)\n else:\n ans = bot.reply_to(\n message, \"Pista: El número que buscas es mayor.\")\n bot.register_next_step_handler(ans, verify_num)\n\n# Callback_query_handdler\n\n\n@bot.message_handler(commands=['enlaces'])\ndef call_cmd_links(message):\n cmd_links(bot, message)\n\n\n@bot.callback_query_handler(func=lambda x: True)\ndef call_response_links(call):\n response_links(bot, call)\n\n\n@bot.message_handler(func=lambda m: True)\ndef responder(message):\n if message.text.startswith('/'):\n bot.reply_to(message, \"Comando no disponible\")\n else:\n bot.reply_to(message, \"Hola ¿En que puedo ayudarte?\")\n\n\nif __name__ == '__main__':\n print('iniciando bot...')\n bot.infinity_polling()\n","repo_name":"GoldTryck/ChatbotTelegram","sub_path":"mainBot.py","file_name":"mainBot.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8161180860","text":"import sys\n\nfrom globals import *\n\ndef get_full_option_for_short_version(option) -> str:\n if option == 's':\n return K_SEED\n elif option == 'S':\n return K_SAMPLE\n elif option == 'g':\n return K_START_ACTION\n elif option == 'h':\n return K_HELP\n elif option == 'f':\n return K_FILE\n elif option == 'F':\n return K_FARAWAY\n elif option == 'm':\n return K_MIN\n elif option == 'p':\n return K_DISTANCE_COEF\n print(\"ERROR - UNKNOWN SHORT OPTION : {}\".format(option))\n return None\n\ndef default_cli_options():\n # initialize default seed value \n global_options[K_SEED] = K_DEFAULT_SEED_VALUE\n # initalize to run 'data_read' test if unspecified\n global_options[K_START_ACTION] = K_DEFAULT_START_ACTION\n # initialzie to use defalut csv file if unspecified\n global_options[K_FILE] = K_DEFAULT_DATA_FILE \n # initialize 'help' to false.\n global_options[K_HELP] = False\n\n # cluster options initialization\n global_options[K_FARAWAY] = K_DEFAULT_FARAWAY_VALUE\n global_options[K_MIN] = K_DEFAULT_MIN_VALUE\n global_options[K_DISTANCE_COEF] = K_DEFAULT_DISTANCE_COEF\n global_options[K_SAMPLE] = K_DEFAULT_SAMPLE_VALUE\n\ndef initialize_from_cli():\n default_cli_options()\n parse_cli_options()\n\ndef get_option_key_and_value_requirement(key) -> tuple[str, bool]:\n full_options_with_value = [K_SEED, K_START_ACTION, K_FILE, K_FARAWAY, K_MIN, K_DISTANCE_COEF, K_SAMPLE]\n short_options_with_value = ['s', 'g', 'f', 'F', 'm', 'p', 'S']\n \n key = key[2:] if key[1] == '-' else key[1:]\n \n if key in full_options_with_value:\n return (key, True)\n elif key in short_options_with_value:\n return (get_full_option_for_short_version(key), True)\n else:\n return (K_HELP, False)\n\ndef print_help():\n print('''\n OPTIONS:\n -d or --dump -> on crash, dump stack = false\n -h or --help -> Show this message.\n -F or --Far -> distance to \"faraway\" = .95\n -f or --file -> Name of file = 'data/auto93.csv'\n -g or --go -> Default action = 'data'\n -m or --min -> stop clusters at N^min = .5\n -p or --p -> distance coefficient = 2\n -s or --seed -> random number seed = 937162211\n -S or --Sample -> sampling data size = 512\n ''')\n\ndef handle_unknown_cli_option():\n print(\"Unknown option, please run -h (or) --help for more details.\")\n\ndef parse_cli_options():\n global global_options\n # skip 0 for script name.\n next_arg_is_value = False\n option_key = \"\"\n 
for arg in sys.argv[1:]:\n if not next_arg_is_value:\n option_details = get_option_key_and_value_requirement(arg)\n if option_details[1]:\n next_arg_is_value = True\n option_key = option_details[0]\n else:\n # Options which do not require value might expect different handling.\n if option_details[0] == K_HELP:\n global_options[K_HELP] = True\n else:\n global_options[option_key] = arg\n next_arg_is_value = False\n\n","repo_name":"surya-prakash-susarla/ASE-23","sub_path":"src/hw04/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"720108335","text":"#两数相加\r\n\r\n\"\"\"\r\n问题分析:将整数存放在链表中,逆序存放且每个节点存放一个位,(2 -> 4 -> 3)就代表数字342\r\n 然后需要将两个链表表示的数加起来,用另外一个链表存放(存放的就是整数和)\r\n\r\n算法分析:从两个链表的头开始,依次移动,每次将两个节点的值相加,若大于10,取个位,并且下一位的相加要加上1\r\n 小于10,则直接相加\r\n\"\"\"\r\nclass ListNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\n\r\n# class Solution:\r\n# def addTwoNumbers(self, l1, l2):\r\n\r\n# new_headlist = ListNode(-1)\r\n\r\n# cur = new_headlist\r\n# bonus = 0\r\n\r\n# while l1 and l2:\r\n# temp = l1.val + l2.val + bonus\r\n# if temp >= 10:\r\n# temp = temp % 10\r\n# # cur.val = temp\r\n# cur.next = ListNode(temp)\r\n# cur = cur.next\r\n# bonus = 1\r\n# else:\r\n# cur.next = ListNode(temp)\r\n# cur = cur.next\r\n# bonus = 0\r\n \r\n# l1 = l1.next\r\n# l2 = l2.next\r\n\r\n# if l1 == None:\r\n# cur.next = l2\r\n\r\n# if l2 == None:\r\n# cur.next = l1\r\n# return new_headlist.next\r\n\r\n\r\n\"\"\"\r\n上面是我写的第一个版本,有点小BUG;不想改了,重新写了一个,思想都是一样的,只是改了判断条件\r\n\"\"\"\r\n\r\nclass Solution:\r\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\r\n new_head = ListNode(0)\r\n curry = new_head\r\n carry = 0\r\n \r\n while l1 or l2:\r\n sum = 0\r\n if l1:\r\n sum += l1.val\r\n l1 = l1.next\r\n if l2:\r\n sum += l2.val\r\n l2 = l2.next\r\n sum += carry\r\n carry = sum // 10\r\n curr.next = ListNode(sum % 10)\r\n curr = curr.next\r\n if carry > 0:\r\n curr.next = ListNode(1)\r\n return new_head.next\r\n\r\n\r\n ","repo_name":"zhangwhitemouse/pythonDataStructure","sub_path":"datastructure/python-data-structure-linkedlist/addtwonumbers.py","file_name":"addtwonumbers.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31375240733","text":"\"\"\"Implements the `utopya test` subcommand\"\"\"\n\nimport os\nimport sys\n\nimport click\n\nfrom ._shared import OPTIONS, add_options, complete_model_names\nfrom ._utils import Echo\n\n# -----------------------------------------------------------------------------\n\n\n@click.command(\n \"test\",\n help=(\n \"Run the Python model tests associated with the specified model.\\n\"\n \"\\n\"\n \"Looks up the test directory, temporarily moves to that directory, \"\n \"and invokes pytest there. Any additional pytest arguments are passed \"\n \"through. 
See pytest docs for more information.\"\n ),\n context_settings=dict(ignore_unknown_options=True),\n)\n#\n# Select a model\n@click.argument(\"model_name\", shell_complete=complete_model_names)\n@add_options(OPTIONS[\"label\"])\n#\n# Pass pytest arguments through\n@click.argument(\"pytest_args\", nargs=-1)\n#\n# NOTE This function needs a name that is not simply ``test`` because pytest\n# will also search this file for test discovery and we cannot have a\n# function here that looks like a test function.\ndef run_test(\n model_name: str,\n label: str,\n pytest_args: tuple,\n):\n \"\"\"Invokes the associated Python tests for a model using pytest.\"\"\"\n import pytest\n\n import utopya\n\n _log = utopya._getLogger(\"utopya\")\n\n # Get model information and the corresponding test directory\n model = utopya.Model(name=model_name, bundle_label=label)\n model_info = model.info_bundle\n\n py_tests_dir = model_info.paths.get(\"py_tests_dir\")\n if not py_tests_dir:\n _log.caution(\"No tests defined for this model.\")\n return\n\n py_tests_dir = os.path.abspath(py_tests_dir)\n\n # Add the model tests' parent directories to the PATH to allow imports\n prepend_to_sys_path = (\n os.path.dirname(os.path.dirname(py_tests_dir)),\n os.path.dirname(py_tests_dir),\n )\n for p in prepend_to_sys_path:\n sys.path.insert(0, p)\n\n # Move to the test directory ...\n old_wd = os.getcwd()\n os.chdir(py_tests_dir)\n\n # ... and invoke the tests\n _log.progress(\"Invoking associated Python model tests ...\")\n _log.remark(\n \"Temporarily setting working directory to model test directory:\\n %s\",\n py_tests_dir,\n )\n _log.remark(\"Full test command:\\n\\n pytest %s\\n\\n\", \" \".join(pytest_args))\n try:\n sys.exit(pytest.main(list(pytest_args)))\n\n finally:\n # Change back to previous working directory to not have side effects\n os.chdir(old_wd)\n\n # Remove previously prependend directories from system PATH\n for p in prepend_to_sys_path:\n sys.path.remove(p)\n","repo_name":"utopia-foss/utopya","sub_path":"utopya_cli/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"43458010480","text":"import Irmin_ConnectionDB\nimport yagmail\nfrom datetime import datetime, timedelta, date\n\ndef main():\n print('Conectando a la DB Irmin_Test...')\n mydb = Irmin_ConnectionDB.connectionDB()\n if mydb == False:\n print('Error en Conexion...Contacte al administrador del Sistema IRMIN') \n else:\n print('Conexion realizada con exito...')\n body = checkDaily(mydb)\n sendEmail(body)\n\ndef checkDaily(mydb):\n today = date.today()\n time_from = ' 00:00:00'\n time_to = ' 23:59:59'\n yesterday = today - timedelta(days=2)\n today_body = str(today.day) + \"/\" + str(today.month) + \"/\" + str(today.year)\n yesterday_body = str(yesterday.day) + \"/\" + str(yesterday.month) + \"/\" + str(yesterday.year)\n yesterday_from = str(yesterday) + time_from\n yesterday_to = str(yesterday) + time_to\n sql = \"SELECT * FROM irmin_test.images WHERE (fecha_hora_imagen > '\" + yesterday_from + \"' and fecha_hora_imagen < '\" + yesterday_to + \"'and hay_actividad = 1)\"\n \n mycursor = mydb.cursor()\n try:\n mycursor.execute(sql)\n records = mycursor.fetchall()\n body = [\n \"

Fecha: \" + today_body + \"

\",\n \"


\",\n \"

Buenos días,

\",\n \"

En el dia \" + yesterday_body + \" se han encontrado \" + str(mycursor.rowcount) + \" imágenes con condiciones predisponentes para la caída de granizo.

\",\n \"


\",\n \"

Cualquier pregunta no dude en consultar al administrador del sistema proyecto: proyecto.irmin@gmail.com.

\",\n \"


\",\n \"

Muchas gracias,

\",\n \"

IRMIN Admin

\",\n ]\n\n #New Informe\n datetimeNow = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n fecha_hora_informe = created = modified = datetimeNow\n descripcion = \"Fecha: \" + today_body + \" - En el dia \" + yesterday_body + \" se han encontrado \" + str(mycursor.rowcount) + \" imagenes con condiciones predisponentes para la caida de granizo.\"\n sql = \"INSERT INTO informes (fecha_hora_informe, descripcion, created, modified) VALUES (%s, %s, %s, %s)\"\n val = (fecha_hora_informe, descripcion, created, modified)\n mycursor = mydb.cursor()\n try:\n mycursor.execute(sql, val)\n mydb.commit()\n #print(mycursor.rowcount, \"OK...\")\n\n mycursor = mydb.cursor()\n mycursor.execute('SELECT MAX(id) from informes')\n result = mycursor.fetchall()\n for i in result:\n informe_id = i[0]\n except:\n body = \"Error DB: Informe\"\n\n #New Imagen-Informe\n for image in records:\n image_id = image[0]\n sql = \"INSERT INTO images_informes (image_id, informe_id) VALUES (%s, %s)\"\n val = (image_id, informe_id)\n mycursor = mydb.cursor()\n try:\n mycursor.execute(sql, val)\n mydb.commit()\n #print(mycursor.rowcount, \"OK...\")\n except:\n body = \"Error en Imagenes e Informes\"\n except:\n body = \"Error DB: Informes\"\n\n return body\n\ndef sendEmail(body):\n yag = yagmail.SMTP('proyecto.irmin@gmail.com', 'Proyectoirmin123')\n subject = 'Reporte Diario: ' + datetime.now().strftime(\"%d%m%Y\")\n yag.send('santiagopierotti@gmail.com', subject, body)\n\n#Code\nmain()","repo_name":"spierotti/irmin","sub_path":"webroot/files/ejecutables/Project_Irmin/Irmin_CreateDailyReport.py","file_name":"Irmin_CreateDailyReport.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31570295327","text":"import unittest\nfrom codac import *\nimport codac as codac\n\nclass TestFunctionOnTube(unittest.TestCase):\n\n def assertApproxIntv(self, first, second):\n if first.is_empty() is False:\n # if isinstance(second, Interval):\n self.assertAlmostEqual(first.lb(), second.lb())\n self.assertAlmostEqual(first.ub(), second.ub())\n else:\n self.assertEqual(first, second)\n def test_Functions_1(self):\n\n x = TubeVector(Interval(0.,10.), 0.01, TFunction(\"sin(t)+[-0.01,0.01]\"));\n f = TFunction(\"x\", \"t/10.+x\");\n y1 = TubeVector(f.eval_vector(x));\n y2 = TubeVector(Interval(0.,10.), 0.01, TFunction(\"t/10.+sin(t)+[-0.01,0.01]\"));\n self.assertAlmostEqual(y1.volume(), y2.volume());\n\n \nif __name__ == '__main__':\n unittest.main()","repo_name":"codac-team/codac","sub_path":"python/codac/tests/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"27"} +{"seq_id":"7908675853","text":"# 백준CodePlus 풀면서 알게 된 내용\n\n# while 문에 try except를 써서 구현\n\nwhile True:\n try:\n n = int(input())\n except: # n에 input이 없을 경우 작동함. 
즉 입력이 더 없는 경우 프로그램이 종료됨.\n break\n\n print(n) # 입력을 받았다면 아래에 할 일이 작동되는 형태","repo_name":"tldnr1/.etc","sub_path":"코테 연습문제 정리/Review/무한입력받기.py","file_name":"무한입력받기.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"75009438792","text":"import copy\nimport random\n\n\ndef bubble(a: list[int]) -> list[int]:\n a = copy.copy(a)\n for i in range(len(a) - 1):\n swapped = False\n for j in range(0, len(a) - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j+1] = a[j+1], a[j]\n swapped = True\n\n if not swapped:\n break\n return a\n\n\ndef quick_sort(a):\n def partition(pa, low, high):\n pivot = pa[high]\n i = low - 1\n\n for j in range(low, high):\n if pa[j] <= pivot:\n i = i + 1\n (pa[i], pa[j]) = (pa[j], pa[i])\n\n (pa[i + 1], pa[high]) = (pa[high], pa[i + 1])\n return i + 1\n\n def quick(qa, low, high):\n if low < high:\n pi = partition(qa, low, high)\n\n quick(qa, low, pi - 1)\n quick(qa, pi + 1, high)\n return qa\n\n return quick(copy.copy(a), 0, len(a) - 1)\n\n\ndef main():\n funcs = (('bubble', bubble,), ('quick', quick_sort))\n lists = ([], [1], [1,2,3,4,5], [5,4,3,2,1], [3,2,5,4,1], random.sample(range(1, 100), 20))\n\n for name, func in funcs:\n print(name)\n print('===')\n for l in lists:\n print(f'{l} -> {func(l)}')\n\n\nif __name__ == '__main__':\n main()","repo_name":"ashbyp/pscratch","sub_path":"algorithms/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3665171267","text":"from rest_framework import status\nfrom django.contrib.auth import authenticate\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view,action\nfrom django.contrib.auth.models import User\nfrom .models import Animal,Cattle,Cattle_User,Cattle_Animal\nfrom cattlemapp.serializers import UserSerializer,AnimalSerializer,TokenSerializer,Cattle_userSerializer\nfrom rest_framework import viewsets\nfrom datetime import date\nfrom django.http import HttpResponse\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated \nfrom rest_framework.decorators import authentication_classes,permission_classes\nfrom rest_framework.authtoken.models import Token\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Count\n\n\n\nclass UserCBV(viewsets.ViewSet):\n def create(self,request):\n x=request.data\n user=User.objects.create_user(username=x['username'],email=x['email'],password=x['password'],is_staff=x[\"is_staff\"])\n cattle=Cattle.objects.create()\n cattle_user=Cattle_User.objects.create(user_id=user,cattle_id=cattle,is_admin=user.is_staff)\n return Response({\"message\":\"user created\",\"cattle_id\":cattle.id})\n\nclass CattleCBV(viewsets.ViewSet):\n def update(self,request,pk=id):\n z=request.data\n x=Cattle.objects.filter(id=pk).update(cattle_name=z['cattle_name'],city=z['city'],place=z['place'],phone_no=z['phone_no'])\n return Response({\"message\":\"successfully added\"})\n\n def partial_update(self,request,pk=id):\n z=request.data\n m=request.user.id\n if Cattle_User.objects.filter(cattle_id_id=cattle_id,user_id_id=m): \n try:\n cattle_object=Cattle.objects.get(id=pk)\n cattle_object.cattle_name=z.get(\"cattle_name\",cattle_object.cattle_name)\n cattle_object.city=z.get(\"city\",cattle_object.city)\n cattle_object.place=z.get('place',cattle_object.place)\n 
cattle_object.phone_no=z.get('phone_no',cattle_object.phone_no)\n cattle_object.save()\n return Response({\"message\":\"cattle is successfully updated\"})\n except:\n return Response({\"message\":\"Invalid Cattle Id\"})\n else:\n return Response({\"message\":\"Invalid user\"})\nclass AnimalCBV(viewsets.ViewSet):\n authentication_classes = [TokenAuthentication]\n permission_classes=(IsAuthenticated,)\n def create(self,request,cattle_id):\n cattle=Cattle.objects.get(id=cattle_id)\n input_data=request.data\n user=request.user.id\n user_object=User.objects.get(id=user)\n if Cattle_User.objects.filter(cattle_id_id=cattle_id,user_id_id=user): \n animal1=Animal()\n animal1.cow_profile_pic=input_data['cow_profile_pic']\n animal1.tag_number=input_data['tag_number']\n animal1.name=input_data['name']\n animal1.breed=input_data['breed']\n animal1.gender=input_data['gender']\n animal1.date_of_birth=input_data['date_of_birth']\n animal1.date_of_entry=input_data['date_of_entry']\n animal1.how_cattle_obtained=input_data['how_cattle_obtained']\n if \"no_of_children\" in input_data:\n animal1.no_of_children=input_data['no_of_children']\n else:\n animal1.no_of_children=None\n if Cattle_Animal.objects.filter(cattle_id_id=cattle_id,animal_id_id=animal1.tag_number):\n return Response({\"message\":\"Animal already exist\"})\n else:\n pass \n if 'parent_id' in input_data:\n parent=input_data['parent_id']\n if Animal.objects.filter(tag_number=parent):\n animal1.parent_id_id=parent\n else:\n return Response({\"message\":\"Invalid parent Id/does not exist\"})\n else:\n animal1.parent_id_id=None\n try:\n animal=Animal.objects.get(tag_number=input_data['tag_number'])\n animal.is_alive=True\n animal.name=input_data[\"name\"]\n animal.date_of_entry=input_data['date_of_entry']\n animal.how_cattle_obtained=input_data['how_cattle_obtained']\n animal.save()\n except:\n animal=Animal.objects.create(tag_number=animal1.tag_number,cow_profile_pic=animal1.cow_profile_pic,breed=animal1.breed,name=animal1.name,gender=animal1.gender,date_of_birth=animal1.date_of_birth,date_of_entry=animal1.date_of_entry,how_cattle_obtained=animal1.how_cattle_obtained,parent_id_id=animal1.parent_id_id,no_of_children=animal1.no_of_children) \n cattle_animal_object=Cattle_Animal.objects.create(cattle_id=cattle,animal_id=animal,added_by_id=user_object,Date=date.today())\n return Response({\"message\":\"Animal added successfully\"})\n else:\n return Response({\"message\":\"Invalid User\"})\n def partial_update(self,request,cattle_id,pk=id):\n z=request.data\n m=request.user.id\n if Cattle_User.objects.filter(cattle_id_id=cattle_id,user_id_id=m): \n if Cattle_Animal.objects.filter(cattle_id_id=cattle_id,animal_id_id=pk):\n try:\n animal_object=Animal.objects.get(tag_number=pk)\n animal_object.no_of_children=z.get(\"no_of_children\",animal_object.no_of_children)\n animal_object.cow_profile_pic=z.get(\"cow_profile_pic\",animal_object.cow_profile_pic)\n animal_object.is_alive=z.get(\"is_alive\",animal_object.is_alive)\n animal_object.save()\n return Response({\"message\":\"successfully updated\"})\n except:\n return Response({\"message\":\"Invalid tag number\"})\n else:\n return Response({\"message\":\"Invalid credentials\"})\n else:\n return Response({\"message\":\"Invalid User\"})\n def destroy(self,request,pk):\n input_data=request.data\n m=request.user.id\n if Cattle_User.objects.filter(cattle_id_id=cattle_id,user_id_id=m): \n if Cattle_Animal.objects.filter(cattle_id_id=cattle_id,animal_id_id=pk):\n animal=Animal.objects.get(tag_number=tag_number)\n 
animal.is_alive=False\n animal.save()\n Cattle_Animal.objects.filter(cattle_id_id=cattle_id,animal_id_id=pk).delete()\n return Response({\"message\":\"Animal deleted Successfully\"})\n else: \n return Response({\"message\":\"Animal does not exist in that cattle\"})\n else:\n return Response({\"message\":\"Invalid User\"})\n@api_view(['POST'])\ndef user_login(request):\n m=request.data\n user=User.objects.filter(email=m[\"email\"]).first()\n if user is None:\n return Response({\"message\":\"Invalid credentials\"})\n success=user.check_password(m['password'])\n print(user.id)\n cattle_user=Cattle_User.objects.filter(user_id_id=user.id).first()\n if success:\n x=Token.objects.filter(user_id=user.id).delete()\n token=Token.objects.create(user=user)\n return Response({\"token\":token.key,\"cattle_id\":cattle_user.cattle_id_id,\"user_id\":user.id})\n else:\n return Response({\"message\":\"Invalid credentials\"})\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef user_logout(request):\n m=request.data\n user=User.objects.get(id=m['user_id'])\n token=Token.objects.filter(user_id=user.id).delete()\n return Response({\"message\":\"logout success\"})\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef add_user(request,cattle_id):\n user_id=request.user.id\n user_is_admin=request.user.is_staff\n m=request.data\n if user_is_admin:\n if Cattle_User.objects.filter(cattle_id_id=cattle_id,user_id_id=user_id):\n cattle=Cattle.objects.get(id=m['cattle_id'])\n user=User.objects.create_user(username=m['user_name'],email=m['email'],password=m['password'],is_staff=m[\"is_staff\"])\n cattle_user=Cattle_User.objects.create(user_id=user,cattle_id=cattle,is_admin=user.is_staff)\n return Response({\"message\":\"user added successfully\"})\n else:\n return Response({\"message\":\"you are not admin\"})\n else:\n return Response({\"message\":\"You are not admin\"})\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef show_animals(request,cattle_id):\n m={}\n n={}\n input_data=request.data\n queryset=Cattle_Animal.objects.filter(cattle_id=cattle_id)\n animalid_list=queryset.values_list('animal_id_id',flat=True)\n for i in animalid_list:\n animal=Animal.objects.get(tag_number=i)\n serializers=AnimalSerializer(animal)\n m[i]=serializers.data\n for i in animalid_list:\n m[i]['cow_profile_pic']='http://127.0.0.1:8000'+str(m[i]['cow_profile_pic'])\n n=m\n return Response(n)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef show_animal(request,cattle_id,tag_number):\n m={}\n print(request.user.id)\n input_data=request.data\n if Cattle_Animal.objects.filter(cattle_id_id=cattle_id,animal_id_id=tag_number).exists():\n animal=Animal.objects.get(tag_number=tag_number)\n else:\n return Response({\"message\":\"Invalid Credentials\"})\n serializer=AnimalSerializer(animal)\n m=serializer.data\n m['cow_profile_pic']='http://127.0.0.1:8000'+str(m['cow_profile_pic'])\n return Response(m)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef search_animal(request,cattle_id):\n m={}\n input_data=request.query_params\n if Cattle_Animal.objects.filter(cattle_id_id=cattle_id,animal_id_id=input_data['animal_id']).exists():\n animal=Animal.objects.get(tag_number=input_data['animal_id'])\n else:\n return Response({\"message\":\"Invalid Tag Number\"})\n serializer=AnimalSerializer(animal)\n m=serializer.data\n m['cow_profile_pic']='http://127.0.0.1:8000'+str(m['cow_profile_pic'])\n return Response(m)\n\n@api_view([\"POST\"])\n@permission_classes([IsAuthenticated])\ndef delete_animal(request,cattle_id,tag_number):\n input_data=request.data\n 
animal=Animal.objects.get(tag_number=tag_number)\n animal.is_alive=False\n animal.save()\n try:\n canimal=Cattle_Animal.objects.get(cattle_id=cattle_id,animal_id_id=tag_number)\n canimal.delete()\n return Response({\"message\":\"Animal deleted Successfully\"})\n except:\n return Response({\"message\":\"Animal does not exist in that cattle\"})\n\n@api_view([\"POST\"])\n@permission_classes([IsAuthenticated])\ndef cattle_details(request,cattle_id):\n m=[]\n gender_count = Animal.objects.filter(cattle_animal__cattle_id=cattle_id).values('gender').annotate(Count('gender'))\n print(gender_count)\n for i in gender_count:\n z={} \n z['gender']=i[\"gender\"]\n z[\"count\"]=i[\"gender__count\"]\n m.append(z) \n return Response({\"message\":\"details sent\",\"data\":m})","repo_name":"PrasannaHegde66/cattlemanagerbackend","sub_path":"cattle_manager/cattlemapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38725375112","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.up = None\n self.down = None\n \nclass Table:\n def __init__(self, size, start_pointer):\n self.head = Node(0)\n self.pointer = start_pointer\n self.trash_bin = []\n\n cur = self.head\n for idx in range(1, size):\n cur.down = Node(idx)\n cur.down.up = cur\n cur = cur.down\n\n cur = self.head\n for _ in range(start_pointer):\n cur = cur.down\n self.pointing_node = cur\n\n\n def pop(self):\n temp = self.pointing_node # the node currently pointed at\n \n if temp.up is not None and temp.down is not None: # nodes exist both above and below\n temp.up.down = temp.down\n temp.down.up = temp.up\n self.pointing_node = temp.down\n elif temp.up is not None: # tail node\n temp.up.down = temp.down # re-link with the upper node only\n self.pointing_node = temp.up\n elif temp.down is not None: # head node\n temp.down.up = temp.up\n self.pointing_node = temp.down\n self.head = temp.down\n else: # last remaining element\n self.pointing_node = temp.down\n self.head = None\n self.trash_bin.append(temp)\n\n def restore(self):\n restoring_node = self.trash_bin.pop()\n if self.pointing_node is None:\n self.head = restoring_node\n self.pointing_node = restoring_node\n elif restoring_node.up is None:\n self.head = restoring_node\n restoring_node.down.up = restoring_node\n elif restoring_node.down is None:\n restoring_node.up.down = restoring_node\n else:\n restoring_node.up.down = restoring_node\n restoring_node.down.up = restoring_node\n\n def move(self, action):\n action = action.split()\n if action[0] == 'U':\n for _ in range(int(action[1])):\n self.pointing_node = self.pointing_node.up\n else:\n for _ in range(int(action[1])):\n self.pointing_node = self.pointing_node.down\n\n def display(self):\n cur = self.head\n while cur is not None:\n print(cur.value)\n cur = cur.down\n\n\ndef solution(n, k, cmd):\n table = Table(size = n, start_pointer=k)\n for action in cmd:\n if action == 'C':\n table.pop()\n elif action == 'Z':\n table.restore()\n else:\n table.move(action)\n\n answer = ['X'] * n\n cur_node = table.head\n while cur_node is not None:\n answer[cur_node.value] = 'O'\n cur_node = cur_node.down\n\n return ''.join(answer)\n\ncase = 8, 2, [\"D 2\",\"C\",\"U 3\",\"C\",\"D 4\",\"C\",\"U 2\",\"Z\",\"Z\",\"U 
1\",\"C\"]\n\nprint(solution(*case))","repo_name":"easttuna/coding-test-notes","sub_path":"kakao/kakao_lv3_표편집_linked_list.py","file_name":"kakao_lv3_표편집_linked_list.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9872160650","text":"from setuptools import setup, find_packages\n\nfrom codecs import open\nfrom os import path\n\n\n_HERE = path.abspath(path.dirname(__file__))\n\n\ndef read(*names, **kwds):\n return open(\n path.join(_HERE, *names),\n encoding=kwds.get('encoding', 'utf-8')\n ).read()\n\n\ndef find_version(*file_paths):\n import re\n\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nsetup(\n name='bmibabel',\n version=find_version(\"bmibabel/__init__.py\"),\n description='Generate BMI language bindings with babel.',\n long_description=read('README.md'),\n url='https://github.com/csdms/bmi-babel',\n\n author='Eric Hutton',\n author_email='hutton.eric@gmail.com',\n\n license='MIT',\n\n packages=['bmibabel', 'bmibabel.cmd'],\n\n entry_points={\n 'console_scripts': [\n 'bmi-babel-make=bmibabel.cmd.make:main',\n 'bmi-babel-fetch=bmibabel.cmd.bmi_babel_fetch:main',\n 'bmi-find=bmibabel.cmd.bmi_find:main',\n 'bmi-babelize=bmibabel.cmd.bmi_babelize:main',\n 'bmi-parameters=bmibabel.cmd.bmi_parameters:main',\n 'bmi-files=bmibabel.cmd.bmi_files:main',\n 'bmi-stage=bmibabel.cmd.bmi_stage:main',\n 'bmi-install=bmibabel.cmd.bmi_install:main',\n 'bmi=bmibabel.cmd.bmi:main',\n ],\n },\n\n package_data={'bmibabel': ['data/*sidl',\n 'data/c.Component/*',\n 'data/cxx.Component/*',\n 'data/py.Component/*']},\n)\n","repo_name":"bmi-forum/babelizer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"15967991178","text":"import os, json\nfrom flask import request\nfrom werkzeug.exceptions import InternalServerError, BadRequest\nfrom werkzeug.datastructures import ImmutableMultiDict\n\nfrom bdc_search_stac.collections import ns\nfrom bdc_search_stac.collections.business import CollectionsBusiness\nfrom bdc_search_stac.collections.parsers import validate\nfrom bdc_core.utils.flask import APIResource\n\napi = ns\n\n@api.route('/')\nclass ItemsController(APIResource):\n\n def get(self):\n data, status = validate(request.args.to_dict(flat=True), 'providers')\n if status is False:\n raise BadRequest(json.dumps(data))\n\n \"\"\"\n List of STAC collections by providers\n \"\"\"\n return CollectionsBusiness.get_collections_by_providers(data['providers'])\n\n@api.route('/items')\nclass CollectionsController(APIResource):\n\n def get(self):\n data, status = validate(request.args.to_dict(flat=True), 'search')\n if status is False:\n raise BadRequest(json.dumps(data))\n\n \"\"\"\n Search RF in STAC's\n \"\"\"\n features = CollectionsBusiness.search(**request.args)\n\n return {\n \"meta\": {\n \"found\": len(features)\n },\n \"features\": features\n }\n","repo_name":"rmmariano/search_stac_mock","sub_path":"bdc_search_stac/collections/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39742735719","text":"from django.urls import 
path\n\nfrom . import views\n\nurlpatterns = [\n path('1mg', views.medicine_from_1mg),\n path('', views.home, name='home'),\n path('netmeds', views.medicine_from_netmeds),\n path('pharmeasy', views.medicine_from_pharmeasy),\n path('search', views.searchsuggestions),\n path('addSubscription',views.add_subscription),\n path('removeSubscription',views.remove_subscription),\n path('send_price_alert',views.send_price_alerts),\n path('findbymedicinename',views.findbymedicinename),\n path('showsubscription',views.give_user_by_email),\n path('searchbycontent', views.searchsuggestionsbycontent)\n\n]","repo_name":"sahil556/MedScrapper","sub_path":"MedScrapperServer/medscrapperapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"41098829365","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#@Time : 16:14 2019/11/25\n#@Author : tb_youth\n#@FileName: MulitWindows1.py\n#@SoftWare: PyCharm\n#@Blog : http://blog.csdn.net/tb_youth\n'''\nQMdiArea\nQMdiSubWindow\n'''\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nclass MulitWindowsDemo(QMainWindow):\n count = 0\n def __init__(self):\n super(MulitWindowsDemo,self).__init__()\n self.initUI()\n\n def initUI(self):\n self.resize(800,800)\n self.setWindowTitle('多창窗口')\n\n self.mdi = QMdiArea()\n self.setCentralWidget(self.mdi)\n\n bar = self.menuBar()\n file = bar.addMenu('File')\n file.addAction('New')\n file.addAction('Cascade')\n file.addAction('Tiled')\n\n file.triggered.connect(self.windowAction)\n\n def windowAction(self,q):\n if q.text() == 'New':\n MulitWindowsDemo.count = MulitWindowsDemo.count + 1\n sub = QMdiSubWindow()\n sub.setWidget(QTextEdit())\n sub.setWindowTitle('子窗口'+str(MulitWindowsDemo.count))\n self.mdi.addSubWindow(sub)\n sub.show()\n elif q.text() == 'Cascade':\n self.mdi.cascadeSubWindows()\n else:\n self.mdi.tileSubWindows()\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MulitWindowsDemo()\n window.show()\n sys.exit(app.exec_())","repo_name":"tbyouth/Learn-python-notes","sub_path":"projects/demo/MulitWindows/MulitWindows1.py","file_name":"MulitWindows1.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6859242481","text":"from math import sqrt\n\nN = int(input())\n\n# Recognize in the algorithm, the number of successive squares\n# is 4x the previous amount of squares.\n\n# Recognize that the total amount of squares fits neatly into an array\n# with length sqrt(total squares).\n\n# Recognize that an array of points has one extra row and column compared\n# to the array of squares.\n\nsquares = 1 # Start\n\nfor i in range(1, N+1):\n new_squares = squares * 4 # for iteration i\n squares = new_squares\n\nsquares_per_row = sqrt(squares)\npoints_per_row = squares_per_row + 1\ntotal = points_per_row ** 2\n\nprint(int(total))","repo_name":"5yph/Kattis","sub_path":"1.x/Planina.py","file_name":"Planina.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"35636353965","text":"\"\"\"\nModels for the OCV calculations\n\"\"\"\n\nimport pandas as pd\n\n\nclass BatteryScript:\n \"\"\"\n Script or experiment performed on the battery cell.\n \"\"\"\n\n def __init__(self, csvdata):\n 
\"\"\"\n Initialize the script measurements.\n \"\"\"\n columns = ['Discharge_Capacity(Ah)', 'Charge_Capacity(Ah)', 'Step_Index', 'Voltage(V)'] \n df = pd.read_csv(csvdata, usecols=columns)\n self.disAh = df['Discharge_Capacity(Ah)'].values\n self.chgAh = df['Charge_Capacity(Ah)'].values\n self.step = df['Step_Index'].values\n self.voltage = df['Voltage(V)'].values\n\n\nclass BatteryData:\n \"\"\"\n Object to store battery measurements from script or experiment for a\n certain temperature.\n \"\"\"\n\n def __init__(self, csvfiles):\n \"\"\"\n Initialize with list of CSV data files.\n \"\"\"\n self.s1 = BatteryScript(csvfiles[0])\n self.s2 = BatteryScript(csvfiles[1])\n self.s3 = BatteryScript(csvfiles[2])\n self.s4 = BatteryScript(csvfiles[3])\n\n\nclass FileData:\n \"\"\"\n Calculated data from file.\n \"\"\"\n\n def __init__(self, disV, disZ, chgV, chgZ, rawocv, temp):\n self.disV = disV\n self.disZ = disZ\n self.chgV = chgV\n self.chgZ = chgZ\n self.rawocv = rawocv\n self.temp = temp\n\n\nclass ModelOcv:\n \"\"\"\n Model representing OCV results.\n \"\"\"\n\n def __init__(self, OCV0, OCVrel, SOC, OCV, SOC0, SOCrel, OCVeta, OCVQ):\n self.OCV0 = OCV0\n self.OCVrel = OCVrel\n self.SOC = SOC\n self.OCV = OCV\n self.SOC0 = SOC0\n self.SOCrel = SOCrel\n self.OCVeta = OCVeta\n self.OCVQ = OCVQ\n\n\n","repo_name":"batterysim/esctoolbox-python","sub_path":"ocv_model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"27"} +{"seq_id":"40816223267","text":"#remove all the characters in b\n#which are present in a\n#accept two strings\n\na = input()\nb = input()\nstr = \"\"\nfor i in b:\n if i not in a:\n str = str + i\nprint(str)\n","repo_name":"2k-akash/python-intro-IIT-MADRAS","sub_path":"week3/PPA12.py","file_name":"PPA12.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40998392938","text":"from pyspark.sql import SparkSession\r\nfrom pyspark.sql.functions import from_json, col\r\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType, FloatType, TimestampType, DecimalType\r\n\r\n# Initialize Spark session\r\nspark = SparkSession.builder.appName(\"KafkaToBigQuery\").getOrCreate()\r\n \r\n# Set your bucket and project configuration\r\nbucket = \"bucket_json02\"\r\nspark.conf.set(\"temporaryGcsBucket\", bucket)\r\nspark.conf.set(\"parentProject\", \"mindful-bivouac-395521\")\r\n# Define schema for JSON data\r\nschema = StructType([\r\n StructField(\"adi\", StringType()),\r\n StructField(\"bos\", StringType()),\r\n StructField(\"dolu\", StringType()),\r\n StructField(\"lon\", StringType()), \r\n StructField(\"lat\", StringType()), \r\n StructField(\"sonBaglanti\", TimestampType())\r\n ])\r\n# Read data from Kafka topic\r\nkafkaDF = spark.readStream.format(\"kafka\") \\\r\n .option(\"kafka.bootstrap.servers\", \"34.125.130.154:9092\") \\\r\n .option(\"subscribe\", \"ornek\") \\\r\n .load()\r\n\r\n# Parse JSON data and select fields\r\nparsedDF = kafkaDF.selectExpr(\"CAST(value AS STRING)\") \\\r\n .select(from_json(\"value\", schema).alias(\"parsed\")) \\\r\n .select(\"parsed.*\")\r\n\r\n\r\n# Convert columns to the appropriate data types\r\nconvertedDF = parsedDF.withColumn(\"bos\", col(\"bos\").cast(IntegerType())) \\\r\n .withColumn(\"dolu\", col(\"dolu\").cast(IntegerType())) \\\r\n .withColumn(\"lon\", col(\"lon\").cast(DecimalType(18, 9))) \\\r\n 
.withColumn(\"lat\", col(\"lat\").cast(DecimalType(18, 9)))\r\n\r\n# Write data to BigQuery\r\nquery_bigquery = convertedDF.writeStream \\\r\n .outputMode(\"append\") \\\r\n .format(\"bigquery\") \\\r\n .option(\"table\", \"db.table0\") \\\r\n .option(\"checkpointLocation\", \"/path/to/checkpoint/dir/in/hdfs\") \\\r\n .option(\"parentProject\", \"mindful-bivouac-395521\") \\\r\n .option(\"credentialsFile\", \"/home/nt110511/sw.json\") \\\r\n .start()\r\n\r\nquery_bigquery.awaitTermination()","repo_name":"ntopel/DataEngineeringBitirme","sub_path":"3 - DataProc/table0kod.py","file_name":"table0kod.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26642325514","text":"from ast import Num\nfrom turtle import left, right\nfrom random import *\n\ndef MergeSort(list_to_sort):\n if(len(list_to_sort) > 1):\n mid = len(list_to_sort)//2\n left_list = list_to_sort[:len(list_to_sort)//2]\n right_list = list_to_sort[len(list_to_sort)//2 :]\n\n #create right and left list so that I can call MergeSort() on them\n for i in range(1, mid):\n list_to_sort[i] = left_list[i]\n\n for i in range(mid, len(right_list)):\n list_to_sort[i] = right_list[i]\n \n MergeSort(left_list)\n MergeSort(right_list)\n Merge(list_to_sort, left_list, right_list)\n\n return list_to_sort\n\ndef Merge(list_to_sort, left_list, right_list):\n #In this func we want to check to see if i values in left list are less than j values in right list and merge to k index in list to sort\n\n i = 0 # left list index \n j = 0 # right list index\n k = 0 #index for merged/sorted list\n\n #While there are elements in both lists to compare - run, else exit\n while i < len(left_list) and j < len(right_list):\n if left_list[i] < right_list[j]:\n list_to_sort[k] = left_list[i]\n i += 1\n else:\n list_to_sort[k] = right_list[j]\n j += 1\n k += 1\n #need to check if there are i or j indices (odd value list)\n\n #This loop implies that j is = or > len of right list and we need to finish adding left list elements\n while(i < len(left_list)):\n list_to_sort[k] = left_list[i]\n i += 1\n k += 1\n\n #This loop implies that i is = or > len of left list and we need to finish adding right list elements\n while(j < len(right_list)):\n list_to_sort[k] = right_list[j]\n j += 1\n k += 1\n\n\n\n\ndef MergeSortPractice2(ArrayToSort):\n\n ArrayLength = len(ArrayToSort)\n\n mid = ArrayLength//2\n LeftArray = ArrayToSort[:mid]\n RightArray = ArrayToSort[mid:]\n\n if ArrayLength > 2:\n #time to split lists\n for i in range(0,mid):\n ArrayToSort[i] = LeftArray[i]\n for i in range(mid, len(RightArray)):\n ArrayToSort[i] = RightArray[i]\n\n MergeSort(LeftArray)\n MergeSort(RightArray)\n Merge(ArrayToSort, LeftArray, RightArray)\n return ArrayToSort\n\ndef Merge(ArrayToSort, LeftHalf, RightHalf):\n i = 0\n j = 0\n k = 0\n\n #need to compare lists\n while i < len(LeftHalf) and j < len(RightHalf):\n if LeftHalf[i] < RightHalf[j]:\n ArrayToSort[k] = LeftHalf[i]\n i += 1\n else:\n ArrayToSort[k] = RightHalf[j]\n j+= 1\n k += 1\n #need to clean up in the case the list is odd and while loop exits\n while i < len(LeftHalf):\n ArrayToSort[k] = LeftHalf[i]\n i += 1\n k +=1\n\n while j < len(RightHalf):\n ArrayToSort[k] = RightHalf[j]\n j += 1\n k += 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef generate_rand_list(NumberOfValues):\n #picks random number from 0-100000 for the amount of values numberofvalues is equal to\n x = [randint(0, 100000) for p in range(0, NumberOfValues)]\n return 
x\n\nif __name__ == '__main__':\n array = [9,8,5,7,3,2,3,0,-1,2,4, 300, -21, 34, -231, 1, -213]\n words = generate_rand_list(10)\n print(MergeSortPractice2(words))\n # print(MergeSort(words)) #calls function","repo_name":"JuicyMag/Sorting-Algorithms","sub_path":"mergesort_practice2.py","file_name":"mergesort_practice2.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16429457988","text":"import numpy as np\nfrom torch import Tensor\nfrom torch.autograd import Variable\n\n\nclass ReplayBuffer(object):\n \"\"\"\n Replay Buffer for multi-agent RL with parallel env support\n\n taken from https://github.com/shariqiqbal2810/maddpg-pytorch/blob/master/utils/buffer.py\n \"\"\"\n\n def __init__(self, max_steps, num_agents, obs_dims, ac_dims, no_rewards=False):\n \"\"\"\n Create buffer\n :param max_steps (int): maximum number of timepoints to store in buffer\n :param num_agents (int): number of agents in environment\n :param obs_dims (list of ints): number of obervation dimensions for each\n agent\n :param ac_dims (list of ints): number of action dimensions for each agent\n :param no_rewards (bool): flag if all rewards are 0 --> no normalisation!\n \"\"\"\n self.max_steps = max_steps\n self.num_agents = num_agents\n self.no_rewards = no_rewards\n self.obs_buffs = []\n self.ac_buffs = []\n self.rew_buffs = []\n self.next_obs_buffs = []\n self.done_buffs = []\n for odim, adim in zip(obs_dims, ac_dims):\n self.obs_buffs.append(np.zeros((max_steps, odim)))\n self.ac_buffs.append(np.zeros((max_steps, adim)))\n self.rew_buffs.append(np.zeros(max_steps))\n self.next_obs_buffs.append(np.zeros((max_steps, odim)))\n self.done_buffs.append(np.zeros(max_steps))\n\n self.filled_i = 0 # index of first empty location in buffer (last index when full)\n self.curr_i = 0 # current index to write to (ovewrite oldest data)\n\n def __len__(self):\n return self.filled_i\n\n def push(self, observations, actions, rewards, next_observations, dones):\n \"\"\"\n Add entry to buffer\n :param observations: current observations\n :param actions: applied actions\n :param rewards: received rewards\n :param next_observations: observations of reached states\n :param dones: terminal flags\n \"\"\"\n nentries = 1\n if self.curr_i + nentries > self.max_steps:\n rollover = self.max_steps - self.curr_i # num of indices to roll over\n for agent_i in range(self.num_agents):\n self.obs_buffs[agent_i] = np.roll(self.obs_buffs[agent_i], rollover, axis=0)\n self.ac_buffs[agent_i] = np.roll(self.ac_buffs[agent_i], rollover, axis=0)\n self.rew_buffs[agent_i] = np.roll(self.rew_buffs[agent_i], rollover)\n self.next_obs_buffs[agent_i] = np.roll(\n self.next_obs_buffs[agent_i], rollover, axis=0\n )\n self.done_buffs[agent_i] = np.roll(self.done_buffs[agent_i], rollover)\n self.curr_i = 0\n self.filled_i = self.max_steps\n for agent_i in range(self.num_agents):\n self.obs_buffs[agent_i][self.curr_i : self.curr_i + nentries] = np.vstack(\n observations[agent_i]\n )\n # actions are already batched by agent, so they are indexed differently\n self.ac_buffs[agent_i][self.curr_i : self.curr_i + nentries] = actions[agent_i]\n self.rew_buffs[agent_i][self.curr_i : self.curr_i + nentries] = rewards[agent_i]\n self.next_obs_buffs[agent_i][self.curr_i : self.curr_i + nentries] = np.vstack(\n next_observations[agent_i]\n )\n self.done_buffs[agent_i][self.curr_i : self.curr_i + nentries] = dones[agent_i]\n self.curr_i += nentries\n if self.filled_i < 
self.max_steps:\n self.filled_i += nentries\n if self.curr_i == self.max_steps:\n self.curr_i = 0\n\n def sample(self, N, to_gpu=False, norm_rews=True):\n \"\"\"\n Sample replay experience tuples (obs, actions, rewards, next_obs, dones)\n :param N: number of samples to generate\n :param to_gpu: flag whether tensors should be cast for GPU support\n :param norm_rews: flag whether rewards should be normalised\n \"\"\"\n inds = np.random.choice(np.arange(self.filled_i), size=N, replace=False)\n if to_gpu:\n cast = lambda x: Variable(Tensor(x), requires_grad=False).cuda()\n else:\n cast = lambda x: Variable(Tensor(x), requires_grad=False)\n if not self.no_rewards and norm_rews:\n ret_rews = [\n cast(\n (self.rew_buffs[i][inds] - self.rew_buffs[i][: self.filled_i].mean())\n / self.rew_buffs[i][: self.filled_i].std()\n )\n for i in range(self.num_agents)\n ]\n else:\n ret_rews = [cast(self.rew_buffs[i][inds]) for i in range(self.num_agents)]\n return (\n [cast(self.obs_buffs[i][inds]) for i in range(self.num_agents)],\n [cast(self.ac_buffs[i][inds]) for i in range(self.num_agents)],\n ret_rews,\n [cast(self.next_obs_buffs[i][inds]) for i in range(self.num_agents)],\n [cast(self.done_buffs[i][inds]) for i in range(self.num_agents)],\n )\n","repo_name":"LukasSchaefer/MSc_Curiosity_MARL","sub_path":"marl_algorithms/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"27"} +{"seq_id":"23001727940","text":"from matmodlab import *\nfrom os.path import join\nfrom matmodlab.mmd.simulator import StrainStep\n\nraise SystemExit('model has errors (still under development), simulation stopped')\nmps = MaterialPointSimulator('uanisohyper_inv')\nC10, D, K1, K2, Kappa = 7.64, 1.e-8, 996.6, 524.6, 0.226\nparameters = np.array([C10, D, K1, K2, Kappa])\na = np.array([[0.643055,0.76582,0.0]])\nmps.Material(UANISOHYPER_INV, parameters, fiber_dirs=a,\n source_files=[join(MAT_D, 'abaumats/uanisohyper_inv.f')],\n libname='uanisohyper_inv_t', rebuild=1)\nmps.GenSteps(StrainStep, components=(1,0,0), increment=2*pi,\n steps=200, frames=1, scale=.1, amplitude=(np.sin,))\nmps.dump()\n","repo_name":"tjfulle/matmodlab","sub_path":"matmodlab/examples/uanisohyper_inv.py","file_name":"uanisohyper_inv.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"} +{"seq_id":"74597751431","text":"import os\nimport unittest\n\nfrom shutil import rmtree\n\nfrom hochiminh.io.pdfconverter import PDFConverter\n\n\nclass TestPDFConverter(unittest.TestCase):\n\n @classmethod\n def setUp(cls):\n path = '../data/test/pdf-converter/'\n\n cls.in_path = path + 'pdf/'\n cls.out_path = path + 'out_images/'\n cls.extension = 'ppm'\n\n cls.next_image_answ_path = sorted(\n [\n cls.out_path + '2.pdf/-1.ppm',\n cls.out_path + '2.pdf/-2.ppm',\n cls.out_path + '2.pdf/-3.ppm',\n cls.out_path + '5.pdf/-1.ppm',\n cls.out_path + '5.pdf/-2.ppm',\n cls.out_path + '5.pdf/-3.ppm',\n cls.out_path + '5.pdf/-4.ppm',\n ]\n )\n cls.pdf_converter = PDFConverter(in_path=cls.in_path, out_path=cls.out_path, resolution=120)\n\n def __convert_all_match_images(self, folder_name, count_files):\n image_list = sorted(os.listdir(self.out_path + folder_name + '/'))\n self.assertListEqual(['-' + str(ind + 1) + '.' 
+ self.extension for ind in range(count_files)], image_list)\n\n def test_convert_all(self):\n self.pdf_converter.convert_all()\n listdir = sorted(os.listdir(self.out_path))\n self.assertListEqual(['2.pdf', '5.pdf'], listdir)\n self.__convert_all_match_images('2.pdf', 3)\n self.__convert_all_match_images('5.pdf', 4)\n\n if os.path.isdir(self.out_path):\n rmtree(self.out_path)\n else:\n self.assertFalse('There is not folder with images')\n\n def test_next_document(self):\n self.pdf_converter.convert_all()\n\n paths = self.pdf_converter.next_paths_to_documents()\n self.assertEqual(3, len(paths))\n self.assertListEqual(self.next_image_answ_path[:3], sorted(paths))\n\n paths = self.pdf_converter.next_paths_to_documents()\n self.assertEqual(4, len(paths))\n self.assertListEqual(self.next_image_answ_path[3:], sorted(paths))\n\n if os.path.isdir(self.out_path):\n rmtree(self.out_path)\n else:\n self.assertFalse('There is not folder with images')\n\n def test_next_image(self):\n self.pdf_converter.convert_all()\n ind = 0\n paths = []\n while True:\n path = self.pdf_converter.next_path_to_image()\n if len(path) == 0:\n break\n paths.append(path)\n ind += 1\n self.assertListEqual(sorted(self.next_image_answ_path), sorted(paths))\n self.assertEqual(3 + 4, ind)\n\n if os.path.isdir(self.out_path):\n rmtree(self.out_path)\n else:\n self.assertFalse('There is not folder with images')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Hedgehogues/HoChiMinh","sub_path":"tests/test_pdfconverter.py","file_name":"test_pdfconverter.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"27"} +{"seq_id":"38369673640","text":"import discord\nimport time\nfrom datetime import datetime\nfrom discord import ui\nfrom discord.ext import commands\nfrom elasticsearch import Elasticsearch\nfrom discord import app_commands\nfrom discord import SyncWebhook\nfrom keep_alive import keep_alive\n\nimport config\n\n\nclass SearchBot(commands.Bot):\n def __init__(self):\n intents = discord.Intents.default()\n intents.message_content = True\n\n super().__init__(command_prefix=commands.when_mentioned_or('!'), intents=intents)\n\n async def on_ready(self):\n print(f'Logged in as {self.user} (ID: {self.user.id})')\n print('------')\n\n async def setup_hook(self):\n # This copies the global commands over to your guild; the guild cache is\n # empty before login, so pass a Snowflake instead of calling get_guild().\n await self.tree.sync(guild=discord.Object(id=1154388870478168096))\n\n\nbot = SearchBot()\n\n\n@bot.tree.command(guild=discord.Object(id=1154388870478168096), description=\"Veri ara\")\n@app_commands.rename(search='arama')\n@app_commands.describe(\n search=\"Aramak istediğiniz şey\",\n)\nasync def search(interaction: discord.Interaction, search: str):\n elastic = Elasticsearch(config.api_url, api_key=config.api_key)\n resp = elastic.search(index=\"search-archive\", q=search)\n message = \"\"\n for hit in resp['hits']['hits']:\n message += f\" {hit['_source']['author']}| {hit['_source']['title']}:\\\n {hit['_source']['text']}\\\n\"\n await interaction.response.send_message(message)\n\n\n@bot.tree.command(guild=discord.Object(id=1154388870478168096), description=\"Arşive veri ekle\",\n name=\"add-data-to-archive\")\n@app_commands.rename(name=\"isim\", data=\"veri\")\n@app_commands.describe(\n name=\"Verinin ismi\",\n data=\"Veri\"\n)\nasync def add_to_archive(interaction: discord.Interaction, name: str, data: str):\n elastic = Elasticsearch(config.api_url, api_key=config.api_key)\n doc = {\n 'author': interaction.user.mention,\n 
'title': name,\n 'text': data,\n 'timestamp': time.mktime(datetime.now().timetuple()),\n \"_extract_binary_content\": True,\n \"_reduce_whitespace\": True,\n \"_run_ml_inference\": False\n }\n resp = elastic.index(index=\"search-archive\", document=doc)\n channel = interaction.client.get_channel(1154389453532569691)\n await channel.send(\n f\"`{name}`, {interaction.user.mention} tarafından arşive eklendi:\\\n## {name}\\\n{data}\")\n await interaction.response.send_message(f\"Durum: `{resp['result']}` | {name} arşive eklendi\")\n\n\nkeep_alive()\n\n\nbot.run(config.bot_token)\n","repo_name":"Desto2000/LWSSS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28358475572","text":"import streamlit as st\n\n\ndef convert_float_num(enter, base_i, base_f):\n int_part, irr_part = enter.split(\".\")\n n_base_int_num = convert_int_num(int_part, base_i, base_f)\n \n decimal_irr_num = 0\n for i, d in enumerate(irr_part):\n decimal_irr_num += base_dict_float[d]*(base_i**(-(i+1)))\n x =decimal_irr_num\n n_base_irr_num = \"\"\n while x!=0.0 and len(n_base_irr_num)<=10:\n di = x*base_f\n print(di)\n x = di%1\n d = int(di//1)\n n_base_irr_num += base_dict_int[d]\n str_ans = n_base_int_num +\".\"+ n_base_irr_num\n return str_ans\n\ndef convert_int_num(enter, base_i, base_f):\n init_num = int(enter, base_i)\n num = abs(init_num)\n ans = \"\"\n while num>=base_f:\n d = num % base_f\n num //=base_f\n ans= base_dict_int[d] + ans\n ans = base_dict_int[num] + ans\n if init_num<0:\n ans='-'+ans\n return ans\n\n\ndef final_num(enter=0, base_i=10, base_f=2):\n global base_dict_int, base_dict_float\n if \".\" in enter:\n return convert_float_num(enter, base_i, base_f)\n else: \n return convert_int_num(enter, base_i, base_f)\n\n \nst.title(\"Base Converter\")\nlist_base= ['2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\nlist_operations= ['*','/',\"+\",\"\"]\nst.header(\"Initial Base\")\nbase_i = st.selectbox(\"Initial Base\", [list_base.index(x)+2 for x in list_base]+[36])\n\n\n#if any(list_operations) in initial_num:\n# list_nums = initial_num.strip().split(list_operations) \n\nst.header(\"Final Base\")\nbase_f = st.selectbox(\"Final Base\", [list_base.index(x)+2 for x in list_base]+[36])\nbase_dict_int = {i:x for i,x in enumerate([\"0\",\"1\"]+list_base[:base_f])}\nbase_dict_float = {x:i for i,x in enumerate([\"0\",\"1\"]+list_base[:base_i])}\n\ninitial_num = st.text_input(\"Type the value you want to convert: \")\ninitial_num = initial_num.upper()\n\nconfirm_button = st.button(\"CALCULATE\")\nif confirm_button:\n try:\n st.write(final_num(initial_num, base_i, base_f))\n except ValueError:\n st.write(\"This value has digits that are not valid in the initial base, please check the input.\")\n except TypeError:\n st.write(\"This value has wrong characters, please check the input.\")\n \n\n\n\n","repo_name":"lucasdbr05/Lucas","sub_path":"baseconverter/base_converter.py","file_name":"base_converter.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28084881055","text":"\"\"\"\nThe full pipeline for generating simulated population reads for unit testing.\nUsage: python sim_pipeline.py [config file]\n\"\"\"\n\nimport subprocess\nimport os\nimport 
logging\nimport sys\nimport ConfigParser\nimport hyphy.hyphy_handler as hyphy_handler\nimport fasttree.fasttree_handler as fasttree_handler\nimport config.settings as settings\n\nsettings.setup_logging()\nLOGGER = logging.getLogger(__name__)\n\n\ndef get_path_str(path, pardir):\n \"\"\"\n If absolute path, then returns the path as is.\n If relative path, then returns absolute path of concatenated pardir/path\n :param str path: absolute or relative file or directory path\n :param str pardir: parent directory to concatenate to path if path is relative directory\n :return str: absolute resolved path\n \"\"\"\n if not os.path.isabs(path):\n return os.path.join(pardir, path)\n else:\n return path\n\n\n\nSECTION = \"sim\"\n\nconfig_file = sys.argv[1]\nconfig = ConfigParser.RawConfigParser()\nconfig.read(config_file)\n\nOUTDIR = os.path.dirname(config_file) # Output directory for simulated data\n\n\n\n# Generate Tree\nSEED = config.getint(SECTION, \"SEED\")\nFILENAME_PREFIX = config.get(SECTION, \"FILENAME_PREFIX\")\nNUM_CODON_SITES = config.getint(SECTION, \"NUM_CODON_SITES\")\nNUM_INDIV = config.getint(SECTION, \"NUM_INDIV\")\n\ntreefile = OUTDIR + os.sep + FILENAME_PREFIX + \".nwk\"\nrenamed_treefile = OUTDIR + os.sep + FILENAME_PREFIX + \".rename.nwk\"\nif os.path.exists(treefile) and os.path.getsize(treefile) and os.path.exists(renamed_treefile) and os.path.getsize(renamed_treefile):\n LOGGER.warn(\"Not regenerating trees {} and {}\".format(treefile, renamed_treefile) )\nelse:\n asg_driver_exe = os.path.abspath(os.path.dirname(__file__) + os.sep + \"asg_driver.py\")\n asg_driver_cmd = [\"python\", asg_driver_exe,\n OUTDIR + os.sep + FILENAME_PREFIX,\n str(NUM_INDIV),\n str(SEED)]\n LOGGER.debug(\"About to execute \" + \" \".join(asg_driver_cmd))\n subprocess.check_call(asg_driver_cmd, env=os.environ)\n LOGGER.debug(\"Finished execute \")\n\n\n # Relabel tree nodes to more manageable names. 
Reformat tree so that indelible can handle it.\n relabel_phylogeny_exe = os.path.abspath(os.path.dirname(__file__) + os.sep + \"relabel_phylogeny.py\")\n relabel_phylogeny_cmd = [\"python\", relabel_phylogeny_exe,\n treefile]\n LOGGER.debug(\"About to execute \" + \" \".join(relabel_phylogeny_cmd))\n subprocess.check_call(relabel_phylogeny_cmd, env=os.environ)\n LOGGER.debug(\"Finished execute \")\n\n\n\n# Use Indelible to create population sequences at different scaling factors (ie mutation rates)\nINDELIBLE_BIN_DIR = get_path_str(config.get(SECTION, \"INDELIBLE_BIN_DIR\"), OUTDIR)\nINDELIBLE_SCALING_RATES = config.get(SECTION, \"INDELIBLE_SCALING_RATES\")\n\nbatch_indelible_exe = os.path.abspath(os.path.dirname(__file__) + \"/indelible/batch_indelible.py\")\nindelible_cmd = [\"python\", batch_indelible_exe,\n renamed_treefile, # full filepath to tree\n INDELIBLE_SCALING_RATES,\n str(SEED), # random seed\n str(NUM_CODON_SITES), # number of codon sites in genome\n OUTDIR, # indelible output file directory\n FILENAME_PREFIX, # Indelible output filename prefix\n INDELIBLE_BIN_DIR] # indelible bin dir\nLOGGER.debug(\"About to execute \" + \" \".join(indelible_cmd))\nsubprocess.check_call(indelible_cmd, env=os.environ)\nLOGGER.debug(\"Finished execute \")\n\n\n# Create sample genome by concatenating slices of indelible alignments from different mutation rates.\nsample_genomes_fasta = OUTDIR + os.sep + \"mixed\" + os.sep + FILENAME_PREFIX + \".mixed.fasta\"\nsample_genomes_consensus_fasta = sample_genomes_fasta.replace(\".fasta\", \".consensus.fasta\")\nif (os.path.exists(sample_genomes_fasta) and os.path.getsize(sample_genomes_fasta) and\n os.path.exists(sample_genomes_consensus_fasta) and os.path.getsize(sample_genomes_consensus_fasta)):\n LOGGER.warn(\"Not regenerating combined sample genome fastas {} and {} \".format(sample_genomes_fasta, sample_genomes_consensus_fasta))\nelse:\n sample_genomes_exe = os.path.abspath(os.path.dirname(__file__) + os.sep + \"sample_genomes.py\")\n sample_genomes_cmd = [\"python\", sample_genomes_exe,\n INDELIBLE_SCALING_RATES, # comma delimited list of mutation scaling rates\n OUTDIR + os.sep + \"mixed\", # full filepath of directory for sample_genomes.py output\n FILENAME_PREFIX + \".mixed\", # prefix of sample_genomes.py population sequence output files\n str(SEED), # random seed\n str(NUM_CODON_SITES), # number codon sites\n OUTDIR, # Indelible output directory\n FILENAME_PREFIX] # INDELible output filename prefix\n LOGGER.debug(\"About to execute \" + \" \".join(sample_genomes_cmd))\n subprocess.check_call(sample_genomes_cmd, env=os.environ)\n LOGGER.debug(\"Finished execute \")\n\n\n# Simulate MiSeq reads from the population genomes.\nART_BIN_DIR = get_path_str(config.get(SECTION, \"ART_BIN_DIR\"), OUTDIR)\nART_QUAL_PROFILE_TSV1 = get_path_str(config.get(SECTION, \"ART_QUAL_PROFILE_TSV1\"), OUTDIR)\nART_QUAL_PROFILE_TSV2 = get_path_str(config.get(SECTION, \"ART_QUAL_PROFILE_TSV2\"), OUTDIR)\nART_FOLD_COVER = config.getint(SECTION, \"ART_FOLD_COVER\")\nART_MEAN_INSERT = config.getint(SECTION, \"ART_MEAN_INSERT\")\nART_STDEV_INSERT = config.getint(SECTION, \"ART_STDEV_INSERT\")\n\nPICARD_BIN_DIR = get_path_str(config.get(SECTION, \"PICARD_BIN_DIR\"), OUTDIR)\nBWA_BIN_DIR = get_path_str(config.get(SECTION, \"BWA_BIN_DIR\"), OUTDIR)\n\nPROCS = config.getint(SECTION, \"PROCS\")\n\n\n\nart_reads_dir = OUTDIR + os.sep + \"mixed\" + os.sep + \"reads\"\nart_reads_filename_prefix = FILENAME_PREFIX + \".mixed.reads\"\ngenerate_reads_exe = 
os.path.abspath(os.path.dirname(__file__) + os.sep + \"generate_reads.py\")\ngenerate_reads_cmd = [\"python\", generate_reads_exe,\n ART_BIN_DIR,\n ART_QUAL_PROFILE_TSV1,\n ART_QUAL_PROFILE_TSV2,\n sample_genomes_fasta,\n sample_genomes_consensus_fasta,\n art_reads_dir + os.sep + art_reads_filename_prefix, # dir and filename prefix of ART output\n str(ART_FOLD_COVER),\n str(ART_MEAN_INSERT),\n str(ART_STDEV_INSERT),\n PICARD_BIN_DIR,\n BWA_BIN_DIR,\n OUTDIR + os.sep + \"mixed\" + os.sep + \"aln\", # BWA output dir\n str(PROCS),\n str(SEED),\n OUTDIR + os.sep + \"mixed\" + os.sep + FILENAME_PREFIX + \".mixed.rates.csv\"] # Indelible mixed mutation rates csv\nLOGGER.debug(\"About to execute \" + \" \".join(generate_reads_cmd))\nsubprocess.check_call(generate_reads_cmd, env=os.environ)\nLOGGER.debug(\"Finished execute \")\n\n# For the sample_genomes populations, we lose the true tree branch lengths when we concatenate multiple populations at different scalings together.\n# Get FastTree to approximate tree for concatenated population sequences.\nFASTTREE_EXE = get_path_str(config.get(SECTION, \"FASTTREE_EXE\"), OUTDIR)\nsample_genomes_tree_fname = fasttree_handler.make_tree_repro(fasta_fname=sample_genomes_fasta, intree_fname=renamed_treefile,\n fastree_exe=FASTTREE_EXE)\n\n\n# Calculate HyPhy dN/dS for the full sample_genomes population fasta\nHYPHY_EXE = get_path_str(config.get(SECTION, \"HYPHY_EXE\"), OUTDIR)\nHYPHY_BASEPATH = get_path_str(config.get(SECTION, \"HYPHY_BASEPATH\"), OUTDIR)\nhyphy_handler.calc_dnds(codon_fasta_filename=sample_genomes_fasta, tree_filename=sample_genomes_tree_fname,\n hyphy_exe=HYPHY_EXE, hyphy_basedir=HYPHY_BASEPATH, threads=PROCS)\n\n\n","repo_name":"tnguyensanger/Umberjack","sub_path":"test/simulations/sim_pipeline.py","file_name":"sim_pipeline.py","file_ext":"py","file_size_in_byte":7917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36551833517","text":"# https://leetcode.com/problems/design-linked-list/\n\nclass Node:\n \n def __init__(self,val):\n self.val = val\n self.next = None\n \nclass MyLinkedList:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.head = None\n self.count = 0\n\n def get(self, index: int) -> int:\n \"\"\"\n Get the value of the index-th node in the linked list. If the index is invalid, return -1.\n \"\"\"\n if index >= self.count or index < 0:\n return -1\n curr = self.head\n k = 0\n while index > k:\n k += 1\n curr = curr.next\n return curr.val\n \n def addAtHead(self, val: int) -> None:\n \"\"\"\n Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\n \"\"\"\n if self.head is None:\n self.head = Node(val)\n else:\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node\n self.count += 1\n \n\n def addAtTail(self, val: int) -> None:\n \"\"\"\n Append a node of value val to the last element of the linked list.\n \"\"\"\n if self.count == 0:\n self.head = Node(val)\n else:\n curr = self.head\n while curr.next:\n curr = curr.next\n curr.next = Node(val)\n self.count += 1\n \n \n \n \n def addAtIndex(self, index: int, val: int) -> None:\n \"\"\"\n Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. 
If index is greater than the length, the node will not be inserted.\n \"\"\"\n if index == 0:\n self.addAtHead(val)\n elif index == (self.count):\n self.addAtTail(val)\n else:\n current = 0\n prev = None\n node = self.head\n newNode = Node(val)\n while current != index and node.next != None:\n prev = node\n node = node.next\n current += 1\n if current == index:\n prev.next = newNode\n newNode.next = node\n self.count += 1\n\t\t\t\t\t\n \n\n def deleteAtIndex(self, index: int) -> None:\n \"\"\"\n Delete the index-th node in the linked list, if the index is valid.\n \"\"\"\n if index == 0: # if head to delete\n tempNode = self.head.val #keeping temp variable to return that value\n newHead = self.head.next\n self.head = newHead\n self.count -= 1 #because of deletion\n return tempNode #return the deleted node's value\n else:\n node = self.head \n current = 0\n prev = None # prev is a previous node which will be the new node after deletion\n while current != index and node.next != None: # conds for deletion(anywhere and end)\n prev = node\n node = node.next\n current += 1\n if current == index: #we're at the node\n prev.next = node.next #pointing the prev node to deleted node's next node\n self.count -= 1 #because of deletion\n\t\t\n\t\t\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\n \n\n\n# Your MyLinkedList object will be instantiated and called as such:\n# obj = MyLinkedList()\n# param_1 = obj.get(index)\n# obj.addAtHead(val)\n# obj.addAtTail(val)\n# obj.addAtIndex(index,val)\n# obj.deleteAtIndex(index)","repo_name":"Velmurgan13/my_work_place","sub_path":"January/linked_list/design_kinked_list.py","file_name":"design_kinked_list.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35703953764","text":"import sys\n\nwith open('bakery.csv', encoding='utf-8') as bakery:\n if len(sys.argv) == 1 or len(sys.argv) == 2:\n start, stop = 1, sum(1 for line in bakery)\n if len(sys.argv) == 2:\n start = int(sys.argv[1])\n bakery.seek(0)\n elif len(sys.argv) == 3:\n start, stop = int(sys.argv[1]), int(sys.argv[2])\n else:\n print('args error')\n sys.exit(1)\n lines = (line.strip() for index, line in enumerate(bakery, start=1) if start <= index <= stop)\n print(*lines, sep='\\n')\n","repo_name":"kirill-grechin/python-basics","sub_path":"grechin_kirill_hw_6/task_6_7/show_sales.py","file_name":"show_sales.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71066947273","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset, DataLoader\nimport time\nimport deepdish as dd\nfrom networks import MLP\nimport os\nimport argparse\nimport copy\nEPS = 1e-15\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef main(args):\n if not os.path.exists(args.res_dir):\n os.mkdir(args.res_dir)\n if not os.path.exists(os.path.join(args.res_dir,args.trainsite)):\n os.mkdir(os.path.join(args.res_dir,args.trainsite))\n\n if not os.path.exists(args.model_dir):\n os.mkdir(args.model_dir)\n\n\n torch.manual_seed(args.seed)\n\n data1 = dd.io.load(os.path.join(args.vec_dir,'NYU_correlation_matrix.h5'))\n data2 = dd.io.load(os.path.join(args.vec_dir,'UM_correlation_matrix.h5'))\n data3 = dd.io.load(os.path.join(args.vec_dir,'USM_correlation_matrix.h5'))\n data4 = dd.io.load(os.path.join(args.vec_dir,'UCLA_correlation_matrix.h5'))\n\n x1 = 
torch.from_numpy(data1['data']).float()\n y1 = torch.from_numpy(data1['label']).long()\n x2 = torch.from_numpy(data2['data']).float()\n y2 = torch.from_numpy(data2['label']).long()\n x3 = torch.from_numpy(data3['data']).float()\n y3 = torch.from_numpy(data3['label']).long()\n x4 = torch.from_numpy(data4['data']).float()\n y4 = torch.from_numpy(data4['label']).long()\n\n if args.sepnorm:\n mean = x1.mean(0, keepdim=True)\n dev = x1.std(0, keepdim=True)\n x1 = (x1-mean)/dev\n mean = x2.mean(0, keepdim=True)\n dev = x2.std(0, keepdim=True)\n x2 = (x2 - mean) / dev\n mean = x3.mean(0, keepdim=True)\n dev = x3.std(0, keepdim=True)\n x3 = (x3 - mean) / dev\n mean = x4.mean(0, keepdim=True)\n dev = x4.std(0, keepdim=True)\n x4 = (x4 - mean) / dev\n\n else:\n if args.trainsite == 'NYU':\n mean = x1.mean(0, keepdim=True)\n dev = x1.std(0, keepdim=True)\n elif args.trainsite == 'UM':\n mean = x2.mean(0, keepdim=True)\n dev = x2.std(0, keepdim=True)\n elif args.trainsite == 'USM':\n mean = x3.mean(0, keepdim=True)\n dev = x3.std(0, keepdim=True)\n elif args.trainsite == 'UCLA':\n mean = x4.mean(0, keepdim=True)\n dev = x4.std(0, keepdim=True)\n x1 = (x1 - mean)/dev\n x2 = (x2 - mean) / dev\n x3 = (x3 - mean) / dev\n x4 = (x4 - mean) / dev\n\n\n datas = [TensorDataset(x1,y1),TensorDataset(x2,y2),TensorDataset(x3,y3),TensorDataset(x4,y4)]\n\n\n if args.trainsite == 'NYU':\n train_loader = DataLoader(datas[0], batch_size=args.batch_size, shuffle=True)\n elif args.trainsite == 'UM':\n train_loader = DataLoader(datas[1], batch_size=args.batch_size, shuffle=True)\n elif args.trainsite == 'USM':\n train_loader = DataLoader(datas[2], batch_size=args.batch_size, shuffle=True)\n elif args.trainsite == 'UCLA':\n train_loader = DataLoader(datas[3], batch_size=args.batch_size, shuffle=True)\n\n test_loader1 = DataLoader(datas[0], batch_size=args.test_batch_size1, shuffle=False)\n test_loader2 = DataLoader(datas[1], batch_size=args.test_batch_size2, shuffle=False)\n test_loader3 = DataLoader(datas[2], batch_size=args.test_batch_size3, shuffle=False)\n test_loader4 = DataLoader(datas[3], batch_size=args.test_batch_size4, shuffle=False)\n\n\n\n model = MLP(6105,args.dim,2).to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-2)\n print(model)\n nnloss = nn.NLLLoss()\n\n def train(data_loader,epoch):\n model.train()\n if epoch <= 50 and epoch % 20 == 0:\n for param_group1 in optimizer.param_groups:\n param_group1['lr'] = 0.5 * param_group1['lr']\n elif epoch > 50 and epoch % 20 == 0:\n for param_group1 in optimizer.param_groups:\n param_group1['lr'] = 0.5 * param_group1['lr']\n\n loss_all1 = 0\n\n for data, target in data_loader:\n optimizer.zero_grad()\n data = data.to(device)\n target = target.to(device)\n output1 = model(data)\n loss1 = nnloss(output1, target)\n loss1.backward()\n loss_all1 += loss1.item() * target.size(0)\n optimizer.step()\n\n return loss_all1 / (len(data_loader.dataset)), model\n\n\n def test(data_loader,train=False):\n model.eval()\n test_loss = 0\n correct = 0\n outputs = []\n preds = []\n targets = []\n for data, target in data_loader:\n data = data.to(device)\n targets.append(target[0].detach().numpy())\n target = target.to(device)\n output = federated_model(data)\n outputs.append(output.detach().cpu().numpy())\n test_loss += nnloss(output, target).item() * target.size(0)\n pred = output.data.max(1)[1]\n preds.append(pred.detach().cpu().numpy())\n correct += pred.eq(target.view(-1)).sum().item()\n\n test_loss /= len(data_loader.dataset)\n correct /= 
len(data_loader.dataset)\n if train:\n print('Train set: Average loss: {:.4f}, Average acc: {:.4f}'.format(test_loss,correct))\n else:\n print('Test set: Average loss: {:.4f}, Average acc: {:.4f}'.format(test_loss,correct))\n return test_loss, correct, targets, outputs, preds\n\n for epoch in range(args.epochs):\n start_time = time.time()\n print(f\"Epoch Number {epoch + 1}\")\n l1,federated_model = train(train_loader,epoch)\n print(' L1 loss: {:.4f}'.format(l1))\n print('===NYU===')\n _, acc1, targets1, outputs1, preds1 = test(test_loader1, train=False)\n print('===UM===')\n _, acc2, targets2, outputs2, preds2 = test(test_loader2, train=False)\n print('===USM===')\n _, acc3, targets3, outputs3, preds3 = test(test_loader3, train=False)\n print('===UCLA===')\n _, acc4, targets4, outputs4, preds4 = test(test_loader4, train=False)\n total_time = time.time() - start_time\n print('Communication time over the network', round(total_time, 2), 's\\n')\n\n model_wts = copy.deepcopy(model.state_dict())\n torch.save(model_wts, os.path.join(args.model_dir, args.trainsite +'.pth'))\n dd.io.save(os.path.join(args.res_dir, args.trainsite, 'NYU.h5'),\n {'outputs': outputs1, 'preds': preds1, 'targets': targets1})\n dd.io.save(os.path.join(args.res_dir, args.trainsite, 'UM.h5'),\n {'outputs': outputs2, 'preds': preds2, 'targets': targets2})\n dd.io.save(os.path.join(args.res_dir, args.trainsite,'USM.h5'),\n {'outputs': outputs3, 'preds': preds3, 'targets': targets3})\n dd.io.save(os.path.join(args.res_dir, args.trainsite,'UCLA.h5'),\n {'outputs': outputs4, 'preds': preds4, 'targets': targets4})\n\n#==========================================================================\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # specify for dataset site\n parser.add_argument('--trainsite', type=str, default='NYU', help='the site used for training')\n # do not need to change\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--lr', type=float, default=1e-5)\n parser.add_argument('--clip', type=float, default=2.0, help='gradient clip')\n parser.add_argument('--dim', type=int, default=8,help='hidden dim of MLP')\n parser.add_argument('-bs','--batch_size', type=int, default=250, help='training batch size')\n parser.add_argument('-tbs1', '--test_batch_size1', type=int, default=145, help='NYU test batch size')\n parser.add_argument('-tbs2', '--test_batch_size2', type=int, default=265, help='UM test batch size')\n parser.add_argument('-tbs3', '--test_batch_size3', type=int, default=205, help='USM test batch size')\n parser.add_argument('-tbs4', '--test_batch_size4', type=int, default=85, help='UCLA test batch size')\n parser.add_argument('--sepnorm', type=bool, default=False, help='normalization method')\n parser.add_argument('--overlap', type=bool, default=True, help='augmentation method')\n parser.add_argument('--res_dir', type=str, default='./result/cross_overlap')\n parser.add_argument('--model_dir', type=str, default='./model/cross_overlap')\n\n\n args = parser.parse_args()\n assert args.trainsite in ['NYU', 'UM', 'USM', 'UCLA']\n main(args)","repo_name":"xxlya/Fed_ABIDE","sub_path":"cross.py","file_name":"cross.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"27"} +{"seq_id":"549478950","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport torch\nimport torch.utils.data as Data\nimport torch.nn as nn\nfrom 
torch.autograd import Variable\nfrom sklearn.preprocessing import LabelBinarizer, scale\nfrom sklearn.metrics import accuracy_score\n# import\nuse_cuda = torch.cuda.is_available()\nprint(use_cuda)\n\n\ndef read_data(file):\n x = pd.read_csv(file, header=None)\n x = x.values\n y = x[:, 0]\n x = np.delete(x, 0, axis=1)\n x = scale(x)\n return x, y\n\n\nclass Neural_Network(nn.Module):\n def get_activation(self, activation):\n activations = {\n 'relu': nn.ReLU(),\n 'leakyrelu': nn.LeakyReLU(),\n 'sigmoid': nn.Sigmoid(),\n 'tanh': nn.Tanh(),\n 'softmax': nn.Softmax()\n }\n\n return activations[activation]\n\n def __init__(self, activation, inp_size, hidden_layers, output_size):\n super(Neural_Network, self).__init__()\n nhl = len(hidden_layers)\n self.layers = []\n out_activation = 'sigmoid'\n if nhl == 0:\n self.layers.append(nn.Linear(inp_size, output_size))\n self.layers.append(self.get_activation(out_activation))\n else:\n self.layers.append(nn.Linear(inp_size, hidden_layers[0]))\n self.layers.append(self.get_activation(activation))\n for i in range(nhl - 1):\n self.layers.append(nn.Linear(hidden_layers[i], hidden_layers[i + 1]))\n self.layers.append(self.get_activation(activation))\n self.layers.append(nn.Linear(hidden_layers[-1], output_size))\n self.layers.append(self.get_activation(out_activation))\n\n self.layers = nn.Sequential(*self.layers)\n\n def forward(self, x):\n out = x\n for layer in self.layers:\n out = layer(out)\n return out\n\n\ndef predict(net, test_x):\n test_x = torch.from_numpy(test_x).type(torch.FloatTensor)\n\n pred = []\n for i in (range(0, len(test_x), 100)):\n x = test_x[i: i + 100]\n x = Variable(x, volatile=False)\n if use_cuda:\n x = x.cuda()\n outputs = net(x)\n cur_pred = torch.max(outputs, dim=1)[1].data.cpu().numpy().tolist()\n pred.extend(cur_pred)\n return pred\n\n\ndef train_and_predict(net, train_x, train_y, test_x, test_y, lr0, batch_size, num_epochs):\n \"\"\" Use Mean Squared Error as Loss \"\"\"\n criterion = nn.MSELoss()\n\n \"\"\" USE SGD as the optimizer \"\"\"\n optimizer = torch.optim.SGD(net.parameters(), lr=lr0)\n\n \"\"\" Scheduler to dynamically change the learning rate of SGD optimizer \"\"\"\n def hc_lambda(epoch):\n return 1.0 / ((1 + epoch)**(0.5))\n\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, hc_lambda)\n\n \"\"\" converting training data to torch tensors \"\"\"\n train_x = torch.from_numpy(train_x).type(torch.FloatTensor)\n train_y = torch.from_numpy(train_y).type(torch.FloatTensor)\n\n \"\"\" Creating Dataset Loader \"\"\"\n dataset = Data.TensorDataset(train_x, train_y)\n train_loader = Data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)\n\n \"\"\" Training \"\"\"\n prev_epoch_loss = 0\n\n for epoch in range(num_epochs):\n\n net.train() # Set model mode to train\n gold, pred, losses = [], [], []\n\n for i, (x, y) in enumerate(train_loader):\n # print(\"\\r%d\" % (i), end=\"\")\n x, y = Variable(x), Variable(y)\n\n if use_cuda:\n x, y = x.cuda(), y.cuda()\n\n outputs = net(x)\n loss = criterion(outputs, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n cur_gold = torch.max(y, dim=1)[1].data.cpu().numpy().tolist()\n cur_pred = torch.max(outputs, dim=1)[1].data.cpu().numpy().tolist()\n gold.extend(cur_gold)\n pred.extend(cur_pred)\n losses.extend([loss.data.cpu().numpy()])\n\n test_pred = predict(net, test_x)\n print(\"\\rEpoch: %d Train Accuracy: %f Loss: %f Test Accuracy %f\" % (epoch, accuracy_score(gold, pred), np.mean(losses), accuracy_score(test_y, test_pred)), end=\"\")\n 
epoch_loss = np.mean(losses)\n        # print(\" \", epoch_loss, prev_epoch_loss)\n        if epoch_loss > prev_epoch_loss:\n            scheduler.step()  # reduce learning rate\n\n        prev_epoch_loss = epoch_loss\n\n    test_pred = predict(net, test_x)\n    return test_pred\n\n\nif __name__ == '__main__':\n\n    part = sys.argv[1]\n    train = sys.argv[2]\n    test = sys.argv[3]\n    out = sys.argv[4]\n    if part == 'a':\n        batch_size = int(sys.argv[5])\n        lr = float(sys.argv[6])\n        activation = sys.argv[7]\n        hidden_layers = list(map(int, sys.argv[8:]))\n    else:\n        batch_size = 100\n        lr = 0.1\n        activation = 'relu'\n        hidden_layers = []\n\n    train_x, train_y = read_data(train)\n    # train_x = scale(train_x)\n    train_x = train_x / 255\n    lb = LabelBinarizer()\n    lb.fit([i for i in range(46)])\n    train_y = lb.transform(train_y)\n    test_x, test_y = read_data(test)\n\n    net = Neural_Network(activation, 1024, hidden_layers, 46)\n    pred = train_and_predict(net, train_x, train_y, test_x, test_y, lr, batch_size, 200)\n\n    with open(out, \"w\") as f:\n        for each in pred:\n            f.write(str(each) + \"\\n\")\n","repo_name":"hthuwal/ta-iitd","sub_path":"fall-2018/A2/model_solutions/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"10859240012","text":"# -*- coding: utf-8 -*\nimport argparse\n# import os\n\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\n\n\nfrom twisted.internet import reactor\nfrom scrapy.crawler import Crawler\nfrom scrapy.utils.project import get_project_settings\n\n#--the spiders\nfrom myairlease.spiders.fleetintel_list import FleetintelListSpider\nfrom myairlease.spiders.Available_assets import AvailableAssetsSpider\n#--the spiders\n\n\ndes='''\\\nVan Truong.\nWeb Crawler - http://myairlease.com/\n\nThis tool will scrape the information at\n 1) http://www.myairlease.com/available/fleetintel_list\n 2) http://www.myairlease.com/available/available_for_lease\n 3) both links\n\nand generate the corresponding CSV file\n'''\n\ndef check_argument(value):\n\n    try:\n        value = int(value)\n\n        # membership test instead of identity (\"is\") comparison on ints\n        if value not in (1, 2, 3):\n            raise ValueError\n\n    except ValueError:\n        raise argparse.ArgumentTypeError(\"%s is invalid. Only 1, 2 or 3 are allowed.\" % value)\n    \n    return value \n\ndef spider_closing(self, spider):\n    log.msg(\"Spider closed: %s\" % spider, level=log.INFO)\n    self.running_crawlers.remove(spider)\n    if not self.running_crawlers:\n        reactor.stop()\n\nparser = argparse.ArgumentParser(\n    prog='myairlease_scraper.py',\n    formatter_class=argparse.RawDescriptionHelpFormatter,\n    description=des)\n\nparser.add_argument(\n    'link', \n    metavar='Nº_link',\n    nargs=1, \n    help='Define which link you want to scrape (only 1, 2 or 3).',\n    type=check_argument\n    )\n\nargs = parser.parse_args()\n\n\nif args.link[0] == 1:\n    spider = FleetintelListSpider()\n    process = CrawlerProcess( get_project_settings() )\n    process.crawl(spider)\n    process.start()\nelif args.link[0] == 2:\n    spider = AvailableAssetsSpider()\n    process = CrawlerProcess( get_project_settings() )\n    process.crawl(spider)\n\n    process.start()\nelif args.link[0] == 3:\n    process = CrawlerProcess( get_project_settings() )\n    process.crawl(FleetintelListSpider()) \n    process.crawl(AvailableAssetsSpider())\n    process.start()\n\n\"\"\"\n# -------------------------------------------\nspider = getStringsSpider()\nprocess = CrawlerProcess( get_project_settings() )\n\nprocess.crawl(spider, toTranslate=args.toTranslate, translated=args.translated, nameOfFile=args.file )\nprocess.start()\n# -------------------------------------------\n\"\"\"","repo_name":"dianjuar/myairlease_scraper","sub_path":"myairlease_scraper.py","file_name":"myairlease_scraper.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"71332733832","text":"import sys\nsys.stdin = open('binary_in.txt','r')\nT = int(input())\nfor tc in range(1, T+1):\n    len_target, target = map(str, input().split())\n    over_ten = {'A': '1010', 'B': '1011', 'C': '1100', 'D': '1101', 'E': '1110', 'F': '1111'}\n    ans = ''\n    for num in target:\n        if num.isdigit():\n            num = int(num)\n            lst = []\n            while 0 < num:  # until the quotient becomes 0\n                lst.append(num % 2)\n                num = num // 2\n            # pad with the quotient (0) if needed\n            # after padding, the list must be reversed\n            if len(lst) < 4:\n                while len(lst) < 4:\n                    lst.append(num//2)\n            lst = lst[::-1]\n            ans += ''.join(list(map(str, lst)))\n        else:  # 10 or above (i.e. a hex letter)\n            ans += over_ten[num]\n\n\n\n    print(f'#{tc} {ans}')","repo_name":"S4lTYD0G/TIL","sub_path":"0823/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"14784778717","text":"import os\nimport webob\n\nclass Templates(object):\n    \"\"\"\n    Helper class which retrieves templates out of a skin. (See\n    mod:``happy.skin``.) The helper class is ignorant of any specific\n    template implementations and relies on factory callables that are passed\n    in to it in order to create templates. A Templates object is created\n    with a `skin` and a single default template factory. Additional template\n    factories can be registered for specific file extensions. 
For template\n    files that match a registered file extension, the registered factory\n    will be used instead of the default one.\n\n    A template factory has the following signature::\n\n        def factory(path_to_template_file):\n            return template_callable\n\n    The returned callable must have the following signature::\n\n        def template_callable(**kw):\n            return rendered_template_as_string\n\n    Where `kw` are the arbitrary arguments used to render the template.\n\n    \"\"\"\n    Response = webob.Response # override point\n\n    def __init__(self, skin, default_factory):\n        self.skin = skin\n        self.default_factory = default_factory\n        self.factories = {}\n        self._cache = {}\n\n    def register_factory(self, extension, factory):\n        \"\"\"\n        Register a factory for a given file extension.\n        \"\"\"\n        self.factories[extension] = factory\n\n    def __getitem__(self, fname):\n        \"\"\"\n        Allow dictionary-like access to templates::\n\n            templates = Templates(skin, factory)\n            template = templates['templates/homepage.pt']\n\n        \"\"\"\n        template = self._cache.get(fname, None)\n        if template is None:\n            resource = self.skin.lookup(fname)\n            if resource is None:\n                raise KeyError(fname)\n            extension = os.path.splitext(fname)[1].lstrip('.')\n            factory = self.factories.get(extension, self.default_factory)\n            template = factory(resource.abspath())\n            self._cache[fname] = template\n        return template\n\n    def render(self, fname, **kw):\n        \"\"\"\n        Render template to a string and return the string.\n        \"\"\"\n        return self[fname](**kw)\n\n    def render_to_response(self, fname, **kw):\n        \"\"\"\n        Renders template to a response object. Content-type is set to\n        'text/html'. If template rendering returns a `unicode` object,\n        response will be encoded as UTF-8. If template returns a `str` object,\n        however, no attempt will be made to guess the encoding. 
Using\n templates that return `unicode` objects is recommended.\n \"\"\"\n response = self.Response()\n response.content_type = 'text/html'\n\n body = self.render(fname, **kw)\n if isinstance(body, unicode):\n body = body.encode('UTF-8')\n response.charset = 'UTF-8'\n else:\n response.charset = None\n response.body = body\n\n return response\n\n","repo_name":"chrisrossi/happy","sub_path":"happy/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"43760896323","text":"import numpy as np\nimport sys\nimport os\nimport re\nimport logging\nimport pickle\nlogging.basicConfig(stream=sys.stdout)\nmcy_logger = logging.getLogger('mathematica')\n\nfrom cymetric.pointgen.pointgen_mathematica import PointGeneratorMathematica, ToricPointGeneratorMathematica\nfrom cymetric.pointgen.nphelper import prepare_dataset, prepare_basis_pickle\n\nimport tensorflow as tf\nimport tensorflow.keras as tfk\n\ntf.get_logger().setLevel('ERROR')\n\nfrom cymetric.models.tfmodels import PhiFSModel, MultFSModel, FreeModel, MatrixFSModel, AddFSModel, PhiFSModelToric, MatrixFSModelToric\nfrom cymetric.models.tfhelper import prepare_tf_basis, train_model\nfrom cymetric.models.callbacks import SigmaCallback, KaehlerCallback, TransitionCallback, RicciCallback, VolkCallback, AlphaCallback\nfrom cymetric.models.metrics import SigmaLoss, KaehlerLoss, TransitionLoss, RicciLoss, VolkLoss\n\nfrom wolframclient.language import wl\nfrom wolframclient.serializers import export as wlexport\nfrom wolframclient.deserializers import WXFConsumer, binary_deserialize, WXFConsumerNumpy\nComplex = complex\n\n\nclass wlConsumer(WXFConsumer):\n def build_function(self, head, args, **kwargs):\n # return a built in complex if head is Complex and argument length is 2.\n if head == wl.Complex and len(args) == 2:\n return complex(*args)\n elif head == wl.NumericArray:\n return [np.array(x) for x in args[0]] \n # otherwise delegate to the super method (default case).\n else:\n return super().build_function(head, args, **kwargs)\n \n\ndef point_vec_to_complex(p):\n if len(p) == 0: \n return np.array([[]])\n p = np.array(p)\n plen = len(p[0])//2\n return p[:, :plen] + 1.j*p[:, plen:]\n\n\ndef to_numpy_arrays(my_args):\n args_dict = {}\n for k, v in my_args.items():\n if isinstance(v, list) or isinstance(v, tuple):\n if k == 'monomials' or k == 'coeffs': \n args_dict[k] = [np.array(x) for x in v]\n else:\n args_dict[k] = np.array(v)\n elif type(v) == type(wl.NumericArray([0])): \n args_dict[k] = binary_deserialize(wlexport(v, target_format='wxf'), consumer=wlConsumer())\n else:\n args_dict[k] = v\n \n args_dict['logger_level'] = eval(args_dict['logger_level'])\n return args_dict\n\n\ndef generate_points(my_args):\n global mcy_logger\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(\"Using output directory {}\".format(os.path.abspath(args['Dir'])))\n \n # print ambient space\n amb_str = \"\"\n for d in args['ambient_dims']:\n amb_str += \"P^{} x \".format(d)\n amb_str = amb_str[:-2]\n mcy_logger.debug(\"Ambient space: {}\".format(amb_str))\n mcy_logger.debug(\"Kahler moduli: {}\".format(args['KahlerModuli']))\n\n args_str = re.sub('\\],\\n', '], ', str(args))\n args_str = re.sub(' +', ' ', str(args_str))\n mcy_logger.debug(args_str)\n \n # need to specify monomials and their coefficients\n if args['monomials'] == [] or args['coeffs'] == []:\n raise ValueError(\"You need to 
specify both the monomials and their coefficients\")\n\n args['monomials'] = [x.astype(int) for x in args['monomials']]\n args['coeffs'] = [x.astype(complex) for x in args['coeffs']]\n \n point_gen = PointGeneratorMathematica(args['monomials'], args['coeffs'], args['KahlerModuli'], args['ambient_dims'], precision=args['Precision'], point_file_path=args['point_file_path'], selected_t=args['selected_t'])\n\n # save point generator to pickle\n mcy_logger.info(\"Saving point generator to {:}\".format(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\")))\n with open(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\"), 'wb') as hnd:\n pickle.dump(point_gen, hnd)\n \n kappa = prepare_dataset(point_gen, args['num_pts'], args['Dir'], normalize_to_vol_j=True, ltails=0)\n mcy_logger.info(\"Computing derivatives of J_FS, Omega, ...\")\n prepare_basis_pickle(point_gen, args['Dir'], kappa)\n mcy_logger.debug(\"done\")\n\n\ndef generate_points_toric(my_args):\n global mcy_logger\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(\"Using output directory {}\".format(os.path.abspath(args['Dir'])))\n \n # print ambient space\n args_str = re.sub('\\], \\n', '], ', str(args))\n args_str = re.sub(' +', ' ', str(args_str))\n mcy_logger.debug(args_str)\n\n with open(os.path.join(args['Dir'], 'toric_data.pickle'), 'rb') as f:\n toric_data = pickle.load(f)\n for key in toric_data:\n mcy_logger.debug(key)\n mcy_logger.debug(toric_data[key])\n\n point_gen = ToricPointGeneratorMathematica(toric_data, precision=args['Precision'], verbose=args['Verbose'], point_file_path=args['point_file_path'])\n\n # save point generator to pickle\n mcy_logger.info(\"Saving point generator to {:}\".format(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\")))\n with open(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\"), 'wb') as hnd:\n pickle.dump(point_gen, hnd)\n \n kappa = prepare_dataset(point_gen, args['num_pts'], args['Dir'], normalize_to_vol_j=True, ltails=0)\n mcy_logger.info(\"Computing derivatives of J_FS, Omega, ...\")\n prepare_basis_pickle(point_gen, args['Dir'], kappa)\n mcy_logger.debug(\"done\")\n \n\ndef train_NN(my_args):\n global mcy_logger\n \n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(args)\n \n # get info of generated points\n data = np.load(os.path.join(args['Dir'], 'dataset.npz'))\n BASIS = prepare_tf_basis(pickle.load(open(os.path.join(args['Dir'], 'basis.pickle'), 'rb')))\n kappa = BASIS['KAPPA'].numpy()\n\n # load toric data if exists/needed\n toric_data = None\n if args['Model'] == 'PhiFSToric':\n if os.path.exists(args['toric_data_path']):\n toric_data = pickle.load(open(args['toric_data_path'], 'rb'))\n else:\n mcy_logger.error(\"Model set to {}, but {} with toric data not found.\".format(args['Model'], args['toric_data_path']))\n\n # force GPU disable if argument is set:\n if args[\"DisableGPU\"]:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n # check whether Keras is running on GPU or CPU:\n tf_devices = \"GPU\"\n if len(tf.config.list_physical_devices('GPU')) == 0:\n tf_devices = \"CPU\"\n mcy_logger.debug(\"Using {} for computation.\".format(tf_devices))\n \n # extract architecture for NN\n nfold = tf.cast(BASIS['NFOLD'], dtype=tf.float32).numpy()\n n_in = data['X_train'].shape[1]\n n_hiddens, acts = args[\"HiddenLayers\"], args[\"ActivationFunctions\"]\n n_out = nfold**2\n if args['Model'] == 'PhiFS' or args['Model'] == 'PhiFSToric':\n 
args['PrintLosses'][1] = False # Kahler loss is automatically 0\n args['PrintMeasures'][1] = False # Kahler loss is automatically 0\n n_out = 1\n \n # callbacks\n if args['EvaluateModel']:\n scb = SigmaCallback((data['X_val'], data['y_val']))\n kcb = KaehlerCallback((data['X_val'], data['y_val']))\n tcb = TransitionCallback((data['X_val'], data['y_val']))\n rcb = RicciCallback((data['X_val'], data['y_val']), data['val_pullbacks'])\n volkck = VolkCallback((data['X_val'], data['y_val']))\n cb_list = [scb, kcb, tcb, rcb, volkck]\n cb_list = [x for x, y in zip(cb_list, args['PrintMeasures']) if y]\n else:\n cb_list = []\n \n # metrics\n args['PrintLosses'][3] = False # Ricci loss not computed at the moment\n cmetrics = [SigmaLoss(), KaehlerLoss(), TransitionLoss(), RicciLoss(), VolkLoss()]\n cmetrics = [x for x, y in zip(cmetrics, args['PrintLosses']) if y]\n \n # build model\n if args['Model'] == 'PhiFS' or args['Model'] == 'PhiFSToric':\n model = tf.keras.Sequential()\n model.add(tfk.Input(shape=(n_in,)))\n for n_hidden, act in zip(n_hiddens, acts):\n model.add(tfk.layers.Dense(n_hidden, activation=act))\n model.add(tfk.layers.Dense(n_out, use_bias=False))\n# # reproduces the FS Kahler potential for the bicubic\n# import math\n# def reorder_input(x):\n# x1 = x[:,0:x.shape[-1]//4]\n# x2 = x[:,x.shape[-1]//4:2*x.shape[-1]//4]\n# x3 = x[:,2*x.shape[-1]//4:3*x.shape[-1]//4]\n# x4 = x[:,3*x.shape[-1]//4:]\n# return tf.keras.layers.concatenate([x1,x3], axis=1), tf.keras.layers.concatenate([x2,x4], axis=1)\n# \n# inp1 = tf.keras.layers.Input(shape=(12,))\n# in1, in2 = tf.keras.layers.Lambda(reorder_input)(inp1)\n# x1 = tf.keras.layers.dot([in1, in1], axes=-1)\n# x2 = tf.keras.layers.dot([in2, in2], axes=-1)\n# for n_hidden, act in zip(n_hiddens, acts):\n# x1 = tf.keras.layers.Dense(n_hidden, activation=act)(x1)\n# x2 = tf.keras.layers.Dense(n_hidden, activation=act)(x2)\n# x1 = tfk.layers.Dense(n_out, use_bias=False, activation='sigmoid')(x1)\n# x2 = tfk.layers.Dense(n_out, use_bias=False, activation='sigmoid')(x2)\n# x1 = tf.math.log(x1)\n# x2 = tf.math.log(x2)\n# x = tf.keras.layers.add([0.1/math.pi * x1, 0.1/math.pi * x2])\n# x = tfk.layers.Dense(n_out)(0.0000000001*x)\n# \n# model = tf.keras.models.Model(inputs=[inp1], outputs=x)\n else:\n model = tf.keras.Sequential()\n model.add(tfk.Input(shape=(n_in,)))\n for n_hidden, act in zip(n_hiddens, acts):\n model.add(tfk.layers.Dense(n_hidden, activation=act))\n model.add(tfk.layers.Dense(n_out))\n \n mcy_logger.debug(\"Using model {}\".format(args['Model']))\n if args['Model'] == 'PhiFS':\n fsmodel = PhiFSModel(model, BASIS, alpha=args['Alphas'])\n elif args['Model'] == 'PhiFSToric':\n fsmodel = PhiFSModelToric(model, BASIS, alpha=args['Alphas'], toric_data=toric_data)\n elif args['Model'] == 'MultFS':\n fsmodel = MultFSModel(model, BASIS, alpha=args['Alphas'])\n elif args['Model'] == 'MatrixMultFS':\n fsmodel = MatrixFSModel(model, BASIS, alpha=args['Alphas'])\n elif args['Model'] == 'MatrixMultFSToric':\n fsmodel = MatrixFSModelToric(model, BASIS, alpha=args['Alphas'], toric_data=toric_data)\n elif args['Model'] == 'AddFS':\n fsmodel = AddFSModel(model, BASIS, alpha=args['Alphas'])\n elif args['Model'] == 'Free':\n fsmodel = FreeModel(model, BASIS, alpha=args['Alphas'])\n else:\n mcy_logger.error(\"{} is not a recognized option for a model\".format(args['Model']))\n return {}\n optimizer = tfk.optimizers.Adam(learning_rate=args['LearningRate'])\n model.summary(print_fn=mcy_logger.debug)\n\n # train model\n fsmodel, training_history = 
train_model(fsmodel, data, optimizer=optimizer, epochs=args['Epochs'], batch_sizes=args['BatchSizes'], verbose=2, custom_metrics=cmetrics, callbacks=cb_list)\n \n # save trained model\n fsmodel.model.save(os.path.join(args['Dir'], 'model'))\n \n return training_history\n\n\ndef get_g(my_args):\n global mcy_logger\n my_args = dict(my_args)\n pts = my_args['points']\n del my_args['points']\n \n # parse arguments\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(args)\n\n # load toric data if exists/needed\n toric_data = None\n if args['Model'] == 'PhiFSToric':\n if os.path.exists(args['toric_data_path']):\n toric_data = pickle.load(open(args['toric_data_path'], 'rb'))\n else:\n mcy_logger.error(\"Model set to {}, but {} with toric data not found.\".format(args['Model'], args['toric_data_path']))\n \n BASIS = prepare_tf_basis(pickle.load(open(os.path.join(args['Dir'], 'basis.pickle'), 'rb')))\n kappa = BASIS['KAPPA'].numpy()\n pts = tf.convert_to_tensor(pts, dtype=tf.float32)\n model = tfk.models.load_model(os.path.join(args['Dir'], 'model'))\n if args['Model'] == 'PhiFS':\n fsmodel = PhiFSModel(model, BASIS)\n elif args['Model'] == 'PhiFSToric':\n fsmodel = PhiFSModelToric(model, BASIS, toric_data=toric_data)\n elif args['Model'] == 'MultFS':\n fsmodel = MultFSModel(model, BASIS)\n elif args['Model'] == 'MatrixMultFS':\n fsmodel = MatrixFSModel(model, BASIS)\n elif args['Model'] == 'MatrixMultFSToric':\n fsmodel = MatrixFSModelToric(model, BASIS, toric_data=toric_data)\n elif args['Model'] == 'AddFS':\n fsmodel = AddFSModel(model, BASIS)\n elif args['Model'] == 'Free':\n fsmodel = FreeModel(model, BASIS)\n else:\n mcy_logger.error(\"{} is not a recognized option for a model\".format(args['Model']))\n return []\n\n gs = fsmodel(pts)\n return gs.numpy()\n\n\ndef get_g_fs(my_args):\n global mcy_logger\n my_args = dict(my_args)\n pts = np.array(point_vec_to_complex(my_args['points']), dtype=np.complex128)\n del my_args['points']\n \n # parse arguments\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(args)\n \n with open(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\"), 'rb') as hnd:\n point_gen = pickle.load(hnd)\n \n pbs = point_gen.pullbacks(pts)\n ts = args['ts'] if args['ts'] != [] else point_gen.kmoduli\n fs = point_gen.fubini_study_metrics(pts, vol_js=ts)\n fs_pbs = np.einsum('xai,xij,xbj->xab', pbs, fs, np.conj(pbs))\n \n return fs_pbs\n\n\ndef get_kahler_potential(my_args):\n global mcy_logger\n my_args = dict(my_args)\n pts = my_args['points']\n del my_args['points']\n \n # parse arguments\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(args)\n\n # load toric data if exists/needed\n toric_data = None\n if args['Model'] == 'PhiFSToric':\n if os.path.exists(args['toric_data_path']):\n toric_data = pickle.load(open(args['toric_data_path'], 'rb'))\n else:\n mcy_logger.error(\"Model set to {}, but {} with toric data not found.\".format(args['Model'], args['toric_data_path']))\n \n BASIS = prepare_tf_basis(pickle.load(open(os.path.join(args['Dir'], 'basis.pickle'), 'rb')))\n pts = tf.convert_to_tensor(pts, dtype=tf.float32)\n model = tfk.models.load_model(os.path.join(args['Dir'], 'model'))\n if args['Model'] == 'PhiFS':\n fsmodel = PhiFSModel(model, BASIS)\n elif args['Model'] == 'PhiFSToric':\n fsmodel = PhiFSModelToric(model, BASIS, toric_data=toric_data)\n else:\n mcy_logger.error(\"Calculating the Kahler potential for model {} is 
not supported\".format(args['Model']))\n return []\n\n ks = fsmodel.get_kahler_potential(pts)\n return ks.numpy()\n\n \ndef get_weights(my_args):\n global mcy_logger\n my_args = dict(my_args)\n pts = point_vec_to_complex(my_args['points'])\n del my_args['points']\n \n # parse arguments\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(args)\n \n with open(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\"), 'rb') as hnd:\n point_gen = pickle.load(hnd)\n return point_gen.point_weight(pts, normalize_to_vol_j=True)\n\n\ndef get_omegas(my_args):\n global mcy_logger\n my_args = dict(my_args)\n pts = point_vec_to_complex(my_args['points'])\n del my_args['points']\n \n # parse arguments\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(args)\n \n with open(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\"), 'rb') as hnd:\n point_gen = pickle.load(hnd)\n \n omega = point_gen.holomorphic_volume_form(pts)\n return omega * np.conj(omega)\n\n\ndef get_pullbacks(my_args):\n global mcy_logger\n my_args = dict(my_args)\n pts = point_vec_to_complex(my_args['points'])\n del my_args['points']\n \n # parse arguments\n args = to_numpy_arrays(my_args)\n mcy_logger.setLevel(args['logger_level'])\n mcy_logger.debug(args)\n \n with open(os.path.join(os.path.abspath(args['Dir']), \"point_gen.pickle\"), 'rb') as hnd:\n point_gen = pickle.load(hnd)\n \n return point_gen.pullbacks(pts)\n","repo_name":"pythoncymetric/cymetric","sub_path":"cymetric/wolfram/mathematicalib.py","file_name":"mathematicalib.py","file_ext":"py","file_size_in_byte":16199,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"27"} +{"seq_id":"33294547564","text":"# -*- coding: utf-8 -*-\n__author__ = 'caden'\n\"\"\"\ndescription:定位元素的通用方法,https://www.cnblogs.com/dwdw/p/9998660.html\n\"\"\"\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom utils.log import logger\n\n\nclass CommonLocation:\n\n def __init__(self, driver):\n self.driver = driver\n self.timeout = TIME_OUT\n self.t = T\n\n def find_element(self, locator, value=''):\n \"\"\"\n 定位单个元素\n :param locator: 定位元素的方式和值,类型为元组,如:(\"id\", \"value1\")\n :param value: 默认为空,如果传值则通过文本定位\n :return: 返回元素定位的对象\n \"\"\"\n if not isinstance(locator, tuple):\n logger.info('locator参数类型错误,必须传元组类型:locator = (\"id\", \"value1\")')\n else:\n logger.info(\"正在定位元素信息:定位方式->{locator[0]}, 元素值->{locator[1]},value值->{value}\")\n if value != '':\n element = WebDriverWait(self.driver, self.timeout, self.t).\\\n until(EC.text_to_be_present_in_element_value(locator, value))\n return element\n else:\n element = WebDriverWait(self.driver, self.timeout, self.t).\\\n until(EC.presence_of_element_located(locator))\n if element:\n return element\n else:\n logger.info(\"定位失败:定位方式=>{locator[0]}, value值=>{locator[1]}\")\n return False\n\n def find_elements(self, locator, value=''):\n \"\"\"\n 定位一组元素\n :param locator: 定位元素的方式和值,类型为元组,如:(\"id\", \"value1\")\n :param value: 默认为空,如果传值则通过文本定位\n :return: 返回元素对象列表\n \"\"\"\n if not isinstance(locator, tuple):\n logger.info('locator参数类型错误,必须传元组类型:locator = (\"id\", \"value1\")')\n else:\n logger.info(\"正在定位一组元素信息:定位方式->{locator[0]}, 元素值->{locator[1]},value值->{value}\")\n if value != '':\n elements = WebDriverWait(self.driver, 
self.timeout, self.t).\\\n until(EC.text_to_be_present_in_element_value(locator, value))\n return elements\n else:\n elements = WebDriverWait(self.driver, self.timeout, self.t).\\\n until(EC.presence_of_element_located(locator))\n if elements:\n return elements\n else:\n logger.info(\"定位失败:定位方式=>{locator[0]}, value值=>{locator[1]}\")\n return False\n\n def send_key(self, locator, text):\n \"\"\"向标签中输入值\"\"\"\n try:\n self.clear(locator)\n self.find_element(locator).send_keys(text)\n except Exception as e:\n logger.info(f\"输入值{text}失败, 报错信息{str(e)}\")\n\n def clear(self, locator):\n try:\n self.find_element(locator).clear()\n except Exception as e:\n logger.info(f\"清理标签{locator}失败, 报错信息{str(e)}\")\n\n def is_selected(self, locator, type_=''):\n \"\"\"判断元素是否被选中,返回bool值 及点(选中/取消选中\"\"\"\n ele = self.find_element(locator)\n try:\n if type_ == '': # 如果type参数为空,返回元素是否为选中状态,True/False (默认)\n r = ele.is_selected()\n return r\n elif type_ == 'click': # 如果type参数为click,执行元素的点击操作\n ele.click()\n else:\n print(f\"type参数 {type_} 错误,仅可为click或''\")\n except Exception as e:\n logger.info(\"元素定位错误,错误信息:%s\" % str(e))\n return False\n\n def is_element_dom_exist(self, locator):\n \"\"\"\n 判断单个元素是否在DOM里面,不一定显示\n :param locator:\n :return:\n \"\"\"\n try:\n self.find_element(locator)\n return True\n except Exception as e:\n logger.info(\"元素定位错误,错误信息:%s\" % str(e))\n return False\n\n def is_element_dom_exists(self, locator):\n \"\"\"\n 判断一组元素是否在DOM里面,不一定显示,若不存在,返回一个空的list\n :param locator: 定位元素的方式和值,类型为元组,如:(\"id\", \"value1\")\n :return:\n \"\"\"\n ''' 判断一组元素是否在DOM里面 (是否存在),若不存在,返回一个空的list'''\n element = self.find_elements(locator)\n n = len(element)\n if n == 0:\n return False\n elif n == 1:\n return True\n else:\n logger.info(f\"定位到元素的个数:{n}\")\n return True\n\n def title(self, title):\n \"\"\"\n 获取当前页面的title\n :param title:\n :return:\n \"\"\"\n try:\n result1 = WebDriverWait(self.driver, self.timeout, self.t).until(EC.title_is(title))\n if result1:\n return result1\n else:\n result2 = WebDriverWait(self.driver, self.timeout, self.t).until(EC.title_contains(title))\n if result2:\n return result2\n else:\n return False\n except Exception as e:\n logger.info(\"获取title失败,失败信息:%s\" % str(e))\n return False\n\n def in_element_exist(self, locator, value, type_='text'):\n \"\"\"\n 根据传入的type判断内容是否在指定元素里面\n :param locator:\n :param value:\n :param type_:\n :return:\n \"\"\"\n if not isinstance(locator, tuple):\n logger.info(\"locator参数类型错误,必须传元祖类型\")\n try:\n if type_ == 'text':\n result = WebDriverWait(self.driver, self.timeout, self.t).until(\n EC.text_to_be_present_in_element(locator, value))\n return result\n elif type_ == 'value':\n result = self.find_element(locator, value)\n return result\n else:\n print(f\"type参数 {type_} 错误,仅可使用text或value属性定位\")\n return False\n except Exception as e:\n logger.info(\"获取title失败,失败信息:%s\" % str(e))\n return False\n\n def alert(self, timeout=3, type_=''):\n \"\"\"\n 对常规警告窗的操作,确定,取消\n :param timeout:\n :param type_:\n :return:\n \"\"\"\n result = WebDriverWait(self.driver, timeout, self.t).until(EC.alert_is_present())\n try:\n if type_ == '':\n if result:\n return result\n else:\n logger.info(\"alert不存在\")\n return False\n elif type_ == 'yes':\n result.accept()\n elif type_ == 'no':\n result.dismiss()\n else:\n logger.info(f\"type_参数类型 错误,仅可为yes、no、或''\")\n except Exception as e:\n logger.info(\"发生异常,异常信息:%s\" % str(e))\n return False\n\n def get_title_text_attribute(self, locator=None, _type='text', name=''):\n \"\"\"\n 根据_type类型获取元素指定的内容\n :param locator:元素的定位\n :param 
_type: how to fetch the content (title, text or attribute)\n        :param name: the attribute name\n        :return:\n        \"\"\"\n        try:\n            if _type == 'title':\n                return self.driver.title\n            elif _type == 'text':\n                return self.find_element(locator).text\n            elif _type == 'attribute':  # get an attribute of the current element\n                return self.find_element(locator).get_attribute(name)\n            else:\n                logger.info(\"Invalid _type value; only title, text and attribute are supported\")\n        except Exception as e:\n            logger.info(\"Exception occurred, failed to fetch the content: %s\" % str(e))\n            return ''\n\n    def select(self, locator, value, _type='index'):\n        \"\"\"\n        Operate a drop-down box according to _type\n        :param locator: locator of the drop-down element\n        :param value: the value to select by\n        :param _type: selection type\n        :return:\n        \"\"\"\n        element = self.find_element(locator)\n        try:\n            if _type == 'index':  # select by index (default)\n                Select(element).select_by_index(value)\n            elif _type == 'value':  # select by the value attribute\n                Select(element).select_by_value(value)\n            elif _type == 'text':  # select by the visible text of the option\n                Select(element).select_by_visible_text(value)\n            else:\n                print(f\"Invalid _type value {_type}; only index, text and value are allowed\")\n        except Exception:\n            print(f\"Failed to operate the drop-down box with {value}\")\n\n    def switch_iframe(self, iframe):\n        \"\"\"\n        Switch to an iframe\n        :param iframe: if an int is passed, it is used as the frame index; if a string,\n        it is used as the frame name; if a tuple, it is treated as a locator\n        :return:\n        \"\"\"\n        try:\n            if isinstance(iframe, int):\n                self.driver.switch_to.frame(iframe)\n            elif isinstance(iframe, str):\n                self.driver.switch_to.frame(iframe)\n            elif isinstance(iframe, tuple):\n                ele = self.find_element(iframe)\n                self.driver.switch_to.frame(ele)\n        except Exception as e:\n            logger.info(\"Failed to switch iframe: %s\" % str(e))\n\n    def move_to_element(self, locator):\n        \"\"\"\n        Hover the mouse over an element\n        :param locator:\n        :return:\n        \"\"\"\n        try:\n            ele = self.find_element(locator)\n            ActionChains(self.driver).move_to_element(ele).perform()\n        except Exception as e:\n            logger.info(\"Mouse hover failed: %s\" % str(e))\n            return False\n\n    def js_focus_element(self, locator):\n        \"\"\"\n        Scroll an element into view\n        :param locator:\n        :return:\n        \"\"\"\n        target = self.find_element(locator)\n        self.driver.execute_script(\"arguments[0].scrollIntoView();\", target)\n\n    def js_scroll_top(self):\n        \"\"\"\n        Scroll to the top of the page\n        :return:\n        \"\"\"\n        js = \"window.scrollTo(0,0)\"\n        self.driver.execute_script(js)\n\n    def js_scroll_end(self, x=0):\n        \"\"\"\n        Scroll to the bottom of the page (with horizontal offset x)\n        :param x:\n        :return:\n        \"\"\"\n        js = f\"window.scrollTo({x},document.body.scrollHeight)\"\n        self.driver.execute_script(js)\n\n    def js_find(self, action):\n        \"\"\"\n        Find a single element via JS ('id' is a placeholder) and perform an action on it\n        :param action:\n        :return:\n        \"\"\"\n        js = f\"document.getElementById('id').{action}\"\n        self.driver.execute_script(js)\n\n    def js_finds(self, _type, element, index, action):\n        ''' Find elements via JS and perform the given action. To input a value: value='XXX'; to click: click().\n        JS locating may only use: id, Name, TagName, ClassName, Selector (CSS) '''\n        allowed = ['Name', 'TagName', 'ClassName', 'Selector']\n        if _type in allowed:\n            print(f\"Executing JS: locate by->{_type}, element->{element}, index->{index}, action->{action}\")\n            if _type == 'Selector':\n                js = f'document.query{_type}All(\"{element}\")[{index}].{action}'\n            else:\n                js = f'document.getElementsBy{_type}({element})[{index}].{action};'\n            self.driver.execute_script(js)\n        else:\n            print(f\"Invalid _type value {_type}; JS locating may only use 'Name', 'TagName', 'ClassName' or 'Selector' (CSS)\")\n\n    def js_readonly(self, idElement, value):\n        ''' Remove the readonly attribute of an element (usually located by id) and input a value '''\n        js = f'document.getElementById({idElement}).removeAttribute(\"readonly\");document.getElementById({idElement}).value=\"{value}\"'\n        self.driver.execute_script(js)\n\n    def js_iframe(self, Type, element, action, index=''):\n        ''' Handle an iframe via JS, without switching to the iframe and back first.\n        To input a value: value=''; to click: click(); when Type is id, index='' '''\n        js = f'document.getElementBy{Type}({element}){index}.contentWindow.document.body.{action}'\n        
self.driver.execute_script(js)\n\n    '''\n    jquery = '$(CSS).val(\"XXX\");'  # locate the element via a CSS selector and input a value\n    jquery = '$(CSS).val('');'  # clear the value\n    jquery = '$(CSS).click();'  # click\n    driver.execute_script(jquery)\n    '''\n\n    def get_title(self):\n        \"\"\"\n        Get the title of the current page\n        :return:\n        \"\"\"\n        return self.driver.title\n\n    def get_text(self, locator):\n        \"\"\"\n        Get the text of an element\n        :param locator:\n        :return:\n        \"\"\"\n        try:\n            t = self.find_element(locator).text\n            return t\n        except Exception as e:\n            logger.info(\"Failed to get text, returning '': %s\" % str(e))\n            return \"\"\n\n    def get_attribute(self, locator, name):\n        \"\"\"\n        Get an attribute of an element\n        :param locator: locator tuple\n        :param name: attribute name\n        :return:\n        \"\"\"\n        try:\n            element = self.find_element(locator)\n            return element.get_attribute(name)\n        except Exception as e:\n            logger.info(\"Failed to get attribute %s, returning '': %s\" % (name, str(e)))\n            return \"\"\n\n    def select_by_index(self, locator, index=0):\n        \"\"\"\n        Select an option of a drop-down box by index\n        :param locator: locator of the drop-down box\n        :param index: index of the option\n        :return:\n        \"\"\"\n        element = self.find_element(locator)\n        Select(element).select_by_index(index)\n\n    def select_by_value(self, locator, value):\n        \"\"\"\n        Select an option of a drop-down box by value\n        :param locator: locator of the drop-down box\n        :param value: value of the option\n        :return:\n        \"\"\"\n        element = self.find_element(locator)\n        Select(element).select_by_value(value)\n\n    def select_by_text(self, locator, text):\n        \"\"\"\n        Select an option of a drop-down box by visible text\n        :param locator: locator of the drop-down box\n        :param text: visible text of the option\n        :return:\n        \"\"\"\n        element = self.find_element(locator)\n        Select(element).select_by_visible_text(text)\n\n    def current_window(self):\n        \"\"\"Get the handle of the current window\"\"\"\n        return self.driver.current_window_handle\n\n    def handle(self, value):\n        \"\"\"\n        Switch windows, dispatching automatically on the type of the argument\n        :param value: int or str; an int switches to the window at that index, a str switches to the window with that handle name\n        :return:\n        \"\"\"\n        try:\n            if isinstance(value, int):\n                handles = self.driver.window_handles\n                self.driver.switch_to.window(handles[value])\n            elif isinstance(value, str):\n                self.driver.switch_to.window(value)\n            else:\n                logger.info(\"Invalid type %s; only int and str are accepted\" % type(value))\n        except Exception as e:\n            logger.info(\"Failed to switch window handle: %s\" % str(e))\n\n    # def js_find(self, action):\n    #     '''\n    #     To input a value: value='XXX'; to click: click()\n    #     '''\n    #     print(\"Executing JS action: %s\" % action)\n    #     js = \"document.getElementById('id').%s\" % action\n    #     self.driver.execute_script(js)\n\n\n","repo_name":"cadentang/selenium_91lng_auto_test","sub_path":"common/common_location.py","file_name":"common_location.py","file_ext":"py","file_size_in_byte":16123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"34661532936","text":"#!/usr/bin/python3\n\"\"\"\nthis module creates the dictionary description of\nan object for JSON serialisation\n\"\"\"\n\n\ndef class_to_json(obj):\n    \"\"\"\n    returns the dictionary description with\n    simple data structure\n    \"\"\"\n\n    if isinstance(obj, dict):\n        return obj\n\n    if hasattr(obj, \"__dict__\"):\n        serialised = {}\n        for key, value in obj.__dict__.items():\n            if isinstance(value, (int, str, bool, list, dict)):\n                serialised[key] = value\n            elif isinstance(value, object):\n                serialised[key] = class_to_json(value)\n        return serialised\n","repo_name":"Phics2022/alx-higher_level_programming","sub_path":"0x0B-python-input_output/8-class_to_json.py","file_name":"8-class_to_json.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"37884015844","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"Copyright: Arthur Milchior arthur@milchior.fr\r\nLicense: GNU GPL, version 3 or later; 
http://www.gnu.org/copyleft/gpl.html\r\nFeel free to contribute to the code on https://github.com/Arthur-Milchior/anki-LaTeX-Error\r\n\r\nAdds a LaTeXError tag to each card which has a LaTeX error, and removes this tag from cards which do not have any LaTeX error.\r\n\"\"\"\r\n\r\nfrom anki.hooks import addHook\r\nfrom aqt.utils import tooltip, isWin, isMac\r\nfrom anki.utils import checksum, intTime\r\nfrom anki.media import MediaManager\r\nimport re\r\nimport sys\r\nimport unicodedata\r\nimport anki.notes\r\nimport os\r\nfrom anki.latex import regexps, _latexFromHtml, build, _buildImg\r\nfrom anki.consts import *\r\n\r\ndef mungeQA(html, type, fields, model, data, col):\r\n    \"Convert the HTML with embedded latex tags to image links. Returns the HTML and whether an error occurred.\"\r\n    error = False\r\n    for match in regexps['standard'].finditer(html):\r\n        link, er = _imgLink(col, match.group(1), model)\r\n        html = html.replace(match.group(), link)\r\n        error = error or er\r\n    for match in regexps['expression'].finditer(html):\r\n        link, er = _imgLink(\r\n            col, \"$\" + match.group(1) + \"$\", model)\r\n        html = html.replace(match.group(), link)\r\n        error = error or er\r\n    for match in regexps['math'].finditer(html):\r\n        link, er = _imgLink(\r\n            col,\r\n            \"\\\\begin{displaymath}\" + match.group(1) + \"\\\\end{displaymath}\", model)\r\n        html = html.replace(match.group(), link)\r\n        error = error or er\r\n    return html, error\r\n\r\ndef _imgLink(col, latex, model):\r\n    \"\"\"A pair containing:\r\n    An img link for LATEX, creating it if necessary.\r\n    Whether an error occurred.\"\"\"\r\n    txt = _latexFromHtml(col, latex)\r\n\r\n    if model.get(\"latexsvg\", False):\r\n        ext = \"svg\"\r\n    else:\r\n        ext = \"png\"\r\n\r\n    # is there an existing file?\r\n    fname = \"latex-%s.%s\" % (checksum(txt.encode(\"utf8\")), ext)\r\n    link = '<img class=latex src=\"%s\">' % fname\r\n    if os.path.exists(fname):\r\n        return (link,False)\r\n\r\n    # building disabled?\r\n    if not build:\r\n        return (\"[latex]%s[/latex]\" % latex,False)\r\n\r\n    err = _buildImg(col, txt, fname, model)\r\n    if err:\r\n        return (err,True)\r\n    else:\r\n        return (link,False)\r\n\r\ndef filesInStr(s, mid, string, nid, includeRemote=False):\r\n    l = []\r\n    model = s.col.models.get(mid)\r\n    strings = []\r\n    someError = False\r\n    if model['type'] == MODEL_CLOZE and \"{{c\" in string:\r\n        # if the field has clozes in it, we'll need to expand the\r\n        # possibilities so we can render latex\r\n        strings = s._expandClozes(string)\r\n    else:\r\n        strings = [string]\r\n    for string in strings:\r\n        # handle latex\r\n        (string,error) = mungeQA(string, None, None, model, None, s.col)\r\n        someError = error or someError\r\n        # extract filenames\r\n        for reg in s.regexps:\r\n            for match in re.finditer(reg, string):\r\n                fname = match.group(\"fname\")\r\n                isLocal = not re.match(\"(https?|ftp)://\", fname.lower())\r\n                if isLocal or includeRemote:\r\n                    l.append(fname)\r\n    if someError:\r\n        note = s.col.getNote(nid)\r\n        note.addTag(\"LaTeXError\")\r\n#        tooltip(\"Error on card %s.\"% nid)\r\n        note.flush()\r\n    return (l,someError)\r\n\r\ndef check(self, local=None):\r\n    \"Return (missingFiles, unusedFiles, numberError).\"\r\n    totalError=0\r\n    mdir = self.dir()\r\n    # gather all media references in NFC form\r\n    allRefs = set()\r\n    for nid, mid, flds in self.col.db.execute(\"select id, mid, flds from notes\"):\r\n        (noteRefs,error) = filesInStr(self,mid, flds, nid)\r\n        if error:\r\n            totalError +=1\r\n        # check the refs are in NFC\r\n        for f in noteRefs:\r\n            # if they're not, we'll need to fix them first\r\n            if f != 
unicodedata.normalize(\"NFC\", f):\r\n self._normalizeNoteRefs(nid)\r\n noteRefs = self.filesInStr(mid, flds,nid)\r\n break\r\n allRefs.update(noteRefs)\r\n # loop through media folder\r\n unused = []\r\n if local is None:\r\n files = os.listdir(mdir)\r\n else:\r\n files = local\r\n renamedFiles = False\r\n dirFound = False\r\n warnings = []\r\n for file in files:\r\n if not local:\r\n if not os.path.isfile(file):\r\n # ignore directories\r\n dirFound = True\r\n continue\r\n if file.startswith(\"_\"):\r\n # leading _ says to ignore file\r\n continue\r\n if self.hasIllegal(file):\r\n name = file.encode(sys.getfilesystemencoding(), errors=\"replace\")\r\n name = str(name, sys.getfilesystemencoding())\r\n warnings.append(\r\n _(\"Invalid file name, please rename: %s\") % name)\r\n continue\r\n nfcFile = unicodedata.normalize(\"NFC\", file)\r\n # we enforce NFC fs encoding on non-macs; on macs we'll have gotten\r\n # NFD so we use the above variable for comparing references\r\n if not isMac and not local:\r\n if file != nfcFile:\r\n # delete if we already have the NFC form, otherwise rename\r\n if os.path.exists(nfcFile):\r\n os.unlink(file)\r\n renamedFiles = True\r\n else:\r\n os.rename(file, nfcFile)\r\n renamedFiles = True\r\n file = nfcFile\r\n # compare\r\n if nfcFile not in allRefs:\r\n unused.append(file)\r\n else:\r\n allRefs.discard(nfcFile)\r\n # if we renamed any files to nfc format, we must rerun the check\r\n # to make sure the renamed files are not marked as unused\r\n if renamedFiles:\r\n return self.check(local=local)\r\n nohave = [x for x in allRefs if not x.startswith(\"_\")]\r\n # make sure the media DB is valid\r\n try:\r\n self.findChanges()\r\n except DBError:\r\n self._deleteDB()\r\n if dirFound:\r\n warnings.append(\r\n _(\"Anki does not support files in subfolders of the collection.media folder.\"))\r\n# if totalError>0:\r\n warnings.append(\r\n _(\"There are %s cards with a latex error.\")% totalError)\r\n return (nohave, unused, warnings)\r\n\r\nMediaManager.check = check\r\n","repo_name":"Arthur-Milchior/anki-LaTeX-Error","sub_path":"latexError.py","file_name":"latexError.py","file_ext":"py","file_size_in_byte":6312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31241149515","text":"\nfrom .basepage import BasePage\n\nclass Post(BasePage):\n def __init__(self, input_path):\n super().__init__(input_path)\n\n self.wordcount = 0\n\n def __str__(self):\n if self.date:\n return \"Post on {}: {}\".format(self.date.isoformat(), self.title)\n else:\n return \"Post: {}\".format(self.title)\n\n def build(self, pandoc, config):\n super().build(pandoc, config)\n\n if not self.title:\n self.title = self.input_path.name.replace(\".md\", \"\")\n\n self.wordcount = pandoc.countwords(self.input_path)\n\n if not self.is_current():\n print(\"Building post: \", self)\n self.generate_html(pandoc)\n\n def __render(self, jinja_env, site_meta, page_meta):\n template = jinja_env.get_template(\"{}.html\".format(self.layout))\n return template.render(content=self.content_html, post=self, page=page_meta, site=site_meta)\n\n def write(self, jinja_env, site_meta):\n if not self.is_current():\n print(\"Writing post:\", self)\n template_out = self.__render(jinja_env, site_meta, self.get_page_metadata())\n self.output_path.parent.mkdir(parents=True, exist_ok=True)\n self.output_path.write_text(template_out)\n return True\n return 
False\n","repo_name":"ediril/papyrus","sub_path":"papyrus/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22907491778","text":"import pandas as pd\r\nimport csv\r\nimport os\r\n\r\nINPUT_DIR = os.path.join(\"data\", \"clean\")\r\nOUTPUT_DIR = os.path.join(\"data\", \"clean\")\r\n\r\n\r\ndf = pd.read_csv(os.path.join(INPUT_DIR, \"bracket_data.csv\"), index_col=\"team\")\r\nmatchup_dict = {}\r\nwinner_dict = {}\r\nregion_dict = {1: \"west\", 2: \"east\", 3: \"midwest\", 4: \"south\"}\r\n\r\n\r\nwith open(os.path.join(INPUT_DIR, \"coefficients.csv\"), 'r') as file:\r\n reader = csv.DictReader(file)\r\n for row in reader:\r\n coef_dict = dict(row)\r\n\r\n\r\ndef pick_winner(matchup):\r\n \"\"\"This function picks the winner of one game by applying the coefficients from the regression to each team's ratings. The projected\r\n winning team is returned as a string.\"\"\"\r\n\r\n if (df.at[matchup[1], \"seed\"] - df.at[matchup[0], \"seed\"]) * float(coef_dict['seed_diff']) + \\\r\n df.at[matchup[0], \"osrs\"] * float(coef_dict[\"osrs_avg\"]) - \\\r\n df.at[matchup[1], \"osrs\"] * float(coef_dict[\"osrs_avg\"]) + \\\r\n df.at[matchup[0], \"dsrs\"] * float(coef_dict[\"dsrs_avg\"]) - \\\r\n df.at[matchup[1], \"dsrs\"] * float(coef_dict[\"dsrs_avg\"]) > 0:\r\n return_value = matchup[0]\r\n else:\r\n return_value = matchup[1]\r\n return return_value\r\n\r\n\r\ndef build_round(game_lower, game_upper):\r\n \"\"\"This function builds up matchups for a given round based on the upper and lower bounds of game numbers for a\r\n given round are used as arguments. There is no return value.\"\"\"\r\n\r\n for i in range(game_lower, game_upper):\r\n matchup_dict[i] = [winner_dict[(2 * i) - 69], winner_dict[(2 * i) - 68]]\r\n\r\n\r\ndef pick_round(game_lower, game_upper):\r\n \"\"\"This function picks one round of games by looping over matchup_dict. The upper and lower bounds of game numbers\r\n for a given round are used as arguments. The winner_dict dictionary is updated to reflect the projected winners of\r\n the round. There is no return value.\"\"\"\r\n\r\n for i in range(game_lower, game_upper):\r\n winner_dict[i] = pick_winner(matchup_dict[i])\r\n\r\n\r\ndef build_bracket():\r\n \"\"\"This function builds up the play-in bracket, picks the play-in winners using pick_round, and builds up the main\r\n bracket. There is no argument or return value. 
The dataframe must be properly scoped so that the function can\r\n    access it.\"\"\"\r\n\r\n    matchup_dict[1] = [df.index[(df[\"region\"] == region_dict[1]) & (df['seed'] == 11)].values[0],\r\n                       df.index[(df[\"region\"] == region_dict[1]) & (df['seed'] == 11)].values[1]]\r\n    matchup_dict[2] = [df.index[(df[\"region\"] == region_dict[2]) & (df['seed'] == 12)].values[0],\r\n                       df.index[(df[\"region\"] == region_dict[2]) & (df['seed'] == 12)].values[1]]\r\n    matchup_dict[3] = [df.index[(df[\"region\"] == region_dict[3]) & (df['seed'] == 16)].values[0],\r\n                       df.index[(df[\"region\"] == region_dict[3]) & (df['seed'] == 16)].values[1]]\r\n    matchup_dict[4] = [df.index[(df[\"region\"] == region_dict[4]) & (df['seed'] == 16)].values[0],\r\n                       df.index[(df[\"region\"] == region_dict[4]) & (df['seed'] == 16)].values[1]]\r\n\r\n    pick_round(1, 5)\r\n\r\n    matchup_dict[9] = [df.index[(df[\"region\"] == region_dict[1]) & (df['seed'] == 6)].values[0], winner_dict[1]]\r\n    matchup_dict[15] = [df.index[(df[\"region\"] == region_dict[2]) & (df['seed'] == 5)].values[0], winner_dict[2]]\r\n    matchup_dict[21] = [df.index[(df[\"region\"] == region_dict[3]) & (df['seed'] == 1)].values[0], winner_dict[3]]\r\n    matchup_dict[29] = [df.index[(df[\"region\"] == region_dict[4]) & (df['seed'] == 1)].values[0], winner_dict[4]]\r\n\r\n    list1 = [1, 8, 5, 4, 6, 3, 7, 2]\r\n\r\n    for i in list1:\r\n        for n in range(1, 5):\r\n            if not ((n == 1 and i == 6) or (n == 2 and i == 5) or (n == 3 and i == 1) or (n == 4 and i == 1)):\r\n                matchup_dict[list1.index(i) + 5 + ((n - 1) * 8)] = \\\r\n                    [df.index[(df[\"region\"] == region_dict[n]) & (df['seed'] == i)].values[0],\r\n                     df.index[(df[\"region\"] == region_dict[n]) & (df['seed'] == (17 - i))].values[0]]\r\n\r\n\r\ndef pick_bracket():\r\n    \"\"\"This function fills out the entire bracket with successive calls to pick_round. It must be called after\r\n    build_bracket. 
There is no argument or return value.\"\"\"\r\n\r\n pick_round(5, 37)\r\n build_round(37, 53)\r\n pick_round(37, 53)\r\n build_round(53, 61)\r\n pick_round(53, 61)\r\n build_round(61, 65)\r\n pick_round(61, 65)\r\n build_round(65, 67)\r\n pick_round(65, 67)\r\n build_round(67, 68)\r\n pick_round(67, 68)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n build_bracket()\r\n pick_bracket()\r\n\r\n with open(os.path.join(OUTPUT_DIR, \"winners.csv\"), 'w') as file:\r\n for key in winner_dict.keys():\r\n file.write(\"%s, %s\\n\" % (key, winner_dict[key]))\r\n\r\n with open(os.path.join(OUTPUT_DIR, \"matchups.csv\"), 'w') as file:\r\n for key in matchup_dict.keys():\r\n file.write(\"%s, %s\\n\" % (key, matchup_dict[key]))\r\n","repo_name":"afifmazhar/ncaa_rankings","sub_path":"code/build_bracket.py","file_name":"build_bracket.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"22397287545","text":"def Fibonacci(n):\n \"\"\"Computes all Fibonacci numbers up to n (not including).\n Starts at 1, 2, etc.\"\"\"\n L = []\n a, b = 1, 2\n while a < n:\n L.append(a)\n a, b = b, a + b\n return L\n\n\ndef p297(N):\n \"\"\"Computes the sum of z(n) for all n = 1, ..., N-1.\"\"\"\n # Compute all candidate fibonacci numbers\n F = Fibonacci(N)\n\n # Precompute values for S(n)\n S = {1: 1, 2: 2, 3: 3}\n for i, f in enumerate(F):\n if f not in S:\n S[F[i]] = (F[i] - F[i-1] - 1) + S[F[i-1]] + S[F[i-2]]\n\n # Define a function that does recursive computations of S(n)\n # Based on the recurrence relation below.\n def compute_S(n):\n \"\"\"\n Calculates S(n) according to the rule\n n = maximal_f + rem\n S[n] = S[maximal_f] + compute_S(rem) + n - maximal_f\n\n where S[n] is the dictionary of all S values.\n\n If n is Fibonacci, the function reduces to a dictionary look up in S,\n that is, compute_S(n) returns S[n]\n \"\"\"\n if n in F:\n return S[n]\n\n else:\n maximal_f = 1\n for f in F:\n if f <= n:\n maximal_f = max(maximal_f, f)\n rem = n - maximal_f\n return S[maximal_f] + compute_S(rem) + n - maximal_f\n\n return(compute_S(N-1))\n\n\nif __name__ == \"__main__\":\n print(p297(10**17))\n","repo_name":"leonlan/projecteuler","sub_path":"python/p297.py","file_name":"p297.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3559693755","text":"# -*- coding: utf-8 -*-\n\n\nimport sys\nfrom process_question import Question\n# 创建问题处理对象,这样模型就可以常驻内存\nque=Question()\n# Restorepip freeze > requirements.txt\ndef enablePrint():\n sys.stdout = sys.__stdout__\nenablePrint()\n\n\nresult=que.question_process(\"李连杰生日是哪天?\")\nprint(result)\n","repo_name":"xiaoliang8006/Movie-QA-System","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"27"} +{"seq_id":"31570696137","text":"from pyibex.thickset import *\nfrom pyibex import Interval, IntervalVector\nfrom vibes import vibes\n\n\ndef ex_circle():\n t = ThickDisk(0,0,2)\n X0 = IntervalVector(2, [-5,5])\n # P = ThickPaving(X0, lambda x: opNot(t.test(x)), 0.05)\n P = ThickPaving(X0, t, 0.05)\n\n P.visit(ToVibes(1000, \"Disk\"))\n\n t2 = ThickTranslateInPaving(P, IntervalVector([[0,0], [0,1]]))\n X1 = IntervalVector([[4.6,4.7], [2.8,2.9]])\n vibes.drawBox(X1[0][0], X1[0][1], X1[1][0], X1[1][1], 'y')\n vibes.drawBox(X1[0][0]+1, X1[0][1]+1, X1[1][0]+1, X1[1][1]+1, 
'g')\n    vibes.drawBox(5, 5, X0[1][0]+1, X0[1][1]+1, 'orange')\n    vibes.axisAuto()\n    P2 = ThickPaving(X0.inflate(10), t2, 0.1)\n    P2.visit(ToVibes(1000, \"Disk translated\"))\n    vibes.setFigurePos(500,10)\n\n\n\n\nif __name__ == '__main__':\n    vibes.beginDrawing()\n    ex_circle()\n    vibes.endDrawing()\n","repo_name":"codac-team/codac","sub_path":"src/3rd/pyibex/transfer/pyIbex/pyibex/thickset/examples/ex_translate.py","file_name":"ex_translate.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"27"}
+{"seq_id":"31333508077","text":"from newspaper import Article\nimport datetime\n\ndef lambda_handler(event, context):\n    url = event['url']\n    article = Article(url)\n    article.download()\n    article.parse()\n\n    title = article.title\n    authors = article.authors\n    authors = [a for a in authors if len(a) < 50]\n\n    publication_dt = article.publish_date\n    publication_date_str = \"\"\n    if publication_dt:\n        publication_date_str = datetime.datetime.strftime(publication_dt, \"%Y-%m-%dT%H:%M:%S\")\n\n    content = article.text\n\n    return {\n        'url' : url,\n        'domain' : article.source_url,\n        'title' : title,\n        'authors': authors,\n        'publication_date' : publication_date_str,\n        'content' : content\n    }\n","repo_name":"lucaslingle/newspaper-api","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"22043627862","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# some description\n# author: holysll\n# datetime: 2020-8-16 21:51\n# software: PyCharm\n\"\"\"\nProblem: Flood Fill (image rendering)\n\nAn image is represented by a 2-D array of integers, each integer representing the pixel value of the image, with values between 0 and 65535.\n\nGiven a coordinate (sr, sc) representing the starting pixel (row, column) of the rendering and a new color value newColor, recolor the image.\n\nTo perform the recoloring, start from the initial coordinate and record the connected pixels in the four directions (up, down, left, right) whose pixel value equals that of the starting pixel; then record the pixels connected to those qualifying pixels in their four directions with the same pixel value as the starting pixel, ..., repeating the process. Change the color value of all recorded pixels to the new color value.\n\nFinally, return the image after recoloring.\n\nExample 1:\n\nInput:\nimage = [[1,1,1],[1,1,0],[1,0,1]]\nsr = 1, sc = 1, newColor = 2\nOutput: [[2,2,2],[2,2,0],[2,0,1]]\nExplanation:\nStarting from the middle of the image (coordinate (sr,sc)=(1,1)),\nthe color of every qualifying pixel on the path is changed to 2.\nNote that the bottom-right pixel is not changed to 2,\nbecause it is not connected to the starting point in the four directions.\nNote:\n\nThe lengths of image and image[0] are in the range [1, 50].\nThe given starting point satisfies 0 <= sr < image.length and 0 <= sc < image[0].length.\nThe color values of image[i][j] and newColor are in the range [0, 65535].\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/flood-fill\nThe copyright belongs to LingKou Network. For commercial reprints please contact the official authorization; for non-commercial reprints please cite the source.\n\"\"\"\n\n# Python packages\n\nimport collections\n\n\n# Breadth-first search\n# Time complexity: O(n×m); space complexity: O(n×m)\nclass Solution(object):\n    def floodFill(self, image, sr, sc, newColor):\n        \"\"\"\n        :type image: List[List[int]]\n        :type sr: int\n        :type sc: int\n        :type newColor: int\n        :rtype: List[List[int]]\n        \"\"\"\n        currColor = image[sr][sc]\n        if currColor == newColor:\n            return image\n\n        n, m = len(image), len(image[0])\n        q = collections.deque([(sr, sc)])\n        image[sr][sc] = newColor\n        while q:\n            x, y = q.popleft()\n            for mx, my in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:\n                if 0 <= mx < n and 0 <= my < m and image[mx][my] == currColor:\n                    q.append((mx, my))\n                    image[mx][my] = newColor\n        return image\n\n\n# Depth-first search\n# Time complexity: O(n×m); space complexity: O(n×m)\nclass Solution1(object):\n    def floodFill(self, image, sr, sc, newColor):\n        \"\"\"\n        :type image: List[List[int]]\n        :type sr: int\n        :type sc: int\n        :type newColor: int\n        :rtype: List[List[int]]\n        \"\"\"\n        n, m = len(image), len(image[0])\n        currColor = image[sr][sc]\n\n        def dfs(x, y):\n            if image[x][y] 
== currColor:\n image[x][y] = newColor\n for mx, my in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:\n if 0 <= mx < n and 0 <= my < m and image[mx][my] == currColor:\n dfs(mx, my)\n\n if currColor != newColor:\n dfs(sr, sc)\n return image\n\n\nif __name__ == '__main__':\n image = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]\n sr = 1\n sc = 1\n newColor = 2\n solution = Solution()\n res = solution.floodFill(image, sr, sc, newColor)\n print(res)\n\n solution1 = Solution1()\n res1 = solution1.floodFill(image, sr, sc, newColor)\n print(res1)\n","repo_name":"holysll/Leetcode","sub_path":"733_flood-fill.py","file_name":"733_flood-fill.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"22043627862","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom pathlib import Path\n\n\ndef relative_to_abs() -> None:\n \"\"\"Updates paths in the dataset_config.yaml file from relative to absolute.\n\n This function is used to replace the dataset path in the\n `dataset_config.yml` file from a relative to absolute path.\n \"\"\"\n with open('dataset_config.yml') as f:\n lines = f.readlines()\n\n existing_path = lines[0].split('path: ')[1].strip()\n replace_with = str(Path('dataset-YOLO').absolute())\n lines[0] = lines[0].replace(existing_path, replace_with)\n\n with open('dataset_config.yml', 'w') as f:\n f.writelines(lines)\n\n print(''.join(lines))\n\n\nif __name__ == '__main__':\n relative_to_abs()\n","repo_name":"bird-feeder/BirdFSD-YOLOv5","sub_path":"birdfsd_yolov5/model_utils/relative_to_abs.py","file_name":"relative_to_abs.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"30874478405","text":"# 분류: 1주차 BFS / DFS\n# 문제: 백준 2468 안전영역 \n# 문제 주소: https://www.acmicpc.net/problem/2468\n# 푼 사람: 진홍엽\n# 설명: BFS, 델타 탐색 사용\n\nimport sys\nfrom collections import deque\nimport pprint\n\ni_n = int(sys.stdin.readline().rstrip())\ngraph = [list(map(int, sys.stdin.readline().split())) for i_1 in range(i_n)]\n\nresult = []\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\n\ni_max = 1\ni_min = 100\nfor ls_1 in graph:\n if max(ls_1) > i_max:\n i_max = max(ls_1)\n if min(ls_1) < i_min:\n i_min = min(ls_1) - 1\n\nfor i_2 in range(i_min, i_max):\n visited = [[False] * i_n for i_6 in range(i_n)]\n cnt_island = 0\n for i_3 in range(i_n):\n for i_4 in range(i_n):\n if graph[i_3][i_4] > i_2 and visited[i_3][i_4] == False:\n cnt_island += 1\n visited[i_3][i_4] = True\n que = deque()\n que.append((i_4, i_3))\n while que:\n x, y = que.popleft()\n for i_5 in range(4):\n nx = x + dx[i_5]\n ny = y + dy[i_5]\n if nx < 0 or nx >= i_n or ny < 0 or ny >= i_n:\n continue\n if graph[ny][nx] > i_2 and visited[ny][nx] == False:\n que.append((nx, ny))\n visited[ny][nx] = True\n else:\n visited[i_3][i_4] = True\n result.append(cnt_island)\nprint(max(result))","repo_name":"Choi-jw-96/Algo-","sub_path":"1주/2468/2468_hyndrome.py","file_name":"2468_hyndrome.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"26551395548","text":"class Phone:\n def __init__(self, name, brand, price):\n self.name = name\n self.brand = brand\n self.price = price\n\n def __repr__(self):\n return f\"{self.name}\"\n\n\nclass Cellphone(Phone):\n def __init__(self, name, brand, price, color, memory):\n super().__init__(name, brand, price)\n self.color = color\n self.memory = 
memory\n\n def __repr__(self):\n return f\"{self.name} {self.brand} {self.price} {self.color} {self.memory}\"\n\n\nclass Radiophone(Phone):\n def __init__(self, name, brand, price, phone_reach, answering_machine):\n super().__init__(name, brand, price)\n self.phone_reach = phone_reach\n self.answering_machine = answering_machine\n\n def __repr__(self):\n return f\"{self.name} {self.brand} {self.price} {self.phone_reach} {self.answering_machine}\"\n\n\ndef read_from_file(file_name):\n phones_list = []\n with open(file_name) as file:\n for line in file:\n phone = None\n line = [element.strip() for element in line.split(\",\")]\n name = line[0]\n brand = line[1]\n price = int(line[2])\n if line[3].isnumeric():\n phone_reach = int(line[3])\n answering_machine = True if line[4] == \"yes\" else False\n phone = Radiophone(name=name, brand=brand, price=price, phone_reach=phone_reach,\n answering_machine=answering_machine)\n else:\n color = line[3]\n memory = int(line[4])\n phone = Cellphone(name=name, brand=brand, price=price, color=color, memory=memory)\n phones_list.append(phone)\n return phones_list\n\n\nphones1 = read_from_file('brand1_phones.txt')\nphones2 = read_from_file('brand2_phones.txt')\n\nall_phones = phones1 + phones2\nprint(all_phones)\nall_phones.sort(key=lambda phone: phone.price)\nprint(all_phones)\n\nwith open('sorted_phoned.txt', 'w') as sf:\n for phone in all_phones:\n sf.writelines(f\"{phone}\\n\")\n\nwith open('sorted_phoned.txt', 'a') as sf:\n sum_prices = 0\n for phone in all_phones:\n sum_prices += phone.price\n sf.write(f\"Sum of price is {sum_prices}\")\n\nwith open('radio_answering_phone.txt', 'w') as rad_ans_ph:\n for phone in all_phones:\n if type(phone) is Radiophone and phone.answering_machine is True:\n rad_ans_ph.write(f\"{phone}\\n\")\n","repo_name":"Python-fundamental-13-12-22/pf13.12.22","sub_path":"lesson10/hw/zoriananaz/task1_10/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"2470337828","text":"import codecademylib3_seaborn\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import linear_model\n\n# Load the honey production data into a Pandas DataFrame\ndf = pd.read_csv(\"honeyproduction.csv\")\n\n# Group the data by year and calculate the mean total production for each year\nprod_per_year = df.groupby('year').totalprod.mean().reset_index()\n\n# Extract the year and total production values\nX = prod_per_year['year']\ny = prod_per_year['totalprod']\n\n# Reshape the X data from a 1D array to a 2D array with one column\nX = X.values.reshape(-1,1)\n\n# Create a scatter plot of the data with proper axis labels\nplt.scatter(X, y)\nplt.xlabel('Year')\nplt.ylabel('Total Production (lbs)')\n\n# Fit a linear regression model to the data and plot the predicted values\nregr = linear_model.LinearRegression()\nregr.fit(X,y)\ny_predict = regr.predict(X)\nplt.plot(X,y_predict)\nplt.show()\n# Plot the predicted values for future years\nX_future = np.array(range(2013, 2050))\nX_future = X_future.reshape(-1,1)\nfuture_predict = regr.predict(X_future)\nplt.plot(X_future,future_predict)\n\n# Add proper axis labels and show the plot\nplt.xlabel('Year')\nplt.ylabel('Total Production 
(lbs)')\nplt.show()\n","repo_name":"rajkumar3934/honey-production-prediction","sub_path":"honey_production_prediction.py","file_name":"honey_production_prediction.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"36467340793","text":"#!/usr/bin/env python3\n\n#import pdb\nimport time\nimport com\nimport stirrer\nimport led\nimport heater\nimport temp_sensor\nimport light_sensor\n\nif __name__ == \"__main__\":\n ser = com.open_serial(\"/dev/ttyACM0\")\n\n # stir for 5 sec\n print(\"Stirring for 5 seconds.\")\n print(\"Please check if the fan is rotating\")\n s = stirrer.stirrer(ser)\n result = s.stir_for_n_seconds(5)\n print(\"Stirring off.\")\n s.save_log(result)\n\n #led on for 5 sec\n print(\"Led on\")\n l = led.led(ser)\n l.turn_on()\n time.sleep(5)\n l.turn_off()\n print(\"Led off\")\n\n # heat for 5 sec\n h = heater.heater(ser)\n h.turn_on()\n print(\"Turning the heating on.\")\n h.save_log(1)\n time.sleep(5)\n h.turn_off()\n print(\"Turning the heating off.\")\n h.save_log(0)\n\n # read temp\n t = temp_sensor.temp_sensor(ser)\n temp = t.read(0)\n print(\"Temperature is \" + str(temp) + \" degrees.\")\n t.save_log(0,temp)\n\n # read light \n ls = light_sensor.light_sensor(ser)\n light = ls.read()\n print (\"Light intensity with the led off is \" + str(light))\n l.turn_on()\n time.sleep(1)\n light = ls.read()\n print (\"Light intensity with the led on is \" + str(light))\n l.turn_off()\n print(\"Led off\")\n\n print(\"Test ended\")\n","repo_name":"dinogen/brewing_lab","sub_path":"python/component_test.py","file_name":"component_test.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11715816799","text":"class File_System:\n def __init__(self, file_system_name, block_size, amount_of_blocks):\n self.system = file_system_name\n self.block_size = block_size\n self.amount_of_blocks = amount_of_blocks\n\n def create(self):\n with open(self.system, mode='w') as my_system:\n for _ in range(self.amount_of_blocks):\n my_system.write(\"0\" * self.block_size + \"\\n\")\n\n def data_to_massive_ready_for_system(self, data):\n temporary = str()\n for i in data:\n temporary += str(i) + \" \"\n\n temporary_massive = []\n n = self.block_size - 1\n for i in range(0, len(temporary), n):\n temporary_massive.append(temporary[i:i + n])\n\n return temporary_massive\n\n def get_system_data(self):\n with open(self.system, encoding=\"utf-8\", mode='r') as my_system:\n system_data = my_system.readlines()\n return system_data\n\n def check_for_free_space(self, all_data, processed_data):\n free_space_counter = 0\n for i in all_data:\n if i[0] == \"0\":\n free_space_counter += 1\n processed_data_size = len(processed_data)\n if free_space_counter >= processed_data_size + 1:\n return True\n else:\n return False\n\n def add_file(self, name, data):\n processed_data = self.data_to_massive_ready_for_system(data)\n all_data = self.get_system_data()\n enough_space = self.check_for_free_space(all_data, processed_data)\n\n if enough_space:\n for i in range(len(all_data)):\n if all_data[i][0] == \"0\":\n all_data[i] = \"#\" + name + \" \" + ((self.block_size - (len(name) + 2)) * \"0\") + \"\\n\"\n break\n\n counter = 0\n for i in range(len(all_data)):\n if all_data[i][0] == \"0\":\n all_data[i] = \"/\" + processed_data[counter] + (\n (self.block_size - (len(processed_data[counter]) + 1)) * \"0\"\n ) + \"\\n\"\n\n if 
counter != len(processed_data) - 1:\n counter += 1\n else:\n break\n\n with open(self.system, encoding=\"utf-8\", mode='w') as my_system:\n my_system.writelines(all_data)\n else:\n print(\"Not enough space\")\n\n def delete_file(self, name):\n all_data = self.get_system_data()\n delete_massive = []\n\n for i in range(len(all_data)):\n if all_data[i].startswith(\"#\" + name):\n delete_massive.append(i)\n\n try:\n for i in range(delete_massive[0] + 1, len(all_data)):\n if all_data[i][0] == \"/\":\n delete_massive.append(i)\n else:\n break\n except IndexError:\n print(\"File not found\")\n\n for i in delete_massive:\n all_data[i] = \"0\" * self.block_size + \"\\n\"\n\n with open(self.system, encoding=\"utf-8\", mode='w') as my_system:\n my_system.writelines(all_data)\n\n def read_file(self, name):\n all_data = self.get_system_data()\n read_massive = []\n\n for i in range(len(all_data)):\n if all_data[i].startswith(\"#\" + name):\n read_massive.append(i)\n\n try:\n for i in range(read_massive[0] + 1, len(all_data)):\n if all_data[i][0] == \"/\":\n read_massive.append(i)\n else:\n break\n except IndexError:\n print(\"File not found\")\n\n text = str()\n for i in read_massive[1:]:\n text += all_data[i][1:-1]\n print(text)\n\n def copy_file(self, name):\n all_data = self.get_system_data()\n copy_massive = []\n\n for i in range(len(all_data)):\n if all_data[i].startswith(\"#\" + name):\n copy_massive.append(i)\n\n try:\n for i in range(copy_massive[0] + 1, len(all_data)):\n if all_data[i][0] == \"/\":\n copy_massive.append(i)\n else:\n break\n except IndexError:\n print(\"File not found\")\n\n enough_space = self.check_for_free_space(all_data, copy_massive[1:])\n\n counter = 0\n try:\n if enough_space:\n for i in range(len(all_data)):\n if all_data[i][0] == \"0\":\n all_data[i] = all_data[copy_massive[counter]]\n counter += 1\n if counter == len(copy_massive):\n break\n else:\n print(\"Not enough space\")\n except IndexError:\n pass\n\n with open(self.system, encoding=\"utf-8\", mode='w') as my_system:\n my_system.writelines(all_data)\n\n def sort(self):\n all_data = self.get_system_data()\n empty_block_massive = []\n\n for i in range(len(all_data)):\n if all_data[i][0] == \"0\":\n empty_block_massive.append(i)\n\n empty_block_massive.sort(reverse=True)\n for i in empty_block_massive:\n all_data.pop(i)\n all_data.append(str(\"0\" * self.block_size + \"\\n\"))\n\n with open(self.system, encoding=\"utf-8\", mode='w') as my_system:\n my_system.writelines(all_data)\n\n\nclass File:\n def __init__(self):\n self.name = str()\n self.data = []\n\n def create(self):\n name = str(input(\"File name: \"))\n self.name = name\n while True:\n data = str(input(\"\"))\n if data == \"\":\n print(\"Input finished\\n\")\n break\n self.data.append(data)\n\n\ndef main():\n system = File_System(\"my_file_system.fs\", 64, 8)\n # system.create()\n\n # file_01 = File()\n # file_01.create()\n # system.add_file(file_01.name, file_01.data)\n\n # system.delete_file(\"text.txt\")\n system.read_file(\"test.txt\")\n # system.copy_file(\"test.txt\")\n # system.sort()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"karicotiza/file_system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"30385382603","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\noccultation.py |github|\n-----------------------\n\nA simple occultation of TRAPPIST-1c by TRAPPIST-1b.\nPlanet c has a 
latitudinal hotspot offset, just for fun.\n\n\n .. plot::\n :align: center\n\n from scripts import occultation\n occultation._test()\n\n .. role:: raw-html(raw)\n :format: html\n\n .. |github| replace:: :raw-html:``\n\n'''\n\nfrom __future__ import division, print_function, absolute_import, \\\n unicode_literals\nfrom planetplanet import Trappist1\nfrom planetplanet.constants import *\nimport matplotlib.pyplot as pl\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\n\ndef _test():\n '''\n\n '''\n\n plot()\n\ndef plot():\n '''\n\n '''\n\n # Instantiate the Trappist-1 system\n system = Trappist1(sample = True, phasecurve = True,\n nbody = True, seed = 999)\n\n # Fudge: Let's make this a nice, near-full occultation\n system.c.Omega = -0.15\n\n # Give `c` a large latitudinal offset in its hotspot just for fun\n system.c.Phi = 30\n\n # Compute an occultation by `b`\n # This would be on December 3, 2021\n time = np.linspace(9552.9364, 9552.9664, 100)\n system.compute(time)\n\n # Plot the occultation\n fig, axlc, axxz, axim = system.plot_occultation('c', 9552.95)\n pl.show()\n\nif __name__ == '__main__':\n plot()\n","repo_name":"rodluger/planetplanet","sub_path":"scripts/occultation.py","file_name":"occultation.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"27"} +{"seq_id":"7873533875","text":"import requests\n\nfrom core import models\n\n\"\"\"\nstats_url = \"https://api.sleeper.app/stats/nfl/2020?season_type=regular&position[]=DEF&position[]=QB&position[]=RB&position[]=TE&position[]=WR&order_by=pts_dynasty_half_ppr\"\nstats_response = requests.get(stats_url).json()\n\"\"\"\n\nprojectsions_url = \"https://api.sleeper.app/projections/nfl/2021?season_type=regular&position[]=DEF&position[]=QB&position[]=RB&position[]=TE&position[]=WR&order_by=pts_dynasty_half_ppr\"\nprojections_response = requests.get(projectsions_url).json()\nplayers = {\n projection['player_id']: projection for projection in projections_response\n}\n\nrosters_url = \"https://api.sleeper.app/v1/league/650006508892954624/rosters\"\nrosters_response = requests.get(rosters_url).json()\n\nfor roster in rosters_response:\n team = models.Team.objects.get(sleeper_id=roster['roster_id'], year=2021)\n for player in roster['players']:\n projection_player = players[player]\n try:\n db_player = models.Player.objects.get(\n name=f\"{projection_player['player']['first_name']} {projection_player['player']['last_name']}\",\n team__year=2021,\n )\n except models.Player.DoesNotExist:\n continue\n db_player.adp = projection_player['stats']['adp_half_ppr']\n if db_player.team.sleeper_id != roster['roster_id']:\n db_player.team = team\n db_player.save()\n","repo_name":"andrijan/mfl-commish","sub_path":"sleeper_importer.py","file_name":"sleeper_importer.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"24557981708","text":"import PopCatAPIWrapper\nimport asyncio\n\nclient = PopCatAPIWrapper.client.PopCatAPI()\n\n\nasync def steam(name: str):\n steamapp = await client.get_steam_app(app_name=name)\n print(steamapp.description)\n\n\nif __name__ == \"__main__\":\n asyncio.run(steam(\"God Of War\"))\n","repo_name":"Infernum1/PopCatWrapper","sub_path":"examples/steamapp.py","file_name":"steamapp.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} 
+{"seq_id":"36609932318","text":"import string\n\nfrom neveredit.util import Loggers\nimport logging\nlogger = logging.getLogger(\"neveredit.file\")\n\nfrom neveredit.util import neverglobals\n\nclass CExoLocString:\n\n def __init__(self,gffentry=None,value=None,langID=0,gender=0):\n if gffentry:\n self.strref = gffentry[0]\n self.locStrings = dict(gffentry[1])\n else:\n self.strref = -1\n self.locStrings = {}\n if value != None:\n self.setString(value,langID,gender)\n\n def getStringAndIndex(self,langID,gender):\n index = langID * 2 + gender\n if index in self.locStrings:\n return (self.locStrings[index],index)\n elif self.strref != -1:\n if not neverglobals.getResourceManager():\n logger.error('no resource manager in CExoLocString')\n return ('',-1)\n else:\n s = neverglobals.getResourceManager().getDialogString(self.strref)\n if s != None:\n return (s,-1)\n else:\n logger.error('error, Invalid Strref in CExoLocString')\n return ('',-1)\n else:\n #print string.join (['error, no string for language',\n # `index`,\n # 'in CExoLocString for embedded strings',\n # `self.locStrings`\n # ])\n return ('',-1)\n\n def getString(self,langID=0,gender=0):\n (text,res) = self.getStringAndIndex(langID,gender)\n return text\n\n def setString(self,str,langID=0,gender=0):\n index = langID * 2 + gender\n s = None\n if self.strref != -1:\n if not neverglobals.getResourceManager():\n logger.error('no resource manager in CExoLocString')\n return\n else:\n s = neverglobals.getResourceManager().getDialogString(self.strref)\n if s != str:\n self.locStrings[index] = str\n \n def toGFFEntry(self):\n return (self.strref,\n zip(self.locStrings.keys(),\n self.locStrings.values()))\n\n def __str__(self):\n return self.getString()\n\n def __repr__(self):\n return self.__str__()\n","repo_name":"sumpfork/neveredit","sub_path":"neveredit/file/CExoLocString.py","file_name":"CExoLocString.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"7195998010","text":"import argparse\nimport multiprocessing\nimport os\nparser = argparse.ArgumentParser(description=\"Process command input\")\nparser.add_argument(\"bam\", action=\"store\", help=\"bam with csi index\")\nparser.add_argument(\"prefix\", action=\"store\", default=10, help=\"prefix of snp calling run\")\nargs = parser.parse_args()\n\ndef worker(com):\n \"\"\"runs the command item in os.system\"\"\"\n os.system(com)\n\n\nchroms = [\"chr1A\", \"chr1B\", \"chr1D\", \"chr2A\", \"chr2B\", \"chr2D\", \"chr3A\", \"chr3B\", \"chr3D\", \"chr4A\", \"chr4B\", \"chr4D\", \"chr5A\", \"chr5B\", \"chr5D\", \"chr6A\", \"chr6B\", \"chr6D\", \"chr7A\", \"chr7B\", \"chr7D\", \"chrUn\"]\n\ncommands = []\nfor item in chroms:\n commands.append(\"samtools mpileup -v --output-tags AD,DP -r {0} -f iwgsc_refseqv1.0_all_chromosomes/161010_Chinese_Spring_v1.0_pseudomolecules.fasta {1} | bcftools call -mv -Ov -o {2}_variants_{0}.raw.vcf\"\n .format(item, args.bam, args.prefix))\n\nif __name__ == '__main__':\n jobs = []\n for item in commands:\n p = multiprocessing.Process(target=worker, args=(item,))\n jobs.append(p)\n p.start()\n for proc in jobs:\n proc.join()\n","repo_name":"Surbhigrewal/chromosome-specific_KASPs","sub_path":"bcftools_call_parallel.py","file_name":"bcftools_call_parallel.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"39586696894","text":"from django.conf.urls import 
url\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom argo import views\n\nlocal_urlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^filter/$',views.filter,name='filter'),\n url(r'^job_display/(?P\w+)/$',views.job_display,name='job_display'),\n ]\n\nurlpatterns = local_urlpatterns \n\n\n","repo_name":"hep-cce/hpc-edge-service","sub_path":"argo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42090417084","text":"from __future__ import annotations\n\nimport pickle\nimport socket\nfrom abc import ABC\nfrom enum import Enum\n\n\n#####################\n# CUSTOM EXCEPTIONS #\n#####################\nclass MessageExchangeError(Exception):\n \"\"\"\n An exception for errors in message exchange.\n \"\"\"\n\n pass\n\n\nclass NoMessageError(MessageExchangeError):\n \"\"\"\n An exception for when no message is received.\n \"\"\"\n\n pass\n\n\nclass SocketManager(ABC):\n \"\"\"\n An abstract class for handling sockets\n \"\"\"\n\n ##############################################\n # GROUP A SKILL: COMPLEX CLIENT-SERVER MODEL #\n ##############################################\n def __init__(self, host: str, port: int):\n self.host = host\n self.port = port\n self.socket: socket.socket = self.__createUnboundSocket()\n # socket attribute should be set by the subclass\n\n def __createUnboundSocket(self) -> socket.socket:\n # Create a socket object\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(20)\n return s\n\n def _createServerSocket(self) -> socket.socket:\n s = self.__createUnboundSocket()\n # Bind to the address\n s.bind((self.host, self.port))\n s.listen(1)\n # wait for client connection.\n conn, addr = s.accept()\n # close the listening socket\n s.close()\n # return the client connection socket\n return conn\n\n def _createClientSocket(self) -> socket.socket:\n s = self.__createUnboundSocket()\n # connect to the server\n s.connect((self.host, self.port))\n return s\n\n def __send(self, msg: bytes):\n \"\"\"\n Sends a message to the socket.\n \"\"\"\n self.socket.sendall(msg)\n\n def __receiveData(self) -> bytes:\n \"\"\"\n Receives a message from the socket.\n \"\"\"\n return self.socket.recv(2048)\n\n def __sendMessage(self, msg: bytes):\n \"\"\"\n Sends a message to the socket.\n Waits for a confirmation message from the socket.\n Sets the timeout to be 5 seconds while waiting for the confirmation message.\n \"\"\"\n # send the message\n self.__send(msg)\n # change the timeout to 5 seconds\n timeout = self.socket.gettimeout()\n self.socket.settimeout(5)\n try:\n # wait for the confirmation message\n c = self.__receiveData()\n finally:\n # reset the timeout\n self.socket.settimeout(timeout)\n # if the confirmation message is not the expected one, raise an exception\n if not c or c.decode() != self.possibleMessages.CONFIRM.value:\n raise MessageExchangeError(\"Did not receive confirmation\")\n\n def __receiveMessage(self) -> bytes:\n \"\"\"\n Receives a message from the socket.\n Sends a confirmation message to the socket.\n \"\"\"\n msg = self.__receiveData()\n if not msg:\n raise NoMessageError(\"No message received\")\n # send the confirmation message\n self.__send(self.possibleMessages.CONFIRM.value.encode())\n return msg\n\n def sendMessage(self, msg: possibleMessages, *args):\n \"\"\"\n Sends data to the socket.\n The data is pickled and then sent.\n The primary message will be in the form of:\n <message value><delimiter><number of arguments>\n It will then 
wait for a confirmation message.\n Any subsequent messages will be in the form of:\n <pickled argument>\n Finally, it should receive a confirmation message.\n \"\"\"\n ##################################################\n # GROUP A SKILL: COMPLEX USER DEFINED ALGORITHMS #\n ##################################################\n primaryMsg = msg.value + self.possibleMessages.DELIMITER.value + str(len(args))\n # send the primary message\n self.__sendMessage(primaryMsg.encode())\n # send the subsequent messages\n for arg in args:\n self.__sendMessage(self.__pickleData(arg))\n\n def receiveMessage(self, timeout: bool = True) -> tuple[possibleMessages, list]:\n \"\"\"\n Receives a message from the socket.\n Sends a confirmation message.\n Splits it according to the delimiter.\n The message should be in the form of:\n <message value><delimiter><number of arguments>\n It then receives the subsequent pickled data.\n It returns a tuple of the message type and the list of data.\n \"\"\"\n ##################################################\n # GROUP A SKILL: COMPLEX USER DEFINED ALGORITHMS #\n ##################################################\n # if we do not want the socket to timeout, we set the timeout to None\n if not timeout:\n oldTimeout = self.socket.gettimeout()\n self.socket.settimeout(None)\n # receive the primary msg\n primaryMsg = self.__receiveMessage()\n # split the primary msg into the message type and the number of subsequent messages\n msg, numData = self.splitData(primaryMsg.decode())\n encData = []\n # receive the subsequent data\n for _ in range(int(numData[0])):\n d = self.__receiveMessage()\n encData.append(d)\n # unpickle the data\n data = [self.__unpickleData(d) for d in encData]\n if not timeout:\n # set the timeout back to the old value\n self.socket.settimeout(oldTimeout)\n return msg, data\n\n def splitData(self, data: str) -> tuple[possibleMessages, list]:\n \"\"\"\n Splits the data received from the socket.\n Returns a tuple of the message and a list of the data.\n \"\"\"\n msg = data.split(self.possibleMessages.DELIMITER.value)\n return self.getEnumFromStr(msg[0]), msg[1:]\n\n def getEnumFromStr(self, msg: str) -> possibleMessages:\n \"\"\"\n Convert a string value into its enum equivalent from the enum possibleMessages.\n \"\"\"\n for member in self.possibleMessages:\n if msg == member.value:\n return member\n raise ValueError(f\"Invalid Enum: {msg}\")\n\n def __pickleData(self, data) -> bytes:\n \"\"\"\n Pickles the data and returns it as bytes\n \"\"\"\n return pickle.dumps(data)\n\n def __unpickleData(self, data: bytes):\n \"\"\"\n Unpickles the data and returns it\n \"\"\"\n return pickle.loads(data)\n\n def close(self):\n \"\"\"\n Closes the socket.\n \"\"\"\n self.socket.close()\n\n def __del__(self):\n self.close()\n\n class possibleMessages(Enum):\n \"\"\"\n An enum for possible messages by the server.\n \"\"\"\n\n DELIMITER = \"#\"\n GET_MOVE = \"getMove\"\n RETURN_MOVE = \"returnMove\"\n GET_CODE = \"getCode\"\n RETURN_CODE = \"returnCode\"\n DISPLAY_BOARD = \"displayBoard\"\n DISPLAY_ROUND_WINNER = \"displayRoundWinner\"\n DISPLAY_WINNER = \"displayWinner\"\n DISPLAY_ROUND_NUMBER = \"displayRoundNumber\"\n DISCONNECT = \"disconnect\"\n CONFIRM = \"confirm\"\n","repo_name":"Rufushwilliams/NEA---MasterMind","sub_path":"Sockets.py","file_name":"Sockets.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43686808530","text":"import sys\n\ninput = sys.stdin.readline\n\nstr1 = input().rstrip()\nstr2 = input().rstrip()\n\nLCS = 
[[0] * (len(str2) + 1) for _ in range(len(str1) + 1)]\n\n## using DP\n\"\"\"\ncache = [0] * len(str2)\nfor i in range(len(str1)):\n cnt = 0\n for j in range(len(str2)):\n if cnt < cache[j]:\n cnt = cache[j]\n\n elif str1[i] == str2[j]:\n cache[j] = cnt + 1\n\nprint(max(cache))\n\"\"\"\n\n## LCS algorithm\nfor i in range(1, len(str1) + 1):\n for j in range(1, len(str2) + 1):\n if str1[i - 1] == str2[j - 1]:\n LCS[i][j] = LCS[i - 1][j - 1] + 1\n else:\n LCS[i][j] = max(LCS[i - 1][j], LCS[i][j - 1])\n\nprint(max(map(max, LCS)))\n","repo_name":"bmlsj/Solve-algorithms","sub_path":"BOJ/DP/[9251]LCS.py","file_name":"[9251]LCS.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27375428528","text":"#Erhan Kayır\r\n#28.03.2023\r\n\r\nimport time\r\n\r\ndef Insertion_Sort(my_list):# Insertion Sort\r\n start_time = time.time()\r\n for i in range(1, len(my_list)):\r\n key_item = my_list[i]\r\n j = i - 1\r\n while j >= 0 and my_list[j] > key_item:\r\n my_list[j + 1] = my_list[j]\r\n j -= 1\r\n my_list[j + 1] = key_item\r\n end_time = time.time()\r\n print(\"Insertion Sort processing time: {:.5f} seconds\".format(end_time - start_time))","repo_name":"erhankayir/python-exercises-1","sub_path":"q2.1/Insertion_Sort.py","file_name":"Insertion_Sort.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6499796608","text":"import main\nfrom flask import json\nfrom unittest import TestCase\n\nclass TestIntegrations(TestCase):\n def setUp(self):\n self.app = main.app.test_client()\n\n def test_1_running(self):\n response = self.app.get('/')\n print(response.data)\n assert b'We are online.' 
in response.data\n\n def test_2_getRepository_withRightIndex(self):\n response = self.app.get('/api/SOC-LAB-IOT')\n data = json.loads(response.data)\n print(data)\n self.assertEqual(data, repo)\n\n def test_3_getRepository_withFalseIndex(self):\n response = self.app.get('/api/46418852')\n print(response.data)\n assert b'Repository not found' in response.data\n\n def test_4_postRepository(self):\n response = self.app.post('api/111', data= repo1)\n data = json.loads(response.data)\n print(data)\n self.assertEqual(data, repo1)\n response = self.app.post('api/111', data=repo1)\n print(response.data)\n assert b'Repository with index 111 already exists' in response.data\n\n\n def test_5_putRepository(self):\n response = self.app.put('api/SOC-LAB-IOT', data= changes)\n print(repoChanged)\n data = json.loads(response.data)\n print(data)\n self.assertEqual(data, repoChanged)\n\n def test_6_deleteRepository(self):\n response = self.app.delete('api/SOC-LAB-IOT')\n print(response.data)\n assert b'Repository SOC-LAB-IOT is deleted' in response.data\n response = self.app.get('api/SOC-LAB-IOT')\n print(response.data)\n assert b'Repository not found\"' in response.data\n\n\n\nrepo = {\n \"Index\": \"SOC-LAB-IOT\",\n \"Title\": \"IOT Image Processing\",\n \"Version\": \"0.0.1\",\n \"Description\": \"\",\n \"Changelog\": [\"3.11.2018 Primary release\", \"4.11.2018 Bug fixing\"],\n \"File\": \"file link\",\n \"Date\": \"4.11.2018\",\n \"Checksum\": \"\"\n}\n\nchanges = {\n \"Version\": \"0.0.5\",\n \"Changelog\": \"4.11.2018 additional Bug fixing\",\n \"Date\": \"5.11.2018\"\n}\n\nrepoChanged = {\n \"Index\": \"SOC-LAB-IOT\",\n \"Title\": \"IOT Image Processing\",\n \"Version\": \"0.0.5\",\n \"Description\": \"\",\n \"Changelog\": [\"3.11.2018 Primary release\", \"4.11.2018 Bug fixing\", \"4.11.2018 additional Bug fixing\"],\n \"File\": \"file link\",\n \"Date\": \"5.11.2018\",\n \"Checksum\": \"\"\n}\n\n\n\nrepo1 = {\n \"Index\": \"111\",\n \"Title\": \"Some Title\",\n \"Version\": \"0.0.1\",\n \"Description\": \"\",\n \"Changelog\": [\"3.11.2018 Primary release\"],\n \"File\": \"file link\",\n \"Date\": \"4.11.2018\",\n \"Checksum\": \"\"\n}\n\n","repo_name":"1chor/SoC-project","sub_path":"server/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"73843385033","text":"# -*- coding: utf-8 -*-\n# @date:2023/5/18 17:29\n# @Author:LiuYiJie\n# @file: sha256\nimport hashlib\n\n\ndef shaEncrypt(data):\n sha = hashlib.sha256()\n data = data.encode(encoding='utf-8')\n sha.update(data)\n result = sha.hexdigest()\n return result\n\n\nif __name__ == '__main__':\n data = '123456'\n result = shaEncrypt(data)\n print(result)","repo_name":"yjsdl/python_encryption","sub_path":"SHA256/sha_data.py","file_name":"sha_data.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"326577298","text":"import numpy as np\nimport streamlit as st\nfrom PIL import Image\n\nfrom MachineLearninginComputerVision import demo\n\nst.set_page_config(layout=\"wide\")\nst.title(\"YOLOv3-nano Demo\")\nst.write(\n \"This web app demonstrates the YOLOv3-nano model \"\n \"that detects objects and assigns them \"\n \"to 10 different classes displaying the confidence level (from 0 to 1)\"\n)\n\nfile_upload = st.file_uploader(\"Upload the image\", type=[\"jpeg\", \"jpg\", \"png\"])\ncol_1, col_2 = 
st.columns(2)\n\nif file_upload is not None:\n img = Image.open(file_upload)\n col_1.image(img, \"Original image\")\n array_img = np.array(img)\n result = demo.main(show=False, file_img=array_img)\n col_2.image(result, \"Resulting image\")\n","repo_name":"aleksei-andreev/Machine-Learning-in-Computer-Vision","sub_path":"src/MachineLearninginComputerVision/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"7482095654","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\"\n}\ndef get_product():\n base_url = \"https://armeyka.com.ua/ua/g108531679-bryuki\"\n \n r = requests.get(url=base_url, headers=headers)\n html = r.text\n soup = BeautifulSoup(html, \"lxml\")\n \n page_count = int(soup.find(\"div\", {\"data-bazooka\": \"Paginator\"})[\"data-pagination-pages-count\"])\n current_page = int(soup.find(\"div\", {\"data-bazooka\": \"Paginator\"})[\"data-pagination-current-page\"])\n \n for page in range(1, page_count + 1):\n url = f\"{base_url}/page_{page}\"\n r = requests.get(url=url, headers=headers)\n html = r.text\n soup = BeautifulSoup(html, \"lxml\")\n \n \n # Find all links with the \"b-product-gallery__image-link\" class\n links = soup.find_all(\"a\", class_=\"b-product-gallery__image-link\")\n \n categ = soup.find(\"h1\", class_=\"b-title\").text\n \n # Iterate over all links and print their href and title\n for link in links:\n href = \"https://armeyka.com.ua\" + link.get(\"href\")\n title = link.get(\"title\")\n print(\"category:\", categ)\n print(\"Href:\", href)\n print(\"Title:\", title)\n yield href\n \n print(f\"Processing page {page}: {url}\")\n \ndef product_array(categ):\n for href in get_product():\n r = requests.get(href, headers=headers)\n time.sleep(3)\n # Go to the href page and look up the product image element\n soup = BeautifulSoup(r.text, \"lxml\")\n title = soup.find(\"img\", class_ = \"b-product-view__image\").get(\"alt\")\n img = soup.find(\"img\", class_ = \"b-product-view__image\").get(\"src\")\n des = soup.find(\"div\", class_ = \"b-user-content\").text.strip()\n # desPro = soup.find(\"table\", class_ = \"b-product-info\")\n art = soup.find('span', {'data-qaid': 'product_code'}).text.strip()\n price = soup.find(\"span\", class_ = \"b-sticky-panel__price\").text\n # Print the scraped fields\n print(f\"{categ} | {title} | {img} | {des} | {art} | {price}\\n\\n\")\n \n result = f\"{title} | {img} | {des} | {art} | {price}\\n\\n\"\n \n # Append the scraped result to a text file\n with open('file.txt', 'a', encoding='utf-8') as file:\n file.write(result)\n print(\"write ok\")\n \nproduct_array(get_product())","repo_name":"CelestatOne/Termux","sub_path":"pars.py","file_name":"pars.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"9599432393","text":"# coding=utf-8\nimport os\nimport sys\nimport unittest\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nfrom untils.base_runner import ParametrizedTestCase\nfrom businessView.loginView import LoginView\n\n\nclass LoginFail(ParametrizedTestCase):\n @classmethod\n def setUpClass(cls):\n super(LoginFail, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n super(LoginFail, cls).tearDownClass()\n\n def tearDown(self):\n 
self.loginView.get_screenshot()\n\n def test_login_failed(self):\n u'''use incorrect username and password'''\n self.loginView = LoginView(self.driver)\n self.loginView.input_username(u'测试')\n self.loginView.input_password(u'asdasdasdsd')\n self.loginView.click_login_button()\n self.assertEquals('a', 'b')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ITO-East-AutomationTesting/AppAutomationTestV2","sub_path":"test_case/test_login_fail.py","file_name":"test_login_fail.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27030815420","text":"\"\"\"Get list of houses in Chicago community areas.\"\"\"\n\nimport os\nimport time\nimport requests\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nfrom numpy.random import randint\nimport filetype\n\n\nclass Houses:\n \"\"\"Get data for Chicago houses.\n\n This class includes methods to get property characteristics (e.g.\n number of bedrooms, wall materials, etc.), images, and footprint\n shapes of houses in Chicago. The results are filtered by community\n area and build year.\n\n Attributes:\n community_areas: User-provided list of community areas\n year_range: User-provided tuple of min and max build years\n house_list: Geopandas GeoDataFrame with house data\n community_boundaries: Shapely Multipolygon of community area shapes\n \"\"\"\n def __init__(self, community_areas: list, year_range: tuple = False):\n \"\"\"Instance of Houses class.\n\n Args:\n community_areas: A list of neighborhoods, e.g. ['Logan Square', 'Edgewater'].\n See [Chicago community areas]\n (https://en.wikipedia.org/wiki/Community_areas_in_Chicago).\n year_range: A two-element tuple with min and max build years, e.g. (1890, 1910)\n \"\"\"\n self.community_areas = community_areas\n self.year_range = year_range\n self.house_list = None\n self.community_boundaries = None\n\n def get_houses(self, results_limit: int = 100000, all_data: bool = False):\n \"\"\"Get a list of houses (with residential characteristics).\n\n The data come from the Cook County Assessor. By default, this method\n returns a subset of fields from the dataset: tax pin, address, and coordinates.\n Setting `all_data` to `True` will return all fields, which include\n property characteristics (e.g. number of bedrooms, wall materials, etc.).\n\n Args: \n all_data: Return all fields, including property characteristics \n (e.g. number of bedrooms, wall materials, etc.). May return multiple\n rows per address.\n results_limit: Max number of results to obtain from the Assessor API\n (the actual number of houses may be smaller after filtering\n by community area shape).\n\n Returns: \n GeoPandas GeoDataFrame of house data\n \"\"\"\n boundaries, outer_boundary = get_community_boundaries(\n self.community_areas)\n self.community_boundaries = boundaries\n house_list = get_house_list(outer_boundary, self.year_range,\n results_limit, all_data)\n house_list = process_house_list(\n house_list, outer_boundary)\n self.house_list = house_list\n return house_list\n\n def get_images(self, output_path: str = 'img/'):\n \"\"\"Get house images.\n\n The Cook County Assessor has photos of most residential properties available \n on its website (e.g. 
https://www.cookcountyassessor.com/pin/13253100070000).\n The photos are retrieved via structured urls; this function retrieves and \n saves those images if available, recording the paths to a new column in the\n house list.\n\n Note: please use this sparingly. This is basically scraping and your access \n may be rate limited or blocked if you aren't considerate.\n\n Args:\n output_path (optional): Set path where images are saved\n\n Returns: \n img_paths: GeoPandas GeoDataFrame of house data with image paths\n \"\"\"\n # Create destination directory.\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n\n # Prep data.\n pins = self.house_list['pin'].to_list()\n img_paths = []\n\n # Get images.\n for i, pin in enumerate(pins):\n print(f'Fetching image {i} of {len(pins)}...', end='\\r')\n img_data = get_image(pin)\n save_path = save_image(pin, img_data, output_path)\n img_paths.append(save_path)\n time.sleep(randint(5, 10))\n print('Image retrieval complete.' + ' ' * 50)\n self.house_list['img_paths'] = img_paths\n return self.house_list\n\n\ndef get_community_boundaries(community_areas):\n \"\"\"Get geo outlines for a list of community areas.\n \"\"\"\n community_areas = [x.upper() for x in community_areas]\n url = ('https://data.cityofchicago.org/api/geospatial/'\n 'cauq-8yn6?method=export&format=GeoJSON')\n gdf = gpd.read_file(url)\n gdf = gdf[gdf['community'].isin(community_areas)]\n # Merge geometries for every community area into one.\n shape = gdf.unary_union\n return gdf, shape\n\n\ndef get_house_list(community_boundaries, year_range,\n results_limit, all_data):\n \"\"\"Get raw property data.\n \"\"\"\n # Note: the datasource is from 2022.\n url = 'https://datacatalog.cookcountyil.gov/resource/bcnq-qi2z.json'\n # Coords for box enclosing community area\n # The `x` values are negative here.\n minx, miny, maxx, maxy = community_boundaries.bounds\n if all_data:\n query = f\"\"\"\n SELECT \n *\n WHERE \n centroid_y > \"{miny}\" \n AND centroid_y < \"{maxy}\" \n AND centroid_x > \"{maxx}\" \n AND centroid_x < \"{minx}\" \"\"\"\n if year_range:\n query += (f'AND age BETWEEN {2022 - year_range[1]}'\n f' AND {2022 - year_range[0]} ')\n else:\n query = f\"\"\"\n SELECT \n MIN(pin) AS pin, \n addr, \n MIN(centroid_x) AS centroid_x,\n MIN(centroid_y) AS centroid_y, \n MIN(2022 - age) AS year_built\n GROUP BY addr \n HAVING \n centroid_y > \"{miny}\" \n AND centroid_y < \"{maxy}\" \n AND centroid_x > \"{maxx}\" \n AND centroid_x < \"{minx}\" \"\"\"\n if year_range:\n query += (f'AND year_built BETWEEN {year_range[0]}'\n f' AND {year_range[1]} ')\n if results_limit:\n query += f'LIMIT {results_limit}'\n r = requests.get(url, params={'$query': query})\n if len(r.json()) == 0:\n raise RuntimeError('API response had no results.')\n return r.json()\n\n\ndef process_house_list(house_list, shape):\n \"\"\"Filter house list by community boundaries.\n \"\"\"\n gdf = gpd.GeoDataFrame(house_list)\n gdf = gdf.set_geometry(gpd.points_from_xy(gdf['centroid_x'],\n gdf['centroid_y']))\n gdf = gdf.set_crs('EPSG:4326')\n gdf = gdf[gdf['geometry'].within(shape)]\n if len(gdf.index) == 0:\n raise RuntimeError('No results in community boundaries.')\n gdf = gdf.drop(columns=['centroid_x', 'centroid_y'])\n return gdf\n\n\ndef get_image(pin, debug=False):\n \"\"\"Build URL and request image.\n \"\"\"\n url_stub = 'https://prodassets.cookcountyassessor.com/s3fs-public/pin_detail/'\n url_encoding = f'{pin[0:3]}-{pin[3:5]}/{pin[5:8]}/{pin}_AA.JPG'\n img_data = requests.get(url_stub + url_encoding, 
timeout=120).content\n if debug:\n print(f'Attempted to retrieve image at {url_stub + url_encoding}')\n return img_data\n\n\ndef save_image(pin, img_data, output_path, debug=False):\n \"\"\"If input is valid image, create image file.\n \"\"\"\n file_path = f\"{output_path}{pin}.jpg\"\n if filetype.is_image(img_data):\n with open(file_path, 'wb') as img:\n img.write(img_data)\n if debug:\n print(f'{pin}: created image at {file_path}')\n return file_path\n else:\n if debug:\n print(f'{pin}: pin did not return valid image')\n return None","repo_name":"tbirch4/chi-houses","sub_path":"chi_houses/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":7628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35441250953","text":"\"\"\"\nAuthor: Mark McDonald\nThis model holds nuts regions and shape geometry\n\"\"\"\nimport logging\nimport shapely.wkt\nfrom shapely.geometry import Point\n\nfrom django.db import models\n\nimport geopandas as gpd\n\n# Default NUTS version - 4-digit integer of year version published\nCURRENT_NUTS_VERSION = 2016\nEU_ISOCODES = ['AT', 'BE', 'BG', 'HR', 'CY', 'CZ', 'DK', 'EE', 'FI',\n 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU',\n 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'UK']\n\n\nclass NutsRegions(models.Model):\n \"\"\"\n Hold nuts regional information including geometry\n \"\"\"\n key = models.CharField(max_length=32, primary_key=True)\n year = models.CharField(max_length=4, db_index=True)\n id = models.CharField(max_length=8, db_index=True)\n LEVL_CODE = models.IntegerField(blank=True, db_index=True)\n NUTS_ID = models.CharField(max_length=16, db_index=True)\n CNTR_CODE = models.CharField(max_length=64, db_index=True)\n NUTS_NAME = models.CharField(max_length=128, db_index=True)\n FID = models.CharField(max_length=16, db_index=True)\n EU_MEMBER = models.BooleanField()\n geometry = models.CharField(max_length=4194304)\n\n def __str__(self):\n return self.NUTS_ID\n\n @staticmethod\n def get_nuts_region_boundaries(nuts_level: list = None, country_codes: list = None,\n year: int = CURRENT_NUTS_VERSION) -> dict:\n \"\"\"\n Return the basic regional information for NUTS regions.\n :param nuts_level: Nuts level for which to return data. If None, all are returned.\n :param country_codes: List of country code for which boundaries are returned. If None, all EU countries are returned.\n :param year: The year of the NUTS version.. 
Defaults to the current version.\n :return: A dictionary with the shape objects for each NUTS region.\n \"\"\"\n\n def _create_level_dict(level_qs, country_list: list) -> dict:\n rv = {}\n for r in level_qs:\n if r.CNTR_CODE in country_list:\n rv.update({r.NUTS_ID: {'name': r.NUTS_NAME,\n 'country_code': r.CNTR_CODE,\n 'geography': r.geometry}})\n return rv\n\n if nuts_level is None:\n levels_list = [0, 1, 2, 3]\n else:\n levels_list = [nuts_level]\n\n if country_codes is None:\n country_codes = EU_ISOCODES\n else:\n country_codes = [c.upper() for c in country_codes]\n\n rv_dict = {}\n for level in levels_list:\n qs = NutsRegions.objects.filter(LEVL_CODE=level, year=year)\n rv_dict.update({level: _create_level_dict(qs, country_codes)})\n\n return rv_dict\n\n @staticmethod\n def get_nuts_regions(nuts_level: int = None, year: int = CURRENT_NUTS_VERSION) -> dict:\n \"\"\"\n Return the basic regional information for NUTS regions.\n :param nuts_level: Nuts level for which to return data\n :param year: The NUTS year\n :return: dictionary of data\n \"\"\"\n\n def _create_level_dict(level_qs) -> dict:\n rv = {}\n for r in level_qs:\n rv.update({r.NUTS_ID: {'name': r.NUTS_NAME, 'country_code': r.CNTR_CODE}})\n return rv\n\n if nuts_level is None:\n levels_list = [0, 1, 2, 3]\n else:\n levels_list = [nuts_level]\n\n rv_dict = {}\n for level in levels_list:\n qs = NutsRegions.objects.filter(LEVL_CODE=level, year=year)\n rv_dict.update({level: _create_level_dict(qs)})\n\n return rv_dict\n\n @staticmethod\n def get_country_code_lookup() -> dict:\n country_lookup = {}\n for c in EU_ISOCODES:\n try:\n country_lookup.update({c: NutsRegions.objects.get(CNTR_CODE=c, LEVL_CODE=0)})\n except Exception as e:\n logging.debug(f\"'{c}' skipped. Not in NUTS regions.\")\n\n return country_lookup\n\n @staticmethod\n def get_nuts_geoframe(nuts_version: int = 2016, crs: int = 4326, nuts_level: int = None) -> gpd.GeoDataFrame:\n \"\"\"\n Return the NUTS regions data as a Pandas GeoDataFrame.\n :param nuts_level: Optional. If None, all are returned.\n :param nuts_version: Optional. Set to 2016.\n :param crs: Optional. Default set to 4326.\n :return: GeoPandas DataFrame with NUTS regions\n \"\"\"\n\n gdf = gpd.GeoDataFrame()\n\n if nuts_level is not None:\n qs = NutsRegions.objects.filter(LEVL_CODE=nuts_level, year=nuts_version)\n else:\n qs = NutsRegions.objects.filter(year=nuts_version)\n\n for record in qs:\n d = {'key': record.key,\n 'year': record.year,\n 'id': record.id,\n 'LEVL_CODE': record.LEVL_CODE,\n 'NUTS_ID': record.NUTS_ID,\n 'CNTR_CODE': record.CNTR_CODE,\n 'NUTS_NAME': record.NUTS_NAME,\n 'FID': record.FID,\n 'db_record': record,\n 'geometry': gpd.GeoSeries(shapely.wkt.loads(record.geometry))} # record.geometry}\n\n gdf = gdf.append(gpd.GeoDataFrame(d), ignore_index=True)\n\n gdf.crs = crs # set the projection\n\n return gdf\n\n @staticmethod\n def get_nuts_record(lat, lon, crs: int, nuts_level: int, nuts_version: int = 2016):\n \"\"\"\n Returns a single NUTS record based on lat lon and nuts_level\n :param lat: Latitude of the point\n :param lon: Longitude of the point\n :param crs: CRS of the point\n :param nuts_level: Required. Level of record to return.\n :param nuts_version: The NUTS version/year as 4-digit year YYYY. Optional. 
Defaults to 2016.\n\n :return: The record from the dataset that the lat/lon point falls in.\n \"\"\"\n\n gdf = NutsRegions.get_nuts_geoframe(nuts_level=nuts_level)\n\n point = gpd.GeoSeries(Point(lon, lat), crs=crs).to_crs(gdf.crs)[0]\n\n try:\n nuts_key = gdf[(gdf.contains(point)) & (gdf.LEVL_CODE == nuts_level)].iloc[0].key\n except (AttributeError, Exception) as e:\n err = f\"{e} : lat:{lat} lon:{lon} nuts_level:{nuts_level} year:{nuts_version} crs:{crs}\"\n logging.info(err)\n return None\n\n return NutsRegions.objects.get(pk=nuts_key)\n\n\nclass EUCountries(models.Model):\n key = models.CharField(max_length=4, primary_key=True)\n nuts_region = models.ForeignKey(NutsRegions, on_delete=models.DO_NOTHING, null=True, related_name=\"EU_countries\",\n db_index=True)\n\n def __str__(self):\n return self.key\n\n @staticmethod\n def get_country_code_lookup() -> dict:\n\n eu_countries = EUCountries.objects.all()\n\n country_lookup = {}\n for c in eu_countries:\n try:\n country_lookup.update({str(c.key): c})\n except Exception as e:\n logging.debug(f\"'{c}' skipped. Not an EU country.\")\n\n return country_lookup\n\n\n\n","repo_name":"mcdomx/eugreendeal","sub_path":"airpollution/models/models_nuts.py","file_name":"models_nuts.py","file_ext":"py","file_size_in_byte":7131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"14281137904","text":"import os \nimport errno\nfrom collections import defaultdict\nfrom osgeo import gdal\n\nimport boto3\nfrom botocore import UNSIGNED\nfrom botocore.config import Config\n\nfrom agrimask.satellite import sentinel2\nfrom agrimask.utils.helper import *\nfrom agrimask.utils.raster import *\n\nwd = str(os.getcwd())\n\ns2 = sentinel2.Sentinel2()\n\ns3 = boto3.resource('s3', config=Config(signature_version=UNSIGNED))\ns3_client = boto3.client('s3')\n\n\nclass indice:\n def __init__(self):\n gdal.SetConfigOption('AWS_NO_SIGN_REQUEST', 'YES')\n self.source_s3_bucket = 'sentinel-cogs'\n self.source_s3_dir = 'sentinel-s2-l2a-cogs'\n self.s2_tile_shp = 'satellite_tiles/s2_tile.shp'\n self.bands = ['B04', 'B08']\n self.indices = 'ndvi'\n self.crs = 'EPSG:4326'\n self.sat_name = 'sentinel2'\n self.sat_id = 'S2'\n self.home_dir = os.path.join(wd, 'cc',\n str(datetime.today().strftime('%Y-%m-%d-%H-%M-%S')).split('.')[0].replace(' ', ''))\n self.home_dir = self.home_dir.replace('\\\\', '/')\n\n def pid_to_path(self, prod_id, band):\n lst = prod_id.split('_')\n tile = lst[1]\n utm_zone, lat_band, grid_sq = str(tile)[:2], str(tile)[2], str(tile)[3:5]\n _year, _month, _day = lst[2][0:4], lst[2][4:6], lst[2][6:8]\n vsi_ext = '/vsis3/'\n band_tif = os.path.join(vsi_ext, self.source_s3_bucket, self.source_s3_dir, str(utm_zone), str(lat_band),\n str(grid_sq),str(_year), str(int(_month)), str(prod_id), str(band) + '.tif')\n band_tif = band_tif.replace('\\\\', '/')\n return band_tif\n\n def layer_stack(self, start_date, end_date, cloud_threshold, data_days_interval,\n shp_file=None, bbox=None):\n if shp_file:\n shp_file = shp_file.replace('\\\\', '/')\n pids = s2.get_product_ids(start_date, end_date, cloud_threshold, data_days_interval, shp_file, bbox)\n ndvi_list = []\n _dates = sorted(pids.keys())\n print(f'Dates found (in ascending order) :{_dates}')\n count = 0\n n = len(_dates)\n for _key in _dates:\n pid_list = pids[_key]\n bands_dict = defaultdict(list)\n for item in pid_list:\n for band in self.bands:\n band_tif = self.pid_to_path(prod_id=item, band=band)\n bands_dict[band].append(band_tif)\n\n merged_file_dir = 
os.path.join(self.home_dir, str(_key), 'merged')\n try:\n os.makedirs(merged_file_dir)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n pass\n merged_file_dir = merged_file_dir.replace('\\\\', '/')\n merged_b8 = merge_clip_raster(bands_dict['B08'], str(merged_file_dir) + '/B08.tif',\n shp_file, bbox)\n merged_b4 = merge_clip_raster(bands_dict['B04'], str(merged_file_dir) + '/B04.tif',\n shp_file, bbox)\n b8 = raster_to_array(merged_b8)\n b4 = raster_to_array(merged_b4)\n del merged_b4\n ndvi_arr = self.ndvi(b4, b8)\n del b8\n del b4\n ndvi_file_path = os.path.join(self.home_dir, str(_key), 'ndvi.tif')\n ndvi_file_path = ndvi_file_path.replace('\\\\', '/')\n write_raster(merged_b8, ndvi_arr, ndvi_file_path, gdal.GDT_Float32)\n del merged_b8\n del ndvi_arr\n ndvi_list.append(ndvi_file_path)\n count += 1\n print(f'Progress : {100 * count // n}% completed')\n stack_path = os.path.join(self.home_dir, 'stack')\n stack_path = stack_path.replace('\\\\', '/')\n try:\n os.makedirs(stack_path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n pass\n file_path = os.path.join(stack_path, 'ndvi.tif')\n raster_mosaic(ndvi_list, file_path)\n return file_path\n\n @staticmethod\n def ndvi(red, nir):\n arr = (nir - red) / (nir + red)\n arr[arr > 1] = 0\n arr[arr < -1] = 0\n return arr\n\n @staticmethod\n def fcc(green, red, nir, output_vrt):\n return raster_mosaic([green, red, nir], output_vrt)\n","repo_name":"sumit-maan/agriculture-land-identification","sub_path":"agrimask/stack/ndvi_ts.py","file_name":"ndvi_ts.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21380193166","text":"\"\"\"\nUsed in train_v4, supports full action and state space.\n\nNew features:\n1. Store for every timestep the 5 most probable actions that the agent can choose\n2. Store for every timestep the parameter distribution of all the top 5 actions' parameters\n3. Do not store every parameter distribution\n4. 
Init passing agent instance instead of action_dict and env instance\n\"\"\"\n\nimport torch\nimport numpy as np\nfrom torch.distributions import Categorical\nimport torch.nn.functional as F \nfrom pysc2.lib import actions\n\nclass InspectionDict():\n def __init__(self, step_idx, PID, agent):\n self.step_idx = step_idx\n self.PID = PID\n \n # Copy some useful internal variables - check which are needed\n self.screen_res = agent.AC.screen_res\n self.all_actions = agent.AC.all_actions\n self.all_arguments = agent.AC.all_arguments\n self.action_space = agent.AC.action_space\n self.arguments_names_lst = agent.AC.arguments_names_lst # not sure this one is needed\n self.arguments_type = agent.AC.arguments_type # useful in plotting\n self.act_to_arg_names = agent.AC.act_to_arg_names # useful to get top_5_action_distr\n self.dict = dict(\n state_traj = [],\n rewards = [],\n action_distr = [],\n action_sel = [],\n top_5_actions = [],\n top_5_action_distr = [],\n args = [],\n values = None,\n trg_values = None,\n critic_losses = None,\n advantages = None,\n actor_losses = None)\n\n def store_step(self, step_dict):\n # store every other trajectory variable except state_traj\n for k in step_dict:\n self.dict[k].append(step_dict[k])\n return\n \n def store_update(self, update_dict):\n for k in update_dict:\n self.dict[k] = update_dict[k]\n return\n \n def save_dict(self, path='../Results/MoveToBeacon/Inspection/'):\n np.save(path+self.PID+\"_\"+str(self.step_idx), self.dict)\n return\n\ndef inspection_step(agent, inspector, state, action_mask):\n spatial_state = state['spatial']\n player_state = state['player']\n spatial_state = torch.from_numpy(spatial_state).float().to(agent.device)\n player_state = torch.from_numpy(player_state).float().to(agent.device)\n action_mask = torch.tensor(action_mask).to(agent.device)\n\n log_probs, spatial_features, nonspatial_features = agent.AC.pi(spatial_state, player_state, action_mask)\n entropy = agent.compute_entropy(log_probs)\n probs = torch.exp(log_probs)\n a = Categorical(probs).sample()\n a = a.detach().cpu().numpy()\n log_prob = log_probs[range(len(a)), a]\n\n ### Inspection ###\n step_dict = {}\n p = probs.detach().cpu().numpy() \n step_dict['action_distr'] = p\n step_dict['action_sel'] = a\n \n # Choose top 5 actions from the probabilities - check about the batch dim\n top_5 = np.argsort(p)[:,-5:]\n top_5_actions = np.array(top_5[:,::-1])[0] # some issues in accessing p if I don't call np.array()\n #print(\"top_5_actions: \", top_5_actions, top_5_actions.shape)\n step_dict['top_5_actions'] = top_5_actions\n \n # Save distributions only of the top 5 actions\n step_dict['top_5_action_distr'] = {}\n with torch.no_grad():\n for act in top_5_actions:\n step_dict['top_5_action_distr'][act] = {} # first nested level\n arg_names = inspector.act_to_arg_names[act]\n for arg_name in arg_names:\n if inspector.arguments_type[arg_name] == 'spatial': # it's either 'spatial' or 'categorical'\n insp_arg, insp_log_prob, insp_distr = agent.AC.sample_param(spatial_features, arg_name)\n p = insp_distr.detach().cpu().numpy().reshape(spatial_state.shape[-2:]) \n else:\n insp_arg, insp_log_prob, insp_distr = agent.AC.sample_param(nonspatial_features, arg_name)\n p = insp_distr.detach().cpu().numpy() \n \n step_dict['top_5_action_distr'][act][arg_name+'_distr'] = p # second nested level\n \n ### End inspection ###\n \n args, args_log_prob, args_entropy = agent.get_arguments(spatial_features, nonspatial_features, a)\n step_dict['args'] = args\n \n log_prob = log_prob + 
args_log_prob\n\n action = [actions.FunctionCall(a[i], args[i]) for i in range(len(a))]\n\n inspector.store_step(step_dict)\n return action, log_prob, torch.mean(entropy)\n \ndef inspection_update(agent, rewards, log_probs, entropies, states, done, bootstrap, trg_states): \n # from list of dictionaries of arrays to 2 separate arrays\n spatial_states_lst = [s['spatial'] for s in states] #[(batch, other dims) x traj_len times]\n player_states_lst = [s['player'] for s in states] \n spatial_states = torch.tensor(spatial_states_lst).float().to(agent.device).transpose(1,0)\n player_states = torch.tensor(player_states_lst).float().to(agent.device).transpose(1,0)\n\n spatial_states_lst_trg = [s['spatial'] for s in trg_states]\n player_states_lst_trg = [s['player'] for s in trg_states]\n spatial_states_trg = torch.tensor(spatial_states_lst_trg).float().to(agent.device).transpose(1,0)\n player_states_trg = torch.tensor(player_states_lst_trg).float().to(agent.device).transpose(1,0)\n\n # merge batch and episode dimensions\n old_spatial_states = spatial_states.reshape((-1,)+spatial_states.shape[2:])\n old_player_states = player_states.reshape((-1,)+player_states.shape[2:])\n \n V_trg = agent.compute_n_step_V_trg(agent.n_steps, rewards, done, bootstrap, \n spatial_states_trg, player_states_trg)\n ### Wrap variables into tensors - merge batch and episode dimensions ### \n log_probs = torch.stack(log_probs).to(agent.device).transpose(1,0).reshape(-1)\n entropies = torch.stack(entropies, axis=0).to(agent.device).reshape(-1)\n\n values, trg_values, critic_losses = inspect_critic_loss(agent, old_spatial_states, old_player_states, V_trg)\n\n advantages, actor_losses = inspect_actor_loss(agent, log_probs, entropies, old_spatial_states, old_player_states, V_trg)\n\n update_dict = dict(values=values, \n trg_values=trg_values, \n critic_losses=critic_losses, \n advantages=advantages, \n actor_losses=actor_losses )\n return update_dict\n\ndef inspect_critic_loss(agent, old_spatial_states, old_player_states, V_trg):\n with torch.no_grad():\n V = agent.AC.V_critic(old_spatial_states, old_player_states).squeeze()\n V = V.cpu().numpy() \n V_trg = V_trg.cpu().numpy()\n critic_losses = (V-V_trg)**2\n return V, V_trg, critic_losses\n\ndef inspect_actor_loss(agent, log_probs, entropies, old_spatial_states, old_player_states, V_trg):\n with torch.no_grad():\n V_pred = agent.AC.V_critic(old_spatial_states, old_player_states).squeeze()\n A = V_trg - V_pred\n policy_gradient = - log_probs*A\n A = A.cpu().numpy()\n pg = policy_gradient.cpu().numpy()\n return A, pg\n","repo_name":"nicoladainese96/SC2-RL","sub_path":"SC_Utils/A2C_inspection_v2.py","file_name":"A2C_inspection_v2.py","file_ext":"py","file_size_in_byte":7278,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"6758824775","text":"from flask import Flask, render_template, redirect,session, request, flash\nimport random\nfrom data_base import baseDatos as ConectBD\nfrom forms.DISTRIBUIDOR.distribuidorForm import Distribuidor\n\n\n# DB\n\nBD = ConectBD.conexion()\n\n# FUNCTION: QUERY SELLER / DISTRIBUTOR *VIEW*\ndef vistaDistribuidor():\n if 'usuario-administrador' in session:\n titulo = 'Seller'\n distribuidorBD = BD['Seller']\n distribuidorRecibido = distribuidorBD.find()\n return render_template('DISTRIBUIDOR/consulta_distribuidor.html', titulo=titulo, distribuidorRecibido=distribuidorRecibido)\n\n# FUNCTION: ADD SELLER / DISTRIBUTOR *VIEW*\ndef ingresarDistribuidor():\n if 'usuario-administrador' in 
+{"seq_id":"42768571647","text":"'''2. Write an algorithm that reads four values typed by the user: n, a, b, c.\r\na. If n = 1, print the three values a, b, c in ascending order.\r\nb. If n = 2, print the three values a, b, c in descending order.\r\nc. If n = 3, print the three values a, b, c with the largest one in the middle.'''\r\nn = int(input(\"Digite um valor de 1 a 3 \"))\r\na = int(input(\"Digite um valor \"))\r\nb = int(input(\"Digite um valor \"))\r\nc = int(input(\"Digite um valor \"))\r\nlista = [a, b, c]\r\nif n == 1:\r\n lista.sort()\r\n print(lista)\r\nelif n == 2:\r\n lista.sort()\r\n lista.reverse()\r\n print(lista)\r\nelif n == 3:\r\n # sorting also covers ties, which the original comparison chain missed\r\n menor, meio, maior = sorted(lista)\r\n lista2 = [menor, maior, meio] # largest value in the middle\r\n print(lista2)\r\nelse:\r\n print(\"Número inválido\")\r\n","repo_name":"BrunoMartinsGameDev/JovemProgramador","sub_path":"Exercicios 3/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"33592158506","text":"\"\"\"\r\nContainer With Most Water\r\nInput: [1,8,6,2,5,4,8,3,7]\r\nOutput: 49\r\nExplanation: the vertical lines represent the input array [1,8,6,2,5,4,8,3,7]; in this case the maximum amount of water the container can hold (the blue area in the figure) is 49.\r\n\r\n\"\"\"\r\nfrom typing import List\r\nclass Solution:\r\n def maxArea(self, height: List[int]) -> int:\r\n # two pointers from both ends: always move the shorter line inward,\r\n # since moving the taller one can never increase the area\r\n i = 0\r\n j = len(height) - 1\r\n ans = 0\r\n while i < j:\r\n area = min(height[i], height[j]) * (j - i)\r\n ans = max(area, ans)\r\n if height[i] < height[j]:\r\n i += 1\r\n else:\r\n j -= 1\r\n return ans\r\nif __name__ == '__main__':\r\n height = [1,8,6,2,5,4,8,3,7]\r\n print(Solution().maxArea(height))","repo_name":"MichaelZhangs/practise_own","sub_path":"leetcode/leetcode_11_盛最多水的容器.py","file_name":"leetcode_11_盛最多水的容器.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"15410023387","text":"class TrieNode:\r\n def __init__(self):\r\n self.children = {}\r\n self.is_end_of_word = False\r\n\r\nclass Trie:\r\n def __init__(self):\r\n self.root = TrieNode()\r\n\r\n def insert(self, word):\r\n node = self.root\r\n for char in word:\r\n if char not in node.children:\r\n node.children[char] = TrieNode()\r\n node = node.children[char]\r\n node.is_end_of_word = True\r\n\r\n def search(self, word):\r\n node = self.root\r\n for char in word:\r\n if char not in node.children:\r\n return False\r\n node = node.children[char]\r\n return node.is_end_of_word\r\n\r\n def startsWith(self, prefix):\r\n node = self.root\r\n for char in prefix:\r\n if char not in node.children:\r\n return False\r\n node = node.children[char]\r\n return True\r\n\r\n# Example usage:\r\ntype_values = [1, 1, 2, 3, 2]\r\nword_values = [\"hello\", \"help\", \"help\", \"hel\", \"hel\"]\r\n\r\ntrie = Trie()\r\n\r\nfor i in range(len(type_values)):\r\n if type_values[i] == 1: # Insert operation\r\n trie.insert(word_values[i])\r\n elif type_values[i] == 2: # Search operation\r\n result = trie.search(word_values[i])\r\n print(result)\r\n elif type_values[i] == 3: # Starts with operation\r\n result = trie.startsWith(word_values[i])\r\n print(result)\r\n","repo_name":"369harshit/Day27-Trie","sub_path":"implement trie-1.py","file_name":"implement trie-1.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}