diff --git "a/118.jsonl" "b/118.jsonl" new file mode 100644--- /dev/null +++ "b/118.jsonl" @@ -0,0 +1,358 @@ +{"seq_id":"19203175246","text":"from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, StratifiedKFold, LeavePOut\nfrom setup.model import SimpleNet, test_model, train_model\nfrom setup.MNISTImageDataset import MNISTImageDataset\nfrom torch.utils.data import DataLoader\nimport numpy as np\n\ndef monte_carlo_cross_validation(data, targets):\n\n # defines the batch sizes and shuffles the data\n train_params = {'batch_size': 16,\n 'shuffle': True,\n 'num_workers': 1}\n\n # Monte Carlo\n # Set up seeds and array to store accuracies\n seed_list = [42, 151, 297, 333, 406]\n acc_array = []\n\n for seed in seed_list:\n # split your data into train, validate and test sets\n X_temp, X_test, Y_temp, Y_test = train_test_split(data, targets, test_size=0.3, stratify=targets,\n random_state=seed)\n X_train, X_validate, Y_train, Y_validate = train_test_split(X_temp, Y_temp, test_size=20 / 70, stratify=Y_temp,\n random_state=seed)\n\n training_set = MNISTImageDataset(X_train, Y_train)\n valid_set = MNISTImageDataset(X_validate, Y_validate)\n test_set = MNISTImageDataset(X_test, Y_test)\n\n training_loader = DataLoader(training_set, **train_params)\n valid_loader = DataLoader(valid_set, **train_params)\n test_loader = DataLoader(test_set, **train_params)\n\n best_model, _ = train_model(n_epochs=10, train_loader=training_loader, valid_loader=valid_loader)\n acc = test_model(loader=test_loader, test_model=best_model)\n acc_array.append(acc)\n\n print(f\"Average of all seeds: = {np.array(np.mean(acc_array))}\")\n print(f\"Standard deviation of all seeds: = {np.array(np.std(acc_array))}\")","repo_name":"zhuemann/Cross-Validation-Guide","sub_path":"monte_carlo_cross_validation.py","file_name":"monte_carlo_cross_validation.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"85"} +{"seq_id":"7095485083","text":"list1 = []\nlist2 = []\nlist3 = []\n\nprint(\"Enter 3 numbers\")\nfor i in range(1,4): #1 2 3 4 5\n num = int(input())\n list1.append(num) # 10 20 30\n\nprint(\"Enter 4 numbers\")\nfor i in range(1, 5): # 1 2 3 4 5\n num = int(input())\n list2.append(num) # 100 200 300 400\n\nlist3 = list1 + list2 # merge two list into 3rd list\nprint(list1)\nprint(list2)\nprint(list3)","repo_name":"tejasshah2k19/21-python-usa","sub_path":"list/merge-list.py","file_name":"merge-list.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"19912584648","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom PIL import Image\n\nfrom dataset.caption_dataset import re_train_dataset, re_eval_dataset, pretrain_dataset_4m, coco_dataset, nocaps_dataset\nfrom dataset.nlvr_dataset import nlvr_dataset\nfrom dataset.ve_dataset import ve_dataset\nfrom dataset.vqa_dataset import vqa_dataset\nfrom dataset.grounding_dataset import build_uni_training_dataset,build_vg_dataset\nfrom dataset.videoqa_dataset import videoqa_dataset\nfrom dataset.video_dataset import vatex_video_caps_dataset\n\nfrom dataset.randaugment import RandomAugment\n\ndef create_dataset(dataset, config, epoch=None):\n \n normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n \n pretrain_transform = transforms.Compose([ \n 
transforms.RandomResizedCrop(config['image_res'],scale=(0.2, 1.0), interpolation=Image.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n train_transform = transforms.Compose([ \n transforms.RandomResizedCrop(config['image_res'],scale=(0.5, 1.0), interpolation=Image.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n test_transform = transforms.Compose([\n transforms.Resize((config['image_res'],config['image_res']),interpolation=Image.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ]) \n \n if dataset=='pretrain':\n dataset = pretrain_dataset_4m(config['train_file'], pretrain_transform, read_local_data=config['read_local_data'], image_root=config['image_root'], epoch=epoch)\n return dataset\n\n elif dataset=='re':\n train_dataset = re_train_dataset(config['train_file'], train_transform, config['image_root'])\n val_dataset = re_eval_dataset(config['val_file'], test_transform, config['image_root'])\n test_dataset = re_eval_dataset(config['test_file'], test_transform, config['image_root'])\n return train_dataset, val_dataset, test_dataset\n \n elif dataset=='vqa': \n train_dataset = vqa_dataset(config['train_file'], train_transform, config['vqa_root'], config['vg_root'], config['gqa_root'], split='train', read_local_data=config['read_local_data'], add_ocr=config['add_ocr'], add_object=config['add_object']) \n vqa_test_dataset = vqa_dataset(config['test_file'], test_transform, config['vqa_root'], config['vg_root'], config['gqa_root'], split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], add_ocr=config['add_ocr'], add_object=config['add_object']) \n vqa_val_dataset = vqa_dataset(config['val_file'], test_transform, config['vqa_root'], config['vg_root'], config['gqa_root'],split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], add_ocr=config['add_ocr'], add_object=config['add_object']) \n return train_dataset, vqa_val_dataset, vqa_test_dataset\n elif dataset== 'nocaps':\n val_dataset = nocaps_dataset(config['val_file'], test_transform, config['nocaps_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n test_dataset = nocaps_dataset(config['test_file'], test_transform, config['nocaps_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n return val_dataset, test_dataset\n elif dataset== 'coco':\n train_dataset = coco_dataset(config['train_file'], train_transform, config['coco_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=True, add_object=config['add_object'])\n val_dataset = coco_dataset(config['val_file'], test_transform, config['coco_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n test_dataset = coco_dataset(config['test_file'], test_transform, config['coco_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, add_object=config['add_object'])\n return train_dataset, val_dataset, 
test_dataset\n elif dataset=='nlvr': \n train_dataset = nlvr_dataset(config['train_file'], train_transform, config['image_root']) \n val_dataset = nlvr_dataset(config['val_file'], test_transform, config['image_root']) \n test_dataset = nlvr_dataset(config['test_file'], test_transform, config['image_root']) \n return train_dataset, val_dataset, test_dataset \n \n elif dataset=='ve': \n train_dataset = ve_dataset(config['train_file'], train_transform, config['image_root']) \n val_dataset = ve_dataset(config['val_file'], test_transform, config['image_root']) \n test_dataset = ve_dataset(config['test_file'], test_transform, config['image_root']) \n return train_dataset, val_dataset, test_dataset \n\n elif 'vg_' in dataset:\n if 'uni' in dataset:\n train_dataset = build_uni_training_dataset(args=config)\n val_dataset = build_vg_dataset(split='val',args=config,dataset_name='unc')\n eval_dataset = 'unc'\n else:\n train_dataset = build_vg_dataset(split='train',args=config,dataset_name=dataset[3:])\n val_dataset = build_vg_dataset(split='val',args=config,dataset_name=dataset[3:])\n eval_dataset = dataset[3:]\n eval_split = {\n 'unc':['testA','testB'],\n 'unc+':['testA','testB'],\n 'gref_umd':['test']\n }\n test_datasets = {split:build_vg_dataset(split=split,args=config,dataset_name=eval_dataset) for split in eval_split[eval_dataset]}\n return train_dataset, val_dataset,test_datasets\n \n\n\n elif dataset=='video_qa': \n train_dataset = videoqa_dataset(config['train_file'], train_transform, config['videoqa_root'], split='train', read_local_data=config['read_local_data'], max_img_size=config['image_res']) \n vqa_test_dataset = videoqa_dataset(config['test_file'], test_transform, config['videoqa_root'], split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], max_img_size=config['image_res']) \n vqa_val_dataset = videoqa_dataset(config['val_file'], test_transform, config['videoqa_root'], split='test', answer_list=config['answer_list'], read_local_data=config['read_local_data'], max_img_size=config['image_res']) \n return train_dataset, vqa_val_dataset, vqa_test_dataset\n\n elif dataset== 'vatex_video_caps':\n test_dataset = vatex_video_caps_dataset(config['test_file'], config['vatex_video_caps_root'], max_words=config['max_length'], read_local_data=config['read_local_data'], is_train=False, num_frm=config['num_frm_test'], max_img_size=config['image_res'], frm_sampling_strategy='uniform')\n return test_dataset\n\ndef videoqa_collate_fn(batch):\n image_list, question_list, answer_list, n = [], [], [], []\n for image, question, answer in batch:\n image_list.append(image)\n question_list.append(question)\n answer_list.append(answer)\n n.append(1)\n return torch.stack(image_list,dim=0), question_list, answer_list, n\n\ndef vqa_collate_fn(batch):\n image_list, question_list, answer_list, weight_list, n = [], [], [], [], []\n for image, question, answer, weights in batch:\n image_list.append(image)\n question_list.append(question)\n weight_list += weights \n answer_list += answer\n n.append(len(answer))\n return torch.stack(image_list,dim=0), question_list, answer_list, torch.Tensor(weight_list), n\n\ndef nocaps_collate_fn(batch):\n image_list, image_id_list = [], []\n for image, image_id in batch:\n image_list.append(image)\n image_id_list.append(image_id)\n return torch.stack(image_list,dim=0), image_id_list\ndef coco_collate_fn(batch):\n image_list, caption_list, object_labels, image_id_list, gold_caption_list = [], [], [], [], []\n for image, caption, object_label, 
image_id, gold_caption in batch:\n image_list.append(image)\n caption_list.append(caption)\n image_id_list.append(image_id)\n gold_caption_list.append(gold_caption)\n object_labels.append(object_label)\n return torch.stack(image_list,dim=0), caption_list, object_labels, image_id_list, gold_caption_list\n\n\ndef create_sampler(datasets, shuffles, num_tasks, global_rank):\n samplers = []\n for dataset,shuffle in zip(datasets,shuffles):\n sampler = torch.utils.data.DistributedSampler(dataset, num_replicas=num_tasks, rank=global_rank, shuffle=shuffle)\n samplers.append(sampler)\n return samplers \n\n\ndef create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):\n loaders = []\n for dataset,sampler,bs,n_worker,is_train,collate_fn in zip(datasets,samplers,batch_size,num_workers,is_trains,collate_fns):\n if is_train:\n shuffle = (sampler is None)\n drop_last = True\n else:\n shuffle = False\n drop_last = False\n loader = DataLoader(\n dataset,\n batch_size=bs,\n num_workers=n_worker,\n pin_memory=True,\n sampler=sampler,\n shuffle=shuffle,\n collate_fn=collate_fn,\n drop_last=drop_last,\n ) \n loaders.append(loader)\n return loaders \n","repo_name":"alibaba/AliceMind","sub_path":"mPLUG/dataset/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10120,"program_lang":"python","lang":"en","doc_type":"code","stars":1868,"dataset":"github-code","pt":"85"} +{"seq_id":"35849360454","text":"KEYS = {\r\n 'a': 'z',\r\n 'b': 'y',\r\n 'c': 'x',\r\n 'd': 'w',\r\n 'e': 'v',\r\n 'f': 'u',\r\n 'g': 't',\r\n 'h': 's',\r\n 'i': 'r',\r\n 'j': 'q',\r\n 'k': 'p',\r\n 'l': 'o',\r\n 'm': 'n',\r\n 'n': 'm',\r\n 'o': 'l',\r\n 'p': 'k',\r\n 'q': 'j',\r\n 'r': 'i',\r\n 's': 'h',\r\n 't': 'g',\r\n 'u': 'f',\r\n 'v': 'e',\r\n 'w': 'd',\r\n 'x': 'c',\r\n 'y': 'b',\r\n 'z': 'a',\r\n 'A': 'Z',\r\n 'B': 'Y',\r\n 'C': 'X',\r\n 'D': 'W',\r\n 'E': 'V',\r\n 'F': 'U',\r\n 'G': 'T',\r\n 'H': 'S',\r\n 'I': 'R',\r\n 'J': 'Q',\r\n 'K': 'P',\r\n 'L': 'O',\r\n 'M': 'N',\r\n 'N': 'M',\r\n 'O': 'L',\r\n 'P': 'K',\r\n 'Q': 'J',\r\n 'R': 'I',\r\n 'S': 'H',\r\n 'T': 'G',\r\n 'U': 'F',\r\n 'V': 'E',\r\n 'W': 'D',\r\n 'X': 'C',\r\n 'Y': 'B',\r\n 'Z': 'A',\r\n\r\n}\r\n\r\ndef cifrar(mensaje):\r\n palabras = mensaje.split(' ')\r\n mensaje_cifrado = []\r\n\r\n for palabra in palabras:\r\n palabra_cifrada = ''\r\n for letra in palabra:\r\n palabra_cifrada += KEYS[letra]\r\n mensaje_cifrado.append(palabra_cifrada)\r\n\r\n return ' '.join(mensaje_cifrado)\r\n\r\n\r\ndef descifrar(mensaje):\r\n palabras = mensaje.split(' ')\r\n mensaje_descifrado = []\r\n\r\n for palabra in palabras:\r\n palabra_descifrada = ''\r\n for letra in palabra:\r\n for key, value in KEYS.items():\r\n if value == letra:\r\n palabra_descifrada += key\r\n mensaje_descifrado.append(palabra_descifrada)\r\n return ' '.join(mensaje_descifrado)\r\n","repo_name":"nlbucurur/proyecto-programacion","sub_path":"metodos/atbash.py","file_name":"atbash.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"6559174474","text":"def solution(array, height):\n answer = 0\n for i in array:\n if i > height:\n answer += 1\n return answer\n\n # 프로그래머스 연습문제 머쓱이보다 키 큰 사람\n\n\ndef another(array, height):\n array.append(height)\n array.sort(reverse=True)\n return array.index(height)\n\n # 배열의 인덱스로 접근한 
문제\n","repo_name":"dhwpdnr/coding_test","sub_path":"programmers/2301/230130_4.py","file_name":"230130_4.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11178176088","text":"import sqlite3\nimport traceback\n\n\ndef dbConnection():\n try:\n conn = sqlite3.connect('ZscalerClient-versions.db')\n print(\"Opened database successfully\")\n return conn\n except Exception:\n print(\"Error occurred\")\n\ndef createBaseTable(conn):\n try:\n conn.execute('''CREATE TABLE ZSCALER_VERSIONS\n (VERSION TEXT NOT NULL,\n TYPE TEXT NOT NULL,\n CATEGORY TEXT NOT NULL);''')\n print(\"Table created successfully\")\n except Exception:\n print(\"Table already exists\")\n\ndef checkVersion(conn,version,type):\n try:\n cur = conn.cursor()\n cur.execute(\"SELECT count(*) from ZSCALER_VERSIONS where VERSION = ? and TYPE = ?\",(version,type,))\n return cur.fetchone()\n except Exception:\n print(\"Problem with checkVersion\")\n \n\ndef recordNewVersion(conn, version, type, category):\n try:\n cur = conn.cursor()\n cur.execute(\"INSERT INTO ZSCALER_VERSIONS(VERSION,TYPE,CATEGORY)values(?,?,?)\",(version,type,category))\n conn.commit()\n return\n except Exception:\n print(\"Error Occurred in recording new version\")\n traceback.print_exc()\n\ndef closeConnection(conn):\n try:\n conn.close()\n except Exception:\n print(\"Error Occurred in closing connection\")","repo_name":"priyankasallaram/pyprogs","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"25893387996","text":"'''\nYou will be given a number and you will need to return it as a string in Expanded Form. For example:\n\nexpanded_form(12) # Should return '10 + 2'\nexpanded_form(42) # Should return '40 + 2'\nexpanded_form(70304) # Should return '70000 + 300 + 4'\nNOTE: All numbers will be whole numbers greater than 0.\n'''\ndef expanded_form(num):\n str_num = str(num)\n str_num_len = len(str_num)\n answer = \"\"\n \n for i in range(str_num_len):\n if str_num_len == 1:\n if str_num[i] == \"0\":\n answer = answer[:-3]\n break\n else:\n answer += str_num[i]\n break\n if str_num[i] == \"0\":\n str_num_len -= 1\n else:\n answer += str_num[i] + \"0\"*(str_num_len-1)\n answer += \" + \"\n str_num_len -= 1\n\n return answer","repo_name":"mpsb/practice","sub_path":"codewars/python/cw-write-number-in-expanded-form.py","file_name":"cw-write-number-in-expanded-form.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"21387611178","text":"import collections\nfrom .texttable import Texttable\nfrom . 
import termui\nimport click\nfrom rich import print as rprint\nfrom rich.table import Table\n\n\n# -----------------------------------------------------------------------------\ndef dumpSubRegs(node):\n regs = {}\n for i in sorted(node.getNodes()):\n regs[i] = node.getNode(i).read()\n node.getClient().dispatch()\n\n return {k: v.value() for k, v in regs.items()}\n\n\n# -----------------------------------------------------------------------------\ndef dumpReg(node):\n v = node.read()\n node.getClient().dispatch()\n return {node.getId(): v.value()}\n\n\n# # -----------------------------------------------------------------------------\n# def readTpgStatus(node):\n\n# row_names = [\n# 'upck >> hsc',\n# 'hsc >> psub',\n# 'psub >> fir ',\n# 'fir >> hf',\n# 'hf >> hsc',\n# 'hsc >> cr_if',\n# 'tpg >> mask',\n# 'mask >> filt'\n# ]\n\n# flag_map = collections.OrderedDict([\n# ('v', 'valid'),\n# ('u', 'user'),\n# ('l', 'last')\n# ])\n\n# probes = {}\n# for i in range(4):\n# node.getNode('sel.chain').write(i)\n# node.getClient().dispatch()\n# probes[i] = dumpSubRegs(node.getNode('csr.mon'))\n\n# hdr = ['probe']+['{}'.format(k) for k in range(4)]\n# tpg_table = Texttable(max_width=0)\n# tpg_table.header(hdr)\n# tpg_table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)\n# tpg_table.set_chars(['-', '|', '+', '-'])\n# for k in range(8):\n# lbl = 'p'+str(k)\n\n# flags = ''.join([f for f, l in flag_map.items() if probes[i][lbl+'.'+l]])\n\n# row = [lbl+': '+row_names[k]]+['{} [{}] ({}) {}'.format(probes[i][lbl+'.pkt_ctr'], 'rdy' if probes[i][lbl+'.ready'] else 'bsy', flags, probes[i][lbl+'.last_err']) for i in range(4)]\n# tpg_table.add_row(row)\n# tbl = tpg_table.draw()\n# tbl = tbl.replace('[rdy]', '['+termui.kGreen+'rdy'+termui.kReset+']')\n# tbl = tbl.replace('[bsy]', '['+termui.kRed+'bsy'+termui.kReset+']')\n# return tbl\n\n\n# # -----------------------------------------------------------------------------\n# def readStreamProcessorStatus(node, nproc):\n\n# row_names = [\n# 'upck >> hsc',\n# 'hsc >> psub',\n# 'psub >> fir ',\n# 'fir >> hf',\n# 'hf >> hsc',\n# 'hsc >> cr_if',\n# 'tpg >> mask',\n# 'mask >> filt'\n# ]\n\n# flag_map = collections.OrderedDict([\n# ('v', 'valid'),\n# ('u', 'user'),\n# ('l', 'last')\n# ])\n\n# strmSelNode = node.getNode('csr.ctrl.stream_sel')\n# strmCapNode = node.getNode('csr.ctrl.cap_ctrs')\n# strmCsrNode = node.getNode('stream_proc.csr')\n# strmCapNode.write(1)\n# strmCapNode.write(0)\n# strmCapNode.getClient().dispatch()\n\n# probes = {}\n# for i in range(nproc):\n# strmSelNode.write(i)\n# node.getClient().dispatch()\n# probes[i] = dumpSubRegs(strmCsrNode.getNode('mon'))\n\n# hdr = ['probe']+['{}'.format(k) for k in range(nproc)]\n# tpg_table = Texttable(max_width=0)\n# tpg_table.header(hdr)\n# tpg_table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)\n# tpg_table.set_chars(['-', '|', '+', '-'])\n# for k in range(8):\n# lbl = 'p'+str(k)\n\n# flags = ''.join([f for f, l in flag_map.items() if probes[i][lbl+'.'+l]])\n\n# row = [lbl+': '+row_names[k]]+['{} [{}] ({}) {}'.format(probes[i][lbl+'.pkt_ctr'], 'rdy' if probes[i][lbl+'.ready'] else 'bsy', flags, probes[i][lbl+'.last_err']) for i in range(4)]\n# tpg_table.add_row(row)\n# tbl = tpg_table.draw()\n# tbl = tbl.replace('[rdy]', '['+termui.kGreen+'rdy'+termui.kReset+']')\n# tbl = tbl.replace('[bsy]', '['+termui.kRed+'bsy'+termui.kReset+']')\n# return tbl\n\n\n# # -----------------------------------------------------------------------------\n# def readSinkStatus(node):\n\n# reg_map = 
collections.OrderedDict([\n# ('en', 'ctrl.en'),\n# ('fifo_mode', 'ctrl.fifo_mode'),\n# ('rdy_mode', 'ctrl.rdy_mode'),\n# ('empty', 'stat.empty'),\n# ('err', 'stat.err'),\n# ('full', 'stat.full'),\n# ('count', 'count')\n# ])\n\n# sinks = {}\n# for i in range(4):\n# sinks[i] = dumpSubRegs(node.getNode('sink'+str(i)+'.csr'))\n# sinks[i].update(dumpReg(node.getNode('sink'+str(i)+'.buf.count')))\n\n# hdr = ['reg']+['{}'.format(k) for k in range(4)]\n# sink_table = Texttable(max_width=0)\n# sink_table.header(hdr)\n# sink_table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)\n# sink_table.set_chars(['-', '|', '+', '-'])\n# for k, l in reg_map.items():\n# row = [k]+[sinks[i][l] for i in range(4)]\n# sink_table.add_row(row)\n\n# return sink_table.draw()\n\n\n# # ------------------------------------------------------------------------------\n# def printRegTable(aRegs, aHeader=True, aSort=True):\n# print(( formatRegTable(aRegs, aHeader, aSort) ))\n\n\n# # ------------------------------------------------------------------------------\n# def formatRegTable(aRegs, aHeader=True, aSort=True):\n\n# lRegTable = Texttable(max_width=0)\n# lRegTable.set_deco(Texttable.VLINES | Texttable.BORDER | Texttable.HEADER)\n# lRegTable.set_chars(['-', '|', '+', '-'])\n# if aHeader:\n# lRegTable.header( ['name', 'value'] )\n\n# lRegs = sorted(aRegs) if aSort else aRegs\n# for k in lRegs:\n# lRegTable.add_row( [str(k), hex(aRegs[k])] )\n\n# return lRegTable.draw()\n\n# # ------------------------------------------------------------------------------\n# def formatDictTable(aDict, aHeader=True, aSort=True, aFmtr=str):\n# lDictTable = Texttable(max_width=0)\n# lDictTable.set_deco(Texttable.VLINES | Texttable.BORDER | Texttable.HEADER)\n# lDictTable.set_chars(['-', '|', '+', '-'])\n# if aHeader:\n# lDictTable.header( ['name', 'value'] )\n\n# for k in (sorted(aDict) if aSort else aDict):\n# v = aDict[k]\n# lDictTable.add_row( [str(k), aFmtr(v) if aFmtr else v])\n\n# return lDictTable.draw()\n\n# # ------------------------------------------------------------------------------\n# def printDictTable(aDict, aHeader=True, aSort=True, aFmtr=None):\n# print(( formatDictTable(aDict, aHeader, aSort, aFmtr) ))\n\n\n# -----------------------------------------------------------------------------\ndef dump_sub_regs(node, names: list = None):\n\n if names is None:\n names = sorted(node.getNodes())\n regs = collections.OrderedDict()\n for i in names:\n regs[i] = node.getNode(i).read()\n node.getClient().dispatch()\n\n return {k: hex(v.value()) for k, v in regs.items()}\n\n# -----------------------------------------------------------------------------\ndef dump_reg(node):\n v = node.read()\n node.getClient().dispatch()\n return {node.getId(): v.value()}\n\n# -----------------------------------------------------------------------------\ndef dict_to_table( vals: dict, **kwargs):\n t = Table(**kwargs)\n t.add_column('name')\n t.add_column('value', style='green')\n for k,v in vals.items():\n t.add_row(k,str(v))\n\n return t\n\n# -----------------------------------------------------------------------------\ndef dict_to_hextable( vals: dict, **kwargs):\n t = Table(**kwargs)\n t.add_column('name')\n t.add_column('value', style='green')\n for k,v in vals.items():\n t.add_row(k,hex(v))\n\n return t\n\n# ------------------------------------------------------------------------------\ndef print_reg_table(aRegs, **kwargs):\n rprint( dict_to_hextable(aRegs, **kwargs) )\n\n\n# 
------------------------------------------------------------------------------\ndef print_dict_table(aDict, **kwargs):\n rprint(dict_to_table(aDict, **kwargs) )\n\n# -----------------------------------------------------------------------------\ndef read_stream_processor_status(node, nproc, **kwargs):\n\n row_names = [\n 'upck >> hsc',\n 'hsc >> psub',\n 'psub >> fir ',\n 'fir >> hf',\n 'hf >> meta',\n 'meta >> hsc',\n 'hsc >> mask',\n 'mask >> filt',\n 'filt >> arb'\n ]\n\n flag_map = collections.OrderedDict([\n ('v', 'valid'),\n ('u', 'user'),\n ('l', 'last')\n ])\n\n strmSelNode = node.getNode('csr.ctrl.stream_sel')\n strmCapNode = node.getNode('csr.ctrl.cap_ctrs')\n strmCsrNode = node.getNode('stream_proc.csr')\n strmCapNode.write(1)\n strmCapNode.write(0)\n strmCapNode.getClient().dispatch()\n\n probes = {}\n for i in range(nproc):\n strmSelNode.write(i)\n node.getClient().dispatch()\n probes[i] = dumpSubRegs(strmCsrNode.getNode('mon'))\n\n\n hdr = ['probe']+[f'{k}' for k in range(nproc)]\n t = Table(*hdr, **kwargs)\n\n for k in range(9):\n lbl = f'p{k}'\n\n flags = ''.join([f for f, l in flag_map.items() if probes[i][lbl+'.'+l]])\n\n row = [f\"{lbl}: {row_names[k]}\"]+['{} [{}] ({}) {}'.format(probes[i][lbl+'.pkt_ctr'], '[green]rdy[/green]' if probes[i][lbl+'.ready'] else '[red]bsy[/red]', flags, probes[i][lbl+'.last_err']) for i in range(4)]\n t.add_row(*row)\n return t\n\n\n# ------------------------------------------------------------------------------\n\n\n# ------------------------------------------------------------------------------\ndef gen_range_validator(first, last):\n \"\"\"\n Utility function to generate validators for integer number lists with range check\n\n \"\"\"\n def validate_channels(ctx, param, value):\n return _validate_range_impl(value, first, last)\n\n return validate_channels\n\n# ------------------------------------------------------------------------------\ndef validate_link_ids(ctx, param, value):\n first, last = 0, ctx.obj.mConfigInfo['n_links']\n return _validate_range_impl(value, first, last)\n\n\n# ------------------------------------------------------------------------------\ndef validate_proc_ids(ctx, param, value):\n first, last = 0, ctx.obj.mConfigInfo['n_port']\n return _validate_range_impl(value, first, last)\n\n\n# ------------------------------------------------------------------------------\ndef validate_chan_ids(ctx, param, value):\n first, last = 0, ctx.obj.mConfigInfo['n_mux']\n return _validate_range_impl(value, first, last)\n\n\n# ------------------------------------------------------------------------------\ndef _validate_range_impl(value, first, last):\n if value is None:\n return None\n\n if value == 'all':\n return list(range(first, last))\n elif value == 'none':\n return []\n\n if not value[0].isdigit():\n raise click.ClickException('Malformed option (comma separated list expected): %s' % value)\n\n _sep = ','\n _dash = '-'\n\n numbers = []\n items = value.split(_sep)\n for item in items:\n nums = item.split(_dash)\n if len(nums) == 1:\n # single number\n numbers.append(int(item))\n elif len(nums) == 2:\n i = int(nums[0])\n j = int(nums[1])\n if i > j:\n raise click.ClickException('Invalid interval '+item)\n numbers.extend(list(range(i, j+1)))\n else:\n raise click.ClickException('Malformed option (comma separated list expected): %s' % value)\n\n out_of_range = [n for n in numbers if (n < first or n >= last)]\n if any(out_of_range):\n raise click.ClickException('Values out of range %s-%s: %s' % (first, last, out_of_range))\n\n return 
numbers\n","repo_name":"DUNE-DAQ/dtpcontrols","sub_path":"python/dtpcontrols/toolbox.py","file_name":"toolbox.py","file_ext":"py","file_size_in_byte":11633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"37186239505","text":"from http import HTTPStatus\n\nimport pytest\n\nfrom tests.test_services.testdata import search_responses\n\npytestmark = [pytest.mark.asyncio, pytest.mark.integrational]\n\n\nclass TestSearch:\n @pytest.mark.parametrize(\n 'url,status_code,response_json',\n [\n (\n \"/api/v1/films/search/?query=Star&page_number=1&page_size=2\",\n HTTPStatus.OK,\n search_responses.SEARCH_FILMS_SUCCESS,\n ),\n (\n \"/api/v1/films/search/?query=Star&page_number=-1&page_size=2\",\n HTTPStatus.UNPROCESSABLE_ENTITY,\n search_responses.SEARCH_FILMS_UNPROCESSABLE_PAGE_NUMBER,\n ),\n (\n \"/api/v1/films/search/?query=jkngjrkt&page_number=1&page_size=2\",\n HTTPStatus.NOT_FOUND,\n search_responses.SEARCH_FILMS_NOT_FOUND,\n ),\n (\n \"/api/v1/persons/search?query=Carrie&page_size=2&page_number=1\",\n HTTPStatus.OK,\n search_responses.SEARCH_PERSONS_SUCCESS,\n ),\n (\n \"/api/v1/persons/search?query=Carrie&page_size=-2&page_number=1\",\n HTTPStatus.UNPROCESSABLE_ENTITY,\n search_responses.SEARCH_PERSONS_UNPROCESSABLE_PAGE_SIZE,\n ),\n (\n \"/api/v1/persons/search?query=ASJGNRIJGNRKJEG&page_size=2&page_number=1\",\n HTTPStatus.NOT_FOUND,\n search_responses.SEARCH_PERSONS_NOT_FOUND,\n ),\n ],\n )\n async def test_search(self, client, url, status_code, response_json):\n response = await client.get(url)\n assert response.status_code == status_code, response.text\n assert response.json() == response_json\n","repo_name":"stranded-in-python/movix-api","sub_path":"src/tests/test_services/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"34289964835","text":"from cmath import cos\nfrom bert_serving.client import BertClient\nimport speech_recognition as sr\nimport numpy as np\n\n\n# Working with audio files\nr = sr.Recognizer()\nbc = BertClient(ip='139.224.100.23')# ip中是部署了bert模型的服务器地址\n\nstc = [\n \"为我解读一下我这个月的碳信用\",\n \"我现在有多少碳币\",\n \"我的碳信用分数是多少\",\n]\nvec = []\nvec = bc.encode(stc)\nnp.save('./bert_vec.npy',vec)\ninput_vec = []\nvec = np.load('./bert_vec.npy')\n\n\n\nprint(\"您可以向我查询碳币、碳信用等等...\"+'\\n'+'请说话:')\nmicrophone = sr.Microphone()\nwith microphone as source:\n r.adjust_for_ambient_noise(source)\n audio = r.listen(source)\ntry:\n print('录音结束')\n # sentence = r.recognize_sphinx(audio)\n input_sentence = r.recognize_google(audio,language=\"cmn-Hans-CN\") #简体中文\n print('识别结束')\n # 计算用户说的句子的bert向量\n input_vec = bc.encode([input_sentence])\n print(input_sentence)\nexcept:\n print(\"无法识别出句子,请重试。\")\n\n# 将输入句子的向量与预设句子的向量一一求出余弦值,与余弦值最大的匹配成功\ncos_input = []\nfor each in vec:\n each = each.reshape(768,1)\n res = input_vec.dot(each) / (np.linalg.norm(input_vec) * np.linalg.norm(each))\n res = (res[0][0])\n cos_input.append(res)\nprint(cos_input)\nindex = cos_input.index(max(cos_input))\nprint('检测到输入应为预设库中的第'+str(index+1)+'条,“',stc[index]+'”')\n#cos_input = a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))","repo_name":"florrietan/citicup","sub_path":"citicup/thm/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"18241750250","text":"import cv2\nimport 
matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport vtk\n\nfrom wavedata.tools.obj_detection import obj_utils\nfrom wavedata.tools.visualization import vis_utils\nfrom wavedata.tools.visualization.vtk_boxes import VtkBoxes\nfrom wavedata.tools.visualization.vtk_ground_plane import VtkGroundPlane\nfrom wavedata.tools.visualization.vtk_point_cloud import VtkPointCloud\n\nimport mlod\nfrom mlod.builders.dataset_builder import DatasetBuilder\nfrom mlod.core import box_3d_encoder\nfrom mlod.core.anchor_generators import grid_anchor_3d_generator\nfrom mlod.utils import demo_utils\n\n\ndef main():\n \"\"\"\n Visualization of the mini batch anchors for RpnModel training.\n\n Keys:\n F1: Toggle mini batch anchors\n F2: Toggle positive/negative proposal anchors\n F3: Toggle easy ground truth objects (Green)\n F4: Toggle medium ground truth objects (Orange)\n F5: Toggle hard ground truth objects (Red)\n F6: Toggle all ground truth objects (default off)\n F7: Toggle ground-plane\n \"\"\"\n\n anchor_colour_scheme = {\n \"Car\": (255, 0, 0), # Red\n \"Pedestrian\": (255, 150, 50), # Orange\n \"Cyclist\": (150, 50, 100), # Purple\n \"DontCare\": (255, 255, 255), # White\n\n \"Anchor\": (150, 150, 150), # Gray\n\n \"Positive\": (0, 255, 255), # Teal\n \"Negative\": (255, 0, 255) # Bright Purple\n }\n\n ##############################\n # Options\n ##############################\n show_orientations = True\n\n # Classes name\n config_name = 'car'\n # config_name = 'ped'\n # config_name = 'cyc'\n # config_name = 'ppl'\n\n # # # Random sample # # #\n sample_name = None\n\n # Small cars\n # sample_name = '000008'\n # sample_name = '000639'\n\n # # # Cars # # #\n # sample_name = \"000001\"\n # sample_name = \"000050\"\n # sample_name = \"000112\"\n # sample_name = \"000169\"\n # sample_name = \"000191\"\n\n # # # People # # #\n # sample_name = '000000'\n\n # val_half\n # sample_name = '000001' # Hard, 1 far cyc\n # sample_name = '000005' # Easy, 1 ped\n # sample_name = '000122' # Easy, 1 cyc\n # sample_name = '000134' # Hard, lots of people\n # sample_name = '000167' # Medium, 1 ped, 2 cycs\n # sample_name = '000187' # Medium, 1 ped on left\n # sample_name = '000381' # Easy, 1 ped\n # sample_name = '000398' # Easy, 1 ped\n # sample_name = '000401' # Hard, obscured peds\n # sample_name = '000407' # Easy, 1 ped\n sample_name = '000448' # Hard, several far people\n # sample_name = '000486' # Hard 2 obscured peds\n # sample_name = '000509' # Easy, 1 ped\n # sample_name = '000718' # Hard, lots of people\n # sample_name = '002216' # Easy, 1 cyc\n\n # sample_name = \"000000\"\n # sample_name = \"000011\"\n # sample_name = \"000015\"\n # sample_name = \"000028\"\n # sample_name = \"000035\"\n # sample_name = \"000134\"\n # sample_name = \"000167\"\n # sample_name = '000379'\n # sample_name = '000381'\n # sample_name = '000397'\n # sample_name = '000398'\n # sample_name = '000401'\n # sample_name = '000407'\n # sample_name = '000486'\n # sample_name = '000509'\n\n # # Cyclists # # #\n # sample_name = '000122'\n # sample_name = '000448'\n\n # # # Multiple classes # # #\n # sample_name = \"000764\"\n ##############################\n # End of Options\n ##############################\n\n # Dataset config\n dataset_config_path = mlod.top_dir() + \\\n '/demos/configs/mb_rpn_{}.config'.format(config_name)\n\n # Create Dataset\n dataset = DatasetBuilder.load_dataset_from_config(\n dataset_config_path)\n\n # Random sample\n if sample_name is None:\n sample_idx = np.random.randint(0, 
dataset.num_samples)\n sample_name = dataset.sample_list[sample_idx].name\n\n anchor_strides = dataset.kitti_utils.anchor_strides\n\n img_idx = int(sample_name)\n\n print(\"Showing mini batch for sample {}\".format(sample_name))\n\n image = cv2.imread(dataset.get_rgb_image_path(sample_name))\n image_shape = [image.shape[1], image.shape[0]]\n\n # KittiUtils class\n dataset_utils = dataset.kitti_utils\n\n ground_plane = obj_utils.get_road_plane(img_idx, dataset.planes_dir)\n\n point_cloud = obj_utils.get_depth_map_point_cloud(img_idx,\n dataset.calib_dir,\n dataset.depth_dir,\n image_shape)\n\n points = point_cloud.T\n point_colours = vis_utils.project_img_to_point_cloud(points, image,\n dataset.calib_dir,\n img_idx)\n\n clusters, _ = dataset.get_cluster_info()\n anchor_generator = grid_anchor_3d_generator.GridAnchor3dGenerator()\n\n # Read mini batch info\n anchors_info = dataset_utils.get_anchors_info(\n dataset.classes_name, anchor_strides, sample_name)\n\n if not anchors_info:\n # Exit early if anchors_info is empty\n print(\"Anchors info is empty, please try a different sample\")\n return\n\n # Generate anchors for all classes\n all_anchor_boxes_3d = []\n for class_idx in range(len(dataset.classes)):\n\n anchor_boxes_3d = anchor_generator.generate(\n area_3d=dataset.kitti_utils.area_extents,\n anchor_3d_sizes=clusters[class_idx],\n anchor_stride=anchor_strides[class_idx],\n ground_plane=ground_plane)\n\n all_anchor_boxes_3d.extend(anchor_boxes_3d)\n all_anchor_boxes_3d = np.asarray(all_anchor_boxes_3d)\n\n # Use anchors info\n indices, ious, offsets, classes = anchors_info\n\n # Get non empty anchors from the indices\n anchor_boxes_3d = all_anchor_boxes_3d[indices]\n\n # Sample an RPN mini batch from the non empty anchors\n mini_batch_utils = dataset.kitti_utils.mini_batch_utils\n mb_mask_tf, _ = mini_batch_utils.sample_rpn_mini_batch(ious)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n mb_mask = sess.run(mb_mask_tf)\n\n mb_anchor_boxes_3d = anchor_boxes_3d[mb_mask]\n mb_anchor_ious = ious[mb_mask]\n\n # ObjectLabel list that hold all boxes to visualize\n obj_list = []\n\n num_positives = 0\n # Convert the mini_batch anchors to object list\n mini_batch_size = mini_batch_utils.rpn_mini_batch_size\n for i in range(mini_batch_size):\n if mb_anchor_ious[i] > mini_batch_utils.rpn_pos_iou_range[0]:\n obj_type = \"Positive\"\n num_positives += 1\n else:\n obj_type = \"Negative\"\n\n obj = box_3d_encoder.box_3d_to_object_label(mb_anchor_boxes_3d[i],\n obj_type)\n obj_list.append(obj)\n\n print('Num positives', num_positives)\n\n # Convert all non-empty anchors to object list\n non_empty_anchor_objs = \\\n [box_3d_encoder.box_3d_to_object_label(\n anchor_box_3d, obj_type='Anchor')\n for anchor_box_3d in anchor_boxes_3d]\n\n ##############################\n # Ground Truth\n ##############################\n if dataset.has_labels:\n easy_gt_objs, medium_gt_objs, \\\n hard_gt_objs, all_gt_objs = demo_utils.get_gts_based_on_difficulty(\n dataset, img_idx)\n else:\n easy_gt_objs = medium_gt_objs = hard_gt_objs = all_gt_objs = []\n\n # Visualize 2D image\n vis_utils.visualization(dataset.rgb_image_dir, img_idx)\n plt.show(block=False)\n\n # Create VtkAxes\n axes = vtk.vtkAxesActor()\n axes.SetTotalLength(5, 5, 5)\n\n # Create VtkBoxes for mini batch anchors\n vtk_pos_anchor_boxes = VtkBoxes()\n vtk_pos_anchor_boxes.set_objects(obj_list, anchor_colour_scheme)\n\n # VtkBoxes for non empty anchors\n vtk_non_empty_anchors = VtkBoxes()\n 
vtk_non_empty_anchors.set_objects(non_empty_anchor_objs,\n anchor_colour_scheme)\n vtk_non_empty_anchors.set_line_width(0.1)\n\n # Create VtkBoxes for ground truth\n vtk_easy_gt_boxes, vtk_medium_gt_boxes, \\\n vtk_hard_gt_boxes, vtk_all_gt_boxes = \\\n demo_utils.create_gt_vtk_boxes(easy_gt_objs,\n medium_gt_objs,\n hard_gt_objs,\n all_gt_objs,\n show_orientations)\n\n vtk_point_cloud = VtkPointCloud()\n vtk_point_cloud.set_points(points, point_colours)\n vtk_point_cloud.vtk_actor.GetProperty().SetPointSize(2)\n\n vtk_ground_plane = VtkGroundPlane()\n vtk_ground_plane.set_plane(ground_plane, dataset.kitti_utils.bev_extents)\n\n # vtk_voxel_grid = VtkVoxelGrid()\n # vtk_voxel_grid.set_voxels(vx_grid)\n\n # Create Voxel Grid Renderer in bottom half\n vtk_renderer = vtk.vtkRenderer()\n vtk_renderer.AddActor(vtk_point_cloud.vtk_actor)\n vtk_renderer.AddActor(vtk_ground_plane.vtk_actor)\n\n vtk_renderer.AddActor(vtk_hard_gt_boxes.vtk_actor)\n vtk_renderer.AddActor(vtk_medium_gt_boxes.vtk_actor)\n vtk_renderer.AddActor(vtk_easy_gt_boxes.vtk_actor)\n vtk_renderer.AddActor(vtk_all_gt_boxes.vtk_actor)\n\n # vtk_renderer.AddActor(vtk_voxel_grid.vtk_actor)\n vtk_renderer.AddActor(vtk_non_empty_anchors.vtk_actor)\n vtk_renderer.AddActor(vtk_pos_anchor_boxes.vtk_actor)\n vtk_renderer.AddActor(axes)\n vtk_renderer.SetBackground(0.2, 0.3, 0.4)\n\n # Setup Camera\n current_cam = vtk_renderer.GetActiveCamera()\n current_cam.Pitch(160.0)\n current_cam.Roll(180.0)\n\n # Zooms out to fit all points on screen\n vtk_renderer.ResetCamera()\n\n # Zoom in slightly\n current_cam.Zoom(2.5)\n\n # Reset the clipping range to show all points\n vtk_renderer.ResetCameraClippingRange()\n\n # Setup Render Window\n vtk_render_window = vtk.vtkRenderWindow()\n mb_iou_thresholds = np.round(\n [mini_batch_utils.rpn_neg_iou_range[1],\n mini_batch_utils.rpn_pos_iou_range[0]], 3)\n vtk_render_window.SetWindowName(\n 'Sample {} RPN Mini Batch {}/{}, '\n 'Num Positives {}'.format(\n sample_name,\n mb_iou_thresholds[0],\n mb_iou_thresholds[1],\n num_positives))\n vtk_render_window.SetSize(900, 500)\n vtk_render_window.AddRenderer(vtk_renderer)\n\n # Setup custom interactor style, which handles mouse and key events\n vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()\n vtk_render_window_interactor.SetRenderWindow(vtk_render_window)\n\n vtk_render_window_interactor.SetInteractorStyle(\n vis_utils.ToggleActorsInteractorStyle([\n vtk_non_empty_anchors.vtk_actor,\n vtk_pos_anchor_boxes.vtk_actor,\n\n vtk_easy_gt_boxes.vtk_actor,\n vtk_medium_gt_boxes.vtk_actor,\n vtk_hard_gt_boxes.vtk_actor,\n vtk_all_gt_boxes.vtk_actor,\n\n vtk_ground_plane.vtk_actor\n ]))\n\n # Render in VTK\n vtk_render_window.Render()\n vtk_render_window_interactor.Start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JianDeng2018/MLOD","sub_path":"demos/mini_batch_rpn_vis.py","file_name":"mini_batch_rpn_vis.py","file_ext":"py","file_size_in_byte":11122,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"85"} +{"seq_id":"10200205240","text":"from copy import deepcopy\n\n\nclass City:\n\n MIN_SHARE_COUNT = 1000\n NEIGHBORS_COORD = ((-1, 0), (0, -1), (1, 0), (0, 1))\n\n def __init__(self, x, y, country_name):\n self.country_name = country_name\n self.x = x\n self.y = y\n self.neighbor_cities = []\n self.country_coins_mapping = [\n {'country_name': country_name, 'amount': 1000000}\n ]\n self.temp_mapping = [\n {'country_name': country_name, 'amount': 0}\n ]\n\n def change_balance(self):\n for i in 
range(len(self.country_coins_mapping)):\n if self.country_coins_mapping[i]['amount'] >= self.MIN_SHARE_COUNT:\n\n partition = self.country_coins_mapping[i]['amount'] // self.MIN_SHARE_COUNT\n\n for neighbor in self.neighbor_cities:\n neighbor.add_other_countries_coins(self.country_coins_mapping[i]['country_name'], partition)\n\n self.temp_mapping[i]['amount'] -= partition * len(self.neighbor_cities)\n\n def add_other_countries_coins(self, country_name, amount):\n for coins in self.temp_mapping:\n # Check if city already has coins of this country\n if coins['country_name'] == country_name:\n # Prepare amount of coins from other country to add\n coins['amount'] += amount\n return\n\n self.temp_mapping.append({'country_name': country_name, 'amount': amount})\n\n def update_balance(self):\n for i in range(len(self.temp_mapping)):\n try:\n self.country_coins_mapping[i]['amount'] += self.temp_mapping[i]['amount']\n except IndexError:\n self.country_coins_mapping.append(deepcopy(self.temp_mapping[i]))\n\n # Reset temporary value\n self.temp_mapping[i]['amount'] = 0\n\n def get_neighbor_cities(self, grid):\n for i in self.NEIGHBORS_COORD:\n n_x, n_y = self.x + i[0], self.y + i[1]\n try:\n if grid[n_x][n_y] != 0:\n self.neighbor_cities.append(grid[n_x][n_y])\n except IndexError:\n continue\n","repo_name":"isaieva/ttps-practice","sub_path":"city.py","file_name":"city.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"38963046284","text":"def factorial():\n value = int(input(\"Factorial Number: \")) ##Input the Number\n n = 0 ## Set the While Loop\n output = 1 ## Number you output\n while (n < value): ## Run the code below if and only if the factorial isn't finished yet\n output = output * (n + 1) ## Multiply the numbers\n n = n + 1 ## Increase the While Loop Number Because you multiplied by it\n print(output) ## Print the Factorial Value\n\nwhile True:\n factorial()","repo_name":"urmilmodi/ESC180","sub_path":"Python/Lectures/Lecture_1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"2357964756","text":"def read_file(file_name):\n with open(file_name, 'r') as f:\n for s in f:\n yield s\n\n\ndef split_log_string(s):\n s = s.split(' - ')\n s += s.pop(1).split(' [', maxsplit=1)\n s += s.pop(2).split('] ', maxsplit=1)\n s[2] = s[2].strip()\n return s\n\n\ndef analyze_logs(file_name):\n output_dict = {}\n fun_count = 0\n modes_dict = {'normal': 0, 'radial': 0}\n message = []\n for s in read_file(file_name):\n fun_count += int(s.find('fun:') != -1)\n try:\n s = split_log_string(s)\n except Exception:\n pass\n else:\n if s[1] not in output_dict:\n output_dict[s[1]] = {\n 'DEBUG': 0,\n 'INFO': 0,\n 'WARNING': 0,\n 'ERROR': 0}\n output_dict[s[1]][s[2]] += 1\n message = s[3].split()\n if message[0] == 'Found' and message[2] == 'modes':\n modes_dict['normal'] += int(message[1])\n if message[0] == 'Found' and message[2] == 'radial' and message[3] == 'mode(s)':\n modes_dict['radial'] += int(message[1])\n return [output_dict, fun_count, modes_dict]\n\n\nif __name__ == '__main__':\n result = analyze_logs('log.txt')\n print(result[0])\n print(f'fun_count: {result[1]}')\n print(f'modes_count: 
{result[2]}')\n","repo_name":"WaterRaven/Python_Sandbox_202","sub_path":"ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23943627585","text":"# 336 HashMap | O(N * W ^ 2) where W is the length of the longest word\nfrom typing import List\n\n\ndef maximumScore(nums: List[int], multipliers: List[int]) -> int:\n store = [0] * (len(multipliers) + 1)\n n = len(nums)\n max_score = [0] * len(multipliers)\n for i in reversed(range(len(multipliers))):\n for j in range(i + 1):\n left = store[j + 1] + nums[j] * multipliers[i]\n right = store[j] + nums[n - i + j - 1] * multipliers[i]\n max_score[j] = max(left, right)\n store = max_score\n\n return store[0]\n","repo_name":"soumyendra98/Data-Structures-and-Algorithms","sub_path":"Arrays/LeetCode/Palindrome Pairs.py","file_name":"Palindrome Pairs.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73465247639","text":"import pprint\r\n\r\nfrom pymongo import MongoClient\r\nclient = MongoClient('mongodb://olk11:7LwEV4mUxIqd@nosql.dcs.aber.ac.uk/olk11')\r\n\r\ndb = client.olk11\r\n\r\noutputfile = open(\"mariners_ships\", encoding=\"utf-8\", mode=\"w\")\r\n\r\ncursor = db.shipsTest.aggregate([\r\n {\"$unwind\": \"$mariners\"},\r\n {\"$group\": {\"_id\": \"$mariners.name\", \r\n \"ships\": {\"$push\": {\"vessel name\": \"$vessel name\",\r\n \"capacity\": \"$mariners.this_ship_capacity\",\r\n \"Date_birth\": \"$mariners.year_of_birth\",\r\n \"leave_date\": \"$mariners.this_ship_leaving_date\",\r\n \"age\": \"$mariners.age\",\r\n \"join_date\": \"$mariners.this_ship_joining_date\"}}}}\r\n])\r\n\r\nfor doc in cursor:\r\n pprint.pprint(doc, outputfile)","repo_name":"oknappett/data_mining","sub_path":"prac3/aggregation3.py","file_name":"aggregation3.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"15438963931","text":"import json\n\nfrom django import forms\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils.translation import gettext as _\n\nfrom raw_materials.models import RawMaterial\nfrom sales.models import MaterialSaleRelation\nfrom datetime import date, datetime\n\nclass SelectPredictionForm(forms.Form):\n\n date = forms.DateField( # datetime.date\n label=_(\"Fecha de prediccion\"),\n required=True\n )\n\n raw_materials = forms.ModelMultipleChoiceField(\n label=_(\"Escoge las materias primas a predecir\"),\n queryset=None,\n required=True\n )\n\n def __init__(self, *args, **kwargs):\n company = kwargs.pop('company')\n self.request = kwargs.pop('request')\n super(SelectPredictionForm, self).__init__(*args, **kwargs)\n\n self.fields['raw_materials'].queryset = RawMaterial.objects.filter(\n company=company.pk,\n )\n\n def clean(self):\n errors = []\n data = super(SelectPredictionForm, self).clean()\n\n try:\n date_string_error = data['date'].strftime(\"%m/%d/%Y\")\n date_string = data['date'].strftime(\"%Y-%m-%d\")\n except (Exception, KeyError) as ex:\n raise forms.ValidationError(\n _('Se introdujo una fecha no valida.'),\n code='invalid',\n )\n\n\n if data['date'] < datetime.now().date():\n errors.append(forms.ValidationError(\n _('Error de fecha se escogio la fecha %(value)s, la cual es del pasado.'),\n code='invalid',\n params={\n 'value': date_string_error,\n },\n ))\n\n for material in 
data['raw_materials']:\n sales_relation_count = MaterialSaleRelation.objects.filter(\n raw_material=material.pk\n ).count()\n\n if sales_relation_count == 0:\n errors.append(forms.ValidationError(\n _('La materia prima %(value)s no ha formado parte de ninguna compra.'),\n code='invalid',\n params={\n 'value': material.name,\n },\n ))\n elif sales_relation_count == 1:\n errors.append(forms.ValidationError(\n _('La materia prima %(value)s se ha comprado una unica vez, siendo no apta para predicciones.'),\n code='invalid',\n params={\n 'value': material.name,\n },\n ))\n\n if errors:\n raise forms.ValidationError(errors)\n\n self.request.session['prediction_date'] = date_string\n self.request.session['prediction_raw_materials'] = json.dumps(\n list(\n data['raw_materials'].values(\n 'name',\n 'pk',\n )\n ),\n cls=DjangoJSONEncoder\n )\n self.request.session['prediction_to_save'] = True\n\n return data\n\n class Meta:\n pass\n","repo_name":"LuisReyes98/estimator","sub_path":"estimator/predictions/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"27178449491","text":"import pathlib\nimport sys\n\nimport utils\n\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.utils import to_categorical\n\ndef train(data_gen, data_dir, epochs):\n train_iter = data_gen.flow_from_directory(\n data_dir,\n **utils.load_options,\n subset=\"training\"\n )\n\n validation_iter = data_gen.flow_from_directory(\n data_dir,\n **utils.load_options,\n subset=\"validation\"\n )\n\n model = Sequential([\n Conv2D(256, 5, activation='relu', padding='same', input_shape=utils.model_input_shape),\n MaxPooling2D(pool_size=2),\n Conv2D(128, 5, activation='relu', padding='same'),\n MaxPooling2D(pool_size=2),\n Dropout(0.2),\n Conv2D(64, 3, activation='relu', padding='same'),\n MaxPooling2D(pool_size=2),\n Dropout(0.2),\n Flatten(),\n Dense(128, activation='relu'),\n Dense(4, activation='softmax')\n ])\n\n model.compile(\n 'adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'],\n )\n\n model.fit(\n train_iter,\n epochs=epochs,\n validation_data=validation_iter,\n )\n\n return model\n\ndef main(data, epochs):\n data_gen = utils.prepare_data()\n\n data_folder = pathlib.Path(data)\n model_folder = pathlib.Path(data).parent / (data_folder.name + \"_model\")\n\n model = train(data_gen, str(data_folder), epochs)\n model.save(str(model_folder))\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Please supply data directory as argument\")\n exit(-1)\n\n main(sys.argv[1], 10)\n","repo_name":"brendanburkhart/gesture-detection","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"29165982823","text":"from random import random\nn = 100\np = 0.7\ncelda = [1 * (random() < p) for x in range(n)]\nm = 0\na = 0\nfor c in celda:\n if not c:\n if a > m:\n m = a\n a = 0\n else:\n a += 1\nm = max(m, a)\nprint(m)\n","repo_name":"satuelisa/Simulation","sub_path":"CellularAutomata/racha.py","file_name":"racha.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"31624204421","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% 
[markdown]'\n# %% [markdown]\n# # Exploratory Data Analysis 0: Initial Data Analysis\n# \n# Initial data analysis is a subset of exploratory data analysis which focuses on making the data fit to be put into a model. This means dealing with non-existant values, normalizing as necescary, and completing other tasks as necescary by the final model to be used.\n# \n# Sources:\n# https://reader.elsevier.com/reader/sd/pii/S0022522315017948?token=E85E57F81B03A15524B9F114673CAF3F3F0FF45188AA953EB7FDD8195887A04325990D11A24383AC4424F669BB95EDAE\n# \n# https://towardsdatascience.com/dealing-with-missing-data-17f8b5827664 \n# \n# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3668100/\n\n# %%\nimport pandas as pd \nimport seaborn as sns\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport pathlib\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n\n# %%\n# Initializing data\nvg_df = pd.read_csv(pathlib.Path('vgsales.csv'))\n\n\n# %%\n\nvg_df.dtypes\nvg_df.describe()\n\nInteractiveShell.ast_node_interactivity = \"last_expr\"\n\n# %% [markdown]\n# ### Check for duplicates\n\n# %%\n# Check for duplicates\nduplicate_bool_ser = vg_df.duplicated(keep = False)\n\nduplicate_bool_ser[duplicate_bool_ser == True]\n\n# %% [markdown]\n# ### Check for Nonexistant Values/Nones (grouped together as nulls) and other unknowns\n\n# %%\n# Plotting the locations of the NaNs by row\n\nnan_locs = vg_df[vg_df.isnull().any(axis = 1)].index.tolist()\n\nplt.bar(nan_locs, 1, width = 10)\nplt.title(\"Null Values by Row\")\nplt.show()\n\n# Binned by thousands\n\nplt.hist(np.array(nan_locs), bins = 17, range = (0.0, 17000.0))\nplt.title('Occurences of NaN in Dataframe binned by 1000s')\nplt.xlabel('Row Number')\nplt.ylabel('Number of Occurences within Each Bin of 1000 rows')\nplt.show()\n\n# Table of where the NaNs are located\n\nNaN_count_col_df = pd.DataFrame()\nfor col in vg_df.columns:\n NaN_count_col_df[f'NaNs_in_{col}'] = [vg_df[col].isnull().values.sum()]\nNaN_count_col_df\n\n\n# %%\nInteractiveShell.ast_node_interactivity = \"all\"\n\n# Found synonyms of null, unknown, nonexistant, n/a, not any etc that might be present in a dataset\n\nnull_synonyms = ['unknown', 'untold', 'undetermined', 'undefined', 'hidden', 'indefinite', 'pending', 'inconclusive', 'unnamed', 'undesignated', 'insignificant', 'nonexistant', 'non-existant', 'missing', 'absent', 'unavailable', 'nonexistent', 'withdrawn', 'null', 'invalid', 'void', 'rescinded', 'repealed', 'blank', 'empty', 'canceled', 'revoked', 'rescinded', 'not any', 'n/a', 'None', 'nan', 'excluded',]\n\n# Select columns with dtype 'object' and converting all strings to lowercase\nobj_cols = ['Name', 'Platform', 'Genre', 'Publisher']\n\nobj_vg_df = pd.DataFrame(dtype = 'object')\nfor col in obj_cols:\n obj_vg_df[col] = vg_df[col].str.lower()\n\n# Recording where the word occus\n\nfor word in null_synonyms:\n for col in obj_cols:\n if True in (obj_vg_df[col] == word).values:\n print('Word: ',word)\n print('in Column: ',col)\n np.array(obj_vg_df.index[obj_vg_df[col] == word].tolist())\n # The output list tells us that the only occurence of a synonym of null was 'unknown' in the 'publisher column'\n\n# Plotting the locations of the 'unknown's by row\n\nunknown_indices = obj_vg_df.index[obj_vg_df['Publisher'] == 'unknown'].tolist()\n\nplt.bar(unknown_indices, 1, width = 10)\nplt.title(\"Unknown Values by Row\")\nplt.show()\n\n# Binned by thousands\n\nplt.hist(np.array(unknown_indices), bins = 17, range = (0.0, 
17000.0))\nplt.title('Occurences of \"unknown\" in \"publisher\" column binned by 1000s')\nplt.xlabel('Row Number')\nplt.ylabel('Number of Occurences within Each Bin of 1000 rows')\nplt.show()\n\n# %% [markdown]\n# ### How to deal with these NaN and unknown values?\n# \n# Simple options:\n# * Delete the features with NaN and unknown values from the dataset entirely\n# * Delete rows with those features missing\n# * Delete the chunk of rows with those features missing\n# * Turn NaN/unknown into a category\n# * Eg., if options for publishers are 'Nintendo', 'Sega' etc add a new option of 'unknown'\n# * Replace with mean, median or mode\n# \n# From Kang (2013):\n# * \n","repo_name":"AaDalal/senior_sem_ai","sub_path":"dataExploration0_IDA_backup.py","file_name":"dataExploration0_IDA_backup.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"13448148902","text":"#SENSOR DIAGNOSTIC\nimport time\nimport board\nimport busio\nimport json\nimport os\nimport subprocess\nimport re\n\n#Import MQTT\nimport paho.mqtt.client as mqtt\n\n#Imports the Pressure/Altitude Sensor (mpl3115a2)\nimport adafruit_mpl3115a2\n#Imports the Accelerometer Sensor (lsm9ds1)\nimport adafruit_lsm9ds1\n\n#Import the Radiation Sensor (Geiger Counter)\nfrom PiPocketGeiger import RadiationWatch\n\n#Initializes global variables\nglobal altitudePressureSensor\nglobal accelerationSensor\nglobal radiationSensor\n\n#Method to initialize all sensors using the global variables\ndef initializeSensors():\n # Test initializing the I2C\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n except:\n print(\"I2C bus could not be initialized\")\n\n # Initialize the Altitude/Pressure Sensor (MPL3115A2)\n # Alternatively you can specify a different I2C address for the device:\n #sensor = adafruit_mpl3115a2.MPL3115A2(i2c, address=0x10)\n global altitudePressureSensor\n try:\n altitudePressureSensor = adafruit_mpl3115a2.MPL3115A2(i2c, address=0x60)\n\n # You can configure the pressure at sealevel to get better altitude estimates.\n # This value has to be looked up from your local weather forecast or meteorlogical\n # reports. It will change day by day and even hour by hour with weather\n # changes. Remember altitude estimation from barometric pressure is not exact!\n # Set this to a value in pascals:\n altitudePressureSensor.sealevel_pressure = 101760\n except(OSError, ValueError, NameError):\n print(\"Altitude sensor not detected. Please check the connection to the sensor.\\n\")\n\n #Initialize the Acceleration Sensor (LSM9DS1)\n global accelerationSensor\n try:\n accelerationSensor = adafruit_lsm9ds1.LSM9DS1_I2C(i2c)\n except(OSError, ValueError, NameError):\n print(\"Acceleration sensor not detected. Please check the connection to the sensor.\\n\")\n\n global radiationSensor\n try:\n radiationSensor = RadiationWatch(24, 23)\n radiationSensor.setup()\n except(OSError, ValueError, NameError):\n print(\"Radiation sensor not detected. 
Please check the connection to the sensor.\\n\")\n\n#Method to get Altitude (MPL3115A2)\ndef getAltitude():\n return altitudePressureSensor.altitude\n\n#Method to get Temp (MPL3115A2)\ndef getTemp():\n return altitudePressureSensor.temperature\n\n#Method to get Pressure (MPL3115A2)\ndef getPressure():\n return altitudePressureSensor.pressure\n\n#Method to get Acceleration (LSM9DS1)\ndef getAcceleration():\n accelerationArray = []\n accel_x, accel_y, accel_z = accelerationSensor.acceleration\n accelerationArray.append(accel_x)\n accelerationArray.append(accel_y)\n accelerationArray.append(accel_z)\n return accelerationArray\n\n\n#Method to get Magnetometer (LSM9DS1)\ndef getMagnetometer():\n magnetometerArray = []\n mag_x, mag_y, mag_z = accelerationSensor.magnetic\n magnetometerArray.append(mag_x)\n magnetometerArray.append(mag_y)\n magnetometerArray.append(mag_z)\n return magnetometerArray\n\n#Method to get Gyroscope (LSM9DS1)\ndef getGyro():\n gyroscopeArray = []\n gyro_x, gyro_y, gyro_z = accelerationSensor.gyro\n gyroscopeArray.append(gyro_x)\n gyroscopeArray.append(gyro_y)\n gyroscopeArray.append(gyro_z)\n return gyroscopeArray\n\n#Method to get radiation counts\ndef getRadiation():\n return radiationSensor.status()\n\ndef testAllSensors():\n try:\n getAltitude()\n except(OSError, ValueError, NameError):\n print(\"Altitude sensor not detected. Please check the connection to the sensor.\\n\")\n\n try:\n getTemp()\n except(OSError, ValueError, NameError):\n print(\"Temperature sensor not detected. Please check the connection to the sensor.\\n\")\n\n try:\n getPressure()\n except(OSError, ValueError, NameError):\n print(\"Pressure sensor not detected. Please check the connection to the sensor.\\n\")\n\n try:\n getAcceleration()\n except(OSError, ValueError, NameError):\n print(\"Acceleration sensor not detected. Please check the connection to the sensor.\\n\")\n\n try:\n getMagnetometer()\n except(OSError, ValueError, NameError):\n print(\"Magnetometer sensor not detected. Please check the connection to the sensor.\\n\")\n\n try:\n getGyro()\n except(OSError, ValueError, NameError):\n print(\"Gyroscope sensor not detected. Please check the connection to the sensor.\\n\")\n\n try:\n getRadiation()\n except(OSError, ValueError, NameError):\n print(\"Radiation sensor not detected. Please check the connection to the sensor.\\n\")\n\ndef testFileWriting():\n f = \"fileWritingTest.txt\"\n try:\n file = open(f, \"w+\")\n file.close()\n except(FileNotFoundError):\n #can't open file error\n print(\"File \" + f + \" was not found. Please make sure the file exists and that you have permission to write to the folder.\")\n\ndef testMQTTConnection():\n # define the broker address once so the error message below can reference it\n serverName = \"iot.eclipse.org\"\n try:\n mqtt.Client(\"sensor-sender\").connect(serverName, 1883, 60)\n except(ConnectionError):\n print(\"The program can't connect to the server. \\nIf the system is connected to the internet, make sure the server name can be resolved. 
\\nServer address is: \" + serverName)\n\ndef testWIFIConnection():\n hostname = \"google.com\"\n response = os.system(\"ping -c 1 \" + hostname)\n\n if (response == 0):\n print(hostname, 'is up!\\n')\n else:\n print(hostname, 'is down\\n')\n\ndef testI2C():\n # Test initializing the I2C\n print(\"I2C addresses: \")\n bashCommand = \"i2cdetect -y 1\"\n os.system(bashCommand)\n print('\\n')\n\ndef testInternalTemperature():\n #Test the internal temperature of the Raspberry Pi\n bashCommand = \"vcgencmd measure_temp\"\n os.system(bashCommand)\n print(\"\\n\")\n\ndef testClockSpeed():\n #Returns the current clock rate of the Raspberry Pi\n print(\"CPU clock speed (MHz): \")\n bashCommand = \"cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq\"\n os.system(bashCommand)\n print(\"\\n\")\n\ndef testExternalStorage():\n #Returns the amount of free space on the external storage\n print(\"Available storage space: \")\n os.system(\"df -h\")\n\ndef runAllTests():\n testI2C()\n initializeSensors()\n testAllSensors()\n testFileWriting()\n testMQTTConnection()\n testWIFIConnection()\n testInternalTemperature()\n testClockSpeed()\n testExternalStorage()\n\nrunAllTests()\n","repo_name":"Matt-Santalla/OSNDS","sub_path":"sensorDiagnostic.py","file_name":"sensorDiagnostic.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"73505808279","text":"import datetime\r\nimport calendar\r\nfrom datetime import timedelta\r\n\r\n#set first weekday\r\nc = calendar.Calendar(firstweekday=calendar.SUNDAY)\r\n\r\n####################\r\n#  Set this stuff  #\r\n####################\r\n\r\n#set the year\r\nyear = 2020\r\n#offset in days from patch Tuesday. 3 = Friday, 4 = Saturday, 5 = Sunday\r\noffsetDays = 4\r\n#set the offset in weeks. 
W1 = 0, W2 = 1, W3 = 2, W4 = 3\r\noffsetWeeks = 1\r\n\r\n\r\n#turn it into days because we can only do math in days\r\noffsetWeeks = offsetWeeks * 7\r\n\r\n#loop through all months\r\nmonth = 1\r\nwhile month <= 12:\r\n monthcal = c.monthdatescalendar(year,month)\r\n #get second Tuesday\r\n secondTues = [day for week in monthcal for day in week if day.weekday() == calendar.TUESDAY and day.month == month][1]\r\n #add offset\r\n offsetDate = secondTues + datetime.timedelta(days=offsetDays) + datetime.timedelta(days=offsetWeeks)\r\n print(offsetDate)\r\n month += 1\r\n","repo_name":"kr4spy/PatchTuesday","sub_path":"patchtuesday.py","file_name":"patchtuesday.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14125114126","text":"\"\"\"\nSee results folder to see which hyperparameters achieved best results.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom fakenews_detector.model import Model\n\n\ndef get_feature_importance(language: str, embeddings_path: str, platform_folder: str, n_estimators: int, platform: str, dataset: str):\n print(\"#################\")\n print(dataset)\n print()\n t = Model(language=language, embeddings_path=embeddings_path, platform_folder=platform_folder)\n feature_importances = t.feature_importance(n_estimators=n_estimators, platform=platform, dataset=dataset)\n print(feature_importances)\n print()\n\n\nif __name__ == \"__main__\":\n language = 'en'\n platform_folder='datasets/Websites/fakenewsdata1_randomPolitics/'\n platform = 'Websites'\n dataset = 'fakenewsdata1_randomPolitics'\n embeddings_path='embeddings/en/model.bin'\n n_estimators = 651\n get_feature_importance(language=language, embeddings_path=embeddings_path, platform_folder=platform_folder, n_estimators=n_estimators, platform=platform, dataset=dataset)\n\n language = 'en'\n platform_folder='datasets/Websites/Bhattacharjee/'\n platform = 'Websites'\n dataset = 'Bhattacharjee'\n embeddings_path='embeddings/en/model.bin'\n n_estimators = 851\n get_feature_importance(language=language, embeddings_path=embeddings_path, platform_folder=platform_folder, n_estimators=n_estimators, platform=platform, dataset=dataset)\n\n language = 'pt'\n platform_folder='datasets/Websites/FakeBrCorpus/'\n platform = 'Websites'\n dataset = 'FakeBrCorpus'\n embeddings_path='embeddings/pt/model.txt'\n n_estimators = 601\n get_feature_importance(language=language, embeddings_path=embeddings_path, platform_folder=platform_folder, n_estimators=n_estimators, platform=platform, dataset=dataset)\n\n\n language = 'pt'\n platform_folder='datasets/Websites/tweets_br/'\n platform = 'Twitter'\n dataset = 'tweets_br'\n embeddings_path='embeddings/pt/model.txt'\n n_estimators = 201\n get_feature_importance(language=language, embeddings_path=embeddings_path, platform_folder=platform_folder, n_estimators=n_estimators, platform=platform, dataset=dataset)\n\n\n language = 'bg'\n platform_folder='datasets/Websites/btv-lifestyle/'\n platform = 'Websites'\n dataset = 'btv-lifestyle'\n embeddings_path='embeddings/bg/model.txt'\n n_estimators = 101\n get_feature_importance(language=language, embeddings_path=embeddings_path, platform_folder=platform_folder, n_estimators=n_estimators, platform=platform, 
dataset=dataset)\n","repo_name":"phfaustini/fakenews","sub_path":"binaria/code/feature_importance.py","file_name":"feature_importance.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"14269491806","text":"from typing import Optional\nimport re\n\nfrom autotraders.error import SpaceTradersException\nfrom autotraders.paginated_list import PaginatedList\nfrom autotraders.shared_models.waypoint_symbol import WaypointSymbol\nfrom autotraders.space_traders_entity import SpaceTradersEntity\nfrom autotraders.session import AutoTradersSession\n\n\nclass Agent(SpaceTradersEntity):\n contracts: Optional[PaginatedList]\n starting_faction: str\n symbol: str\n account_id: str\n credits: int\n ship_count: int\n ships: Optional[PaginatedList]\n headquarters: WaypointSymbol\n\n def __init__(\n self, session: AutoTradersSession, symbol=None, data: Optional[dict] = None\n ):\n \"\"\"\n :param symbol: If it's None, then the agent associated with the token will be retrieved.\n Otherwise, the specified agent will be retrieved.\n \"\"\"\n if symbol is None:\n super().__init__(session, \"my/agent\", data)\n else:\n super().__init__(session, \"agents/\" + symbol, data)\n\n def update(self, data: Optional[dict] = None):\n data = super()._update(data)\n mappings = {\n \"account_id\": {\"type\": None, \"class\": str, \"alias\": \"accountId\"},\n \"symbol\": {\"type\": None, \"class\": str, \"optional\": False},\n \"headquarters\": {\"type\": None, \"class\": WaypointSymbol},\n \"credits\": {\"type\": None, \"class\": int, \"optional\": False},\n \"starting_faction\": {\n \"type\": None,\n \"class\": str,\n \"alias\": \"startingFaction\",\n },\n \"ship_count\": {\n \"type\": None,\n \"class\": int,\n \"alias\": \"shipCount\",\n \"optional\": True,\n },\n }\n super().update_attr(mappings, data)\n\n @staticmethod\n def create(session, faction, symbol, email, override_email_check=False):\n def check_email(e):\n return re.fullmatch(r\"^[\\w.-]+@([\\w-]+\\.)+[\\w-]{2,4}$\", e)\n\n if not override_email_check and not (email is None or check_email(email)):\n raise ValueError(\n email\n + \" is not a valid email. Use override_email_check=True to bypass this error.\"\n )\n
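 # the register endpoint returns an access token for the new agent (read from j[\"data\"][\"token\"] below)\n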
 r = session.post(\n session.b_url + \"register\",\n json={\n \"faction\": faction.upper(),\n \"symbol\": symbol,\n \"email\": email,\n },\n )\n j = r.json()\n if \"error\" in j:\n raise SpaceTradersException(\n j[\"error\"], r.url, r.status_code, r.request.headers, r.headers\n )\n return j[\"data\"][\"token\"]\n\n @staticmethod\n def all(session, page: int = 1) -> PaginatedList:\n def paginated_func(p, num_per_page):\n r = session.get(\n session.b_url + \"agents?limit=\" + str(num_per_page) + \"&page=\" + str(p)\n )\n j = r.json()\n if \"error\" in j:\n raise SpaceTradersException(\n j[\"error\"], r.url, r.status_code, r.request.headers, r.headers\n )\n agents = []\n for agent in j[\"data\"]:\n a = Agent(session, agent[\"symbol\"], agent)\n agents.append(a)\n return agents, r.json()[\"meta\"][\"total\"]\n\n return PaginatedList(paginated_func, page)\n","repo_name":"cosmictraders/autotraders","sub_path":"autotraders/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"85"}
+{"seq_id":"12409015352","text":"import requests\r\nfrom datetime import date,datetime,timedelta \r\nimport argparse\r\nimport json\r\n\r\n# create parser\r\nparser = argparse.ArgumentParser()\r\n \r\n# add arguments to the parser\r\nparser.add_argument(\"username\")\r\nparser.add_argument(\"reponame\")\r\n\r\n# parse the arguments\r\nargs = parser.parse_args()\r\n\r\nuser = args.username\r\nrepo = args.reponame\r\n\r\nurl_branch = 'https://api.github.com/repos/{}/{}/branches'\r\ndata = requests.get(url_branch.format(user,repo)).json() #'Rishabh1803','100DaysOfCode'\r\n\r\n# fetching the latest committed branch\r\nurl_latest_branch = data[0]['commit']['url']\r\ndata = requests.get(url_latest_branch).json()\r\n\r\n# fetching latest commit date\r\nlatest_commit_date = data['commit']['committer']['date'][:10]\r\nlatest_commit_date_list = [int(i) for i in latest_commit_date.split('-')]\r\nprint(latest_commit_date_list[0], latest_commit_date_list[1], latest_commit_date_list[2])","repo_name":"smriti111/100DaysOfCode","sub_path":"Day1/github_other_branch.py","file_name":"github_other_branch.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"42294654417","text":"from unittest.mock import MagicMock\nfrom uuid import UUID\n\nimport pytest\nfrom fastapi import HTTPException\n\nfrom coffee_backend.exceptions.exceptions import ObjectNotFoundError\nfrom coffee_backend.services.coffee_image import ImageService\nfrom tests.conftest import DummyImages\n\n\n@pytest.mark.asyncio\nasync def test_coffee_image_service_get_coffee_image(\n dummy_coffee_images: DummyImages,\n) -> None:\n \"\"\"Test the CoffeeImagesService get_coffee_image method for retrieving a\n coffee image.\n\n Args:\n dummy_coffee_images (DummyImages): An instance providing dummy coffee\n image data.\n\n \"\"\"\n coffee_image_crud = MagicMock()\n coffee_image_crud.read.return_value = (\n dummy_coffee_images.image_1_bytes,\n \"jpg\",\n )\n\n coffe_uuid = UUID(\"123e4567-e19b-12d3-a456-426655440000\")\n\n test_coffee_service = ImageService(coffee_images_crud=coffee_image_crud)\n\n result = test_coffee_service.get_coffee_image(coffe_uuid)\n\n assert coffee_image_crud.read.call_count == 1\n\n coffee_image_crud.read.assert_called_once_with(\n \"123e4567-e19b-12d3-a456-426655440000\"\n )\n\n assert result == 
(dummy_coffee_images.image_1_bytes, \"jpg\")\n\n\n@pytest.mark.asyncio\nasync def test_coffee_image_service_get_coffee_image_object_not_found(\n dummy_coffee_images: DummyImages,\n) -> None:\n \"\"\"Test the CoffeeImagesService get_coffee_image method when the object is\n not found.\n\n Args:\n dummy_coffee_images (DummyImages): An instance providing dummy coffee\n image data.\n\n Raises:\n pytest.raises(HTTPException): An HTTPException should be raised when\n the requested coffee image is not found in the S3 bucket.\n\n \"\"\"\n\n coffee_image_crud = MagicMock()\n coffee_image_crud.read.side_effect = ObjectNotFoundError(\n message=\"Object not found\"\n )\n\n coffe_uuid = UUID(\"123e4567-e19b-12d3-a456-426655440000\")\n\n test_coffee_service = ImageService(coffee_images_crud=coffee_image_crud)\n\n with pytest.raises(HTTPException):\n test_coffee_service.get_coffee_image(coffe_uuid)\n","repo_name":"andifg/coffee_backend","sub_path":"tests/services/coffee_image/test_coffee_image_read.py","file_name":"test_coffee_image_read.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"13959477545","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Multivariate Linear Regression with L2 Regularization and K-fold Cross-Validation\n\n# In[79]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold \nfrom sklearn.model_selection import LeaveOneOut\n\n\n# In[80]:\n\n\ndf=pd.read_csv(\"AdmissionDataset/data.csv\")\ndf.head()\n\n\n# **Drop the Serial No. column because it is irrelevant for prediction**\n\n# In[81]:\n\n\ndf.drop('Serial No.',axis=1,inplace=True)\n\n\n# In[82]:\n\n\ndf.head()\n\n\n# In[90]:\n\n\ndata=np.matrix(df)\nprint(data.shape)\n\n\n# ### Feature Normalisation\n# ${x_i}$= $\\frac{x_i - \\mu}{\\sigma}$ \n# \n# Feature normalisation is done because the data in some columns is very small in comparison to other columns' data.\n\n# #### Preparing the Training data\n\n# In[91]:\n\n\ndef get_data(training_data,testing_data):\n training_data=pd.DataFrame(training_data)\n testing_data=pd.DataFrame(testing_data)\n \n columns=training_data.shape[1]\n\n X=training_data.iloc[:,0:columns-1]# features Sets\n\n mu = X.mean()\n sigma = X.std()\n\n X=(X-X.mean())/X.std()\n\n Y=training_data.iloc[:,columns-1:columns] # outputSet\n X.insert(0, 'Ones', 1)\n\n \n X_train = np.matrix(X.values)\n Y_train = np.matrix(Y.values)\n\n \n columns=testing_data.shape[1]\n\n X=testing_data.iloc[:,0:columns-1]# features Sets\n\n Y=testing_data.iloc[:,columns-1:columns] # outputSet\n\n \n X=(X-mu)/sigma\n\n\n X.insert(0, 'Ones', 1)\n\n X_test = np.matrix(X.values)\n Y_test = np.matrix(Y.values)\n\n return X_train,Y_train,X_test,Y_test\n\n\n# #### Mean Square Error with L2 Regularization\n# ${J(\\theta) = \\frac{1}{2m}\\sum_{i=1}^{m}(\\hat{y_i}-y_i)^2 + \\frac{\\lambda}{2m}\\parallel \\theta \\parallel^2}$ \n# \n# \n# J: is the cost function\n# \n# \n# m : number of training examples\n# \n# ${\\theta}$: parameters\n
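# \n# (Note added for clarity: with $\\lambda = 0$ this reduces to ordinary least squares. The penalty shrinks every weight except the bias ${\\theta_0}$, which the implementation below keeps unpenalized by restoring ${\\theta_0}$ after each update.)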
\n# In[92]:\n\n\ndef costCompute_L2(X,Y,theta,lambd):\n j=0.0\n m=X.shape[0]\n \n err = np.power((np.dot(X,theta.T)-Y),2)\n j=np.sum(err)\n # L2 penalty term, consistent with the weight-decay factor used in gradientDescent below\n reg = (lambd/(2*m))*np.sum(np.square(theta))\n \n return j/(2*m)+reg\n\n\n# #### Gradient Descent algo\n# repeat until convergence {\n# \n# \n# ${{\\theta_j} :=}{{\\theta_j}}$-${\\alpha}$*${\\frac{\\partial}{\\partial {\\theta_j}} J(\\theta)}$\n# \n# \n# }\n# \n# ${\\alpha}$: Learning rate constant\n\n# In[93]:\n\n\n#Vectorised Implementation\ndef gradientDescent(X, y, theta, alpha, iters,lambd):\n \n Jhistory=np.zeros(iters)\n temp=0.0\n \n m=X.shape[0]\n for i in range(iters):\n \n pre = np.dot(X,theta.T)-y\n \n temp=theta[0,0]-(alpha/m)*np.sum(pre)\n \n delta=np.dot(np.transpose(pre),X)\n theta=theta*(1-(alpha*lambd)/m)-(alpha/m)*delta\n \n \n theta[0,0]=temp\n Jhistory[i] = costCompute_L2(X, y, theta,lambd)\n \n \n \n\n return theta,Jhistory\n\n\n# In[94]:\n\n\ndef prediction_Error(X,Y,finalParameter):\n out= np.dot(X,finalParameter.T)\n \n \n err= np.sum(np.square(out-Y))/X.shape[0]\n \n return err\n\n\n# \n\n# In[96]:\n\n\nalpha=.009\niters=1000\nlambd=.01\nkf = KFold(n_splits=3)\n\nerr_Kfold=[]\n\nfor train_index, test_index in kf.split(data):\n training_data,testing_data= data[train_index], data[test_index]\n X_train,Y_train,X_test,Y_test=get_data(training_data,testing_data)\n\n theta = np.matrix(np.random.randn(1,X_train.shape[1]))*0.01\n #print(theta.shape)\n\n minTheta, cost= gradientDescent(X_train, Y_train, theta, alpha, iters,lambd)\n error=prediction_Error(X_train,Y_train,minTheta)\n err_Kfold.append(error)\n \nprint(err_Kfold)\navgErr=0.0 \nfor i in err_Kfold:\n avgErr+=i\n \nprint(avgErr/len(err_Kfold))\n\n\n# In[ ]:\n\n\n\n\n\n# In[124]:\n\n\nchoice_kfold=[2,3,4,5,6,7,8,9,10]\n\nerr_train=[]\nerr_test=[]\nalpha=.009\niters=1000\nfor l in choice_kfold:\n err_Kfold_train=[]\n err_Kfold_test=[]\n kf = KFold(n_splits=l)\n \n for train_index, test_index in kf.split(data):\n \n training_data,testing_data= data[train_index], data[test_index]\n X_train,Y_train,X_test,Y_test=get_data(training_data,testing_data)\n #print(X_train.shape)\n \n theta = np.matrix(np.random.randn(1,X_train.shape[1]))\n #print(theta.shape)\n\n minTheta, cost= gradientDescent(X_train, Y_train, theta, alpha, iters,lambd)\n error1=prediction_Error(X_train,Y_train,minTheta)\n error2=prediction_Error(X_test,Y_test,minTheta)\n \n err_Kfold_train.append(error1)\n err_Kfold_test.append(error2)\n \n avgErr_train=0.0 \n for i in err_Kfold_train:\n avgErr_train+=i\n \n # average over the folds actually run for this value of k\n avgErr_train=avgErr_train/len(err_Kfold_train)\n err_train.append(avgErr_train)\n \n avgErr_test=0.0 \n for i in err_Kfold_test:\n avgErr_test+=i\n \n avgErr_test=avgErr_test/len(err_Kfold_test)\n err_test.append(avgErr_test) \n \n\n\n# In[125]:\n\n\nplt.rcParams['figure.figsize'] = [12, 6]\nplt.scatter(choice_kfold,err_train,label=\"Training Error\")\nplt.plot(choice_kfold,err_train,'g')\nplt.scatter(choice_kfold,err_test,label=\"Validation Error\")\nplt.plot(choice_kfold,err_test,'r')\n\nplt.legend()\n\nplt.xlabel('Number of Folds (k)')\nplt.ylabel('Error')\nplt.title('Training and Validation Error vs Number of Folds')\n\n\n# In[126]:\n\n\nprint(err_train)\nprint(err_test)\n\n\n# ### Leave One Out Cross Validation (LOOCV)\n# \n# **It is a special case of K-fold cross-validation**\n# > **K = m**
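\n# > (Illustration added for concreteness: with $m = 400$ rows, LOOCV fits 400 models, each trained on 399 rows and validated on the single held-out row.)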
\n# > m: number of training examples\n\n# In[129]:\n\n\nloo = LeaveOneOut()\n\nalpha=.009\niters=1000\n\n\nerr_loo_train=[]\nerr_loo_test=[]\n\nfor train_index, test_index in loo.split(data):\n training_data,testing_data= data[train_index], data[test_index]\n X_train,Y_train,X_test,Y_test=get_data(training_data,testing_data)\n \n #print(X_train.shape)\n \n theta = np.matrix(np.random.randn(1,X_train.shape[1]))\n #print(theta.shape)\n \n minTheta, cost= gradientDescent(X_train, Y_train, theta, alpha, iters,lambd)\n error1=prediction_Error(X_train,Y_train,minTheta)\n error2=prediction_Error(X_test,Y_test,minTheta)\n\n err_loo_train.append(error1)\n err_loo_test.append(error2)\n \navgErr_train=0.0 \nfor i in err_loo_train:\n avgErr_train+=i\n\n# average over the LOOCV iterations (not the earlier k-fold list)\navgErr_train=avgErr_train/len(err_loo_train)\nprint(avgErr_train)\n\navgErr_test=0.0 \nfor i in err_loo_test:\n avgErr_test+=i\n\navgErr_test=avgErr_test/len(err_loo_test)\nprint(avgErr_test) \n \n\n\n# - It is highly computationally expensive\n# - There is no randomness in the splitting procedure, therefore it always yields the same result, so it is stable\n\n# In[ ]:\n\n\n\n\n","repo_name":"ishan16696/Machine_Learning","sub_path":"assignment7/src/q-1-5.py","file_name":"q-1-5.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"}
+{"seq_id":"73033816279","text":"from machine import I2C\nfrom utime import sleep_ms\n\nREG_TEMP = const(0x00)\nREG_HUMI = const(0x01)\nREG_CFG = const(0x02)\nREG_SER_ID1 = const(0xfb)\nREG_SER_ID2 = const(0xfc)\nREG_SER_ID3 = const(0xfd)\n\nCFG_RST = const(0b10000000)\nCFG_HEAT_ON = const(0b100000)\nCFG_HEAT_OFF = const(0b0)\nCFG_MODE_SINGLE = const(0b0)\nCFG_MODE_BOTH = const(0b10000)\nCFG_BTST = const(0b1000)\nCFG_TEMP_14BIT = const(0b0)\nCFG_TEMP_11BIT = const(0b100)\nCFG_HUMI_14BIT = const(0b0)\nCFG_HUMI_11BIT = const(0b1)\nCFG_HUMI_8BIT = const(0b10)\n\nRES_8_BIT = const(1)\nRES_11_BIT = const(2)\nRES_14_BIT = const(3)\n\nclass HDC1008(object):\n def __init__(self, i2c, addr=0x40, heater=False):\n if i2c == None or i2c.__class__ != I2C:\n raise ValueError('I2C object needed as argument!')\n self._i2c = i2c\n self._addr = addr\n self._heater = heater\n self._tmp = bytearray(4)\n # Read the sensor serial ID\n self.serial = 0\n self._send_byte(REG_SER_ID1)\n self._recv(self._tmp, 2)\n self.serial += (self._tmp[0] << 32) + (self._tmp[1] << 24)\n self._send_byte(REG_SER_ID2)\n self._recv(self._tmp, 2)\n self.serial += (self._tmp[0] << 16) + (self._tmp[1] << 8)\n self._send_byte(REG_SER_ID3)\n self._recv(self._tmp, 2)\n self.serial += self._tmp[0]\n\n def _send_byte(self, b):\n self._tmp[0] = b\n self._i2c.writeto(self._addr, self._tmp[0:1])\n\n def _send_bytes(self, b):\n self._i2c.writeto(self._addr, b)\n \n def _recv(self, buf, c):\n buf[0:c] = self._i2c.readfrom(self._addr, c)\n\n def _config(self, reg=None):\n if reg is None:\n self._send_byte(REG_CFG)\n self._recv(self._tmp, 2)\n return self._tmp[0:2]\n else:\n if self._heater:\n reg = reg | CFG_HEAT_ON\n self._tmp[0] = REG_CFG\n self._tmp[1] = reg\n self._tmp[2] = 0 \n self._send_bytes(self._tmp[0:3])\n\n def heater(self, s=None):\n if s is not None:\n if s.__class__ != bool:\n raise ValueError('Heater state must be a boolean value or None!')\n else:\n self._heater = s\n else:\n return self._heater\n\n def battery_low(self):\n # one read command is needed first so the config register is defined, where \n # we can read this flag from\n self._raw_temp(CFG_TEMP_11BIT)\n return (self._config()[0] & 
CFG_BTST) == CFG_BTST\n\n def reset(self):\n self._tmp[0] = REG_CFG\n self._tmp[1] = CFG_RST\n self._tmp[2] = 0 \n self._send_bytes(self._tmp[0:3])\n sleep_ms(20)\n\n def _raw_temp(self, acc):\n self._config(CFG_MODE_SINGLE | acc)\n self._send_byte(REG_TEMP)\n sleep_ms(15)\n self._recv(self._tmp, 2)\n return self._tmp[1] + (self._tmp[0] << 8)\n \n def temp(self, acc=CFG_TEMP_14BIT):\n return (self._raw_temp(acc) / 65536) * 165 - 40\n \n def _raw_humi(self, acc):\n self._config(CFG_MODE_SINGLE | acc)\n self._send_byte(REG_HUMI)\n sleep_ms(13)\n self._recv(self._tmp, 2)\n return self._tmp[1] + (self._tmp[0] << 8)\n \n def humi(self, acc=CFG_HUMI_14BIT):\n return (self._raw_humi(acc) / 65536.0) * 100.0\n\n def _raw_temp_humi(self, t_acc, h_acc):\n self._config(CFG_MODE_BOTH | t_acc | h_acc)\n self._send_byte(REG_TEMP)\n sleep_ms(20)\n self._recv(self._tmp, 4)\n return (self._tmp[1] + (self._tmp[0] << 8), self._tmp[3] + (self._tmp[2] << 8))\n\n def temp_humi(self, t_acc=CFG_TEMP_14BIT, h_acc=CFG_HUMI_14BIT):\n (raw_temp, raw_humi) = self._raw_temp_humi(t_acc, h_acc)\n return (((raw_temp / 65536) * 165) - 40, (raw_humi / 65536) * 100.0)\n","repo_name":"kfricke/micropython-hdc1008","sub_path":"hdc1008.py","file_name":"hdc1008.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"85"} +{"seq_id":"39171739265","text":"# steth_controller.py\n# Coordinates all the digital stethescope components\n\n### Imports ###\n\n# Built-ins\nimport datetime\nimport logging\nimport os\nimport sys\nimport threading\nfrom time import sleep\n\n# Local imports\nfrom data_classifier import DataClassifier\nfrom data_collection import BluetoothController\nfrom data_preproc import DataPreproc\nfrom interface_api import Interface_API\nfrom peak_detector import PeakDetector\nfrom analysis_controller import AnalysisController\n\n### Globals ###\n\nLOGGING_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs')\nDATA_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')\n\n### File Checks ###\n\nif not os.path.isdir(LOGGING_DIR):\n os.mkdir(LOGGING_DIR)\n\nif not os.path.isdir(DATA_DIR):\n os.mkdir(DATA_DIR)\n\n### Logging Configuration ###\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"[%(levelname)s] %(asctime)s - %(name)s - %(message)s\",\n datefmt='%d/%m/%Y %H:%M:%S',\n handlers=[\n logging.FileHandler(os.path.join(LOGGING_DIR, 'python.log')),\n logging.StreamHandler(sys.stdout)\n ])\n\nLOGGER = logging.getLogger(\"controller\")\n\n### Classes ###\n\nclass StethescopeController():\n def __init__(self):\n # Child modules for handling various components\n LOGGER.info(\"Creating modules...\")\n self.data_classifier_module = DataClassifier(self)\n self.data_collection_module = BluetoothController(self) \n self.data_preproc = DataPreproc(self)\n self.interface = Interface_API(self)\n self.peak_detector_module = PeakDetector(self)\n self.analysis_peak_detector = PeakDetector(self)\n self.analysis_controller = AnalysisController(self)\n \n # General class variables\n self.child_threads = []\n self.data_dir = DATA_DIR\n self.ecg_file_name = None\n self.mic_file_name = None\n\n self.ecg_save_file_name = None\n self.mic_save_file_name = None\n self.target_save_data_dir = None\n\n # Control signals\n self.start_analysis = False\n self.enable_bt_search = False\n self.collect_bt_data = False\n \n # Data structures for shared information\n self.raw_data_stream = None\n 
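# shared data buffers: filled by the Bluetooth collector and consumed by the analysis modules\n 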
self.ecg_data = None\n self.mic_data = None\n\n def start_listening(self):\n LOGGER.info(\"Spawning child threads...\")\n interface_api_thread = threading.Thread(\n target=self.interface.connect_to_interface, \n daemon=True)\n interface_api_thread.start()\n\n anal_controller_thread = threading.Thread(\n target=self.analysis_controller.start_controller, \n daemon=True) \n anal_controller_thread.start()\n \n while True:\n while not self.enable_bt_search:\n sleep(2)\n result = self.data_collection_module.search_for_device()\n self.enable_bt_search = False\n\n if result:\n LOGGER.info(\"Connected to BT\")\n self.interface.send_bt_status(result, DATA_DIR)\n else:\n LOGGER.info(\"Failed to connect to BT\")\n self.interface.send_bt_status(result, DATA_DIR)\n continue\n\n while not self.collect_bt_data:\n if self.enable_bt_search: break\n sleep(2)\n\n if self.collect_bt_data:\n LOGGER.info(\"Collecting BT data now...\")\n self.data_collection_module.connect_and_listen()\n \n LOGGER.info(\"Data pipe closed\")\n\n### Main ###\n\nif __name__ == \"__main__\":\n # Code for testing the entire system\n stethescope = StethescopeController()\n \n try:\n stethescope.start_listening() \n except KeyboardInterrupt:\n LOGGER.info(\"Listening stopped by user.\")","repo_name":"AZRehkopf/stethescope_lads","sub_path":"python/steth_controller.py","file_name":"steth_controller.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"41508639112","text":"from cryptography.fernet import Fernet\n\n\ndef get_encryption_key(service):\n from models import SpotifyProfile, TwitterProfile\n\n if service == 'twitter':\n key = TwitterProfile.get_credentials()[3]\n elif service == 'spotify':\n key = SpotifyProfile.get_credentials()[3]\n\n return key\n\n\ndef encrypt_token(value, service):\n key = get_encryption_key(service)\n\n value = value.encode()\n\n f = Fernet(key)\n value = f.encrypt(value)\n\n return value.decode()\n\n\ndef decrypt_token(value, service):\n key = get_encryption_key(service)\n\n value = value.encode()\n\n f = Fernet(key)\n value = f.decrypt(value)\n\n return value.decode()\n","repo_name":"jaimescose/stopify","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9081447525","text":"import time\nfrom datetime import datetime, timedelta\nimport picamera\n\n## expects interval in seconds\ndef timelapse(interval, duration, rotation=0):\n count = 0\n with picamera.PiCamera() as cam:\n cam.rotation = rotation\n cam.start_preview()\n time.sleep(2)\n for filename in cam.capture_continuous('{timestamp:%Y-%m-%d-%H-%M}-tc{counter:02d}.jpg'):\n count += 1\n if count > (duration / interval):\n return count\n time.sleep(interval)\n return\n\n## expects interval in hours\ndef timelapse2(interval, duration, rotation=0):\n count = 0\n mx = int(duration / interval)\n interval = interval * 3600\n while count <= mx:\n with picamera.PiCamera() as cam:\n cam.rotation = rotation\n cam.start_preview()\n cam.capture('{}-tc{:03d}.jpg'.format(datetime.now(), count))\n count += 1\n time.sleep(interval)\n","repo_name":"gvelonis/pi-tc","sub_path":"libpitc.py","file_name":"libpitc.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"37858925026","text":"'''\nCreated on Feb 22, 2019\n\n@author: 
daniel\n'''\nfrom morphological_skeleton_transform import reconstruct_image, compute_skeleton_subsets\nimport sys\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n B = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))\n plt.imshow(B)\n plt.gray()\n plt.title(\"Structuring Element\")\n plt.show()\n if len(sys.argv[1:]) == 0:\n sys.argv[1:] = [\"Data/deer.png\", \"Data/dog.png\", \"Data/butterfly.png\", \"Data/lamp.png\", \"Data/fish.png\"]\n\n imgs = []\n for arg in sys.argv[1:]:\n imgs.append(cv2.imread(arg))\n for X in imgs:\n\n # preprocessing step...\n X = np.bitwise_not(X)\n X = cv2.cvtColor(X, cv2.COLOR_BGR2GRAY)\n _, X = cv2.threshold(X, 0, 255, cv2.THRESH_OTSU)\n\n S = compute_skeleton_subsets(X, B)\n\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, len(S) + 1, 1)\n plt.imshow(X)\n plt.title(\"Original Image\")\n plt.axis('off')\n for i, s in enumerate(S):\n fig.add_subplot(1, len(S) + 1, i + 2)\n plt.imshow(s)\n plt.title(\"$S_{\" + str(i) + \"}$\")\n plt.axis('off')\n plt.show()\n\n (components, reconstructions) = reconstruct_image(S, X, B)\n\n fig = plt.figure()\n plt.gray()\n fig.add_subplot(1, len(components) + 1, 1)\n plt.imshow(X)\n plt.title(\"Original Image\")\n plt.axis('off')\n\n for n, component in enumerate(components):\n fig.add_subplot(1, len(components) + 1, n + 2)\n title = \"$S_{\" + str(n) + \"}\"\n\n if n > 1:\n title = title + \"\\oplus \" + str(n) + \"B\"\n else:\n title = title + \"\\oplus B\"\n title = title + \"$\"\n plt.imshow(component)\n plt.title(title)\n plt.axis('off')\n plt.show()\n\n fig = plt.figure()\n fig.add_subplot(121)\n plt.imshow(X)\n plt.title('Original Image')\n plt.axis('off')\n fig.add_subplot(122)\n plt.imshow(reconstructions[-1])\n plt.title('Reconstruction')\n plt.axis('off')\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n exit()\n","repo_name":"danielenricocahall/Morphological-Skeleton-Transform","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"23545943434","text":"import os\nimport comtypes.client\nfrom pathlib import Path\nimport tkinter as tk\nfrom tkinter import filedialog, messagebox\n\ndef ppt_to_pdf(input_file, output_file):\n powerpoint = comtypes.client.CreateObject(\"Powerpoint.Application\")\n powerpoint.Visible = 1\n\n presentation = powerpoint.Presentations.Open(str(input_file))\n presentation.ExportAsFixedFormat(str(output_file), 2) # 2 for PDF format\n presentation.Close()\n\n powerpoint.Quit()\n\ndef excel_to_pdf(input_file, output_file):\n excel = comtypes.client.CreateObject(\"Excel.Application\")\n excel.Visible = 0\n\n workbook = excel.Workbooks.Open(str(input_file))\n workbook.ExportAsFixedFormat(0, str(output_file)) # 0 for PDF format\n workbook.Close()\n\n excel.Quit()\n\ndef word_to_pdf(input_file, output_file):\n word = comtypes.client.CreateObject(\"Word.Application\")\n word.Visible = 0\n\n doc = word.Documents.Open(str(input_file))\n doc.SaveAs(str(output_file), FileFormat=17) # 17 for PDF format\n doc.Close()\n\n word.Quit()\n\ndef convert_to_pdf(input_path, output_path):\n input_file = Path(input_path)\n output_file = Path(output_path)\n\n if input_file.suffix in ['.ppt', '.pptx']:\n ppt_to_pdf(input_file, output_file)\n elif input_file.suffix in ['.xls', '.xlsx']:\n excel_to_pdf(input_file, output_file)\n elif input_file.suffix in ['.doc', '.docx']:\n word_to_pdf(input_file, output_file)\n 
else:\n messagebox.showerror(\"Error\", \"Unsupported file format.\")\n\ndef browse_input_file():\n file_path = filedialog.askopenfilename()\n input_entry.delete(0, tk.END)\n input_entry.insert(tk.END, file_path)\n\ndef browse_output_folder():\n folder_path = filedialog.askdirectory()\n output_entry.delete(0, tk.END)\n output_entry.insert(tk.END, folder_path)\n\ndef convert_file():\n input_file = input_entry.get()\n output_folder = output_entry.get()\n\n if not input_file or not output_folder:\n messagebox.showerror(\"Error\", \"Please select input file and output folder.\")\n return\n\n output_file = os.path.join(output_folder, Path(input_file).stem + \".pdf\")\n \n try:\n convert_to_pdf(input_file, output_file)\n messagebox.showinfo(\"Success\", \"File converted to PDF successfully!\")\n except Exception as e:\n messagebox.showerror(\"Error\", str(e))\n\n# Create the main window\nwindow = tk.Tk()\nwindow.title(\"File Converter\")\n\n# Input File\ninput_label = tk.Label(window, text=\"Input File:\")\ninput_label.grid(row=0, column=0, padx=5, pady=5, sticky=tk.W)\ninput_entry = tk.Entry(window, width=50)\ninput_entry.grid(row=0, column=1, padx=5, pady=5)\ninput_button = tk.Button(window, text=\"Browse\", command=browse_input_file)\ninput_button.grid(row=0, column=2, padx=5, pady=5)\n\n# Output Folder\noutput_label = tk.Label(window, text=\"Output Folder:\")\noutput_label.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W)\noutput_entry = tk.Entry(window, width=50)\noutput_entry.grid(row=1, column=1, padx=5, pady=5)\noutput_button = tk.Button(window, text=\"Browse\", command=browse_output_folder)\noutput_button.grid(row=1, column=2, padx=5, pady=5)\n\n# Convert Button\nconvert_button = tk.Button(window, text=\"Convert\", command=convert_file)\nconvert_button.grid(row=2, column=1, padx=5, pady=10)\n\n# Start the main loop\nwindow.mainloop()\n","repo_name":"akkhanna10/pdf-converter","sub_path":"file_converter.py","file_name":"file_converter.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"12983472751","text":"#!/usr/bin/env python\n\"\"\"Multiples of a number\"\"\"\nimport sys\n\n\nclass Multiplicity:\n def __init__(self, min=0, max=100, multiplicity=10):\n \"\"\"Yield multiples of a number within a range\n\n Emits the numbers in the range [min:max] that are multiples of the\n given number.\n\n Args:\n min (int): minimum value\n max (int): maximum value\n multiplicity (int): the number the emitted values must be multiples of\n\n Returns:\n int\n\n Examples:\n >>> next(Multiplicity(13, 30, 10))\n 20\n \"\"\"\n if min > max:\n raise ValueError('min must be less or equal max')\n \n self.min = min\n self.max = max\n self.multiplicity = multiplicity\n self.next_value = None\n \n def __iter__(self):\n return self\n\n def __next__(self):\n min_value = self.next_value or self.min - 1 # include the minimum value itself\n\n # compute the increment needed to reach the next multiple\n self.next_value = (min_value + self.multiplicity\n - (min_value % self.multiplicity))\n\n if self.next_value > self.max:\n raise StopIteration\n\n return self.next_value\n \n\nif __name__ == '__main__':\n default_min = 45\n default_max = 670\n default_mp = 10\n \n try:\n mn = int(input(f'Min ({default_min}): ') or default_min)\n mx = int(input(f'Max ({default_max}): ') or default_max)\n mp = int(input(f'Multiplicity ({default_mp}): ') or default_mp)\n except ValueError:\n print(\"Don't be stupid and enter valid numbers!\", file=sys.stderr)\n exit(1)\n\n for m in Multiplicity(mn, mx, mp):\n 
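# e.g. with the defaults (45, 670, 10) this prints 50, 60, ..., 670\n 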
print(m)\n","repo_name":"dmlogv/hr-mgfn-automation","sub_path":"gppl/py/b_multiplicity.py","file_name":"b_multiplicity.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30029938188","text":"# Given a binary array A, find the maximum sequence of continuous 1's\r\n# that can be formed by replacing at-most B zeroes.\r\n# For this problem, return the indices of maximum continuous series of 1s in order.\r\n# If there are multiple possible solutions, return the sequence which has the minimum start index.\r\n# 0 <= B <= 10^5\r\n# 1 <= size(A) <= 10^5\r\n# A[i]==0 or A[i]==1\r\n# First argument is an binary array A.\r\n# Second argument is an integer B.\r\n# Return an array of integers denoting the indices(0-based) of 1's in the maximum continuous series.\r\n# Input 1:\r\n# A = [1 1 0 1 1 0 0 1 1 1 ]\r\n# B = 1\r\n# Input 2:\r\n# A = [1, 0, 0, 0, 1, 0, 1]\r\n# B = 2\r\n# Output 1:\r\n# [0, 1, 2, 3, 4]\r\n# Output 2:\r\n# [3, 4, 5, 6]\r\n# Explanation 1:\r\n# Flipping 0 present at index 2 gives us the longest continuous series of 1's i.e subarray [0:4].\r\n# Explanation 2:\r\n# Flipping 0 present at index 3 and index 5 gives us the longest\r\n# continuous series of 1's i.e subarray [3:6].\r\ndef maxContinuousSeriesOf1s(A, B):\r\n temp = 0\r\n result = []\r\n flipCount = 0\r\n currMax = 0\r\n left = 0\r\n for right in range(len(A)):\r\n if A[right] == 0:\r\n flipCount += 1\r\n while flipCount > B:\r\n if A[temp] == 0:\r\n flipCount -= 1\r\n temp += 1\r\n if right - temp + 1 > currMax:\r\n currMax = right - temp + 1\r\n left = temp\r\n\r\n for i in range(left, left + currMax):\r\n result.append(i)\r\n\r\n return result\r\n\r\n\r\nprint(maxContinuousSeriesOf1s([1, 1, 0, 1, 1, 0, 0, 1, 1, 1], 1)) # [0, 1, 2, 3, 4]\r\nprint(maxContinuousSeriesOf1s([1, 0, 0, 0, 1, 0, 1], 2)) # [3, 4, 5, 6]\r\n","repo_name":"deysantanu84/python-portfolio","sub_path":"problemSolving/twoPointers/maxContiuousSeriesOf1s.py","file_name":"maxContiuousSeriesOf1s.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"33516866909","text":"import numpy as np\n\nfrom CGFCore.GameFile import Game\n\n\nclass NonZeroSumGame(Game):\n \"\"\"Represents a non-zero sum game.\"\"\"\n\n def __init__(self, payoff_matrix):\n # Constructor of the NonZeroSumGame class. 
It takes a payoff_matrix as an argument.\n super().__init__(payoff_matrix)\n # Calls the constructor of the superclass Game, passing the payoff_matrix as an argument.\n # This line allows NonZeroSumGame to inherit the attributes and methods of the superclass Game.\n self.players = len(payoff_matrix)\n # initializes the number of players in the game to be the number of elements in the list payoff_matrix\n\n def best_response(self, player, opponent_strategy):\n # calculates the best response for a given player against a given opponent strategy.\n expected_payoffs = np.dot(self.payoff_matrix[player], opponent_strategy)\n # calculates the expected payoffs for each strategy of the player by dot product of the player's row of the payoff matrix and the opponent's strategy vector.\n best_response = np.argmax(expected_payoffs)\n # calculates the best response strategy of the player, which is the strategy that gives the maximum expected payoff.\n return best_response\n\n def compute_nash_equilibrium(self, initial_strategies, iterations=1000):\n # computes an approximation of the Nash Equilibrium of the game using a simple iterative method.\n # It takes as input initial_strategies which is a list of initial strategies for each player and iterations which is the number of iterations to run the algorithm.\n # If iterations is not provided, it defaults to 1000.\n strategies = initial_strategies\n for _ in range(iterations):\n for player in range(self.players):\n opponent = 1 - player # This works for 2-player games.\n strategies[player] = self.best_response(player, strategies[opponent])\n # This line updates the strategy of the current player to be the best response against the current strategy of the opponent player.\n return strategies\n","repo_name":"NIV27e/CGFCore","sub_path":"CGF/src/CGFCore/games/NonZeroSumGameFile.py","file_name":"NonZeroSumGameFile.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"9643609105","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\na=np.array([2,3,4,5,6,7])\n\n#1.1 Ordinary math operations\na1=a**2 #a1 is still an array whose elements are the squares of a's elements, [ 4 9 16 25 36 49]\nn=np.sqrt(a) #element-wise square root\n# print(a,a1,n)\n\nprint(np.median(a)) # compute the median\nprint(a.std()) # compute the standard deviation\nprint(np.ptp(a)) # compute the range (peak to peak), i.e. the difference between max and min: 7-2=5\n\n\n#1.2 Using the sort() function\nd=np.random.randint(1,100,10) # generate 10 random integers between 1 and 100\n# print(d) #[15 20 19 30 63 52 93 20 20 20]\n\n# Method 1: sorts d in place and does not create a new array\nd1=d.sort()\n# print(d1) #----- None (sort() returns no value)\n# print(d) #[15 19 20 20 20 20 30 52 63 93]\n\n# Method 2: creates a new array and leaves d unchanged\n# d2=np.sort(d)\n# print('*'*10,d2)\n\n#1.3 The round() function\nt=np.random.random(3) # generate 3 random floats between 0 and 1 [0.87683054 0.29784175 0.98638834]\n# print(t)\n# t1=np.array([random.random() for i in range(3)])\n# print(t1)\nm=np.round(t,2) # round the values of t to 2 decimal places [0.88 0.3 0.99]\n# print(m)\n\n#1.4 Since a is still an array, we can operate on it further\n# b1=np.array([a,a*2])\n# # print(type(b1),b1)\n# b2=b1.sum(axis=0)\n# print(type(b2),b2)\ns1=np.arange(12).reshape(3,4)\nprint(np.hsplit(s1,2))","repo_name":"happy-yu1/PythonProjects","sub_path":"数据分析/第一部分--numpy/02-numpy函数.py","file_name":"02-numpy函数.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"37655867614","text":"'''\nGiven an array of integers arr, replace each element with its rank.\n\nThe rank represents how large the element is. 
The rank has the following rules:\n\n    Rank is an integer starting from 1.\n    The larger the element, the larger the rank. If two elements are equal, their rank must be the same.\n    Rank should be as small as possible.\n\n \n\nExample 1:\n\nInput: arr = [40,10,20,30]\nOutput: [4,1,2,3]\nExplanation: 40 is the largest element. 10 is the smallest. 20 is the second smallest. 30 is the third smallest.\n\nExample 2:\n\nInput: arr = [100,100,100]\nOutput: [1,1,1]\nExplanation: Same elements share the same rank.\n\nExample 3:\n\nInput: arr = [37,12,28,9,100,56,80,5,12]\nOutput: [5,3,4,2,8,6,7,1,3]\n\n\n'''\n\n\n\n\ndef arrayRankTransform(arr):\n arr_1 = list(set(arr))\n arr_1.sort()\n for j in range(len(arr)):\n index = arr_1.index(arr[j])\n arr[j] = index + 1\n return arr\n\n\narr = [37, 12, 28, 9, 100, 56, 80, 5, 12]\n# [5, 9, 12, 12, 28, 37, 56, 80, 100]\n#[5, 3, 4, 2, 8, 6, 7, 1, 3]\n#[6, 3, 5, 2, 9, 7, 8, 1, 3]\nprint(arrayRankTransform(arr))\n\n\n","repo_name":"hnarasimha/Git-Murthy","sub_path":"15_rank_transformation_of_array.py","file_name":"15_rank_transformation_of_array.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
+{"seq_id":"16754804496","text":"\r\nimport re\r\nimport json\r\nimport time\r\nimport numpy as np\r\nfrom functools import reduce\r\nfrom collections import OrderedDict\r\nfrom stanfordnlp import Pipeline\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import stopwords\r\n\r\ndef get_token_spans(sentence, words):\r\n\r\n\tspans = []\r\n\tbegin = 0\r\n\tfor w in words:\r\n\t\t# remove all whitespace\r\n\t\twhile not sentence.startswith(w.text):\r\n\t\t\t# if text[0] != \" \":\r\n\t\t\tassert sentence[0] == \" \"\r\n\t\t\tbegin += 1\r\n\t\t\tsentence = sentence[1:]\r\n\t\t# add span\r\n\t\tspans.append((begin, begin + len(w.text)))\r\n\t\t# update begin and text for next token\r\n\t\tbegin += len(w.text)\r\n\t\tsentence = sentence[len(w.text):]\r\n\r\n\treturn spans\r\n\r\nclass ConceptParser:\r\n\r\n\tdef __init__(self):\r\n\t\t# load stopwords\r\n\t\tself.stop_words = stopwords.words('english')\r\n\t\t# conjunctions to consider\r\n\t\tself.conjugation_tokens = ['and', 'or']\r\n\t\t# create lemmatizer and nlp-pipeline\r\n\t\tself.lemmatizer = WordNetLemmatizer()\r\n\t\tself.tokenizer = Pipeline(lang='en', processors=\"tokenize\")\r\n\t\tself.nlp = Pipeline(lang='en', processors=\"tokenize,pos,depparse\", tokenize_pretokenized=True)\r\n\t\t# map dep-type to function\r\n\t\tself.func_map = {\r\n\t\t\t'nsubj':\tnsubject,\r\n\t\t\t'det':\t\tdet,\r\n\t\t\t'dep':\t\tdep,\r\n\t\t\t'dobj':\t\tdobj,\r\n\t\t\t'acomp':\tacomp,\r\n\t\t\t'amod':\t\tamod,\r\n\t\t\t'aux':\t\taux,\r\n\t\t\t'nn':\t\tnn,\r\n\t\t\t'neg':\t\tneg,\r\n\t\t\t'prep':\t\tprep,\r\n\t\t}\r\n\r\n\tdef remove_PRP_without_NN(self, words, deps):\r\n\t\t# check all dependencies\r\n\t\tfor i, d in zip(range(len(deps)-1, -1, -1), deps[::-1]):\r\n\t\t\t# get part-of-speech tags of dependency-targets\r\n\t\t\tpos_tags = [d[0].pos, d[2].pos]\r\n\t\t\t# check condition\r\n\t\t\tif (\"PRP\" in pos_tags) and (\"NN\" not in pos_tags) and (d[1] != 'nsubj'):\r\n\t\t\t\tdel deps[i]\r\n\t\t# return dependencies\r\n\t\treturn deps\r\n\r\n\tdef process_sentence(self, sentence):\r\n\t\t# get words and dependencies\r\n\t\twords = sentence.words\r\n\t\tdeps = sentence.dependencies\r\n\t\t# remove unnecessary dependencies\r\n\t\tdeps = self.remove_PRP_without_NN(words, deps)\r\n\r\n\t\t
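# Concepts are gathered from four sources below: single content words, dependency-rule\r\n\t\t# concepts, conjunction patterns, and the manual \"TO\" rule; set() removes duplicates.\r\n\t\t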
# all words except stopwords are concepts\r\n\t\t# this differs from the usual idea of concepts\r\n\t\tword_concepts = [(w,) for w in words if w.text not in self.stop_words]\r\n\t\t# get concepts from each dependency\r\n\t\tdep_concepts = (self.func_map[t](w1, w2) for (w1, t, w2) in deps if t in self.func_map)\r\n\t\tdep_concepts = [concept for concept in dep_concepts if concept is not None]\r\n\t\t# get conjunction concepts\r\n\t\tconj_positions = self.conjugation_finder(words)\r\n\t\tconj_concepts = sum(map(lambda i: self.conjugator(words, i), conj_positions), [])\r\n\t\t# get manual concepts\r\n\t\tmanual_concepts = self.manual(words)\r\n\r\n\t\t# throw all together and return\r\n\t\tconcepts = set(word_concepts + dep_concepts + conj_concepts + manual_concepts)\r\n\t\treturn list(concepts)\r\n\r\n\tdef parse(self, sentence):\r\n\t\t# tokenize sentence\r\n\t\tdoc = self.tokenizer(sentence)\r\n\t\tall_words = sum([sent.words for sent in doc.sentences], [])\r\n\t\t# get word spans\r\n\t\tall_word_spans = get_token_spans(sentence, all_words)\r\n\t\t# apply lemmatizer on all words and\r\n\t\t# reconstruct document from tokens such that nlp pipeline \r\n\t\t# recreates the exact sentences and tokens\r\n\t\ttokenized_sentence = '\\n'.join([' '.join([self.lemmatizer.lemmatize(w.text) for w in sent.words]) for sent in doc.sentences])\r\n\t\t# apply pipeline\r\n\t\tdoc = self.nlp(tokenized_sentence)\r\n\t\t# apply word-spans to words\r\n\t\tall_words = sum([sent.words for sent in doc.sentences], [])\r\n\t\tassert len(all_words) == len(all_word_spans)\r\n\t\tfor w, (b, e) in zip(all_words, all_word_spans):\r\n\t\t\tw.begin = b\r\n\t\t\tw.end = e\r\n\t\t# process single sentence\r\n\t\treturn self.process_sentence(doc.sentences[0])\r\n\r\n\t# This rule handles \"TO\"-type POS tags that relate objects\r\n\tdef manual(self, words):\r\n\t\tmanual_concepts = []\r\n\r\n\t\tfor i in range(1, len(words) - 1):\r\n\t\t\tword_span = (words[i-1], words[i], words[i+1])\r\n\t\t\tpos_span = words[i-1].pos + words[i].pos + words[i+1].pos\r\n\t\t\t\r\n\t\t\tif pos_span in [\"JJTOVB\", \"JJTOVBD\", \"JJTOVBZ\", \"JJSTOVB\", \"JJSTOVBD\", \"JJSTOVBZ\", \"JJRTOVB\", \"JJRTOVBD\", \"JJRTOVBZ\"]:\r\n\t\t\t\tmanual_concepts.append(word_span)\r\n\t\t\r\n\t\treturn manual_concepts\r\n\r\n\t# This rule finds the positions of all conjunctions\r\n\tdef conjugation_finder(self, words):\r\n\t\t# find all conjunctions\r\n\t\tocc = sum(([i for i, w in enumerate(words) if w.text == t and w.pos == 'CC'] for t in self.conjugation_tokens), [])\r\n\t\tocc = sorted(occ)\r\n\t\treturn occ\r\n\r\n\t# This rule handles \"AND\"-type conjunctions that relate sentence structures\r\n\tdef conjugator(self, words, i):\r\n\t\tconcepts = []\r\n\r\n\t\tword1 = i - 1\r\n\t\tword2 = min((j for j, w in enumerate(words[i+1:], start=i+1) if w.pos != 'DT'), default=-1)\r\n\r\n\t\ttarget_words = [word1] + ([word2] if word2 >= 0 else [])\r\n\r\n\t\tif len(target_words) == 2:\r\n\t\t\tconcepts.append((words[word1], words[i], words[word2]))\r\n\t\t# find verb and noun\r\n\t\tverbs = list(filter(lambda i: words[i].pos == 'VB', range(i - 3, i)))\r\n\t\tnouns = list(filter(lambda i: words[i].pos == 'NN', range(i - 3, i)))\r\n\t\t# conjunction with noun\r\n\t\tif len(nouns) > 0:\r\n\t\t\tconcepts.extend(\r\n\t\t\t\t[(words[j], words[nouns[0]]) for j in target_words if j != nouns[0]]\r\n\t\t\t)\r\n\t\t# conjunction with verb\r\n\t\tif len(verbs) > 0:\r\n\t\t\tconcepts.extend(\r\n\t\t\t\t[(words[verbs[0]], words[j]) for j in 
target_words]\r\n\t\t\t)\r\n\t\t# relations after conjugation\r\n\t\trelations = [\"between\", \"over\", \"with\", \"on\", \"to\", \"of\", \"into\", \"in\", \"at\"]\r\n\t\tfor j, w in enumerate(words[i:], start=i):\r\n\t\t\tif w.text in relations:\r\n\t\t\t\tword3 = j + 1\r\n\t\t\t\tconcepts.extend(\r\n\t\t\t\t\t[(words[j], words[word3]) for j in target_words if j != word3]\r\n\t\t\t\t)\r\n\t\t\t\tbreak\r\n\r\n\t\treturn concepts\r\n\r\n\r\n\r\n\"\"\" Dependency Types \"\"\"\r\n\r\n# nsubj : nominal subject : Nominal subject is a noun phrase which is the syntactic subject of a clause\r\ndef nsubject(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\t# DT check\r\n\tif \"DT\" not in pos:\r\n\r\n\t\t# NN and JJ check\r\n\t\tif \"JJ\" in pos:\r\n\t\t\treturn (w1, w2)\r\n\r\n\t\tif \"NN\" in pos:\t\t\t\t\t\t\t\t\r\n\t\t\tif \"PRP\" in pos:\r\n\t\t\t\treturn (w1,)\r\n\t\t\telse:\r\n\t\t\t\treturn (w2, w1)\r\n\r\n\tif \"DT\" in pos:\r\n\t\treturn (w1,)\r\n\r\n\treturn None\t\t\t\t\r\n\t\r\n# det : determiner : Determiner is the relation between the head of an NP and its determiner\r\ndef det(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif \"DT\" not in pos:\r\n\t\treturn (w2, w1)\r\n\tif \"DT\" in pos:\r\n\t\treturn (w1,)\r\n\r\n\treturn None\r\n\r\n# dep : dependent : Dependency is labeled as dep when the system is unable to determine a more precise dependency relation between two words\r\ndef dep(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif (\"DT\" not in pos) and (\"JJ\" in pos):\r\n\t\treturn (w2, w1)\r\n\r\n\tif (\"DT\" not in pos) and (\"JJ\" not in pos):\r\n\t\tif (\"NN\" in pos) and (\"VB\" not in pos):\r\n\t\t\treturn (w1,)\r\n\t\telse:\r\n\t\t\treturn (w1, w2)\r\n\r\n\tif \"DT\" in pos:\r\n\t\treturn (w1,)\r\n\r\n\treturn None\r\n\r\n# dobj : direct object : Direct object of a VP is the noun phrase which is the (accusative) object of the verb\r\ndef dobj(w1, w2):\r\n\treturn (w1, w2)\r\n\r\n# acomp : adjectival complement : Adjectival complement of a verb is an adjectival phrase which functions as the complement\r\ndef acomp(w1, w2):\r\n\treturn (w1, w2)\r\n\t\r\n# advmod : adverbial modifier : Adverbial modifier of a word is a (non-clausal) adverb or adverbial phrase (ADVP) that serves to modify the meaning of the word\r\ndef advmod(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\t#print pos\r\n\tif (\"VB\" in pos) and (\"JJ\" in pos):\r\n\t\treturn (w1, w2)\r\n\tif (\"VB\" in pos) and (\"JJ\" not in pos) and (\"IN\" in pos):\r\n\t\treturn (w1, w2)\r\n\tif (\"VB\" in pos) and (\"JJ\" not in pos) and (\"IN\" not in pos):\r\n\t\treturn (w2, w1)\r\n\tif \"VB\" not in pos:\r\n\t\treturn (w2, w1)\r\n\r\n\treturn None\r\n\r\n# amod : adjectival modifier : Adjectival modifier of an NP is any adjectival phrase that serves to modify the meaning of the NP\r\ndef amod(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif \"VB\" in pos:\r\n\t\treturn (w1, w2)\r\n\telse:\r\n\t\treturn (w2, w1)\r\n\r\n# aux : auxiliary : Auxiliary of a clause is a non-main verb of the clause\t\r\ndef aux(w1, w2):\r\n\tpos = [w1.pos, w2.pos]\r\n\r\n\tif \"TO\" in pos:\r\n\t\treturn (w1,)\r\n\tif \"VB\" not in pos:\r\n\t\treturn (w2, w1)\r\n\r\n# nn : noun compound modifier : Noun compound modifier of an NP is any noun that serves to modify the head noun\r\ndef nn(w1, w2):\r\n\t# order words by index\r\n\tif w2.index < w1.index:\r\n\t\treturn (w2, w1)\r\n\telse:\r\n\t\treturn (w1, w2)\r\n\r\ndef neg(w1, w2):\r\n\tif w1 != w2:\r\n\t\treturn (w2, w1)\r\n\r\n# prep : prepositional modifier : Prepositional modifier of a verb, 
adjective, or noun is any prepositional phrase that serves to modify the meaning of the verb, adjective, noun, or even another preposition\r\ndef prep(w1, w2):\r\n\treturn (w1, w2)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\tconcept = ConceptParser()\r\n\r\n\tconcepts = concept.parse(\"The coffee was hot and tasty.\")\r\n\r\n\tfor c in concepts:\r\n\t\tprint(c)\r\n\t\r\n\t\r\n\t\r\n\t# print(concept.parse(\"Redevelopment of the Darlington #nuclearplant shows some of the work we're famous for in numerous #nuclear projects.\"))\r\n\r\n\t# concepts_per_sentence = concept.parse_all([\r\n\t\t# \"The coffee was hot and tasty.\",\r\n\t\t# \"I enjoyed the time i spent at this new restaurant!\"\r\n\t# ])\r\n\r\n\t# for concepts in concepts_per_sentence:\r\n\t\t# print(concepts)","repo_name":"ndoll1998/KnowBert","sub_path":"src/knowledge/senticnet/concept_parser.py","file_name":"concept_parser.py","file_ext":"py","file_size_in_byte":9011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
+{"seq_id":"40849815068","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport subprocess\n\ndef DigestAllSamples(listFile, outFile):\n with open(listFile, 'r') as predlist:\n preds = predlist.readlines()\n pm = {'peptidematch_jar':'/data/home/hfx365/Software/PeptideMatchCMD_1.0.jar', 'reference_index': '/data2/home/hfx365/Reference/Ensembl/index'}\n allEps = DigestSample(preds, False, pm)\n with open(outFile, 'w') as outf:\n outf.write('peptide_pos\\thla\\tpeptide\\tpep_core\\tOf\\tGp\\tGl\\tIp\\tIl\\tIcore\\tIdentity\\tScore\\tRank\\tCandidate\\tBindLevel\\tPatIndex\\tNovelty\\n')\n outf.write(('\\n').join(allEps))\n\n\ndef DigestSample(toDigest, checkPeptides, pepmatchPaths):\n '''\n Filters the resulting file and strips all information within it down to individual calls.\n :param toDigest: A list of files to be digested for an individual patient.\n :param checkPeptides: Whether to check peptides against the reference proteome.\n :param pepmatchPaths: Paths to the PeptideMatch jar and reference index.\n :return: All Neoantigen Prediction lines free of other information in prediction files.\n '''\n # temp_files = None\n # output_file = \"%s%s.digested.txt\" % (FilePath, toDigest[0].split('/')[len(toDigest[0].split('/')) - 1].split('.epitopes.')[0])\n\n lines = []\n pmInputFile = 'tmp/normal.peptidematch.input'\n pmInput = open(pmInputFile,'w')\n for epFile in toDigest:\n patName = epFile.rstrip('\\n').replace('.netMHCout', '')\n print(\"INFO: Digesting neoantigens for %s\" % (patName))\n with open(epFile.rstrip('\\n'), 'r') as digest_in:\n for line in digest_in:\n line = line.rstrip('\\n')\n try:\n if line.strip()[0].isdigit():\n linespl = line.split()\n if '<=' not in linespl:\n linespl.append('<=\\tN')\n lines.append('\\t'.join(linespl)+'\\t'+patName)\n if checkPeptides:\n pmInput.write('>' + linespl[10] + ';' + linespl[2] + '\\n' + linespl[2] + '\\n')\n except IndexError as e:\n pass\n pmInput.close()\n if checkPeptides:\n pmOutFile = 'tmp/normal.peptidematch.out'\n print('Checking peptides against proteome')\n RunPepmatch(pmInputFile, pepmatchPaths['peptidematch_jar'], pepmatchPaths['reference_index'], pmOutFile)\n lines = ProcessPepmatch(pmOutFile, lines)\n print(\"INFO: Object size of neoantigens: %s Kb\"%(sys.getsizeof(lines)))\n return(lines)\n\n\ndef RunPepmatch(pmInput, pepmatchJar, refIndex, pmfileName):\n with open('logForPeptideMatch.tmp', 'a') as logFile:\n cmd = ['java', '-jar', pepmatchJar, '-a', 'query', '-i', refIndex,'-Q', pmInput, '-o', pmfileName]\n runcmd = subprocess.Popen(cmd, stdout=logFile)\n 
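# block until the PeptideMatch process finishes so its output file is complete\n 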
runcmd.wait()\n\ndef ProcessPepmatch(pmfileName, epLines):\n with open(pmfileName, 'r') as pmFile:\n pmFile.readline()\n pmFile.readline() #read first two header lines\n pmDict = {line.split('\\t')[0] : line.split('\\t')[1].rstrip('\\n') for line in pmFile.readlines() }\n appendedLines = []\n for line in epLines:\n epkey = line.split('\\t')[10]+';'+line.split('\\t')[2]\n novel = int(pmDict[epkey]=='No match')\n appendedLines.append(line+'\\t'+str(novel))\n\n return(appendedLines)\n\nDigestAllSamples('wt_file_list.txt', 'WT.neoantigens.txt')\n","repo_name":"elakatos/NeoepProcessing","sub_path":"random_analysis.py","file_name":"random_analysis.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"17838729146","text":"from ..job import DatasetJob\n\n\nclass ImageDatasetJob(DatasetJob):\n def __init__(self, **kwargs):\n self.image_dims = kwargs.pop('image_dims', None)\n self.resize_mode = kwargs.pop('resize_mode', None)\n\n super(ImageDatasetJob, self).__init__(**kwargs)\n\n @staticmethod\n def resize_mode_choices():\n return [\n ('crop', 'Crop'),\n ('squash', 'Squash'),\n ('fill', 'Fill'),\n ('half_crop', 'Half crop, half fill'),\n ]\n\n def resize_mode_name(self):\n c = dict(self.resize_mode_choices())\n return c[self.resize_mode]","repo_name":"wewe0901/web","sub_path":"web/datasets/images/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71568795798","text":"\n\n'''\nFaça um programa que tenha uma função chamada escreva(),\nque receba um texto qualquer como parâmetro\ne mostre uma mensagem com tamanho adaptável.\n Ex:\n escreva(‘Olá, Mundo!’) Saída:\n ~~~~~~~~~\n Olá, Mundo!\n ~~~~~~~~~\n'''\n#MINHA SOLUÇÃO\ndef escreva(texto):\n print('-'*len(texto))\n print(texto)\n print('-' * len(texto))\n\n\n#--------PROGRAMA PRINCIPAL----------\n\nescreva('CURSO EM VÍDEO')\nescreva('PYTHON')\nescreva('APRENDA PROGRAMAÇÃO')\nescreva('Gustavo Guanabara')\nescreva('Curso de Python no YouTube')\nescreva('Olá, Mundo!')\n\n#SOLUÇÃO CURSO EM VIDEO\ndef escrever(msg):\n tam = len(msg) + 4\n print('~'*tam)\n print(f' {msg}')\n print('~' * tam)\n\n\n#--------PROGRAMA PRINCIPAL----------\n\nescrever('CURSO EM VÍDEO')\nescrever('PYTHON')\nescrever('APRENDA PROGRAMAÇÃO')\nescrever('Gustavo Guanabara')\nescrever('Curso de Python no YouTube')\nescrever('Olá, Mundo!')\n","repo_name":"GuedesPeter/Python-I","sub_path":"ex097.py","file_name":"ex097.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35658068884","text":"\"\"\" required imports for module functionality \"\"\"\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.all_workouts, name='all_workouts'),\n path('workout_detail//', views.workout_detail, name='workout_detail'),\n path('add_workout/', views.add_workout, name='add_workout'),\n path('edit_workout//', views.edit_workout, name='edit_workout'),\n path('delete_workout//', views.delete_workout, name='delete_workout'),\n]\n","repo_name":"RussOakham/Immortal-Fitness-Collective","sub_path":"workout_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"41122811953","text":"import os\nimport pathlib\nfrom langchain import PromptTemplate\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom dotenv import load_dotenv\nfrom langchain.vectorstores import Milvus\nimport pandas as pd\nfrom src.utils.app_util import get_bam_creds, get_numeric_value, get_states\nfrom src.utils.data_util import loadData\nfrom langchain.chains import RetrievalQA\nfrom dotenv import load_dotenv\nfrom genai.schemas import GenerateParams\nfrom genai.extensions.langchain import LangChainInterface\n\n\nPATH = pathlib.Path(__file__).parent.resolve()\nDATA_DIR_PATH = str(PATH) + os.sep + \"data\" + os.sep\nload_dotenv()\n\n\ndef main():\n milvus_host = os.getenv(\"MILVUS_HOST\", None)\n milvus_port = os.getenv(\"MILVUS_PORT\", None)\n\n model_id = \"google/flan-t5-xxl\"\n\n creds = get_bam_creds()\n\n params = GenerateParams(\n decoding_method=\"greedy\",\n max_new_tokens=1000,\n min_new_tokens=10,\n repetition_penalty=1.4)\n\n llm = LangChainInterface(\n model=model_id,\n credentials=creds,\n params=params,\n\n )\n\n docs = []\n docs.extend(loadData(DATA_DIR_PATH, 'state_ut_water_scarcity_original'))\n docs.extend(loadData(DATA_DIR_PATH, 'state_ut_ground_water_original'))\n docs.extend(\n loadData(DATA_DIR_PATH, 'state_ut_water_conservation_harvesting_original'))\n\n print('Data added to Milvus Collection')\n\n embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n vectordb = Milvus.from_documents(\n docs,\n embeddings,\n connection_args={\"host\": milvus_host, \"port\": milvus_port},\n drop_old=True\n )\n\n prompt_template = \"\"\"\n try to answer the question in 1 word and keep answer concise as possible. 
Don't add any random characters\n *********\n {context}\n *********\n \n {question}\n\n Helpful Answer:\n \"\"\"\n PROMPT = PromptTemplate(\n template=prompt_template, input_variables=[\"context\", \"question\"]\n )\n\n retriever = vectordb.as_retriever(\n search_type=\"mmr\", search_kwargs={\"k\": 5})\n qa_chain = RetrievalQA.from_chain_type(\n llm=llm, chain_type=\"stuff\", retriever=retriever, return_source_documents=True,\n chain_type_kwargs={\"prompt\": PROMPT}\n )\n\n finalOutput = []\n states = get_states()\n\n for state in states:\n quest = \"What is total water harvesting in million litres for \"+state\n result = qa_chain({\"query\": quest})\n output = get_numeric_value(result['result'])\n\n quest1 = \"What is total water usage in million litres for \"+state\n result1 = qa_chain({\"query\": quest1})\n output1 = get_numeric_value(result1['result'])\n\n quest2 = \"What is total water consumption in million litres for \"+state\n result2 = qa_chain({\"query\": quest2})\n output2 = get_numeric_value(result2['result'])\n\n quest3 = \"Is there water Scarcity in \"+state\n result3 = qa_chain({\"query\": quest3})\n output3 = 'no'\n if \"yes\" in result3['result'].lower():\n output3 = \"yes\"\n\n finalOutput.append(\n {\"state\": state,\n \"Water Harvesting in million litres\": output,\n \"Water Usage in million litres\": output1,\n \"Water Consumption in million litres\": output2,\n \"Water Scarcity\": output3})\n print('\\n')\n print('\\n')\n print(finalOutput)\n print('\\n')\n print('\\n')\n\n df=pd.DataFrame(finalOutput)\n df.to_excel(DATA_DIR_PATH+\"output_by_watson_ai.xlsx\", index=False)\n\n\nmain()\n","repo_name":"tvelingkar/Aqua-Rescue","sub_path":"insights/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8252343577","text":"import socket\nimport tkinter as tk\nfrom tkinter.constants import DISABLED, END\nfrom message import Message\nimport pickle\nimport threading\n\ndef sendMessage(entry, msgBox):\n mess = Message(\n userName, entry.get()\n )\n \n msgBox.insert(\n END,\n \"\\n\"+\"%s says %s\"%(mess.sender, mess.text)\n )\n\n entry.delete(0, END)\n \n s.send(\n pickle.dumps(mess, protocol=3)\n )\n\n\ndef receiveMessage():\n while True:\n receivedMessage = s.recv(512)\n receivedMessage = pickle.loads(receivedMessage)\n msgBox.insert(\n END,\n \"\\n\"+\"%s says %s\"%(receivedMessage.sender, receivedMessage.text)\n )\n\ndef setupGUI():\n window = tk.Tk()\n\n window.geometry(\"300x300\")\n\n title = tk.Label(window, text=\"Chat Application\", font=(\"Arial\", 14, \"bold\"))\n title.place(relx=0.5, rely=0.1, anchor=tk.CENTER)\n\n global entry, msgBox\n\n entry = tk.Entry(window, width=30)\n entry.place(relx=0.4, rely=0.9, anchor=tk.CENTER)\n\n\n send = tk.Button(window, text=\"Send\", font=(\"Arial\", 10), command=lambda: sendMessage(entry, msgBox))\n send.place(relx=0.8, rely=0.9, anchor=tk.CENTER)\n\n msgBox = tk.Text(window, width=35, height=10, state=DISABLED)#, text=\"Hello World\")\n msgBox.place(relx = 0.5, rely= 0.5, anchor=\"center\")\n\n window.mainloop()\n\nif __name__ == \"__main__\":\n\n userName = input(\"What is your username? 
\")\n \n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n port = 5000\n s.connect(('192.168.86.110', port))\n\n threading.Thread(target=receiveMessage,).start()\n\n setupGUI()","repo_name":"shua1090/chatAppTutorial","sub_path":"chatApp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1922814004","text":"import glob\nimport os\n\nfrom spack.package import *\n\n\nclass Trinotate(Package):\n \"\"\"Trinotate is a comprehensive annotation suite designed for\n automatic functional annotation of transcriptomes, particularly\n de novo assembled transcriptomes, from model or non-model organisms\"\"\"\n\n homepage = \"https://trinotate.github.io/\"\n url = \"https://github.com/Trinotate/Trinotate/archive/Trinotate-v3.1.1.tar.gz\"\n\n version(\"3.2.2\", sha256=\"1c41258a544cccb332f77b73f7397b457d5f3d7ce0038505369aeecc1e0650c2\")\n version(\"3.1.1\", sha256=\"f8af0fa5dbeaaf5a085132cd4ac4f4206b05cc4630f0a17a672c586691f03843\")\n\n depends_on(\"trinity\", type=\"run\")\n depends_on(\"transdecoder\", type=\"run\")\n depends_on(\"sqlite\", type=\"run\")\n depends_on(\"ncbi-rmblastn\", type=\"run\")\n depends_on(\"hmmer\", type=\"run\")\n depends_on(\"perl\", type=\"run\")\n depends_on(\"lighttpd\", type=\"run\")\n depends_on(\"perl-dbi\", type=\"run\")\n depends_on(\"perl-cgi\", type=\"run\")\n depends_on(\"perl-dbd-sqlite\", type=\"run\")\n\n def patch(self):\n with working_dir(join_path(self.stage.source_path, \"admin/util\")):\n perlscripts = glob.glob(\"*.pl\")\n filter_file(\"#!/usr/bin/perl\", \"#!/usr/bin/env perl\", *perlscripts)\n\n # trinotate web generates a config on run but puts it in a bad place\n # this causes issues with permissions; we hack the source to keep it\n # in the calling user's homedir\n\n filter_file(\n '\"$FindBin::RealBin/TrinotateWeb.conf/lighttpd.conf.port',\n '$ENV{\"HOME\"} . 
\"/.trinotate_lighttpd.conf.port',\n \"run_TrinotateWebserver.pl\",\n string=True,\n )\n\n def install(self, spec, prefix):\n # most of the perl modules have local deps, install the whole tree\n mkdirp(prefix.lib)\n install_tree(\".\", join_path(prefix.lib, \"trinotate\"))\n\n mkdirp(prefix.bin)\n os.symlink(\n join_path(prefix.lib, \"trinotate/Trinotate\"), join_path(prefix.bin, \"Trinotate\")\n )\n\n os.symlink(\n join_path(prefix.lib, \"trinotate/run_TrinotateWebserver.pl\"),\n join_path(prefix.bin, \"run_TrinotateWebserver.pl\"),\n )\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/trinotate/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"33131381467","text":"import os\nimport time\n\nimport stem\nimport stem.control\nimport stem.util.proc\nimport stem.util.str_tools\nimport stem.util.system\n\nimport nyx\nimport nyx.curses\nimport nyx.panel\nimport nyx.popups\nimport nyx.tracker\n\nfrom stem.util import conf, log\nfrom nyx import nyx_interface, tor_controller\n\nfrom nyx.curses import RED, GREEN, YELLOW, CYAN, WHITE, BOLD, HIGHLIGHT\n\nMIN_DUAL_COL_WIDTH = 141 # minimum width where we'll show two columns\nSHOW_FD_THRESHOLD = 60 # show file descriptor usage if usage is over this percentage\nUPDATE_RATE = 5 # rate in seconds at which we refresh\n\nCONFIG = conf.config_dict('nyx', {\n 'attr.flag_colors': {},\n 'attr.version_status_colors': {},\n})\n\n\nclass HeaderPanel(nyx.panel.DaemonPanel):\n \"\"\"\n Top area containing tor settings and system information.\n \"\"\"\n\n def __init__(self):\n nyx.panel.DaemonPanel.__init__(self, UPDATE_RATE)\n self._vals = Sampling.create()\n\n self._last_width = nyx.curses.screen_size().width\n self._reported_inactive = False\n self._pause_time = 0\n\n self._message = None\n self._message_attr = []\n\n tor_controller().add_status_listener(self._reset_listener)\n\n def show_message(self, message = None, *attr, **kwargs):\n \"\"\"\n Sets the message displayed at the bottom of the header. If not called with\n anything it clears the override.\n\n :param str message: message to be displayed\n :param list attr: text attributes to apply\n :param int max_wait: seconds to wait for user input, no limit if **None**\n\n :returns: :class:`~nyx.curses.KeyInput` user pressed if provided a\n **max_wait**, **None** otherwise or if prompt was canceled\n \"\"\"\n\n self._message = message\n self._message_attr = attr\n self.redraw()\n\n if 'max_wait' in kwargs:\n user_input = nyx.curses.key_input(kwargs['max_wait'])\n self.show_message() # clear override\n return user_input\n\n def is_wide(self):\n \"\"\"\n True if we should show two columns of information, False otherwise.\n \"\"\"\n\n return self._last_width >= MIN_DUAL_COL_WIDTH\n\n def get_height(self):\n \"\"\"\n Provides the height of the content, which is dynamically determined by the\n panel's maximum width.\n \"\"\"\n\n max_height = nyx.panel.DaemonPanel.get_height(self)\n\n if self._vals.is_relay:\n return min(max_height, 5 if self.is_wide() else 7)\n else:\n return min(max_height, 4 if self.is_wide() else 5)\n\n def send_newnym(self):\n \"\"\"\n Requests a new identity and provides a visual queue.\n \"\"\"\n\n controller = tor_controller()\n\n if not controller.is_newnym_available():\n return\n\n controller.signal(stem.Signal.NEWNYM)\n\n # If we're wide then the newnym label in this panel will give an\n # indication that the signal was sent. 
Otherwise use a msg.\n\n if not self.is_wide():\n self.show_message('Requesting a new identity', HIGHLIGHT, max_wait = 1)\n\n def set_paused(self, is_pause):\n if is_pause:\n self._pause_time = time.time()\n\n def key_handlers(self):\n def _reconnect():\n if self._vals.is_connected:\n return\n\n controller = tor_controller()\n self.show_message('Reconnecting...', HIGHLIGHT)\n\n try:\n try:\n controller.reconnect(chroot_path = nyx.chroot())\n except stem.connection.MissingPassword:\n password = nyx.input_prompt('Controller Password: ')\n\n if password:\n controller.authenticate(password)\n\n log.notice(\"Reconnected to Tor's control port\")\n self.show_message('Tor reconnected', HIGHLIGHT, max_wait = 1)\n except Exception as exc:\n self.show_message('Unable to reconnect (%s)' % exc, HIGHLIGHT, max_wait = 3)\n controller.close()\n\n return (\n nyx.panel.KeyHandler('n', action = self.send_newnym),\n nyx.panel.KeyHandler('r', action = _reconnect),\n )\n\n def _draw(self, subwindow):\n vals = self._vals # local reference to avoid concurrency concerns\n self._last_width = subwindow.width\n is_wide = self.is_wide()\n\n # space available for content\n\n interface = nyx_interface()\n left_width = max(subwindow.width // 2, 77) if is_wide else subwindow.width\n right_width = subwindow.width - left_width\n\n _draw_platform_section(subwindow, 0, 0, left_width, vals)\n\n if vals.is_connected:\n _draw_ports_section(subwindow, 0, 1, left_width, vals)\n else:\n _draw_disconnected(subwindow, 0, 1, vals.last_heartbeat)\n\n if is_wide:\n _draw_resource_usage(subwindow, left_width, 0, right_width, vals, self._pause_time)\n\n if vals.is_relay:\n _draw_fingerprint_and_fd_usage(subwindow, left_width, 1, right_width, vals)\n _draw_flags(subwindow, 0, 2, vals.flags)\n _draw_exit_policy(subwindow, left_width, 2, vals.exit_policy)\n elif vals.is_connected:\n _draw_newnym_option(subwindow, left_width, 1, vals.newnym_wait)\n else:\n _draw_resource_usage(subwindow, 0, 2, left_width, vals, self._pause_time)\n\n if vals.is_relay:\n _draw_fingerprint_and_fd_usage(subwindow, 0, 3, left_width, vals)\n _draw_flags(subwindow, 0, 4, vals.flags)\n\n _draw_status(subwindow, 0, self.get_height() - 1, interface.is_paused(), self._message, *self._message_attr)\n\n def _reset_listener(self, controller, event_type, _):\n self._update()\n\n if event_type == stem.control.State.CLOSED:\n log.notice('Tor control port closed')\n\n def _update(self):\n self._vals = Sampling.create(self._vals)\n\n if self._vals.fd_used and self._vals.fd_limit != -1:\n fd_percent = 100 * self._vals.fd_used // self._vals.fd_limit\n\n if fd_percent >= 90:\n log_msg = \"Tor's file descriptor usage is at %s%%. 
If you run out Tor will be unable to continue functioning.\" % fd_percent\n log.log_once('fd_used_at_ninety_percent', log.WARN, log_msg)\n log.DEDUPLICATION_MESSAGE_IDS.add('fd_used_at_sixty_percent')\n elif fd_percent >= 60:\n log_msg = \"Tor's file descriptor usage is at %s%%.\" % fd_percent\n log.log_once('fd_used_at_sixty_percent', log.NOTICE, log_msg)\n\n if self._vals.is_connected:\n if not self._reported_inactive and (time.time() - self._vals.last_heartbeat) >= 10:\n self._reported_inactive = True\n log.notice('Relay unresponsive (last heartbeat: %s)' % time.ctime(self._vals.last_heartbeat))\n elif self._reported_inactive and (time.time() - self._vals.last_heartbeat) < 10:\n self._reported_inactive = False\n log.notice('Relay resumed')\n\n self.redraw()\n\n\nclass Sampling(object):\n def __init__(self, **attr):\n self._attr = attr\n\n for key, value in attr.items():\n setattr(self, key, value)\n\n @staticmethod\n def create(last_sampling = None):\n controller = tor_controller()\n retrieved = time.time()\n\n pid = controller.get_pid('')\n tor_resources = nyx.tracker.get_resource_tracker().get_value()\n nyx_total_cpu_time = sum(os.times()[:3], stem.util.system.SYSTEM_CALL_TIME)\n\n or_listeners = controller.get_listeners(stem.control.Listener.OR, [])\n control_listeners = controller.get_listeners(stem.control.Listener.CONTROL, [])\n my_router_status_entry = nyx.tracker.get_consensus_tracker().my_router_status_entry()\n\n if controller.get_conf('HashedControlPassword', None):\n auth_type = 'password'\n elif controller.get_conf('CookieAuthentication', None) == '1':\n auth_type = 'cookie'\n else:\n auth_type = 'open'\n\n try:\n fd_used = stem.util.proc.file_descriptors_used(pid)\n except IOError:\n fd_used = None\n\n if last_sampling:\n nyx_cpu_delta = nyx_total_cpu_time - last_sampling.nyx_total_cpu_time\n nyx_time_delta = retrieved - last_sampling.retrieved\n nyx_cpu = nyx_cpu_delta / nyx_time_delta\n else:\n nyx_cpu = 0.0\n\n attr = {\n 'retrieved': retrieved,\n 'is_connected': controller.is_alive(),\n 'connection_time': controller.connection_time(),\n 'last_heartbeat': controller.get_latest_heartbeat(),\n\n 'fingerprint': controller.get_info('fingerprint', 'Unknown'),\n 'nickname': controller.get_conf('Nickname', ''),\n 'newnym_wait': controller.get_newnym_wait(),\n 'exit_policy': controller.get_exit_policy(None),\n 'flags': getattr(my_router_status_entry, 'flags', []),\n\n 'version': str(controller.get_version('Unknown')).split()[0],\n 'version_status': controller.get_info('status/version/current', 'Unknown'),\n\n 'address': or_listeners[0][0] if (or_listeners and or_listeners[0][0] != '0.0.0.0') else controller.get_info('address', 'Unknown'),\n 'or_port': or_listeners[0][1] if or_listeners else '',\n 'dir_port': controller.get_conf('DirPort', '0'),\n 'control_port': str(control_listeners[0][1]) if control_listeners else None,\n 'socket_path': controller.get_conf('ControlSocket', None),\n 'is_relay': bool(or_listeners),\n\n 'auth_type': auth_type,\n 'pid': pid,\n 'start_time': controller.get_start_time(0),\n 'fd_limit': int(controller.get_info('process/descriptor-limit', '-1')),\n 'fd_used': fd_used,\n\n 'nyx_total_cpu_time': nyx_total_cpu_time,\n 'tor_cpu': '%0.1f' % (100 * tor_resources.cpu_sample),\n 'nyx_cpu': '%0.1f' % (100 * nyx_cpu),\n 'memory': stem.util.str_tools.size_label(tor_resources.memory_bytes) if tor_resources.memory_bytes > 0 else 0,\n 'memory_percent': '%0.1f' % (100 * tor_resources.memory_percent),\n\n 'hostname': os.uname()[1],\n 'platform': '%s %s' % 
(os.uname()[0], os.uname()[2]), # [platform name] [version]\n }\n\n return Sampling(**attr)\n\n def format(self, message, crop_width = None):\n formatted_msg = message.format(**self._attr)\n\n if crop_width is not None:\n formatted_msg = stem.util.str_tools.crop(formatted_msg, crop_width)\n\n return formatted_msg\n\n\ndef _draw_platform_section(subwindow, x, y, width, vals):\n \"\"\"\n Section providing the user's hostname, platform, and version information...\n\n nyx - odin (Linux 3.5.0-52-generic) Tor 0.2.5.1-alpha-dev (unrecommended)\n |------ platform (40 characters) ------| |----------- tor version -----------|\n \"\"\"\n\n initial_x, space_left = x, min(width, 40)\n\n x = subwindow.addstr(x, y, vals.format('nyx - {hostname}', space_left))\n space_left -= x - initial_x\n\n if space_left >= 10:\n subwindow.addstr(x, y, ' (%s)' % vals.format('{platform}', space_left - 3))\n\n x, space_left = initial_x + 43, width - 43\n\n if vals.version != 'Unknown' and space_left >= 10:\n x = subwindow.addstr(x, y, vals.format('Tor {version}', space_left))\n space_left -= x - 43 - initial_x\n\n if space_left >= 7 + len(vals.version_status):\n version_color = CONFIG['attr.version_status_colors'].get(vals.version_status, WHITE)\n\n x = subwindow.addstr(x, y, ' (')\n x = subwindow.addstr(x, y, vals.version_status, version_color)\n subwindow.addstr(x, y, ')')\n\n\ndef _draw_ports_section(subwindow, x, y, width, vals):\n \"\"\"\n Section providing our nickname, address, and port information...\n\n Unnamed - 0.0.0.0:7000, Control Port (cookie): 9051\n \"\"\"\n\n if not vals.is_relay:\n x = subwindow.addstr(x, y, 'Relaying Disabled', CYAN)\n else:\n x = subwindow.addstr(x, y, vals.format('{nickname} - {address}:{or_port}'))\n\n if vals.dir_port != '0':\n x = subwindow.addstr(x, y, vals.format(', Dir Port: {dir_port}'))\n\n if vals.control_port:\n if width >= x + 19 + len(vals.control_port) + len(vals.auth_type):\n auth_color = RED if vals.auth_type == 'open' else GREEN\n\n x = subwindow.addstr(x, y, ', Control Port (')\n x = subwindow.addstr(x, y, vals.auth_type, auth_color)\n subwindow.addstr(x, y, vals.format('): {control_port}'))\n else:\n subwindow.addstr(x, y, vals.format(', Control Port: {control_port}'))\n elif vals.socket_path:\n subwindow.addstr(x, y, vals.format(', Control Socket: {socket_path}'))\n\n\ndef _draw_disconnected(subwindow, x, y, last_heartbeat):\n \"\"\"\n Message indicating that tor is disconnected...\n\n Tor Disconnected (15:21 07/13/2014, press r to reconnect)\n \"\"\"\n\n x = subwindow.addstr(x, y, 'Tor Disconnected', RED, BOLD)\n last_heartbeat_str = time.strftime('%H:%M %m/%d/%Y', time.localtime(last_heartbeat))\n subwindow.addstr(x, y, ' (%s, press r to reconnect)' % last_heartbeat_str)\n\n\ndef _draw_resource_usage(subwindow, x, y, width, vals, pause_time):\n \"\"\"\n System resource usage of the tor process...\n\n cpu: 0.0% tor, 1.0% nyx mem: 0 (0.0%) pid: 16329 uptime: 12-20:42:07\n \"\"\"\n\n if vals.start_time:\n if not vals.is_connected:\n now = vals.connection_time\n elif pause_time:\n now = pause_time\n else:\n now = time.time()\n\n uptime = stem.util.str_tools.short_time_label(max(0, now - vals.start_time))\n else:\n uptime = ''\n\n sys_fields = (\n (0, vals.format('cpu: {tor_cpu}% tor, {nyx_cpu}% nyx')),\n (27, vals.format('mem: {memory} ({memory_percent}%)')),\n (47, vals.format('pid: {pid}')),\n (59, 'uptime: %s' % uptime),\n )\n\n for (start, label) in sys_fields:\n if width >= start + len(label):\n subwindow.addstr(x + start, y, label)\n else:\n break\n\n\ndef 
_draw_fingerprint_and_fd_usage(subwindow, x, y, width, vals):\n \"\"\"\n Presents our fingerprint, and our file descriptor usage if we're running\n out...\n\n fingerprint: 1A94D1A794FCB2F8B6CBC179EF8FDD4008A98D3B, file desc: 900 / 1000 (90%)\n \"\"\"\n\n initial_x, space_left = x, width\n\n x = subwindow.addstr(x, y, vals.format('fingerprint: {fingerprint}', width))\n space_left -= x - initial_x\n\n if space_left >= 30 and vals.fd_used and vals.fd_limit != -1:\n fd_percent = 100 * vals.fd_used / vals.fd_limit\n\n if fd_percent >= SHOW_FD_THRESHOLD:\n if fd_percent >= 95:\n percentage_format = (RED, BOLD)\n elif fd_percent >= 90:\n percentage_format = (RED,)\n elif fd_percent >= 60:\n percentage_format = (YELLOW,)\n else:\n percentage_format = ()\n\n x = subwindow.addstr(x, y, ', file descriptors' if space_left >= 37 else ', file desc')\n x = subwindow.addstr(x, y, vals.format(': {fd_used} / {fd_limit} ('))\n x = subwindow.addstr(x, y, '%i%%' % fd_percent, *percentage_format)\n subwindow.addstr(x, y, ')')\n\n\ndef _draw_flags(subwindow, x, y, flags):\n \"\"\"\n Presents flags held by our relay...\n\n flags: Running, Valid\n \"\"\"\n\n x = subwindow.addstr(x, y, 'flags: ')\n\n if flags:\n for i, flag in enumerate(flags):\n flag_color = CONFIG['attr.flag_colors'].get(flag, WHITE)\n x = subwindow.addstr(x, y, flag, flag_color, BOLD)\n\n if i < len(flags) - 1:\n x = subwindow.addstr(x, y, ', ')\n else:\n subwindow.addstr(x, y, 'none', CYAN, BOLD)\n\n\ndef _draw_exit_policy(subwindow, x, y, exit_policy):\n \"\"\"\n Presents our exit policy...\n\n exit policy: reject *:*\n \"\"\"\n\n x = subwindow.addstr(x, y, 'exit policy: ')\n\n if not exit_policy:\n return\n\n rules = list(exit_policy.strip_private().strip_default())\n\n for i, rule in enumerate(rules):\n policy_color = GREEN if rule.is_accept else RED\n x = subwindow.addstr(x, y, str(rule), policy_color, BOLD)\n\n if i < len(rules) - 1:\n x = subwindow.addstr(x, y, ', ')\n\n if exit_policy.has_default():\n if rules:\n x = subwindow.addstr(x, y, ', ')\n\n subwindow.addstr(x, y, '', CYAN, BOLD)\n\n\ndef _draw_newnym_option(subwindow, x, y, newnym_wait):\n \"\"\"\n Provide a notice for requiesting a new identity, and time until it's next\n available if in the process of building circuits.\n \"\"\"\n\n if newnym_wait == 0:\n subwindow.addstr(x, y, \"press 'n' for a new identity\")\n else:\n plural = 's' if newnym_wait > 1 else ''\n subwindow.addstr(x, y, 'building circuits, available again in %i second%s' % (newnym_wait, plural))\n\n\ndef _draw_status(subwindow, x, y, is_paused, message, *attr):\n \"\"\"\n Provides general usage information or a custom message.\n \"\"\"\n\n if message:\n subwindow.addstr(x, y, message, *attr)\n elif not is_paused:\n interface = nyx_interface()\n subwindow.addstr(x, y, 'page %i / %i - m: menu, p: pause, h: page help, q: quit' % (interface.get_page() + 1, interface.page_count()))\n else:\n subwindow.addstr(x, y, 'Paused', HIGHLIGHT)\n","repo_name":"torproject/nyx","sub_path":"nyx/panel/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":16082,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"73"} +{"seq_id":"19164643305","text":"def to_digit(input):\n digit = []\n for char in input:\n digit.append(list_for_conv.index(char))\n return digit\n\nlist_for_conv = (\"0123456789ABCDEF\")\n\ndef switch_base(base, numbr, out_base):\n number = 0\n for index in range(len(numbr)):\n number += base ** index * numbr[-index - 1]\n output = []\n while number != 0:\n 
output.append(number % out_base)\n number = number // out_base\n return output[::-1]\n\n\ndef switch_two(numbr):\n out = \"\"\n for digit in numbr:\n out += list_for_conv[digit]\n return out\n\n\nnumber = input(\"number: \")\nbase = int(input(\"base: \"))\nnumber = to_digit(number)\nout_base = int(\n input(\"conv_base:\")\n)\nresult = switch_base(base, number, out_base)\n\nprint(switch_two(result))","repo_name":"Dateifi/aicourse","sub_path":"baseN.py","file_name":"baseN.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22917846904","text":"import re\nimport logging\nimport time\nfrom collections import namedtuple\n\nfrom . import utils\nimport requests\nfrom selenium import webdriver\nfrom selenium.common.exceptions import ElementNotInteractableException\n\n\n# RSA 公钥\nSHU_RSA_PUBKEY = \"\"\"\n-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDl/aCgRl9f/4ON9MewoVnV58OL\nOU2ALBi2FKc5yIsfSpivKxe7A6FitJjHva3WpM7gvVOinMehp6if2UNIkbaN+plW\nf5IwqEVxsNZpeixc4GsbY9dXEk3WtRjwGSyDLySzEESH/kpJVoxO7ijRYqU+2oSR\nwTBNePOk1H+LRQokgQIDAQAB\n-----END PUBLIC KEY-----\"\"\"\n\n# group: 1 - description, 2 - url location\nHISTORY_RE = re.compile(r'\\[\"\",\"(.*?)\",\\d+,\"\",\"(.*?)\",\"\",(?:true|false)]')\nRecord = namedtuple(\"Record\", [\"complete\", \"desc\", \"location\"])\nQuestion = namedtuple(\"Question\", [\"desc\", \"xpath\", \"type_\"])\nFORM = [\n Question(\n desc=\"我承诺,以下报送内容真实有效并可用于学校管理需要!\",\n xpath=\"//div[@id='p1_ChengNuo']/div[@class='f-field-body-cell']//i\",\n type_=\"checkbox\"\n ),\n Question(\n desc=\"当前身体状况\",\n xpath=\"//div[@id='p1_DangQSTZK']//td[1]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n ),\n Question(\n desc=\"当天是否在上海\",\n xpath=\"//div[@id='p1_ShiFSH']//td[1]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n ),\n Question(\n desc=\"当天是否住学校\",\n xpath=\"//div[@id='p1_ShiFZX']//td[1]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n ),\n Question(\n desc=\"是否家庭地址\",\n xpath=\"//div[@id='p1_ShiFZJ']//td[2]/div/div[@class='f-field-body-cell']//i\",\n type_=\"radio\"\n )\n]\n\nSAFETY_CHOICE_XPATH = \"//div[@id='p1_pnlDangSZS_DangSZS']//div[@id='fineui_%d']\"\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\"\n}\nlogger = logging.getLogger(__name__)\n\n\nclass User:\n def __init__(self, username, passwd, chrome_driver):\n self.username = username\n self.passwd = passwd\n self.chrome_driver= chrome_driver\n self.session = requests.Session()\n self.session.headers.update(headers)\n # proxies = {\n # \"http\": \"http://127.0.0.1:7890\",\n # \"https\": \"http://127.0.0.1:7890\"\n # }\n # self.session.proxies.update(proxies)\n # self.session.verify = False\n\n def set_cookie(self, oauth_session, asp_session_id, ncov2019selfreport):\n self.session.cookies.set(\"SHU_OAUTH2_SESSION\", oauth_session, domain=\"newsso.shu.edu.cn\")\n self.session.cookies.set(\"ASP.NET_SessionId\", asp_session_id, domain=\"selfreport.shu.edu.cn\")\n self.session.cookies.set(\".ncov2019selfreport\", ncov2019selfreport, domain=\"selfreport.shu.edu.cn\")\n\n def login(self):\n session = self.session\n r = session.get(\"https://selfreport.shu.edu.cn/Default.aspx\")\n url = r.url\n\n # encrypt password\n password = utils.rsa_encrypt(SHU_RSA_PUBKEY, self.passwd.encode())\n data = dict(username=self.username, 
password=password)\n\n r = session.post(url, data, headers={\"Referer\": url})\n if r.status_code == 200 and \"学工号:\" in r.text:\n logger.debug(\"Login success.\")\n return True\n else:\n logger.error(\"Login failed. status_code%s, url:%s\\ntext:%s\", r.status_code, r.url, r.text)\n return False\n\n def fetch_history(self):\n \"\"\"\n 获取历史填报记录\n\n :return: [Record(False, \"2021-06-17(未填报,请点击此处补报)\", \"/DayReport.aspx?day=2021-06-17\"), ...]\n \"\"\"\n r = self.session.get(\"https://selfreport.shu.edu.cn/ReportHistory.aspx\")\n if not r.url.startswith(\"https://selfreport.shu.edu.cn/\"):\n raise RuntimeError(\"invalid cookie\")\n js_object = utils.substring(r.text, \"f2_state=\", \";\")\n history = HISTORY_RE.findall(js_object)\n ret = []\n for desc, url in history:\n if \"未填报\" in desc:\n ret.append(Record(complete=False, desc=desc, location=url))\n else:\n ret.append(Record(complete=True, desc=desc, location=url))\n return ret\n\n def finish_today(self):\n \"\"\"\n 调用 selenium 完成当天的 每日一报\n :return: True on success, False on failed\n \"\"\"\n options = webdriver.ChromeOptions()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--headless\")\n if self.chrome_driver.startswith(\"http\"):\n chrome = webdriver.Remote\n else:\n chrome = webdriver.Chrome\n with chrome(self.chrome_driver, desired_capabilities=options.to_capabilities()) \\\n as driver:\n # 设置 selfreport.shu.edu.cn cookie\n driver.get(\"https://selfreport.shu.edu.cn/res/css/slick.css\")\n # cookies_name = [\"ASP.NET_SessionId\", \".ncov2019selfreport\"]\n cookies_name = [\".ncov2019selfreport\"]\n cookiejar = self.session.cookies\n for name in cookies_name:\n driver.add_cookie(dict(name=name, value=cookiejar.get(name, domain=\"selfreport.shu.edu.cn\")))\n\n # 设置 newsso.shu.edu.cn cookie\n driver.get(\"https://newsso.shu.edu.cn/static/css/alert-a1b99b3681.css\")\n driver.add_cookie(dict(\n name=\"SHU_OAUTH2_SESSION\",\n value=cookiejar.get(\"SHU_OAUTH2_SESSION\", domain=\"newsso.shu.edu.cn\")\n ))\n # 打开填报页面\n # TODO 处理“历史填报未完成弹窗”\n\n # 检测是否正在加载\n def is_loading():\n js = \"\"\"return (function(){\n let loading = $(\"#f_ajax_loading\"); \n if(loading.length > 0) {return loading.is(\":visible\");} \n return false; \n })(); \n \"\"\"\n return driver.execute_script(js)\n\n # 等待加载完成\n def waiting_loading():\n time.sleep(1)\n while is_loading():\n print(\"Waiting for loading...\")\n time.sleep(1)\n\n # 获取安全知识答案\n def get_answer():\n \"\"\"\n :return: (0, 1) # 0 表示 A, 1 表示 B...\n \"\"\"\n element = driver.find_element_by_xpath(\"//div[@id='p1_pnlDangSZS_ckda-inputEl']/a\")\n text = element.get_attribute(\"onclick\") # javascript:alert('参考答案:A');\n start = text.index(\":\") + 1\n end = text.index(\"'\", start)\n answer = text[start:end]\n answer = map(lambda x: ord(x) - ord('A'), list(answer))\n return answer\n\n driver.get(\"https://selfreport.shu.edu.cn/DayReport.aspx\")\n\n if not driver.current_url.startswith(\"https://selfreport.shu.edu.cn/DayReport.aspx\"):\n logger.info(\"invalid cookies, page redirect to: %s\", driver.current_url)\n return False\n\n for ques in FORM:\n waiting_loading()\n element = driver.find_element_by_xpath(ques.xpath)\n try:\n element.click()\n except ElementNotInteractableException:\n logger.error(\"ElementNotInteractableException: %s\", ques.desc)\n\n # 回答 消防安全问题\n for ans in get_answer():\n element = driver.find_element_by_xpath(SAFETY_CHOICE_XPATH % ans)\n try:\n element.click()\n except ElementNotInteractableException:\n logger.error(\"ElementNotInteractableException: failed to click safety 
question choice\")\n\n # 校验表单,这里没有判断 消防安全问题是否已回答\n is_ok = driver.execute_script(\"return F.validateForm('p1', '_self', true, false);\")\n if not is_ok:\n logger.warning(\"表单未完成\")\n return False\n else:\n logger.info(\"表单校验完成\")\n\n # 提交表单\n driver.execute_script(\"__doPostBack('p1$ctl02$btnSubmit', '');\")\n waiting_loading()\n if \"日报信息提交成功\" in driver.page_source:\n logger.info(\"提交成功\")\n return True\n else:\n logger.warning(\"提交失败\")\n logger.warning(\"Current url: %s, Page Source:\\n%s\", driver.current_url, driver.page_source)\n return False\n","repo_name":"hwenwur/SHU-Selfreport","sub_path":"selfreport/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31201782675","text":"import random\nimport string \n\nlength= 12\nrandomString=''.join(random.choice(string.ascii_letters) for i in range(length))\nrandomString2=''.join(random.choice(string.ascii_letters) for i in range(length))\n\n\n# CCI q1.1\ndef isCharUnique(str):\n\tvar=0\n\tfor i in str:\n\t\tvar+=1\n\t\tcount = str.count(i)\n\t\tif count>1:\n\t\t\treturn False\n\t\t\tbreak\n\t\telif var>=len(str):\n\t\t\treturn True \n\t\t\n#CCI q1.3\ndef removeChar(str):\n\tx=0\n\ty=len(str)\n\tfor i in str:\n\t\tx+=1\n\t\tcount = str.count(i)\n\t\tif count>1:\n\t\t\tvar=str.replace(i, '',count-1)\n\t\t\tstr=var \n\t\tif x==y:\n\t\t\treturn str \n\n\n#CCI q1.4\ndef isAnagram(s1,s2):\n\ta=list(s1)\n\tb=list(s2)\n\ta.sort()\n\tb.sort()\n\tif a==b:\n\t\treturn True \n\telse:\n\t\treturn False \n\t\n#CCI q1.5\ndef repl_(str):\n var=str.replace(' ', '%20')\n str=var\n return var\n\n\ndef repl(str):\n lst = list(str)\n temp = list()\n for i in lst: \n if i == ' ': \n i = '%20'\n temp.append(i)\n else:\n temp.append(i)\n return ''.join(temp)\n\n\n\n#CCI q1.6\n#rotate NxN matrix 90 degrees, clockwise \ndef rotateMatrixClockwise(originalMatrix):\n newMatrix = [i[:] for i in originalMatrix]\n length = len(originalMatrix[0])\n \n for row in range(0,length):\n for column in range(0,length):\n newMatrix[column][length-(row+1)] = originalMatrix[row][column] \n return newMatrix\n\n#rotate 90 degrees, counterclockwise \ndef rotateMatrixCounterclockwise(originalMatrix):\n newMatrix = [i[:] for i in originalMatrix]\n length = len(originalMatrix[0])\n \n for row in range(0,length):\n for column in range(0,length):\n newMatrix[length-(column+1)][row] = originalMatrix[row][column]\n return newMatrix\n \t\n# CCI 1.7 Write an algorithm such that if an element in an MxN matrix is 0, its entire row and column is set to 0 \n#TODO: refactor \nzeros=[]\ndef locateZeros(matrix):\n for row, rowElements in enumerate(matrix):\n for column, colElements in enumerate(rowElements):\n if colElements == 0:\n zeros.append((row,column))\n return zeros \n\ndef setZeros(matrix):\n zeros=locateZeros(matrix)\n for row, rowElements in enumerate(matrix):\n for column, colElements in enumerate(rowElements):\n if row in (i[0] for i in zeros) or column in (i[1] for i in zeros):\n matrix[row][column] = 0\n return matrix\n\n\n\n#CCI q1.8\ndef isSubstring(str1, str2):\n\tvar=str1.find(str2)\n#find: index if found, -1 otherwise\n\tif var>-1:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef isRotation(st1,st2):\n\t\tstr=st1+st1\n\t\tif isSubstring(str,st2)==True:\n\t\t\treturn True \n\t\telif isSubstring(str,st2)==False:\n\t\t\treturn False 
\n\n\n\n\n\n\n\n","repo_name":"acoltelli/Algorithms-DataStructures","sub_path":"Ch1Solutions.py","file_name":"Ch1Solutions.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"28750646535","text":"import datetime\nimport logging\n\nfrom odoo import models, fields, api\n\nSELECTION_BAND = [\n (\"160m\", \"160m\"),\n (\"80m\", \"80m\"),\n (\"40m\", \"40m\"),\n (\"30m\", \"30m\"),\n (\"20m\", \"20m\"),\n (\"17m\", \"17m\"),\n (\"15m\", \"15m\"),\n (\"12m\", \"12m\"),\n (\"10m\", \"10m\")\n]\n\nSELECTION_MODE = [\n (\"CW\", \"CW\"),\n (\"SSB\", \"SSB\"),\n (\"DIGI\", \"DIGI\")\n]\n\nSELECTION_DUPE = [\n (\"normal\", \"Normal\"),\n (\"dupe\", \"DUPE\")\n]\n\n_logger = logging.getLogger(__name__)\n\n\nclass QSO(models.Model):\n _name = \"award_naval.qso\"\n _order = \"ts ASC\"\n\n callsign = fields.Char(\n string=\"Callsign\",\n required=True\n )\n\n ts = fields.Datetime(\n string=\"Date & Time\",\n required=True\n )\n\n band = fields.Selection(\n string=\"Band\",\n selection=SELECTION_BAND,\n required=True\n )\n\n mode = fields.Selection(\n string=\"Mode\",\n selection=SELECTION_MODE,\n required=True\n )\n\n reference = fields.Char(\n string=\"Reference\"\n )\n\n operator = fields.Char(\n string=\"Operator\",\n required=True\n )\n\n rawdata = fields.Char(\n string=\"Raw data\",\n readonly=True\n )\n\n reference_auto = fields.Char(\n string=\"Automatic reference\",\n readonly=True\n )\n\n dupe = fields.Selection(\n string=\"DUPE\",\n selection=SELECTION_DUPE,\n readonly=True,\n required=True,\n default=\"normal\"\n )\n\n rst_tx = fields.Char(\n string=\"RST TX\",\n compute=\"compute_rst\",\n readonly=True\n )\n\n rst_rx = fields.Char(\n string=\"RST RX\",\n compute=\"compute_rst\",\n readonly=True\n )\n\n country_id = fields.Many2one(\n string=\"Country\",\n comodel_name=\"hamutility.country\",\n compute=\"compute_country\",\n readonly=True,\n store=True\n )\n\n ts_date = fields.Date(\n string=\"Date\",\n compute=\"compute_ts_date\",\n readonly=True,\n store=True\n )\n\n ts_time = fields.Char(\n string=\"Time\",\n compute=\"compute_ts_date_time\",\n readonly=True,\n store=True\n )\n\n @api.depends(\"mode\")\n def compute_rst(self):\n for rec in self:\n rst = rec.mode in [\"CW\", \"DIGI\"] and \"599\" or \"59\"\n rec.rst_tx = rst\n rec.rst_rx = rst\n\n @api.depends(\"callsign\")\n def compute_country(self):\n utility_callsign_onj = self.env[\"hamutility.utility_callsign\"]\n\n for rec in self:\n if rec and rec.callsign:\n rec.country_id = utility_callsign_onj.get_country(rec.callsign)\n\n @api.depends(\"ts\")\n def compute_ts_date_time(self):\n for rec in self:\n rec.ts_date = rec.ts.strftime(\"%Y-%m-%d\")\n rec.ts_time = rec.ts.strftime(\"%H%M%S\")\n\n @api.model\n def action_update_reference_auto(self):\n _logger.info(\"Updating Auto Reference\")\n\n armi_obj = self.env[\"awards_naval.armi\"]\n qso_obj = self.env[\"award_naval.qso\"]\n\n armi_ids = armi_obj.search([])\n _logger.info(\"ARMI records count: %d\" % len(armi_ids))\n\n qso_ids = qso_obj.search([])\n qso_count = len(qso_ids)\n _logger.info(\"QSO count: %d\" % qso_count)\n\n count = 0\n for qso_id in qso_ids:\n qso_callsign = qso_id.callsign.upper()\n\n for armi_id in armi_ids:\n armi_callsign = armi_id.callsign.upper()\n\n if armi_callsign in qso_callsign:\n reference = armi_id.reference\n qso_id.reference_auto = reference\n _logger.info(\"Found %s for %s\" % (reference, qso_callsign))\n count += 1\n continue\n\n 
_logger.info(\"Registered %d references in %d QSO\" % (count, qso_count))\n\n @api.model\n def action_update_missing_reference(self):\n _logger.info(\"Updating Missing Reference\")\n\n qso_obj = self.env[\"award_naval.qso\"]\n\n qso_ids = qso_obj.search([\n (\"reference\", \"!=\", None)\n ])\n\n ref_dict = {}\n for qso_id in qso_ids:\n ref_dict[qso_id.callsign] = qso_id.reference\n\n for callsign, reference in ref_dict.items():\n qso_ids = qso_obj.search([\n (\"callsign\", \"ilike\", callsign),\n \"|\",\n (\"reference\", \"=\", False),\n (\"reference\", \"=\", \"\")\n ])\n\n qso_ids.write({\n \"reference\": reference\n })\n\n @api.model\n def action_compute_dupe(self):\n qso_ids = self.search([])\n qso_ids.write({\"dupe\": \"normal\"})\n\n qso_first = self.search([], limit=1, order=\"ts ASC\")\n qso_last = self.search([], limit=1, order=\"ts DESC\")\n\n datetime_first = qso_first.ts.replace(hour=0, minute=0, second=0, microsecond=0)\n datetime_last = qso_last.ts.replace(hour=23, minute=59, second=59, microsecond=999999)\n\n datetime_interval = datetime_last - datetime_first\n days = datetime_interval.days + 1\n\n qso_ids_group = self.read_group(domain=[], fields=[\"callsign\"], groupby=[\"callsign\"])\n callsign_list = [x[\"callsign\"] for x in qso_ids_group if x[\"callsign\"]]\n\n for callsign in callsign_list:\n for i in range(0, days):\n datetime_day = datetime_first + datetime.timedelta(days=i)\n\n self.check_dupe(callsign, datetime_day, \"CW\")\n self.check_dupe(callsign, datetime_day, \"SSB\")\n self.check_dupe(callsign, datetime_day, \"DIGI\")\n\n def check_dupe(self, callsign, datetime_day, mode):\n ts_start = datetime_day.strftime(\"%Y-%m-%d 00:00:00\")\n ts_end = datetime_day.strftime(\"%Y-%m-%d 23:59:59\")\n\n qso_ids = self.search([\n (\"callsign\", \"=\", callsign),\n (\"mode\", \"=\", mode),\n (\"ts\", \">=\", ts_start),\n (\"ts\", \"<=\", ts_end),\n ], order=\"ts ASC\")\n\n if len(qso_ids) > 1:\n _logger.info(\"DUPE for %s on %s in %s with %d QSO\" % (\n callsign, datetime_day.strftime(\"%Y-%m-%d\"), mode, len(qso_ids)\n ))\n\n count = 0\n for qso_id in qso_ids:\n qso_id.dupe = \"dupe\" if count > 0 else \"normal\"\n count += 1\n","repo_name":"sardylan/stationlog","sub_path":"awards/awards_naval/models/qso.py","file_name":"qso.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22666401843","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('questionnaire', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Run',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('runid', models.CharField(max_length=32, null=True)),\n ],\n ),\n migrations.AddField(\n model_name='answer',\n name='run',\n field=models.ForeignKey(on_delete=models.CASCADE, related_name='answers', to='questionnaire.Run', null=True),\n ),\n migrations.AddField(\n model_name='runinfo',\n name='run',\n field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_infos', to='questionnaire.Run', null=True),\n ),\n migrations.AddField(\n model_name='runinfohistory',\n name='run',\n field=models.ForeignKey(on_delete=models.CASCADE, related_name='run_info_histories', to='questionnaire.Run', null=True),\n ),\n 
]\n","repo_name":"Apeirogon-inc/survey-app","sub_path":"core/migrations/0002_auto_20160929_1320.py","file_name":"0002_auto_20160929_1320.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70630072556","text":"class Solution:\n def maxDepth(self, root: 'Node') -> int:\n self.maxes = []\n self.travel(root, 0)\n if len(self.maxes) > 0:\n return max(self.maxes)\n else:\n return 0\n \n def travel(self, root, count):\n if root:\n count += 1\n for i in root.children:\n self.travel(i, count)\n if len(root.children) == 0:\n self.maxes.append(count)","repo_name":"slaytr/leetcode-solutions","sub_path":"maximumdepthnarytree.py","file_name":"maximumdepthnarytree.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"21538815712","text":"import json\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\n\n\ngeodict = {}\ndef initialize():\n global geodict\n with open('./data/melbourne.geojson') as json_file:\n data = json.load(json_file)\n for p in data['features']:\n id = p['id']\n name = p['properties']['SA2_NAME16']\n geodict[id] = {}\n geodict[id]['name'] = name\n if p['geometry']['type'] == 'Polygon':\n geodict[id]['type'] = 'polygon'\n coorlists = p['geometry']['coordinates'][0]\n polygonlist = []\n for coor in coorlists:\n polygonlist.append((coor[0],coor[1]))\n polygon = Polygon(polygonlist)\n geodict[id]['polygon'] = polygon\n else:\n geodict[id]['type'] = 'MultiPolygon'\n geodict[id]['polygons'] = []\n for polyCoors in p['geometry']['coordinates']:\n polygonlist = []\n for coor in polyCoors[0]:\n polygonlist.append((coor[0],coor[1]))\n geodict[id]['polygons'].append(Polygon(polygonlist))\n\n\ndef whichSurburb(longtitude, latitude):\n point = Point(longtitude, latitude)\n for i in geodict.keys():\n if geodict[i]['type'] == 'polygon':\n if geodict[i]['polygon'].contains(point):\n return i\n else:\n for polygon in geodict[i]['polygons']:\n if polygon.contains(point):\n return i\n return -1\n\n\ndef handle_raw(raw):\n longtitude = raw['coordinates']['coordinates'][0]\n latitude = raw['coordinates']['coordinates'][1]\n return whichSurburb(longtitude, latitude), longtitude, latitude\n","repo_name":"CaviarChen/CCC_Project_2","sub_path":"preprocessor/surburbHandler.py","file_name":"surburbHandler.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"71322605676","text":"from models import *\r\n\r\ndef get_time_series():\r\n\r\n #Se possivel, fazer serie temporal para analisar consumo.\r\n \r\n return\r\n\r\n\r\ndef is_alerta(leitura):\r\n\r\n if len(Alerta_Sensor.select().where(Alerta_Sensor.fk_leitura == leitura)) > 0 :\r\n return True\r\n \r\n return False\r\n\r\n\r\ndef analysis():\r\n\r\n sensors = [s for s in Sensor.select()]\r\n\r\n min_consumo = 50\r\n\r\n for s in range(len(sensors)):\r\n \r\n leituras = [l for l in Leitura.select().where(Leitura.fk_sensor == s+1)]\r\n\r\n for i in range(len(leituras)): \r\n\r\n if i > 1:\r\n\r\n j = i-1\r\n\r\n comparacao = leituras[j]\r\n\r\n while(j >= 1):\r\n \r\n if(is_alerta(comparacao)):\r\n j-=1\r\n comparacao = leituras[j] \r\n\r\n else: break\r\n\r\n if (leituras[i].consumo >= 1.2* comparacao.consumo ) and (comparacao.consumo > min_consumo):\r\n\r\n if( not is_alerta(leituras[i]) ):\r\n 
Alerta_Sensor.create(fk_leitura=leituras[i])","repo_name":"guiatsu/Caca_Vazamentos","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"5312547484","text":"#Write a Python program to display your details like name, age, address in three different lines\r\n\r\ndef personal_details():\r\n name = \"Lam\"\r\n age = 20\r\n address = \"BaVi - HaNoi\"\r\n print(\"Name : {}\\nAge : {}\\nAddress : {}\".format(name, age, address))\r\n\r\npersonal_details()\r\n\r\n","repo_name":"manhlam/python-core","sub_path":"bai37_w3.py","file_name":"bai37_w3.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5465651530","text":"import pymysql\n\nfrom classes_rod.classes_round_of_drinks import Person, Drink, Order\n\npeople_list = []\ndrinks_list = []\npast_orders = []\n\ndef load_db():\n data = read_db(\"people\")\n for line in data:\n people_list.append(Person(line[0], line[1], line[2], line[3], line[4]))\n data = read_db(\"drinks\")\n for line in data:\n drinks_list.append(Drink(line[0], line[1], line[2], line[3]))\n data = read_db(\"past_orders\")\n for line in data:\n past_orders.append(Order(line[0], line[1], line[2], line[3]))\n return people_list, drinks_list, past_orders\n\ndef connect_to_db():\n connection = pymysql.connect(\n host = \"localhost\",\n port = 3306,\n user = \"root\",\n password = \"password\",\n db = \"brew\",\n charset='utf8mb4',\n )\n return connection\n\ndef read_db(table_name):\n connection = connect_to_db()\n\n data = []\n cursor = connection.cursor()\n cursor.execute(f\"SELECT * FROM {table_name}\")\n data = cursor.fetchall()\n cursor.close()\n connection.close()\n return data\n\ndef write_db(table_name):\n connection = connect_to_db()\n\n cursor = connection.cursor()\n cursor.execute(f\"DELETE FROM {table_name}\")\n if \"people\" in table_name:\n for person in people_list:\n cursor.execute(f\"INSERT INTO people () values (NULL, '{person.first_name}', '{person.last_name}', '{person.age}', '{person.race}')\")\n elif \"drink\" in table_name:\n for drink in drinks_list:\n cursor.execute(f\"INSERT INTO drinks () values (NULL, '{drink.name}', '{drink.price}', '{drink.is_mixer}')\")\n else:\n for order in past_orders:\n cursor.execute(f\"\"\"INSERT INTO past_orders () values (NULL, '{order.person}', \"{order.people_drinks}\", '{order.cost}')\"\"\")\n\n connection.commit()\n cursor.close()\n connection.close()","repo_name":"HarryAnkcorn/my_brew_app","sub_path":"db_handling_rod/db_handling_round_of_drinks.py","file_name":"db_handling_round_of_drinks.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2378962610","text":"from aioextensions import (\n collect,\n run,\n)\nfrom async_lru import (\n alru_cache,\n)\nfrom charts.generators.bar_chart import (\n format_csv_data,\n)\nfrom charts.generators.bar_chart.mttr_benchmarking_cvssf import (\n _get_historic_verification,\n)\nfrom charts.generators.bar_chart.utils import (\n Benchmarking,\n get_valid_subjects,\n get_vulnerability_reattacks,\n GROUP_CATEGORIES,\n ORGANIZATION_CATEGORIES,\n PORTFOLIO_CATEGORIES,\n)\nfrom charts.generators.common.colors import (\n EXPOSURE,\n)\nfrom charts.generators.common.utils import (\n BAR_RATIO_WIDTH,\n get_max_axis,\n)\nfrom 
charts.generators.stacked_bar_chart.exposed_over_time_cvssf import (\n get_group_document,\n)\nfrom charts.generators.stacked_bar_chart.util_class import (\n RiskOverTime,\n)\nfrom charts.generators.stacked_bar_chart.utils import (\n get_current_time_range,\n)\nfrom charts.utils import (\n format_cvssf,\n get_portfolios_groups,\n iterate_groups,\n iterate_organizations_and_groups,\n json_dump,\n)\nfrom custom_utils.findings import (\n get_group_findings,\n)\nfrom dataloaders import (\n Dataloaders,\n get_new_context,\n)\nfrom db_model.vulnerabilities.types import (\n VulnerabilityVerification,\n)\nfrom decimal import (\n Decimal,\n)\nimport math\nfrom organizations.domain import (\n get_all_active_group_names,\n)\nfrom statistics import (\n mean,\n)\n\n\ndef format_cvssf_log(cvssf: Decimal) -> Decimal:\n if cvssf <= Decimal(\"0.0\"):\n return cvssf.quantize(Decimal(\"0.1\"))\n\n return Decimal(math.log2(cvssf))\n\n\ndef format_max_value(data: tuple[Decimal, ...]) -> Decimal:\n if data:\n return sorted(data, reverse=True)[0]\n\n return Decimal(\"1.0\")\n\n\n@alru_cache(maxsize=None, typed=True)\nasync def get_data_one_group(group: str, loaders: Dataloaders) -> Benchmarking:\n group_findings = await get_group_findings(\n group_name=group, loaders=loaders\n )\n vulnerabilities = await loaders.finding_vulnerabilities.load_many_chained(\n [finding.id for finding in group_findings]\n )\n historics_verification: tuple[\n tuple[VulnerabilityVerification, ...], ...\n ] = await collect(\n tuple(\n _get_historic_verification(loaders, vulnerability)\n for vulnerability in vulnerabilities\n if vulnerability.verification\n ),\n workers=4,\n )\n\n number_of_reattacks = sum(\n get_vulnerability_reattacks(historic_verification=historic)\n for historic in historics_verification\n )\n\n group_document: RiskOverTime = await get_group_document(group, loaders)\n document = get_current_time_range(tuple([group_document]))[0][0]\n values: list[Decimal] = [\n Decimal(document[name][date]).quantize(Decimal(\"0.1\"))\n for date in tuple(document[\"date\"])[-12:]\n for name in document\n if name != \"date\"\n ]\n\n return Benchmarking(\n is_valid=number_of_reattacks > 10,\n subject=group.lower(),\n mttr=values[-1] if len(values) > 0 else Decimal(\"0.0\"),\n number_of_reattacks=number_of_reattacks,\n )\n\n\n@alru_cache(maxsize=None, typed=True)\nasync def get_data_many_groups(\n organization_id: str,\n groups: tuple[str, ...],\n loaders: Dataloaders,\n) -> Benchmarking:\n groups_data: tuple[Benchmarking, ...] 
= await collect(\n tuple(get_data_one_group(group, loaders) for group in groups),\n workers=16,\n )\n\n exposure: Decimal = (\n Decimal(sum(group_data.mttr for group_data in groups_data)).quantize(\n Decimal(\"0.1\")\n )\n if groups_data\n else Decimal(\"0.0\")\n )\n number_of_reattacks = sum(\n group_data.number_of_reattacks for group_data in groups_data\n )\n\n return Benchmarking(\n is_valid=number_of_reattacks > 100,\n subject=organization_id,\n mttr=exposure,\n number_of_reattacks=number_of_reattacks,\n )\n\n\ndef get_average_entities(*, entities: list[Benchmarking]) -> Decimal:\n return (\n Decimal(mean([subject.mttr for subject in entities])).quantize(\n Decimal(\"0.1\")\n )\n if entities\n else Decimal(\"0.0\")\n )\n\n\ndef get_best_exposure(*, subjects: list[Benchmarking]) -> Decimal:\n return (\n Decimal(min(subject.mttr for subject in subjects)).quantize(\n Decimal(\"0.1\")\n )\n if subjects\n else Decimal(\"0.0\")\n )\n\n\ndef get_worst_exposure(*, subjects: list[Benchmarking]) -> Decimal:\n return (\n Decimal(max(subject.mttr for subject in subjects)).quantize(\n Decimal(\"0.1\")\n )\n if subjects\n else Decimal(\"0.0\")\n )\n\n\ndef format_data(\n all_data: tuple[Decimal, Decimal, Decimal, Decimal],\n categories: list[str],\n) -> dict:\n data = tuple(format_cvssf(value) for value in all_data)\n max_value: Decimal = list(\n sorted(\n [abs(value) for value in data],\n reverse=True,\n )\n )[0]\n\n max_axis_value: Decimal = (\n get_max_axis(value=max_value)\n if max_value > Decimal(\"0.0\")\n else Decimal(\"0.0\")\n )\n\n return dict(\n data=dict(\n columns=[\n [\n \"Exposure\",\n *[format_cvssf_log(value) for value in data],\n ]\n ],\n colors={\n \"Exposure\": EXPOSURE,\n },\n labels=True,\n type=\"bar\",\n ),\n axis=dict(\n x=dict(\n categories=categories,\n type=\"category\",\n ),\n y=dict(\n min=0,\n padding=dict(\n bottom=0,\n top=0,\n ),\n label=dict(\n text=\"CVSSF\",\n position=\"inner-top\",\n ),\n tick=dict(\n count=5,\n ),\n **(\n {}\n if max_axis_value == Decimal(\"0.0\")\n else dict(max=format_cvssf_log(max_axis_value))\n ),\n ),\n ),\n bar=dict(\n width=dict(\n ratio=BAR_RATIO_WIDTH,\n ),\n ),\n tooltip=dict(\n show=False,\n ),\n legend=dict(\n show=False,\n ),\n mttrBenchmarking=True,\n maxValue=format_max_value(data),\n maxValueLog=format_max_value(\n tuple(format_cvssf_log(value) for value in data)\n ),\n originalValues=[\n Decimal(value).quantize(Decimal(\"0.1\")) for value in data\n ],\n grid=dict(\n x=dict(\n show=False,\n ),\n y=dict(\n show=True,\n ),\n ),\n hideYAxisLine=True,\n hideXTickLine=True,\n exposureBenchmarkingCvssf=True,\n )\n\n\nasync def generate() -> None: # pylint: disable=too-many-locals\n loaders: Dataloaders = get_new_context()\n organizations: list[tuple[str, tuple[str, ...]]] = []\n portfolios: list[tuple[str, tuple[str, ...]]] = []\n group_names: list[str] = list(\n sorted(\n await get_all_active_group_names(loaders),\n reverse=True,\n )\n )\n\n async for org_id, _, org_groups in iterate_organizations_and_groups():\n organizations.append((org_id, org_groups))\n\n async for org_id, org_name, _ in iterate_organizations_and_groups():\n for portfolio, p_groups in await get_portfolios_groups(org_name):\n portfolios.append(\n (f\"{org_id}PORTFOLIO#{portfolio}\", tuple(p_groups))\n )\n\n all_groups_data: tuple[Benchmarking, ...] = await collect(\n tuple(\n get_data_one_group(\n group_name,\n loaders,\n )\n for group_name in group_names\n ),\n workers=8,\n )\n\n all_organizations_data: tuple[Benchmarking, ...] 
= await collect(\n tuple(\n get_data_many_groups(\n organization_id=organization[0],\n groups=organization[1],\n loaders=loaders,\n )\n for organization in organizations\n ),\n workers=8,\n )\n\n all_portfolios_data: tuple[Benchmarking, ...] = await collect(\n tuple(\n get_data_many_groups(\n organization_id=portfolio[0],\n groups=portfolio[1],\n loaders=loaders,\n )\n for portfolio in portfolios\n ),\n workers=8,\n )\n\n best_exposure: Decimal = get_best_exposure(\n subjects=[\n organization\n for organization in all_organizations_data\n if organization.is_valid\n ]\n )\n\n worst_organazation_exposure: Decimal = get_worst_exposure(\n subjects=[\n organization\n for organization in all_organizations_data\n if organization.is_valid\n ],\n )\n\n best_group_exposure: Decimal = get_best_exposure(\n subjects=[group for group in all_groups_data if group.is_valid]\n )\n\n worst_group_exposure: Decimal = get_worst_exposure(\n subjects=[group for group in all_groups_data if group.is_valid],\n )\n\n best_portfolio_exposure: Decimal = get_best_exposure(\n subjects=[\n portfolio\n for portfolio in all_portfolios_data\n if portfolio.is_valid\n ]\n )\n worst_portfolio_exposure: Decimal = get_worst_exposure(\n subjects=[\n portfolio\n for portfolio in all_portfolios_data\n if portfolio.is_valid\n ],\n )\n\n header: str = \"Categories\"\n alternative: str = \"Exposure\"\n\n async for group in iterate_groups():\n document = format_data(\n all_data=(\n (\n await get_data_one_group(\n group,\n loaders,\n )\n ).mttr,\n best_group_exposure,\n get_average_entities(\n entities=get_valid_subjects(\n all_subjects=all_groups_data,\n )\n ),\n worst_group_exposure,\n ),\n categories=GROUP_CATEGORIES,\n )\n json_dump(\n document=document,\n entity=\"group\",\n subject=group,\n csv_document=format_csv_data(\n document=document, header=header, alternative=alternative\n ),\n )\n\n async for org_id, _, org_groups in iterate_organizations_and_groups():\n document = format_data(\n all_data=(\n (\n await get_data_many_groups(\n organization_id=org_id,\n groups=org_groups,\n loaders=loaders,\n )\n ).mttr,\n best_exposure,\n get_average_entities(\n entities=get_valid_subjects(\n all_subjects=all_organizations_data,\n )\n ),\n worst_organazation_exposure,\n ),\n categories=ORGANIZATION_CATEGORIES,\n )\n json_dump(\n document=document,\n entity=\"organization\",\n subject=org_id,\n csv_document=format_csv_data(\n document=document, header=header, alternative=alternative\n ),\n )\n\n async for org_id, org_name, _ in iterate_organizations_and_groups():\n for portfolio, pgroup_names in await get_portfolios_groups(org_name):\n document = format_data(\n all_data=(\n (\n await get_data_many_groups(\n organization_id=f\"{org_id}PORTFOLIO#{portfolio}\",\n groups=pgroup_names,\n loaders=loaders,\n )\n ).mttr,\n best_portfolio_exposure,\n get_average_entities(\n entities=get_valid_subjects(\n all_subjects=all_portfolios_data,\n )\n ),\n worst_portfolio_exposure,\n ),\n categories=PORTFOLIO_CATEGORIES,\n )\n json_dump(\n document=document,\n entity=\"portfolio\",\n subject=f\"{org_id}PORTFOLIO#{portfolio}\",\n csv_document=format_csv_data(\n document=document,\n header=header,\n alternative=alternative,\n ),\n )\n\n\nif __name__ == \"__main__\":\n 
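The fan-outs above rely on collect(..., workers=N) to cap concurrency while gathering per-group data. A rough plain-asyncio equivalent of that pattern, assuming collect behaves like a worker-limited gather; fetch_group is a hypothetical stand-in for the per-group coroutine:

import asyncio

async def fetch_group(group: str) -> str:
    await asyncio.sleep(0.01)  # stands in for the real per-group dataloader I/O
    return group.lower()

async def gather_limited(groups, workers: int):
    sem = asyncio.Semaphore(workers)  # at most `workers` coroutines run at once

    async def bounded(group):
        async with sem:
            return await fetch_group(group)

    return await asyncio.gather(*(bounded(g) for g in groups))

print(asyncio.run(gather_limited(["G1", "G2", "G3", "G4"], workers=2)))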
run(generate())\n","repo_name":"cognettings/vulscanner","sub_path":"integrates/charts/generators/bar_chart/exposure_benchmarking_cvssf.py","file_name":"exposure_benchmarking_cvssf.py","file_ext":"py","file_size_in_byte":12808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"7395629687","text":"import cv2\nimport matplotlib\nimport numpy as np\nimport sys\nimport tensorflow as tf\nfrom distutils.version import StrictVersion\nfrom googletrans import Translator\nimport os\nfrom flask import Flask, render_template\nfrom werkzeug.utils import secure_filename\nfrom flask import request\n\napp = Flask(__name__)\nsys.path.append(\"..\")\n\ndef find_tag(dict):\n if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):\n raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')\n from utils import label_map_util\n matplotlib.use('tkagg')\n\n MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'\n MODEL_FILE = MODEL_NAME + '.tar.gz'\n DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n\n PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'\n PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n PATH_TO_IMAGE = os.path.join(dict)\n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\n category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n sess = tf.Session(graph=detection_graph)\n\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n image = cv2.imread(PATH_TO_IMAGE)\n image_expanded = np.expand_dims(image, axis=0)\n\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_expanded})\n\n objects = []\n threshold = 0.00\n\n for index, value in enumerate(classes[0]):\n object_dict = {}\n if scores[0, index] > threshold:\n object_dict[\"score\"] = \\\n scores[0, index]\n object_dict[\"name\"] = \\\n (category_index.get(value)).get('name')\n objects.append(object_dict)\n\n rate = 0\n category = ''\n for object in objects:\n if object[\"score\"] > rate:\n rate = object[\"score\"]\n category = object[\"name\"]\n\n return category\n\nclass Result:\n tr = \"\"\n fr = \"\"\n de = \"\"\n es = \"\"\n en = \"\"\n\n\ndef translate(tag):\n result = Result()\n translator = Translator()\n result.en = tag\n result.tr = translator.translate(tag, dest='tr').text\n result.fr = translator.translate(tag, dest='fr').text\n result.de = translator.translate(tag, dest='de').text\n result.es = translator.translate(tag, dest='es').text\n return result\n\n@app.route('/')\ndef upload_file1():\n return 
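The post-processing in find_tag above keeps every class whose score exceeds 0.00, which in practice keeps all detections, and then scans for the maximum. A compact sketch of the same selection with an assumed, more meaningful confidence cutoff:

detections = [
    {"name": "person", "score": 0.91},
    {"name": "dog", "score": 0.42},
    {"name": "kite", "score": 0.07},
]
THRESHOLD = 0.5  # assumed cutoff; the code above uses 0.00, which keeps every box
confident = [d for d in detections if d["score"] > THRESHOLD]
category = max(confident, key=lambda d: d["score"])["name"] if confident else ""
print(category)  # person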
render_template('upload.html')\n\n@app.route('/uploader', methods=['GET', 'POST'])\ndef upload_file():\n f = request.files['file']\n f.save(\"pictures/\" + secure_filename(f.filename))\n dict = \"pictures/\" + secure_filename(f.filename)\n tag = find_tag(dict)\n if tag != \"\":\n html = \"BK Visual Dictionary\"\n result = translate(tag)\n body = \"
English: \" + result.en + \"
Spanish: \"+ result.es + \"
German: \" + result.de + \"
French: \" + result.fr + \"
Turkish: \" + result.tr + \"
\"\n html = html + body + \"\"\n return html\n else:\n return \"No object detected\"\n\nif __name__ == '__main__':\n app.run()\n\n\n","repo_name":"nburak/Visual-Dictionary-Implementation-of-Tensorflow-Object-Detection-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"21061773805","text":"from scipy.spatial import distance as dist\nimport numpy as np\n\n\nclass Mouth:\n def __init__(self):\n self.mar = None\n self.status = None\n\n\ndef mouth_aspect_ratio(landmarks):\n A = dist.euclidean(landmarks[37, :], landmarks[84, :]) # 51, 59 # media(37,84)\n B = dist.euclidean(landmarks[267, :], landmarks[314, :]) # 53, 57 # media(267,314)\n C = dist.euclidean(landmarks[61, :], landmarks[291, :]) # 49, 55 #media(61,291)\n mar = (A + B) / (2.0 * C)\n return mar\n\n\nMOUTH_AR_THRESH = 0.6\n\n\n# (mStart, mEnd) = (49, 68)\n\ndef mouth_open(frame, faces):\n landmarks = np.array(faces[0].landmarks)[:, :2]\n outer_bottom = [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291]\n outer_top = [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291]\n inner_bottom = [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308]\n inner_top = [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308]\n\n mouth_obj = Mouth()\n mouth = landmarks[outer_bottom.extend(inner_top), :]\n mouthMAR = mouth_aspect_ratio(landmarks)\n mar = mouthMAR\n mouth_obj.mar = mar\n\n if mar >= MOUTH_AR_THRESH:\n mouth_obj.status = \"mouth open\"\n elif mar < MOUTH_AR_THRESH:\n mouth_obj.status = \"mouth close\"\n\n return mouth_obj\n","repo_name":"shreyaspj20/Reliable-proctoring-AI","sub_path":"audio_and_oral_movements/oral_movement.py","file_name":"oral_movement.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"73"} +{"seq_id":"73675070635","text":"import os\nimport pandas as pd\nimport torch\nimport librosa\nfrom torch.utils.data import Dataset\n\nclass CommonVoiceDataset(Dataset):\n def __init__(self, data_root, tsv_file, sample_rate=22050, transform=None):\n \"\"\"\n Initialize the Common Voice dataset.\n\n Args:\n data_root (str): Path to the root directory containing the clips folder and TSV files.\n tsv_file (str): Filename of the TSV file containing metadata (e.g., \"train.tsv\", \"test.tsv\", \"dev.tsv\").\n sample_rate (int, optional): Sample rate to use for the audio data.\n transform (callable, optional): Optional transform to apply to the audio data.\n \"\"\"\n self.data_root = data_root\n self.sample_rate = sample_rate\n self.metadata = pd.read_csv(os.path.join(data_root, tsv_file), delimiter=\"\\t\")\n self.transform = transform\n\n def __len__(self):\n return len(self.metadata)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n # Get the path to the audio file and load it\n audio_file = os.path.join(self.data_root, \"clips\", self.metadata.iloc[idx][\"path\"])\n waveform, sample_rate = librosa.load(audio_file, sr=self.sample_rate)\n waveform = torch.from_numpy(waveform).unsqueeze(0)\n \n # Apply the optional transform\n if self.transform:\n waveform = self.transform(waveform)\n\n # Get the corresponding text label\n text = self.metadata.iloc[idx][\"sentence\"]\n\n return waveform, text\n\n\nif __name__ == \"__main__\":\n data_root = \"/path/to/common_voice/data\"\n train_tsv = \"train.tsv\"\n\n train_dataset = CommonVoiceDataset(data_root, train_tsv)\n\n # Access a sample from 
the dataset\n waveform, text = train_dataset[0]\n print(\"Waveform shape:\", waveform.shape)\n print(\"Text:\", text)\n","repo_name":"andorxornot/PersonalNeuralCodec","sub_path":"pnc/datasets/common_voice_dataset.py","file_name":"common_voice_dataset.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9809821377","text":"\"\"\"\nAuthor: Sreekanth Palagiri\nfile:\nRename this file to TSP_x.py where x is your student number \n\"\"\"\nimport os\nimport sys \nimport random\nimport string\nimport logging\nfrom datetime import datetime\nimport numpy as np\nfrom prolibs.plotmodule import plotfit\nfrom matplotlib.lines import Line2D\nfrom prolibs.Individual import Individual\nimport statistics\nimport csv\n\n# set system path\ndirectory = os.getcwd()\nsys.path.insert(0,directory+\"/prolibs\")\nsys.path.insert(0,directory+\"/TSPdataset\")\nsys.path.insert(0,directory+\"/files\")\n\nlogging.basicConfig(level=logging.ERROR, filename=directory+\"/files/\"+\"logfile.txt\",\n format=\"%(message)s\")\n\n# seed program for consistent results with student id\nmyStudentNum = 184198 \nrandom.seed(myStudentNum)\n\nconfig = {1:['random','uniformCrossover','inversionMutation','randomSelection'],\n 2:['random','pmxCrossover','reciprocalExchangeMutation','randomSelection'],\n 3:['random','uniformCrossover','reciprocalExchangeMutation','stochasticUniversalSampling'],\n 4:['random','pmxCrossover','reciprocalExchangeMutation','stochasticUniversalSampling'],\n 5:['random','pmxCrossover','inversionMutation','stochasticUniversalSampling'],\n 6:['random','uniformCrossover','inversionMutation','stochasticUniversalSampling'],\n 7:['NearestNeighbour','pmxCrossover','inversionMutation','stochasticUniversalSampling'],\n 8:['NearestNeighbour','uniformCrossover','inversionMutation','stochasticUniversalSampling']\n }\n\n#variables for plotting\niteration = []\ntimeperiter=[]\nbestdistance=[]\nbestfitness=[]\naveragefitness=[]\nminfitness=[]\nmaxfitness=[]\nmedianfitness=[]\n\n\nclass BasicTSP:\n def __init__(self, _fName, _popSize, _mutationRate, _maxIterations):\n \"\"\"\n Parameters and general variables\n \"\"\"\n self.population = []\n self.matingPool = []\n self.best = None\n self.popSize = _popSize\n self.genSize = None\n self.mutationRate = _mutationRate\n self.maxIterations = _maxIterations\n self.iteration = 0\n self.now = datetime.now()\n self.fName = _fName\n self.data = {}\n\n self.readInstance()\n self.initPopulation()\n\n def readInstance(self):\n \"\"\"\n Reading an instance from fName\n \"\"\"\n file = open(directory+'//TSPdataset//'+self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()\n \n def initPopulation(self):\n \"\"\"\n Creating random individuals in the population\n \"\"\"\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data,config[confignum][0])\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n logging.error(ind_i.genes)\n print (\"Best initial sol: Distance\",self.best.getDistance(),' Fitness:', self.best.getFitness())\n\n def updateBest(self, candidate):\n if self.best == None or candidate.getFitness() > self.best.getFitness():\n self.best = candidate.copy()\n print('Best 
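readInstance above implies the on-disk instance format: a city count on the first line, then one "id x y" triple per line. A tiny round-trip sketch of that format (the file name is hypothetical):

instance = "3\n1 87 94\n2 12 35\n3 56 73\n"
with open("tiny.tsp", "w") as f:  # hypothetical instance file
    f.write(instance)

with open("tiny.tsp") as f:
    gen_size = int(f.readline())
    data = {int(i): (int(x), int(y)) for i, x, y in (line.split() for line in f)}
print(gen_size, data)  # 3 {1: (87, 94), 2: (12, 35), 3: (56, 73)}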
Fitness:',candidate.getFitness(),' Iteration:',self.iteration)\n \n\n def randomparentSelection(self):\n \"\"\"\n Random (uniform) selection of two individuals\n \"\"\"\n indA = self.matingPool[ random.randint(0, self.popSize-1) ]\n indB = self.matingPool[ random.randint(0, self.popSize-1) ]\n return [indA, indB]\n\n def stochasticUniversalSampling(self):\n \"\"\"\n Your stochastic universal sampling Selection Implementation\n \"\"\"\n logging.info('Population before stochastic selection at iteration:'+ str(self.iteration))\n popfit=[] # Use to log population fitness in log, not used in logic \n for i in range(self.popSize):\n logging.info(self.population[i].genes) # logging info for validation \n popfit.append(self.population[i].fitness)\n logging.info(popfit)\n\n self.matingPool = []\n f=sum(i.fitness for i in self.population) #Sum of fitness values\n p=f/self.popSize # Distance between successive points\n rn = random.uniform(0,p) #generating random number rn as starting point\n logging.info('f:'+str(f)+' p:'+str(p)+' rn:'+str(rn)+' popsize:'+str(self.popSize)) # logging info for validation\n \"\"\"\n creating N pointers of length rn+p*1,rn+P*2,...rn+P*N where p is distance between points \n and N is popsize. We are considering population size as no. of parents \n \"\"\"\n pointers =[ rn + i*p for i in range(self.popSize) ]\n logging.info('pointers:')# logging info for validation\n logging.info(pointers)# logging info for validation\n \"\"\"Pseudo code:\n For each pointer i in pointers, while fitness sum of Population[0..j] < P\n j++\n add Population[j] to matingpool. \n \"\"\"\n for i in range(len(pointers)):\n j,fitsubtotal=0,self.population[0].fitness\n while (fitsubtotal <= pointers[i]):\n j+=1\n fitsubtotal+=self.population[j].fitness\n self.matingPool.append(self.population[j].copy())\n logging.info('Population After stochastic selection (selection of mating pool):')\n for i in range(len(self.matingPool)):\n logging.info(self.matingPool[i].genes) # logging info for validation \n \n def uniformCrossover(self, indA, indB):\n \"\"\"\n Uniform Crossover Implementation\n \"\"\"\n selector = [random.randint(0, 1) for i in range(self.genSize)]#1 position doesnt change, 0 position changes\n selector = ['' if a==0 else 1 for a in selector] #replace space with empty\n A=[a if b!= '' else '' for a,b in zip(indA.genes,selector)] #make a new child with spaces where genes can be replaced\n B=[a if b!= '' else '' for a,b in zip(indB.genes,selector)] #make a new child with spaces where genes can be replaced\n #iterate trough parent, if gene is not present in child add it at first empty space\n for i in indB.genes:\n if i in A:\n pass\n else:\n for j in range(len(A)):\n if A[j] == '':\n A[j]=i\n break\n for i in indA.genes:\n if i in B:\n pass\n else:\n for j in range(len(B)):\n if B[j] == '':\n B[j]=i\n break\n indA.genes,indB.genes= A,B\n\n def pmxCrossover(self, indA, indB):\n \"\"\"\n PMX Crossover Implementation\n \"\"\"\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n A = ['']* min(indexA,indexB) + indA.genes[min(indexA,indexB):max(indexA,indexB)] + [''] * (self.genSize - max(indexA,indexB))\n B = ['']* min(indexA,indexB) + indB.genes[min(indexA,indexB):max(indexA,indexB)] + [''] * (self.genSize - max(indexA,indexB))\n for a, b in zip(indA.genes[min(indexA,indexB):max(indexA,indexB)],indB.genes[min(indexA,indexB):max(indexA,indexB)]):\n if b not in A:\n x=indB.genes.index(a)\n while A[x] !='':\n x=indB.genes.index(A[x])\n A[x]=b \n if a not in B:\n 
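A worked numeric example of the stochastic universal sampling implemented above: with total fitness f, spacing p = f / N, and a random start rn in [0, p), the N pointers rn + i * p each select the first individual whose cumulative fitness exceeds them, so fitter individuals are chosen proportionally more often.

import random

fitnesses = [10.0, 30.0, 60.0]  # toy population
f = sum(fitnesses)              # 100.0
n = len(fitnesses)
p = f / n                       # pointer spacing: 33.33...
rn = random.uniform(0, p)       # single random start
pointers = [rn + i * p for i in range(n)]

selected = []
for point in pointers:
    j, subtotal = 0, fitnesses[0]
    while subtotal <= point:
        j += 1
        subtotal += fitnesses[j]
    selected.append(j)
print(selected)  # the fittest individual (index 2) appears most often, e.g. [0, 1, 2]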
y=indA.genes.index(b)\n while B[y] !='':\n y=indA.genes.index(B[y])\n B[y]=a \n for i in range(self.genSize):\n if A[i] == '':\n A[i] = indB.genes[i]\n if B[i] == '':\n B[i] = indA.genes[i]\n\n indA.genes=A\n indB.genes=B\n\n def reciprocalExchangeMutation(self, ind):\n \"\"\"\n Your Reciprocal Exchange Mutation implementation\n \"\"\"\n if random.random() > self.mutationRate:\n return\n \n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n \n def inversionMutation(self, ind):\n \"\"\"\n Inversion Mutation implementation\n \"\"\"\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n tmp=ind.genes[min(indexA,indexB):max(indexA,indexB)]\n tmp.reverse()\n ind.genes[min(indexA,indexB):max(indexA,indexB)]=tmp\n \n def crossover(self, indA, indB):\n \"\"\"\n Executes a 1 order crossover and returns a new individual\n \"\"\"\n child = []\n tmp = {}\n\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n for i in range(0, self.genSize):\n if i >= min(indexA, indexB) and i <= max(indexA, indexB):\n tmp[indA.genes[i]] = False\n else:\n tmp[indA.genes[i]] = True\n aux = []\n for i in range(0, self.genSize):\n if not tmp[indB.genes[i]]:\n child.append(indB.genes[i])\n else:\n aux.append(indB.genes[i])\n child += aux\n return child\n\n def mutation(self, ind):\n \"\"\"\n Mutate an individual by swaping two cities with certain probability (i.e., mutation rate)\n \"\"\"\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)\n\n def randomSelection(self):\n \"\"\"\n Updating the mating pool before creating a new generation\n \"\"\"\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )\n \n def newGeneration(self):\n for i in range(0, round(self.popSize/2)):\n \"\"\"\n Depending of your experiment you need to use the most suitable algorithms for:\n 1. Select two candidates\n 2. Apply Crossover\n 3. Apply Mutation\n \"\"\"\n indA, indB = self.randomparentSelection()\n logging.info('Randomly selected parents for mating:'+ str(indA.genes) + str(indB.genes))\n getattr(ga,config[confignum][1])(indA, indB)\n logging.info('After '+ config[confignum][1] +':'+ str(indA.genes) + str(indB.genes))\n getattr(ga,config[confignum][2])(indA)\n logging.info('After Parent 1 '+ config[confignum][2] +':'+ str(indA.genes) + str(indB.genes))\n getattr(ga,config[confignum][2])(indB)\n logging.info('After Parent 2 '+ config[confignum][2] +':'+ str(indA.genes) + str(indB.genes))\n \n print (\"iteration: \",self.iteration, \"Best Distance: \",self.best.getDistance(),\"Best Fitness: \",self.best.getFitness())\n logging.info (\"iteration: \"+ str(self.iteration)+\" best distance:\"+ str(self.best.getDistance())+\" best Fitness:\"+str(self.best.getFitness()))\n logging.info (\"Gene with best Fitness:\"+ str(self.best.genes))\n \n def GAStep(self):\n \"\"\"\n One step in the GA main algorithm\n 1. Updating mating pool with current population\n 2. 
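inversionMutation above reverses a randomly chosen segment of the tour. The same operation in isolation; when the two indices coincide the slice is empty and the genes are unchanged, as in the original:

import random

genes = [1, 2, 3, 4, 5, 6]
index_a = random.randint(0, len(genes) - 1)
index_b = random.randint(0, len(genes) - 1)
lo, hi = min(index_a, index_b), max(index_a, index_b)
genes[lo:hi] = reversed(genes[lo:hi])  # reverse the chosen segment in place
print(genes)  # e.g. [1, 4, 3, 2, 5, 6] when the segment covers indices 1..4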
Creating a new Generation\n \"\"\"\n getattr(ga,config[confignum][3])()\n self.newGeneration()\n \n #updating fitness of new generation and catpuring statistics \n iteration.append(self.iteration)\n fitness =[]\n for ind in self.matingPool:\n ind.computeFitness()\n self.updateBest(ind)\n fitness.append(ind.getFitness())\n self.population=self.matingPool\n \n bestdistance.append(self.best.getDistance())\n bestfitness.append(self.best.getFitness())\n averagefitness.append(statistics.mean(fitness))\n minfitness.append(min(fitness))\n maxfitness.append(max(fitness))\n medianfitness.append(statistics.median(fitness))\n timetaken = datetime.now() - self.now\n timeperiter.append(timetaken.microseconds/1000)\n self.now = datetime.now()\n \n def search(self):\n \"\"\"\n General search template.\n Iterates for a given number of steps\n \"\"\"\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep()\n self.iteration += 1\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: Distance - \",self.best.getDistance(),' Fitness:', self.best.getFitness())\n print(\"Gene with best Fitness:\", str(self.best.genes))\n with open(directory+\"/files/\"+\"configstats.csv\", mode='a') as stats_file:\n stats_file_writer = csv.writer(stats_file)\n stats_file_writer.writerow(['Configuration '+str(confignum),self.iteration,self.best.getFitness(),self.best.getDistance(),statistics.mean(timeperiter),(sum(timeperiter)/1000)])\n stats_file.close()\n\nif len(sys.argv) < 2:\n print (\"Error - Incorrect input\")\n print (\"Expecting python BasicTSP.py [instance] \")\n sys.exit(0)\n\nproblem_file = sys.argv[1]\n\nprint('Please choose configuration from below:')\nfor i in config:\n print(i,':',config[i])\nconfignum = int(input())\n\nif confignum < 1 or confignum > 8 :\n print (\"Error - Incorrect input for config\")\n sys.exit(0)\n\nga = BasicTSP(problem_file, 300, 0.1, 500)\nga.search()\nfilename = directory+\"/files/\"+'Configuration '+str(confignum)\nplotfit(iteration,bestfitness,averagefitness,medianfitness,minfitness,maxfitness,filename,'Configuration '+str(confignum))\n","repo_name":"sreekanthpalagiri/geneticalgorithms-tsp","sub_path":"tsp_R00184198.py","file_name":"tsp_R00184198.py","file_ext":"py","file_size_in_byte":14225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27121682216","text":"import random\r\n\r\nval = random.randint(0, 20)\r\nprint('welcome to this guess game\\n')\r\nprint(val)\r\n\r\nguess = int(input(\"try to find the number i have between 0 and 20\\n\"))\r\nwhile (guess!=val):\r\n if (guess > val):\r\n print(\"Your guess is to long\")\r\n guess = int(input(\"try to find the number i have between 0 and 20\\n\"))\r\n else:\r\n print(\"Your guess is to short!\")\r\n guess = int(input(\"try to find the number i have between 0 and 20\\n\"))\r\nif (guess == val):\r\n print(\"Congratulations you got it!\")\r\n\r\n\r\n\r\n '''elif (guess < val):\r\n print(\"Your guess is to short\")\r\n guess = int(input(\"try to find the number i have between o and 20\\n\"))\r\n else:\r\n print(\"Congratulations you got it!\")\r\n '''","repo_name":"triom/Random-guess-","sub_path":"Random guess.py","file_name":"Random guess.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9776142498","text":"from socket import *\n\nHOST = 'www.baidu.com'\nPORT = 80\nBUFSIZ = 1024\nADDR = (HOST, PORT)\ntcpClisock = socket(AF_INET, 
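A caveat on the per-iteration timing in GAStep above: timedelta.microseconds is only the sub-second component, so any iteration longer than one second is under-reported. total_seconds() gives the full duration:

from datetime import datetime, timedelta

start = datetime.now()
elapsed: timedelta = datetime.now() - start
print(elapsed.microseconds / 1000)     # only the sub-second part, wraps past 1 s
print(elapsed.total_seconds() * 1000)  # full elapsed milliseconds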
SOCK_STREAM)\ntcpClisock.connect(ADDR)\ntcpClisock.send(str('GET/\\n').encode('utf-8'))\ndata = tcpClisock.recv(BUFSIZ).decode('utf-8')\nwith open(r\"webpage.txt\", 'w') as f:\n f.write(data)\n","repo_name":"cdliang11/some_exercises","sub_path":"core-programming-python-3rd/chapter-2/homework/2-11.py","file_name":"2-11.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71164501037","text":"import numpy as np\nimport tensorflow as tf\nimport gym\nfrom utils import *\nimport os\nimport time\nfrom Agents.TRPOAgentDiscrete import TRPO as TRPOD\nfrom Agents.TRPOAgent import TRPO\nimport argparse\nimport logging\nimport json\nfrom mpi4py import MPI\nimport sys\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\ncomm_size = comm.Get_size()\n# cpu = MPI.Get_processor_name()\n# print(\"Hello world from processor {}, process {} out of {}\".format(cpu,rank,comm_size))\n# sys.stdout.flush()\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nRESULTS_DIR = os.path.join(os.getcwd(), 'Results')\nif not os.path.exists(RESULTS_DIR):\n os.mkdir(RESULTS_DIR)\n\nparser = argparse.ArgumentParser(description='TRPO.')\nparser.add_argument(\"--task\", type=str, default='SpaceInvaders-ram-v0')\nparser.add_argument(\"--timesteps_per_batch\", type=int, default=40000)\nparser.add_argument(\"--n_steps\", type=int, default=1000000000)\nparser.add_argument(\"--n_iter\", type=int, default=100)\nparser.add_argument(\"--gamma\", type=float, default=.995)\nparser.add_argument(\"--max_kl\", type=float, default=.01)\nparser.add_argument(\"--cg_damping\", type=float, default=0.1)\nparser.add_argument(\"--monitor\", type=bool, default=False)\nparser.add_argument(\"--parallel_balancing\", type=str, default=\"timesteps\") # timesteps, episodes\nparser.add_argument(\"--discrete\", type=bool, default=True)\n\n# change these parameters for hyperparameter adaptation (kvfrans)\nparser.add_argument(\"--decay_method\", type=str, default=\"none\") # adaptive, none\nparser.add_argument(\"--timestep_adapt\", type=int, default=0)\nparser.add_argument(\"--kl_adapt\", type=float, default=0)\n\nargs = parser.parse_args()\nargs.max_pathlength = gym.spec(args.task).timestep_limit\nif rank == 0:\n print(args)\n sys.stdout.flush()\n\n# initialize TRPO learner on all processes, distribute the starting weights\nlearner_env = gym.make(args.task)\n\nif args.discrete:\n learner = TRPOD(args, learner_env)\nelse:\n learner = TRPO(args, learner_env)\nif rank == 0:\n # statbar = tf.contrib.keras.utils.Progbar(args.n_iter )\n new_policy_weights = learner.get_starting_weights()\nelse:\n new_policy_weights = None\n\nstart_time = time.time()\nhistory = {}\nhistory[\"rollout_time\"] = []\nhistory[\"learn_time\"] = []\nhistory[\"bcast_time\"] = []\nhistory[\"gather_time\"] = []\nhistory[\"iteration_time\"] = []\nhistory[\"mean_reward\"] = []\nhistory[\"timesteps\"] = []\nhistory[\"maxkl\"] = []\nhistory[\"episodes\"] = []\n\n# start it off with a big negative number\nlast_reward = -1000000\nrecent_total_reward = 0\n\ntotalsteps = 0\n\nstarting_timesteps = args.timesteps_per_batch\nstarting_kl = args.max_kl\n\niteration = 0\nisDone = 0\n\nlogging.getLogger().setLevel(logging.WARNING)\n\nwhile isDone == 0:\n iteration += 1\n\n # synchronize model and update actor weights locally\n bcast_start = time.time()\n new_policy_weights = comm.bcast(new_policy_weights, root=0)\n learner.set_policy_weights(new_policy_weights)\n bcast_time = (time.time() - bcast_start)\n\n # start 
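The request string sent above, 'GET/\n', is not a valid HTTP request line, so most servers will answer with an error or nothing. A minimal well-formed request over the same socket setup; still a sketch, since robust code would loop on recv until the server closes:

from socket import socket, AF_INET, SOCK_STREAM

HOST, PORT = 'www.baidu.com', 80
with socket(AF_INET, SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    request = f'GET / HTTP/1.1\r\nHost: {HOST}\r\nConnection: close\r\n\r\n'
    s.send(request.encode('utf-8'))
    reply = s.recv(1024).decode('utf-8', errors='replace')
print(reply.splitlines()[0])  # e.g. HTTP/1.1 200 OK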
worker processes collect experience for a minimum args.timesteps_per_batch timesteps\n rollout_start = time.time()\n data_paths, data_rewards = learner.rollout(args.timesteps_per_batch / comm_size)\n rollout_time = (time.time() - rollout_start)\n\n # gathering of experience on root process\n gather_start = time.time()\n paths, episodes_rewards = gather_paths(data_paths, data_rewards, comm, rank, args.parallel_balancing)\n gather_time = (time.time() - gather_start)\n\n # only master process does learning on TF graph\n if rank == 0:\n learn_start = time.time()\n if args.decay_method != \"none\":\n learner.adjust_kl(args.max_kl)\n new_policy_weights, stats = learner.learn(paths, episodes_rewards)\n learn_time = (time.time() - learn_start)\n iteration_time = rollout_time + learn_time + gather_time + bcast_time\n\n print((\"\\n-------- Iteration %d ----------\" % iteration))\n print((\"Reward Statistics:\"))\n for k, v in stats.items():\n print(\"\\t{} = {:.3f}\".format(k,v))\n print((\"Timing Statistics:\"))\n print((\"\\tBroadcast time = %.3f s\" % bcast_time))\n print((\"\\tRollout time = %.3f s\" % rollout_time))\n print((\"\\tGather time = %.3f s\" % gather_time))\n print((\"\\tLearn time = %.3f s\" % learn_time))\n print((\"\\tTotal iteration time = %.3f s\" % (rollout_time + learn_time + gather_time + bcast_time)))\n\n history[\"rollout_time\"].append(rollout_time)\n history[\"learn_time\"].append(learn_time)\n history[\"bcast_time\"].append(bcast_time)\n history[\"gather_time\"].append(gather_time)\n history[\"iteration_time\"].append(rollout_time + learn_time + gather_time + bcast_time)\n history[\"mean_reward\"].append(stats[\"Avg_Reward\"])\n history[\"timesteps\"].append(args.timesteps_per_batch)\n history[\"maxkl\"].append(args.max_kl)\n history[\"episodes\"].append(stats['Episodes'])\n\n # compute 100 episode average reward\n ep = 0\n it = iteration-1\n rew = 0\n while ep < 100 and it >= 0:\n ep += history['episodes'][it]\n rew += history['mean_reward'][it]*history['episodes'][it]\n it -= 1\n if ep == 0:\n print(\"*** Problem: no complete episodes collected, increase timesteps_per_batch!\")\n isDone = 1\n else:\n print((\"Cumulative Reward Statistics:\"))\n print((\"\\tMaximum Avg_reward = %.3f from iteration %d\" % (np.max(history[\"mean_reward\"]), 1+np.argmax(history[\"mean_reward\"]))))\n print((\"\\tLast %d Episode Avg_reward = %.3f\" % (ep, (rew / ep))))\n\n print((\"Cumulative Mean Timing Statistics:\"))\n print((\"\\tBroadcast time = %.3f s\" % np.mean(history[\"bcast_time\"])))\n print((\"\\tRollout time = %.3f s\" % np.mean(history[\"rollout_time\"])))\n print((\"\\tGather time = %.3f s\" % np.mean(history[\"gather_time\"])))\n print((\"\\tLearn time = %.3f s\" % np.mean(history[\"learn_time\"])))\n print((\"\\tTotal iteration time = %.3f s\" % np.mean(history[\"iteration_time\"])))\n\n # hyperparameter adaptation (kvfrans)\n recent_total_reward += stats[\"Avg_Reward\"]\n if args.decay_method == \"adaptive\":\n if iteration % 10 == 0:\n if recent_total_reward < last_reward:\n print(\"Policy is not improving. Decrease KL and increase steps.\")\n if args.timesteps_per_batch < 20000:\n args.timesteps_per_batch += args.timestep_adapt\n if args.max_kl > 0.001:\n args.max_kl -= args.kl_adapt\n else:\n print(\"Policy is improving. 
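The distributed loop above in miniature, assuming the usual mpi4py semantics: rank 0 broadcasts the current policy weights, every rank produces its share of experience, and rank 0 gathers the results (run with, e.g., mpiexec -n 4):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

weights = {"step": 0} if rank == 0 else None
weights = comm.bcast(weights, root=0)         # every rank now shares the weights
local_paths = [f"rollout-from-rank-{rank}"]   # stands in for learner.rollout(...)
all_paths = comm.gather(local_paths, root=0)  # rank 0 receives a list of lists
if rank == 0:
    print([p for chunk in all_paths for p in chunk])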
Increase KL and decrease steps.\")\n if args.timesteps_per_batch > 1200:\n args.timesteps_per_batch -= args.timestep_adapt\n if args.max_kl < 0.01:\n args.max_kl += args.kl_adapt\n last_reward = recent_total_reward\n recent_total_reward = 0\n if args.decay_method == \"adaptive-margin\":\n if iteration % 10 == 0:\n scaled_last = last_reward + abs(last_reward * 0.05)\n print((\"Last reward: %f Scaled: %f Recent: %f\" % (last_reward, scaled_last, recent_total_reward)))\n if recent_total_reward < scaled_last:\n print(\"Policy is not improving. Decrease KL and increase steps.\")\n if args.timesteps_per_batch < 10000:\n args.timesteps_per_batch += args.timestep_adapt\n if args.max_kl > 0.001:\n args.max_kl -= args.kl_adapt\n else:\n print(\"Policy is improving. Increase KL and decrease steps.\")\n if args.timesteps_per_batch > 1200:\n args.timesteps_per_batch -= args.timestep_adapt\n if args.max_kl < 0.01:\n args.max_kl += args.kl_adapt\n last_reward = recent_total_reward\n recent_total_reward = 0\n # print((\"Current step number is \" + str(args.timesteps_per_batch) + \" and KL is \" + str(args.max_kl)))\n\n if iteration % 10 == 0:\n with open(\"Results/%s-%d-%f-%d\" % (args.task, starting_timesteps, starting_kl, comm_size), \"w\") as outfile:\n json.dump(history,outfile)\n learner.save_weights(\"{}-{}-{}-{}_{}.ckpt\".format(args.task, starting_timesteps, starting_kl, comm_size, iteration))\n\n # statbar.add(1, [('Iteration Time',iteration_time ), (\"Brodcast Time\", bcast_start),\n # (\"Rollout time\", rollout_time), (\"Gather Time\", gather_time),\n # (\"Learn time\", learn_time)] + list(stats.items()))\n\n totalsteps += stats[\"Timesteps\"]\n print((\"%d total steps have happened (Elapsed time = %.3f s)\" % (totalsteps,time.time() - start_time)))\n sys.stdout.flush()\n if iteration >= args.n_iter or totalsteps >= args.n_steps:\n isDone = 1\n else:\n new_policy_weights = None\n\n isDone = comm.bcast(isDone, root=0)\n\nif rank == 0:\n print((\"\\n----- Evaluation complete! 
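The adaptive hyperparameter rule above, factored into a small pure function for clarity; the bounds are the ones hard-coded in the "adaptive" branch (the "adaptive-margin" branch caps timesteps at 10000 instead):

def adapt(improving: bool, timesteps: int, max_kl: float,
          timestep_adapt: int, kl_adapt: float):
    if improving:
        if timesteps > 1200:
            timesteps -= timestep_adapt
        if max_kl < 0.01:
            max_kl += kl_adapt
    else:
        if timesteps < 20000:
            timesteps += timestep_adapt
        if max_kl > 0.001:
            max_kl -= kl_adapt
    return timesteps, max_kl

print(adapt(False, 10000, 0.005, 500, 0.001))  # (10500, 0.004)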
-----\"))\n","repo_name":"jacobperricone/238FinalProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10020015127","text":"\ndef common(a,b):\n c =[]\n print(\"List 1 \",a)\n print(\"List 2 \",b)\n for i in a:\n if i in b:\n if i in c:\n continue\n c.append(i)\n print(\"The common elements in two list \",c)\n\nlist1 = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nlist2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] \ncommon(list1,list2)\n","repo_name":"ThasniyaBeevi/python","sub_path":"lab 1/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29845194079","text":"#coding:utf8\n#\n# Run this code to get the final results reported in our ijcai paper.\nfrom io import open\nimport string\nimport re\nimport random\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nimport pprint,copy\nuse_cuda = torch.cuda.is_available()\nfrom gnn_with_args import *\nfrom event_chain import EventChain\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\ndef get_event_chains(event_list):\n return ['%s_%s' % (ev[0],ev[2]) for ev in event_list]\n\ndef get_word_embedding(word,word_id,id_vec,emb_size):\n if word in word_id:\n return id_vec[word_id[word]]\n else:\n return np.zeros(emb_size,dtype=np.float32)\n\ndef get_vec_rep(questions,word_id,id_vec,emb_size,predict=False):\n rep = np.zeros((5*len(questions),9,emb_size),dtype=np.float32)\n correct_answers=[]\n for i,q in enumerate(questions):\n context_chain=get_event_chains(q[0])\n choice_chain=get_event_chains(q[1])\n correct_answers.append(q[2])\n for j,context in enumerate(context_chain):\n context_vec=get_word_embedding(context,word_id,id_vec,emb_size)\n rep[5*i:5*(i+1),j,:]=context_vec\n for k,choice in enumerate(choice_chain):\n choice_vec=get_word_embedding(choice,word_id,id_vec,emb_size)\n rep[5*i+k,-1,:]=choice_vec\n if not predict:\n input_data=Variable(torch.from_numpy(rep))\n else:\n input_data=Variable(torch.from_numpy(rep),volatile=True)\n correct_answers = Variable(torch.from_numpy(np.array(correct_answers)))\n return input_data,correct_answers\n\n\nclass Word2VecAttention(nn.Module):\n def __init__(self):\n super(Word2VecAttention, self).__init__()\n self.linear_u_one=nn.Linear(HIDDEN_DIM,1,bias=False)\n self.linear_u_one2=nn.Linear(HIDDEN_DIM,1,bias=False)\n self.linear_u_two=nn.Linear(HIDDEN_DIM,1,bias=True)\n self.linear_u_two2=nn.Linear(HIDDEN_DIM,1,bias=False)\n self.sigmoid=nn.Sigmoid()\n self.tanh=nn.Tanh()\n\n def compute_scores(self,input_data): \n weight=Variable(torch.zeros((len(input_data),8,1)).fill_(1./8))\n weighted_input=torch.mul(input_data[:,0:8,:],weight) \n a=torch.sum(weighted_input,1)\n b=input_data[:,8,:]/8.0\n scores=-torch.norm(a-b, 2, 1).view(-1,5)\n return scores\n\n def forward(self, input_data):\n return self.compute_scores(input_data)\n\n def correct_answer_position(self,L,correct_answers):\n num_correct1 = torch.sum((L[:,0] == correct_answers).type(torch.FloatTensor))\n num_correct2 = torch.sum((L[:,1] == correct_answers).type(torch.FloatTensor))\n num_correct3 = torch.sum((L[:,2] == correct_answers).type(torch.FloatTensor))\n num_correct4 = torch.sum((L[:,3] == correct_answers).type(torch.FloatTensor))\n num_correct5 = 
torch.sum((L[:,4] == correct_answers).type(torch.FloatTensor))\n print (\"%d / %d 1st max correct: %f\" % (num_correct1.data[0], len(correct_answers),num_correct1 / len(correct_answers) * 100.))\n print (\"%d / %d 2ed max correct: %f\" % (num_correct2.data[0], len(correct_answers),num_correct2 / len(correct_answers) * 100.))\n print (\"%d / %d 3rd max correct: %f\" % (num_correct3.data[0], len(correct_answers),num_correct3 / len(correct_answers) * 100.))\n print (\"%d / %d 4th max correct: %f\" % (num_correct4.data[0], len(correct_answers),num_correct4 / len(correct_answers) * 100.))\n print (\"%d / %d 5th max correct: %f\" % (num_correct5.data[0], len(correct_answers),num_correct5 / len(correct_answers) * 100.))\n\n def predict(self, input_data, targets):\n scores=self.forward(input_data)\n sorted, L = torch.sort(scores,descending=True)\n self.correct_answer_position(L,targets)\n selections=L[:,0]\n pickle.dump((selections != targets),open('../data/test.answer','wb'))\n num_correct = torch.sum((selections == targets).type(torch.FloatTensor))\n accuracy = num_correct / len(targets) *100.0 \n return accuracy\n\n def weights_init(self,m):\n if isinstance(m, nn.Embedding):\n nn.init.xavier_uniform(m.weight)\n elif isinstance(m, nn.GRU):\n nn.init.xavier_uniform(m.weight_hh_l0)\n nn.init.xavier_uniform(m.weight_ih_l0)\n nn.init.constant(m.bias_hh_l0,0)\n nn.init.constant(m.bias_ih_l0,0)\n elif isinstance(m, nn.Linear):\n nn.init.xavier_uniform(m.weight)\n # nn.init.uniform(m.weight)\n # nn.init.normal(m.weight)\n\ndef train(questions):\n model=Word2VecAttention()\n input_data_test,correct_answers_test=get_vec_rep(questions,word_id,id_vec,HIDDEN_DIM,predict=True)\n accuracy=model.predict(input_data_test,correct_answers_test)\n print('Test Acc: ',accuracy.data[0])\n\n\ndef process_test(scores,test_index):\n for index in test_index:\n scores[index]=np.min(scores)\n return scores\n\ndef get_acc(scores,correct_answers,name='scores',save=False):\n selections = np.argmax(scores, axis=1)\n num_correct = int(np.sum(selections == correct_answers))\n if save:\n pickle.dump((selections == correct_answers),open('./scores/'+name,'wb'),2)\n samples = len(correct_answers)\n accuracy = float(num_correct) / samples * 100.\n # print (\"%d / %d correct: %f\" % (num_correct, samples, accuracy))\n return accuracy\n\nif __name__ == '__main__':\n test_index=pickle.load(open('../data/test_index.pickle','rb'))\n\n HIDDEN_DIM = 128\n L2_penalty=0.00001\n MARGIN=0.015\n LR=0.0001\n T=1\n BATCH_SIZE=50\n EPOCHES=520\n PATIENTS=300\n test_data=Data_data(pickle.load(open('../data/corpus_index_test_with_args_all_chain.data','rb')))\n word_id,id_vec,word_vec=get_hash_for_word('/users3/zyli/github/OpenNE/output/verb_net/1_property/deepwalk_128_unweighted_with_args.txt',verb_net3_mapping_with_args)\n \n HIDDEN_DIM = 128*4\n L2_penalty=0.00001\n MARGIN=0.015\n LR=0.0001\n T=1\n BATCH_SIZE=1000\n EPOCHES=520\n PATIENTS=300\n test_data=Data_data(pickle.load(open('../data/corpus_index_test_with_args_all_chain.data','rb')))\n model=trans_to_cuda(EventGraph_With_Args(len(word_vec),HIDDEN_DIM,word_vec,L2_penalty,MARGIN,LR,T))\n model.load_state_dict(torch.load('../data/gnn_euclid_acc_52.380001068115234_.model'))\n\n data=test_data.all_data()\n correct_answers=data[2].cpu().data.numpy()\n scores1=model(data[1],data[0]).cpu().data.numpy() \n scores1=process_test(scores1,test_index)\n print (get_acc(scores1,correct_answers,'scores1'))\n\n HIDDEN_DIM = 128*4\n L2_penalty=0.00001\n MARGIN=0.015\n LR=0.0001\n T=1\n BATCH_SIZE=1000\n 
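compute_scores above scores each candidate event by the negative Euclidean distance between a uniformly weighted context average and the scaled candidate embedding, then reshapes to five choices per question. The same logic in isolation with dummy tensors:

import torch

batch, dim = 10, 128             # 10 rows = 2 questions x 5 choices
x = torch.randn(batch, 9, dim)   # 8 context events + 1 candidate per row
context_mean = x[:, 0:8, :].mean(dim=1)  # equivalent to weight 1/8 then sum
candidate = x[:, 8, :] / 8.0
scores = -torch.norm(context_mean - candidate, 2, 1).view(-1, 5)
print(scores.shape)  # torch.Size([2, 5])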
EPOCHES=520\n PATIENTS=300\n test_data=Data_data(pickle.load(open('../data/corpus_index_test_with_args_all.data','rb')))\n data=test_data.all_data()\n model=trans_to_cuda(EventChain(embedding_dim=HIDDEN_DIM,hidden_dim=HIDDEN_DIM,vocab_size=len(word_vec),word_vec=word_vec,num_layers=1,bidirectional=False))\n model.load_state_dict(torch.load('../data/event_chain_acc_50.98999786376953_.model'))\n accuracy,accuracy1,accuracy2,accuracy3,accuracy4,scores2=model.predict_with_minibatch(data[1],data[2])\n scores2=scores2.cpu().data.numpy() \n scores2=process_test(scores2,test_index)\n print (get_acc(scores2,correct_answers,'scores2'))\n\n scores3=pickle.load(open('../data/event_comp_test.scores','rb'),encoding='bytes')\n scores3=process_test(scores3,test_index)\n print (get_acc(scores3,correct_answers,'scores3'))\n\n\n scores1=preprocessing.scale(scores1)\n scores2=preprocessing.scale(scores2)\n scores3=preprocessing.scale(scores3)\n\n best_acc=0. \n best_i_j_k=(0,0)\n for i in np.arange(-3,3,0.1):\n for j in np.arange(-3,3,0.1):\n acc=get_acc(scores3*i+scores1*j,correct_answers)\n if best_acc creating model '{}'\".format(args.arch))\r\n\r\n if args.low_rank:\r\n from models.general_framework import resnet_frame\r\n from models.general_framework.convs.low_rank_conv2d import low_rank_conv2d\r\n conv_layer = partial(low_rank_conv2d, lora_alpha=args.low_rank_alpha, r_ratio=args.low_rank_r_ratio,\r\n fix_sparse=args.low_rank_fix_sparse, fix_low_rank=args.low_rank_fix_low_rank,\r\n tune_U=args.low_rank_tune_U, tune_V=args.low_rank_tune_V,\r\n tune_V_S=args.low_rank_tune_V_S, tune_U_S=args.low_rank_tune_U_S,\r\n tune_all=args.low_rank_tune_all,\r\n keep_noise=args.low_rank_keep_noise,\r\n reshape_consecutive=args.low_rank_reshape_consecutive,\r\n decompose_no_s=args.low_rank_decompose_no_s, lora_mode=args.low_rank_lora_mode)\r\n model = resnet_frame.__dict__[args.arch](conv_layer=conv_layer)\r\n else:\r\n from models.origin import resnet\r\n model = resnet.__dict__[args.arch]()\r\n\r\n in_dim = model.fc.in_features # 2048\r\n model.fc = proj_head_simclr(in_dim, output_cnt=args.mlpout)\r\n\r\n if args.dataset == \"cifar10\" or args.dataset == \"cifar100\":\r\n log.info(\"remove maxpooling and enlarge conv layer for small resolution\")\r\n model.conv1 = nn.Conv2d(3, model.conv1.out_channels, kernel_size=3, stride=1, padding=1, bias=False)\r\n model.maxpool = nn.Identity()\r\n\r\n process_group = torch.distributed.new_group(list(range(world_size)))\r\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group)\r\n\r\n # For multiprocessing distributed, DistributedDataParallel constructor\r\n # should always set the single device scope, otherwise,\r\n # DistributedDataParallel will use all available devices.\r\n torch.cuda.set_device(args.local_rank)\r\n model.cuda(args.local_rank)\r\n # When using a single GPU per process and per\r\n # DistributedDataParallel, we need to divide the batch size\r\n # ourselves based on the total number of GPUs we have\r\n\r\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n find_unused_parameters=False)\r\n\r\n # define loss function (criterion) and optimizer\r\n criterion = nn.CrossEntropyLoss().cuda(args.local_rank)\r\n\r\n if args.checkpoint_pretrain != '':\r\n checkpoint = torch.load(args.checkpoint_pretrain, map_location=\"cpu\")\r\n if 'state_dict' in checkpoint:\r\n state_dict = checkpoint['state_dict']\r\n elif 'P_state' in checkpoint:\r\n state_dict = checkpoint['P_state']\r\n 
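The ensemble step above standardizes each model's score matrix with preprocessing.scale and grid-searches pairwise mixing weights over np.arange(-3, 3, 0.1). A self-contained sketch of that search on synthetic scores (the random data is illustrative only; get_acc mirrors the helper above):

import numpy as np
from sklearn import preprocessing

def get_acc(scores, answers):
    return float(np.mean(np.argmax(scores, axis=1) == answers)) * 100.0

rng = np.random.default_rng(0)
answers = rng.integers(0, 5, size=200)
s1 = preprocessing.scale(rng.normal(size=(200, 5)))
s2 = preprocessing.scale(rng.normal(size=(200, 5)))

best_acc, best_ij = -1.0, (0.0, 0.0)
for i in np.arange(-3, 3, 0.1):
    for j in np.arange(-3, 3, 0.1):
        acc = get_acc(s1 * i + s2 * j, answers)
        if acc > best_acc:
            best_acc, best_ij = acc, (round(float(i), 1), round(float(j), 1))
print(best_acc, best_ij)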
else:\r\n state_dict = checkpoint\r\n\r\n state_dict = remove_state_dict_module(state_dict)\r\n state_dict = check_and_cvt_pretrain_type(state_dict, model.module.state_dict(), log)\r\n\r\n model_dict = model.module.state_dict()\r\n ori_model_keys_num = model_dict.keys().__len__()\r\n\r\n if args.dataset == \"cifar10\" or args.dataset == \"cifar100\":\r\n shape = state_dict['conv1.weight'].shape\r\n if shape[-1] != 3:\r\n out_shape = [3, 3]\r\n state_dict['conv1.weight'] = F.interpolate(state_dict['conv1.weight'], out_shape)\r\n\r\n overlap_state_dict = {k: v for k, v in state_dict.items() if k in model_dict.keys()}\r\n overlap_keys_num = overlap_state_dict.keys().__len__()\r\n\r\n model_dict.update(overlap_state_dict)\r\n\r\n model.module.load_state_dict(model_dict)\r\n\r\n log.info(\"Load SimCLR Pre-trained Model! [{}/{}]\"\r\n .format(overlap_keys_num, ori_model_keys_num))\r\n\r\n log.info('read pretrain model {}'.format(args.checkpoint_pretrain))\r\n\r\n if args.low_rank:\r\n from low_rank import prepare_low_rank\r\n params = prepare_low_rank(model, args.low_rank_compress_step, args.low_rank_lambda_s,\r\n args.low_rank_r_ratio, args.checkpoint_pretrain, args.low_rank_keep_noise, log,\r\n args.dataset, args.lr * args.low_rank_UV_lr_ratio, args.low_rank_reshape_consecutive,\r\n args.low_rank_decompose_no_s, args.low_rank_lora_mode, args.low_rank_sparse_ratio)\r\n if args.low_rank_only_decompose:\r\n return\r\n else:\r\n params = model.parameters()\r\n\r\n optimizer = setup_optimizer(args.optimizer, params, args.lr,\r\n momentum=args.momentum,\r\n weight_decay=args.weight_decay, log=log)\r\n\r\n if args.resume:\r\n if os.path.isfile(os.path.join(save_dir, 'checkpoint.pth.tar')):\r\n log.info(\"=> loading checkpoint '{}'\".format(os.path.join(save_dir, 'checkpoint.pth.tar')))\r\n if args.gpu is None:\r\n checkpoint = torch.load(os.path.join(save_dir, 'checkpoint.pth.tar'))\r\n else:\r\n # Map model to be loaded to specified single gpu.\r\n loc = 'cuda:{}'.format(args.local_rank)\r\n checkpoint = torch.load(os.path.join(save_dir, 'checkpoint.pth.tar'), map_location=loc)\r\n args.start_epoch = checkpoint['epoch']\r\n model.load_state_dict(checkpoint['state_dict'])\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n\r\n log.info(\"=> loaded checkpoint '{}' (epoch {})\"\r\n .format(args.resume, checkpoint['epoch']))\r\n else:\r\n log.info(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n log.info(\"no available checkpoint, start from scratch!!!!!!!!!!!\")\r\n log.info(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n\r\n cudnn.benchmark = True\r\n\r\n for epoch in range(args.start_epoch, args.epochs):\r\n train_sampler.set_epoch(epoch)\r\n adjust_learning_rate(optimizer, epoch, args, log)\r\n\r\n train_simclr(train_loader, model, optimizer, epoch, log, args, local_rank, world_size)\r\n\r\n if global_rank == 0:\r\n save_dict = {\r\n 'epoch': epoch + 1,\r\n 'arch': args.arch,\r\n 'state_dict': model.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n }\r\n\r\n save_checkpoint(save_dict, is_best=False,\r\n filename=os.path.join(save_dir, 'checkpoint.pth.tar'.format(epoch + 1)))\r\n\r\n if (epoch + 1) % args.save_freq == 0 or (epoch + 1) == args.epochs:\r\n save_checkpoint(save_dict, is_best=False,\r\n filename=os.path.join(save_dir, 'checkpoint_{}.pth.tar'.format(epoch + 1)))\r\n\r\n # remove checkpoint for resuming after training finished\r\n if (epoch + 1) == args.epochs and args.save_freq > 800:\r\n os.system(\"rm {}\".format(os.path.join(save_dir, 
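The CIFAR branch above shrinks the pretrained 7x7 ImageNet stem to the 3x3 conv used for 32x32 inputs by interpolating the kernel weights. The operation in isolation:

import torch
import torch.nn.functional as F

w = torch.randn(64, 3, 7, 7)        # conv1.weight from the checkpoint
w_small = F.interpolate(w, [3, 3])  # resample each 7x7 filter down to 3x3
print(w_small.shape)                # torch.Size([64, 3, 3, 3])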
'checkpoint.pth.tar'.format(epoch + 1))))\r\n\r\n\r\ndef init_dataset(args, log):\r\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n\r\n if args.dataset == \"cifar10\" or args.dataset == \"cifar100\":\r\n image_size = 32\r\n else:\r\n image_size = 224\r\n\r\n if args.aug_plus:\r\n # MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709\r\n s = args.color_jitter_strength\r\n log.info(\"employed augmentation strength is {}\".format(s))\r\n augmentation = transforms.Compose([\r\n transforms.RandomResizedCrop(image_size, scale=(0.2, 1.)),\r\n transforms.RandomApply([\r\n transforms.ColorJitter(0.4 * s, 0.4 * s, 0.4 * s, 0.1 * s) # not strengthened\r\n ], p=0.8),\r\n transforms.RandomGrayscale(p=0.2),\r\n transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n normalize\r\n ])\r\n else:\r\n # MoCo v1's aug: the same as InstDisc https://arxiv.org/abs/1805.01978\r\n assert False\r\n\r\n if args.dataset == \"imagenet\":\r\n # Data loading code\r\n root, txt_train, _, _, pathReplaceDict = get_imagenet_root_split(args.data, args.customSplit)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False, pathReplace=pathReplaceDict)\r\n elif args.dataset == \"cifar10\" or args.dataset == \"cifar10_large\":\r\n # the data distribution\r\n root, train_idx, _ = get_cifar10_data_split(args.data, args.customSplit, ssl=True)\r\n\r\n train_idx = list(np.load(train_idx))\r\n train_dataset = subsetCIFAR10(root=root, sublist=train_idx, download=True,\r\n transform=TwoCropsTransform(augmentation))\r\n elif args.dataset == \"cifar100\" or args.dataset == \"cifar100_large\":\r\n # the data distribution\r\n root, train_idx, _ = get_cifar100_data_split(args.data, args.customSplit, ssl=True)\r\n\r\n train_idx = list(np.load(train_idx))\r\n train_dataset = subsetCIFAR100(root=root, sublist=train_idx, download=True,\r\n transform=TwoCropsTransform(augmentation))\r\n elif args.dataset == \"food-101\":\r\n # Data loading code\r\n root, txt_train, _, _ = get_food101_data_split(args.data, args.customSplit, ssl=True)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False)\r\n elif args.dataset == \"EuroSAT\":\r\n # Data loading code\r\n root, txt_train, _, _ = get_EuroSAT_data_split(args.data, args.customSplit, ssl=True)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False)\r\n elif args.dataset == \"iNaturalist_sub1000\":\r\n # Data loading code\r\n root, txt_train, _, _ = get_iNaturalist_sub1000_data_split(args.data, args.customSplit)\r\n train_dataset = Custom_Dataset(\r\n root,\r\n txt_train,\r\n TwoCropsTransform(augmentation),\r\n pre_load=False)\r\n else:\r\n raise ValueError(\"No such dataset: {}\".format(args.dataset))\r\n\r\n return train_dataset\r\n\r\n\r\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\r\n torch.save(state, filename)\r\n if is_best:\r\n shutil.copyfile(filename, 'model_best.pth.tar')\r\n\r\n\r\ndef adjust_learning_rate(optimizer, epoch, args, log):\r\n \"\"\"Decay the learning rate based on schedule\"\"\"\r\n lr = args.lr\r\n if args.cos: # cosine lr schedule\r\n lr = cosine_annealing(epoch, args.epochs, lr, 1e-6, warmup_steps=10)\r\n else: # stepwise lr schedule\r\n for milestone in args.schedule:\r\n lr *= 0.1 if epoch >= milestone else 1.\r\n for param_group in 
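TwoCropsTransform is imported rather than defined here; the conventional MoCo/SimCLR helper, assumed to match, simply applies the same augmentation pipeline twice so every image yields two correlated views:

class TwoCropsTransform:
    """Take two random augmented crops of one image (assumed definition)."""

    def __init__(self, base_transform):
        self.base_transform = base_transform

    def __call__(self, x):
        return [self.base_transform(x), self.base_transform(x)]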
optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n log.info(\"current lr is {}\".format(lr))\r\n\r\n\r\ndef cosine_annealing(step, total_steps, lr_max, lr_min, warmup_steps=0):\r\n assert warmup_steps >= 0\r\n\r\n if step < warmup_steps:\r\n lr = lr_max * step / warmup_steps\r\n else:\r\n lr = lr_min + (lr_max - lr_min) * 0.5 * (\r\n 1 + np.cos((step - warmup_steps) / (total_steps - warmup_steps) * np.pi))\r\n\r\n return lr\r\n\r\n\r\ndef train_simclr(train_loader, model, optimizer, epoch, log, args, local_rank, world_size):\r\n losses = AverageMeter()\r\n losses.reset()\r\n data_time_meter = AverageMeter()\r\n train_time_meter = AverageMeter()\r\n\r\n end = time.time()\r\n\r\n for i, (inputs, _) in enumerate(train_loader):\r\n\r\n data_time = time.time() - end\r\n data_time_meter.update(data_time)\r\n\r\n inputs = torch.stack(inputs, dim=1)\r\n d = inputs.size()\r\n # print(\"inputs origin shape is {}\".format(d))\r\n inputs = inputs.view(d[0] * 2, d[2], d[3], d[4]).cuda(non_blocking=True)\r\n\r\n model.train()\r\n\r\n features = model(inputs)\r\n\r\n features = gather_features(features, local_rank, world_size)\r\n\r\n loss = nt_xent(features, t=args.simclr_t)\r\n\r\n # normalize the loss\r\n loss = loss * world_size\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n\r\n optimizer.step()\r\n\r\n losses.update(float(loss.detach().cpu() / world_size), inputs.shape[0])\r\n\r\n train_time = time.time() - end\r\n end = time.time()\r\n train_time_meter.update(train_time)\r\n\r\n # torch.cuda.empty_cache()\r\n if i % args.print_freq == 0 or i == len(train_loader) - 1:\r\n log.info('Epoch: [{0}][{1}/{2}]\\t'\r\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\r\n 'data_time: {data_time.val:.2f} ({data_time.avg:.2f})\\t'\r\n 'train_time: {train_time.val:.2f} ({train_time.avg:.2f})\\t'.format(\r\n epoch, i, len(train_loader), loss=losses,\r\n data_time=data_time_meter, train_time=train_time_meter))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"VITA-Group/DnA","sub_path":"train_simclr.py","file_name":"train_simclr.py","file_ext":"py","file_size_in_byte":23740,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"73"} +{"seq_id":"10815832880","text":"import requests\nimport json\nimport pandas as pd\nfrom datetime import datetime\n\nastroport_factory_address = \"terra1fnywlw4edny3vw44x04xd67uzkdqluymgreu7g\"\nterraswap_factory_address = \"terra1ulgw0td86nvs4wtpsc80thv6xelk76ut7a7apj\"\n\nassets_file = open(\"assets.json\", \"r\")\nassets = json.load(assets_file)\n\n\ndef get_pair_liquidity(factory_address, asset_address):\n pair_liquidity = 0\n try:\n pair = requests.get(\n f\"https://lcd.terra.dev/wasm/contracts/{factory_address}/store?query_msg={{%22pair%22:{{%22asset_infos%22:[{{%22token%22:{{%22contract_addr%22:%22{asset_address}%22}}}},{{%22native_token%22:{{%22denom%22:%22uusd%22}}}}]}}}}\"\n ).json()\n pair_address = pair[\"result\"][\"contract_addr\"]\n pool = requests.get(\n f\"https://lcd.terra.dev/wasm/contracts/{pair_address}/store?query_msg=%7B%22pool%22:%7B%7D%7D\"\n ).json()\n pool_assets = pool[\"result\"][\"assets\"]\n native_amount = 0\n token_amount = 0\n for pool_asset in pool_assets:\n if \"token\" in pool_asset[\"info\"]:\n token_amount = int(pool_asset[\"amount\"])\n elif \"native_token\" in pool_asset[\"info\"]:\n assert pool_asset[\"info\"][\"native_token\"][\"denom\"] == \"uusd\"\n native_amount = int(pool_asset[\"amount\"])\n pair_liquidity = (native_amount * 2) / 1e6\n except:\n pass\n return 
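cosine_annealing above, evaluated at a few steps (lr_max=0.1, lr_min=1e-6, 100 total steps, 10 warmup steps): a linear ramp during warmup, then half a cosine down to lr_min. The function is reproduced verbatim for the check:

import numpy as np

def cosine_annealing(step, total_steps, lr_max, lr_min, warmup_steps=0):
    if step < warmup_steps:
        return lr_max * step / warmup_steps
    return lr_min + (lr_max - lr_min) * 0.5 * (
        1 + np.cos((step - warmup_steps) / (total_steps - warmup_steps) * np.pi))

for step in (5, 10, 55, 100):
    print(step, round(cosine_annealing(step, 100, 0.1, 1e-6, warmup_steps=10), 5))
# 5 -> 0.05 (mid-warmup), 10 -> 0.1 (peak), 55 -> 0.05, 100 -> 0.0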
pair_liquidity\n\n\ndef main():\n asset_symbols = assets.keys()\n asset_addresses = assets.values()\n asset_terraswap_liquidity = []\n asset_astroport_liquidity = []\n for (asset_symbol, asset_address) in assets.items():\n pair_liquidity = get_pair_liquidity(terraswap_factory_address, asset_address)\n asset_terraswap_liquidity.append(pair_liquidity)\n pair_liquidity = get_pair_liquidity(astroport_factory_address, asset_address)\n asset_astroport_liquidity.append(pair_liquidity)\n\n asset_data_sources = []\n df = pd.DataFrame(\n {\n \"asset_symbols\": asset_symbols,\n \"asset_address\": asset_addresses,\n \"asset_terraswap_liquidity\": asset_terraswap_liquidity,\n \"asset_astroport_liquidity\": asset_astroport_liquidity,\n }\n )\n current_date = datetime.now()\n formatted_date = current_date.strftime(\"%d_%m_%y\")\n df.to_csv(f\"asset_liquidity_info/asset_liquidity_info_{formatted_date}.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tansawit/terra-assets-liquidity-data","sub_path":"asset_liquidity_data.py","file_name":"asset_liquidity_data.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71186369516","text":"from math import ceil\n\nshells = {}\nwhile True:\n arr = [item for item in input().split()]\n if arr[0] == 'Aggregate':\n break\n if arr[0] in shells:\n tempArr = shells[arr[0]]\n if int(arr[1]) not in tempArr:\n tempArr.append(int(arr[1]))\n else:\n tempArr = []\n tempArr.append(int(arr[1]))\n shells[arr[0]] = tempArr\nfor item in shells:\n print(f'{item} -> ', end=\"\")\n print(*shells[item], sep=\", \", end=\"\")\n sumShell = sum(shells[item])\n countShell = len(shells[item])\n average = ceil(sumShell - sumShell / countShell)\n print(f' ({average})')\n","repo_name":"maddrum/Python_Fundamentials_Course","sub_path":"Lecture 4_Dictionaries/10. Shellbound.py","file_name":"10. Shellbound.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"5196689267","text":"import random\n\n# answer = random.randint(0, 100)\n# print(answer)\n# cnt = 0\n# while True : \n# guess = input(\"Please guess the number (between 0 - 100) :\")\n# cnt += 1\n# try :\n# guess = int(guess)\n# except Exception:\n# print(\"Invalid number, please guess again.\")\n# continue\n\n# if guess < answer :\n# print(\"Your guess was under\")\n# elif guess > answer :\n# print(\"Your guess was over\")\n# else :\n# break\n\n# print(\"Congratulations!\")\n\nclass GuessNumber :\n def __init__(self, number, min=0, max=100) :\n self.number = number\n self.min = min\n self.max = max\n self.guesses = 0\n \n def get_guess(self) :\n guess = input(f'Please guess a number ({self.min} - {self.max}):')\n\n if self.valid_number(guess) :\n return int(guess)\n else :\n print(\"Please enter a valid number.\")\n return self.get_guess()\n\n def valid_number(self, str_number) :\n try :\n number = int(str_number)\n except Exception :\n return False\n \n return self.min <= number <= self.max\n\n def play(self) :\n while True :\n self.guesses += 1 \n\n guess = self.get_guess()\n\n if guess < self.number :\n print(\"Your guess was under.\")\n elif guess > self.number :\n print(\"Your guess was over\")\n else :\n break\n print(f\"Congratulations! 
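The liquidity figure computed above doubles the UST side of the pool and converts from micro-units (uusd), i.e. liquidity_usd = native_amount * 2 / 1e6, which assumes a balanced pair. A quick worked example:

native_amount = 1_250_000_000  # 1,250 UST expressed in uusd (micro) units
pair_liquidity = (native_amount * 2) / 1e6
print(pair_liquidity)  # 2500.0 -- total pool value, assuming a balanced pair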
You guessed it in {self.guesses} guesses\")\n print(f\"Given number was {self.number}\")\n\nanswer = random.randint(0, 100)\ngame = GuessNumber(answer, 0, 100)\ngame.play()","repo_name":"myspark02/python","sub_path":".vscode/SWDesign/GuessNumber.py","file_name":"GuessNumber.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39976393201","text":"import os\n\n\ndef main():\n names = []\n try:\n Dir = input(\"enter the directory: \")\n for i in os.listdir(Dir):\n names += [i]\n except:\n print(f\"there is no such directory as {Dir} \")\n main()\n names = list(filter(lambda i: \"- Copy\" in i, names))\n if not names:\n print(\"there is no copies to delete XD\")\n main()\n areYouSure = input(\"Are you sure you want to delete y/n: \").lower()\n if areYouSure == 'y':\n for i in os.listdir(Dir):\n if i in names:\n os.remove(f\"{Dir}\\\\{i}\")\n elif areYouSure == 'n':\n print(\"k have a nice day <3\")\n exit()\n main()\n\n\nmain()\n","repo_name":"assersamir03/copyCleaner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20748442676","text":"import math\nimport sys\nimport time\n\nimport torch\n\nfrom ..utils import utils\nfrom ..utils.metric_logger import MetricLogger, SmoothedValue\n\n\ndef train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, writer):\n model.train()\n metric_logger = MetricLogger(delimiter=\" \", writer=writer)\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n\n epoch_results = {}\n\n lr_scheduler = None\n if epoch == 0:\n warmup_factor = 1. 
/ 1000\n warmup_iters = min(1000, len(data_loader) - 1)\n\n lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)\n\n for images, targets in metric_logger.log_every(data_loader, print_freq, epoch, header):\n images = list(image.to(device) for image in images)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n loss_dict = model(images, targets)\n\n losses = sum(loss for loss in loss_dict.values())\n\n # reduce losses over all GPUs for logging purposes\n loss_dict_reduced = utils.reduce_dict(loss_dict)\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n\n loss_value = losses_reduced.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training:\\n{}\".format(loss_value, loss_dict))\n sys.exit(1)\n\n optimizer.zero_grad()\n losses.backward()\n optimizer.step()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n metric_logger.update(loss=losses_reduced, **loss_dict_reduced)\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n\n\n@torch.no_grad()\ndef evaluate(model, data_loader, device, writer, epoch, threshold=0.5):\n n_threads = torch.get_num_threads()\n # FIXME remove this and make paste_masks_in_image run on the GPU\n torch.set_num_threads(1)\n cpu_device = torch.device(\"cpu\")\n model.eval()\n metric_logger = MetricLogger(delimiter=\" \", writer=writer)\n header = 'Test:'\n\n total, correct = 0, 0\n\n for image, targets in metric_logger.log_every(data_loader, 50, epoch=epoch, header=header):\n image = list(img.to(device) for img in image)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n targets_labels = torch.as_tensor([int(1 in target[\"labels\"]) for target in targets], dtype=torch.int8)\n\n torch.cuda.synchronize()\n model_time = time.time()\n outputs = model(image)\n\n outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]\n # Filter score only superior as threshold=0.5\n outputs_filtred = []\n for output in outputs:\n output[\"labels\"] = output[\"labels\"][output[\"scores\"] >= threshold]\n # output[\"scores\"] = output[\"scores\"][output[\"scores\"] >= threshold]\n if 1 in output[\"labels\"]:\n outputs_filtred.append(1)\n\n outputs_filtred = torch.as_tensor(outputs_filtred, dtype=torch.int8)\n model_time = time.time() - model_time\n\n total += len(image)\n correct += (targets_labels == outputs_filtred).sum().item()\n\n res = {target[\"image_id\"].item(): output for target, output in zip(targets, outputs)}\n metric_logger.update(model_time=model_time)\n\n print(\"Test accuracy :\", correct / total)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n\n torch.set_num_threads(n_threads)\n writer.add_scalar(\"Accuracy/eval\", correct / total, epoch)\n\n\nimport numpy as np\n\n\ndef get_model_scores(pred_boxes):\n \"\"\"Creates a dictionary of from model_scores to image ids.\n Args:\n pred_boxes (dict): dict of dicts of 'boxes' and 'scores'\n Returns:\n dict: keys are model_scores and values are image ids (usually filenames)\n \"\"\"\n model_score = {}\n for img_id, val in pred_boxes.items():\n for score in val['scores']:\n if score not in model_score.keys():\n model_score[score] = [img_id]\n else:\n model_score[score].append(img_id)\n return model_score\n\n\ndef iou_pytorch(outputs: torch.Tensor, labels: torch.Tensor):\n SMOOTH = 1e-6\n # You can comment out this line if you are passing tensors of equal shape\n # But if you are passing output from UNet or 
something it will most probably\n    # be with the BATCH x 1 x H x W shape\n    outputs = outputs.squeeze(1)  # BATCH x 1 x H x W => BATCH x H x W\n    intersection = (outputs & labels).float().sum((1, 2))  # Will be zero if Truth=0 or Prediction=0\n    union = (outputs | labels).float().sum((1, 2))         # Will be zero if both are 0\n    iou = (intersection + SMOOTH) / (union + SMOOTH)  # We smooth our division to avoid 0/0\n    thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10  # This is equal to comparing with thresholds\n    return thresholded  # Or thresholded.mean() if you are interested in average across the batch\n\n\ndef calc_precision_recall(image_results):\n    \"\"\"Calculates precision and recall from the set of images\n    Args:\n        img_results (dict): dictionary formatted like:\n            {\n                'img_id1': {'true_positive': int, 'false_positive': int, 'false_negative': int},\n                'img_id2': ...\n                ...\n            }\n    Returns:\n        tuple: of floats of (precision, recall)\n    \"\"\"\n    tp, fp, fn = 0, 0, 0\n    # accumulate the per-image counts first, then compute the ratios once\n    for img_id, res in image_results.items():\n        tp += res['true_positive']\n        fp += res['false_positive']\n        fn += res['false_negative']\n    try:\n        precision = tp / (tp + fp)\n    except ZeroDivisionError:\n        precision = 0.0\n    try:\n        recall = tp / (tp + fn)\n    except ZeroDivisionError:\n        recall = 0.0\n    return precision, recall\n\n\ndef get_single_image_results(gt_boxes, pred_boxes, iou_thr):\n    \"\"\"Calculates number of true_pos, false_pos, false_neg from single batch of boxes.\n    Args:\n        gt_boxes (list of list of floats): list of locations of ground truth\n            objects as [xmin, ymin, xmax, ymax]\n        pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)\n            and 'scores'\n        iou_thr (float): value of IoU to consider as threshold for a\n            true prediction.\n    Returns:\n        dict: true positives (int), false positives (int), false negatives (int)\n    \"\"\"\n    all_pred_indices = range(len(pred_boxes))\n    all_gt_indices = range(len(gt_boxes))\n    if len(all_pred_indices) == 0:\n        # no predictions: every ground-truth box is a miss\n        return {'true_positive': 0, 'false_positive': 0, 'false_negative': len(gt_boxes)}\n    if len(all_gt_indices) == 0:\n        # no ground truth: every prediction is a false alarm\n        return {'true_positive': 0, 'false_positive': len(pred_boxes), 'false_negative': 0}\n\n    gt_idx_thr = []\n    pred_idx_thr = []\n    ious = []\n    for ipb, pred_box in enumerate(pred_boxes):\n        for igb, gt_box in enumerate(gt_boxes):\n            iou = calc_iou(gt_box, pred_box)\n\n            if iou > iou_thr:\n                gt_idx_thr.append(igb)\n                pred_idx_thr.append(ipb)\n                ious.append(iou)\n    # argsort is ascending, so reverse it to greedily match highest-IoU pairs first\n    iou_sort = np.argsort(ious)[::-1]\n    if len(iou_sort) == 0:\n        tp = 0\n        fp = len(pred_boxes)\n        fn = len(gt_boxes)\n        return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}\n    else:\n        gt_match_idx = []\n        pred_match_idx = []\n        for idx in iou_sort:\n            gt_idx = gt_idx_thr[idx]\n            pr_idx = pred_idx_thr[idx]\n            # If the boxes are unmatched, add them to matches\n            if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):\n                gt_match_idx.append(gt_idx)\n                pred_match_idx.append(pr_idx)\n        tp = len(gt_match_idx)\n        fp = len(pred_boxes) - len(pred_match_idx)\n        fn = len(gt_boxes) - len(gt_match_idx)\n    return {'true_positive': tp, 'false_positive': fp, 'false_negative': fn}\n\n\n
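# --- helper sketch: get_single_image_results() above calls calc_iou(), but no\n# such function exists in this file. Below is a standard intersection-over-union\n# for [xmin, ymin, xmax, ymax] boxes; the box format is an assumption taken from\n# the docstrings in this module, not from the original repo.\ndef calc_iou(gt_box, pred_box):\n    x_left = max(gt_box[0], pred_box[0])\n    y_top = max(gt_box[1], pred_box[1])\n    x_right = min(gt_box[2], pred_box[2])\n    y_bottom = min(gt_box[3], pred_box[3])\n    if x_right <= x_left or y_bottom <= y_top:\n        return 0.0  # the boxes do not overlap\n    intersection = (x_right - x_left) * (y_bottom - y_top)\n    gt_area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])\n    pred_area = (pred_box[2] - pred_box[0]) * (pred_box[3] - pred_box[1])\n    return intersection / float(gt_area + pred_area - intersection)\n\n\ndef get_avg_precision_at_iou(gt_boxes, pred_bb, iou_thr=0.5):\n    from copy import deepcopy  # deepcopy is used below; a module-level import works equally well\n    model_scores = get_model_scores(pred_bb)\n    sorted_model_scores = sorted(model_scores.keys())\n    # Sort the predicted boxes in ascending score order (lowest scoring boxes first):\n    for img_id in pred_bb.keys():\n        arg_sort = np.argsort(pred_bb[img_id]['scores'])\n        pred_bb[img_id]['scores'] = np.array(pred_bb[img_id]['scores'])[arg_sort].tolist()\n        pred_bb[img_id]['boxes'] = 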
np.array(pred_bb[img_id]['boxes'])[arg_sort].tolist()\n\n pred_boxes_pruned = deepcopy(pred_bb)\n\n precisions = []\n recalls = []\n model_thrs = []\n img_results = {}\n\n # Loop over model score thresholds and calculate precision, recall\n for ithr, model_score_thr in enumerate(sorted_model_scores[:-1]):\n # On first iteration, define img_results for the first time:\n print(\"Mode score : \", model_score_thr)\n img_ids = gt_boxes.keys() if ithr == 0 else model_scores[model_score_thr]\n for img_id in img_ids:\n\n gt_boxes_img = gt_boxes[img_id]\n box_scores = pred_boxes_pruned[img_id]['scores']\n start_idx = 0\n for score in box_scores:\n if score <= model_score_thr:\n pred_boxes_pruned[img_id]\n start_idx += 1\n else:\n break\n # Remove boxes, scores of lower than threshold scores:\n pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]\n pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]\n # Recalculate image results for this image\n print(img_id)\n img_results[img_id] = get_single_image_results(gt_boxes_img, pred_boxes_pruned[img_id]['boxes'], iou_thr=0.5)\n # calculate precision and recall\n prec, rec = calc_precision_recall(img_results)\n precisions.append(prec)\n recalls.append(rec)\n model_thrs.append(model_score_thr)\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n prec_at_rec = []\n for recall_level in np.linspace(0.0, 1.0, 11):\n try:\n args = np.argwhere(recalls > recall_level).flatten()\n prec = max(precisions[args])\n print(recalls, \"Recall\")\n print(recall_level, \"Recall Level\")\n print(args, \"Args\")\n print(prec, \"precision\")\n except ValueError:\n prec = 0.0\n prec_at_rec.append(prec)\n avg_prec = np.mean(prec_at_rec)\n return {\n 'avg_prec': avg_prec,\n 'precisions': precisions,\n 'recalls': recalls,\n 'model_thrs': model_thrs}\n","repo_name":"yohann84L/faster_rcnn_test_case","sub_path":"src/model/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":10609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24038194243","text":"##parameters=\n\"\"\"Publish BlogEntry\"\"\"\n\n# $Id$\n\nwftool = context.portal_workflow\n\ntry:\n wftool.doActionFor(context,\n 'publish',\n comment=\"Publishing BlogEntry\")\n psm = \"psm_blog_entry_publish_done\"\nexcept:\n psm = \"psm_blog_entry_publish_not_possible\"\n\nif context.REQUEST is not None:\n context.REQUEST.RESPONSE.redirect(context.absolute_url()+'?portal_status_message='+psm)\n","repo_name":"nuxeo-cps/products--CPSBlog","sub_path":"skins/cpsblog/blog_entry_publish.py","file_name":"blog_entry_publish.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39877040828","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef loadPrices(fileName):\n dat = pd.read_csv(fileName, index_col=0)\n # dat = dat.set_index(pd.DatetimeIndex(dat['date']))\n dat.index = pd.DatetimeIndex(dat.index)\n # dat = dat.drop('date', 1)\n return dat\n\n\ndef normalizeValues(table, newColumn, existingColum):\n priceAtT0 = table.iloc[0][existingColum]\n table[newColumn] = table.apply(lambda row: (row[existingColum] / priceAtT0), axis=1)\n return table\n\nusdByn = loadPrices('USDBYN2010_2020.csv')\nusdByn = normalizeValues(usdByn, 'procent', 'price')\n\nXaxis = usdByn.index\nYaxis = usdByn['procent'].values\n\nplt.plot(Xaxis, 
Yaxis)\nplt.grid()\nplt.show()","repo_name":"Prizrak4416/book-of-recipes","sub_path":"data_sience/src/get_grafik_BYN.py","file_name":"get_grafik_BYN.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73664291117","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\n\nfrom torch.utils.data import Dataset\nfrom sklearn.metrics import classification_report\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass DisambiguationModel(nn.Module):\n def __init__(self, dim, layer_sizes, drop_outs):\n super(DisambiguationModel, self).__init__()\n self.lin_layers = nn.ModuleList([nn.Linear(dim, layer_sizes[0])])\n for i in range(len(layer_sizes)-1):\n self.lin_layers.append(nn.ReLU())\n self.lin_layers.append(nn.Dropout(drop_outs[i]))\n self.lin_layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))\n\n def forward(self, x):\n for l in self.lin_layers:\n x = l(x)\n return x\n\nclass ModelWrapper():\n def __init__(self, config, input_dim, save_path=None, device='cpu'):\n self.config = config\n self.device = device\n self.writer = SummaryWriter('{}/log'.format(save_path))\n self.model = DisambiguationModel(input_dim, config['layer_sizes'], config['drop_outs']).to(device)\n self.loss_fn = nn.BCEWithLogitsLoss().to(device)\n self.optim = optim.Adam(self.model.parameters())\n self.epoch = 0\n self.save_path = save_path\n\n def train(self, train_set):\n self.model.train()\n cum_loss = 0\n for idx, sample in enumerate(train_set):\n self.optim.zero_grad()\n pred = self.model(sample[0].to(self.device))\n loss = self.loss_fn(torch.squeeze(pred), sample[1].float().to(self.device))\n loss.backward()\n self.optim.step()\n cum_loss += loss.detach()\n if idx != 0 and idx % 4000 == 0:\n print(\"Batch {} Avg. 
Loss {}\".format(idx, cum_loss / 100)) \n cum_loss = 0\n\n def test(self, test_set):\n self.model.eval()\n predictions = []\n true = []\n for idx, sample in enumerate(test_set):\n with torch.no_grad():\n pred = self.model(sample[0].to(self.device))\n pred_class = torch.squeeze(torch.sigmoid(pred))\n predictions.extend(pred_class.cpu().numpy())\n true.extend(sample[1])\n if idx != 0 and idx % 4000 == 0:\n print(\"At batch {}\".format(idx))\n return true, predictions\n\n def predict(self, test_set='train', test_set_ext='train'):\n self.model.eval()\n predictions = []\n for idx, sample in enumerate(self.inputs[test_set][test_set_ext]['loader']):\n with torch.no_grad():\n pred = self.model(sample[0])\n pred_class = torch.squeeze(torch.sigmoid(pred))\n predictions.extend(pred_class.cpu().numpy())\n return predictions\n\n def eval(self, true, predictions, threshold=.5, epoch=0, write=True):\n predictions = [0 if pred <= threshold else 1 for pred in predictions]\n print(classification_report(true, predictions))\n classification_dict = classification_report(true, predictions, output_dict=True)\n if write and 'True' in classification_dict:\n for k,v in classification_dict['True'].items():\n self.writer.add_scalar(k, v, epoch)\n\n def save(self):\n print(\"Saving model\")\n torch.save({\n 'epoch': self.epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optim.state_dict(),\n }, '{}/model.pth'.format(self.save_path))\n\n def load(self):\n if 'checkpoint' in self.config and self.config['checkpoint']:\n print(\"Loading model from checkpoint\")\n checkpoint_data = torch.load(self.config['checkpoint'], map_location=self.device)\n self.model.load_state_dict(checkpoint_data['model_state_dict'])\n self.optim.load_state_dict(checkpoint_data['optimizer_state_dict'])\n self.epoch = checkpoint_data['epoch'] \n\n","repo_name":"dave-s477/SoMeNLP","sub_path":"somenlp/entity_disambiguation/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"21874183464","text":"''' Telegram '''\nimport logging\n\nimport arrow\nfrom flask import Blueprint, g, redirect, request, url_for\nfrom flask.wrappers import Response\nfrom werkzeug.wrappers import Response as ResponseBase\n\nimport setting\nfrom models.telegram_db import TelegramDB\nfrom module.mc import MC\nfrom module.telegram_bot import TelegramBot\n\nVIEW_TELEGRAM = Blueprint('telegram', __name__, url_prefix='/telegram')\n\n\n@VIEW_TELEGRAM.route('/r', methods=('POST', ))\ndef receive() -> ResponseBase:\n ''' receive '''\n data = request.get_json()\n logging.info('[telegram] %s', data)\n\n if data and TelegramBot.is_command_start_linkme(data):\n uuid_data = TelegramBot.gen_uuid(chat_id=data['message']['from']['id'])\n TelegramBot.temp_fetch_user_data(data=data)\n\n resp = TelegramBot(token=setting.TELEGRAM_TOKEN).send_message(\n chat_id=data['message']['from']['id'],\n text='請繼續前往志工平台登入驗證,感謝!',\n reply_markup={\n 'inline_keyboard': [\n [{'text': '驗證(verify)',\n 'url': f\"https://{setting.DOMAIN}/telegram/verify/{uuid_data['uuid']}\"}, ],\n ]},\n )\n\n logging.info('[Telegram][Send] %s', resp.json())\n\n return Response('', status=200)\n\n\n@VIEW_TELEGRAM.route('/verify/', methods=('GET', 'POST'))\ndef link_telegram_verify(tg_uuid: str) -> ResponseBase:\n ''' Link Telegram verify '''\n if request.method == 'GET':\n mem_cache = MC.get_client()\n data = mem_cache.get(f'tg:{tg_uuid}')\n if not data:\n return 
Response('Expired. `/linkme` again', status=406)\n\n    return Response('', status=404)\n","repo_name":"COSCUP/COSCUP-Volunteer","sub_path":"view/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"73"} +{"seq_id":"74499576555","text":"import argparse\nfrom utils import config\nfrom dataset.data_loader import get_test_loader\nimport cv2\nimport os\n\nimport torch\nimport torch.nn as nn\n\ndef main():\n    parser = argparse.ArgumentParser()\n    arg = parser.add_argument\n    arg('--model', type=str, default='UNet', choices=['UNet'])\n    arg('--image-path', type=str, default='./data/test/')  # used below but never declared; the default path is an assumption\n    arg('--output-path', type=str, default='./output/')\n    arg('--config-file', type=str, default='./config/train_config.yaml')\n    args = parser.parse_args()\n\n    cfg = config.Config(args.config_file)\n    csv_dir = cfg.csv_dir\n\n    # args.model is only the model *name*; build the actual network object.\n    # The import location of UNet is an assumption - this file never imports it.\n    from model import UNet\n    network = {'UNet': UNet}[args.model]()\n    model_weight_path = 'checkpoint/model_' + args.model + '.pt'\n\n    data_loader = get_test_loader(cfg, args.image_path, csv_dir)\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    network = nn.DataParallel(network)\n    network.to(device)\n    weight = torch.load(model_weight_path)\n    network.load_state_dict(weight['model'])\n\n    with torch.no_grad():\n        network.eval()\n        for test_image, test_name in data_loader:\n            test_image = test_image.to(device)\n            outputs = network(test_image)\n            test_pred = outputs.squeeze().data.cpu().numpy()\n            test_mask = (test_pred > cfg.Threshold).astype('int') * 255\n            cv2.imwrite(os.path.join(args.output_path, '%s.png' % test_name), test_mask)\n\nif __name__ == '__main__':\n    main()","repo_name":"Howrunz/model_train","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5509778214","text":"from pyspark.sql.functions import col\nfrom pyspark.ml.recommendation import ALS  # 'pyspark.ml.ALS' is not a real module path; ALS lives in pyspark.ml.recommendation\nfrom tester import roem_cross_val\n\ndef train_model(data, model):\n    \"\"\"This assumes you have tested models using reduced data and don't\n    necessarily know what are the best params\"\"\"\n\n    print(\"Model Parameters:\")\n    print(\"Rank: {}\".format(model.getRank()))\n    print(\"MaxIter: {}\".format(model.getMaxIter()))\n    print(\"RegParam: {}\".format(model.getRegParam()))\n    print(\"Alpha: {}\".format(model.getAlpha()))\n\n    return model.fit(data)\n
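\n# usage sketch (assumptions: `ratings` is a DataFrame with userId/itemId/rating\n# columns and `als` is a configured pyspark.ml.recommendation.ALS instance):\n#   model = train_model(ratings, als)\n#   preds = model.transform(ratings)   # adds the 'prediction' column used below\n#   top5 = get_user_predictions(preds, model, users=[1, 2])\n\ndef get_user_predictions(df, model, users=[], n=5):\n    \"\"\"Get top recommendations for the requested users, if none are \n    provided, recommendations are generated for all data\n\n    - must be a list\n    \"\"\"\n\n    assert isinstance(users, list)\n\n    if len(users) > 0:\n        df = df.filter(col(\"userId\").isin(users))\n\n    return 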
df.group_by(\"userId\").sort(col(\"prediction\")).limit(n)\n","repo_name":"renatomatz/ALS_tools","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"36404304623","text":"\"\"\"Module containing methods used to create and drop table\"\"\"\n\ndef get_tables():\n \"\"\"List of all tables with their name and sql_statement to create it in a PostgreSQL database server\"\"\"\n\n tables = [\n {\n \"table\": \"SELLERS\",\n \"sql_statement\":\n \"\"\"\n CREATE TABLE SELLERS(\n seller_id CHAR(32) NOT NULL PRIMARY KEY\n )\n \"\"\"\n },\n {\n \"table\": \"CUSTOMERS\",\n \"sql_statement\":\n \"\"\"\n CREATE TABLE CUSTOMERS(\n customer_id CHAR(32) NOT NULL PRIMARY KEY,\n customer_unique_id CHAR(32) NOT NULL,\n customer_zip_code_prefix CHAR(5) NOT NULL,\n customer_city VARCHAR(64) NOT NULL,\n customer_state CHAR(2) NOT NULL\n )\n \"\"\"\n },\n {\n \"table\": \"PRODUCTS\",\n \"sql_statement\":\n \"\"\"\n CREATE TABLE PRODUCTS(\n product_id CHAR(32) NOT NULL PRIMARY KEY,\n product_category_name VARCHAR(100),\n product_name_lenght FLOAT,\n product_description_lenght FLOAT,\n product_photos_qty INT,\n product_weight_g FLOAT NOT NULL,\n product_length_cm FLOAT NOT NULL,\n product_height_cm FLOAT NOT NULL,\n product_width_cm FLOAT NOT NULL,\n product_category_name_english VARCHAR(100)\n )\n \"\"\"\n },\n {\n \"table\": \"ORDERS\",\n \"sql_statement\":\n \"\"\"\n CREATE TABLE ORDERS(\n order_id CHAR(32) NOT NULL PRIMARY KEY,\n customer_id CHAR(32) NOT NULL,\n order_status VARCHAR(32) NOT NULL,\n order_purchase_timestamp TIMESTAMP NOT NULL,\n order_approved_at TIMESTAMP,\n order_delivered_carrier_date TIMESTAMP,\n order_delivered_customer_date TIMESTAMP,\n order_estimated_delivery_date TIMESTAMP NOT NULL,\n CONSTRAINT fk_customer_id FOREIGN KEY (customer_id) REFERENCES customers (customer_id)\n )\n \"\"\"\n },\n {\n \"table\": \"ITEMS\",\n \"sql_statement\":\n \"\"\"\n CREATE TABLE ITEMS(\n order_id CHAR(32) NOT NULL,\n order_item_id INT NOT NULL,\n seller_id CHAR(32) NOT NULL,\n product_id CHAR(32) NOT NULL,\n shipping_limit_date TIMESTAMP NOT NULL,\n price FLOAT NOT NULL,\n freight_value FLOAT NOT NULL,\n CONSTRAINT pk_items PRIMARY KEY (order_id, order_item_id),\n CONSTRAINT fk_order_id FOREIGN KEY (order_id) REFERENCES orders (order_id),\n CONSTRAINT fk_seller_id FOREIGN KEY (seller_id) REFERENCES sellers (seller_id),\n CONSTRAINT fk_product_id FOREIGN KEY (product_id) REFERENCES products (product_id)\n )\n \"\"\"\n }\n ]\n\n return tables\n\n\ndef drop_table(cursor, table):\n print(f\"\\t{table}..\")\n cursor.execute(f\"DROP TABLE IF EXISTS {table} CASCADE\")\n\n\ndef create_table(cursor, table, sql_statement):\n print(f\"\\t{table}..\")\n cursor.execute(sql_statement)\n","repo_name":"RomainTL/free2move_technical_test","sub_path":"free2move-py/src/free2move/create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42049314","text":"import sys\nfrom collections import defaultdict\n\nN = int(input())\nAB = [list(map(int, input().split())) for _ in range(N)]\nevents = defaultdict(int)\nevq = [(i, 0) for i in range(N)]\nfor a, b in AB:\n a -= 1\n b = a+b\n events[a] += 1\n events[b] -= 1\nevq = sorted(events.items())\npre = 0\nuser_cnt = 0\nanss = [0]*(N+1)\nfor t, c in evq:\n anss[user_cnt] += t-pre\n user_cnt += c\n pre = 
t\n\nprint(*anss[1:])\n","repo_name":"tanakaht/atcoder","sub_path":"problems/abc221/abc221_d.py","file_name":"abc221_d.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31281743007","text":"# __coding=utf8__\n# /** 作者:zengyanghui **/\n# coding=utf-8\n\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport time\nimport os.path\nimport xlrd\nimport xlwt\n\nimport tabulate\n\n\n# import win32api\n# import win32ui\n# import win32con\n#\n# import win32com\n# from win32com.shell import shell\n\n\n# # 1表示打开文件对话框\n# dlg = win32ui.CreateFileDialog(1)\n# # 设置打开文件对话框中的初始显示目录\n# dlg.SetOFNInitialDir('E:/Python')\n# # 弹出文件选择对话框\n# dlg.DoModal()\n# # 获取选择的文件名称\n# filename = dlg.GetPathName()\n# print(filename)\n\n\n# xx=shell.SHGetPathFromIDList()\n# print(xx)\n\n\ndef list_all_files(rootdir, filekey_list):\n if len(filekey_list) > 0:\n filekey_list = filekey_list.replace(\",\", \" \")\n filekey = filekey_list.split(\" \")\n else:\n filekey = ''\n\n _files = []\n list = os.listdir(rootdir) # 列出文件夹下所有的目录与文件\n for i in range(0, len(list)):\n path = os.path.join(rootdir, list[i])\n if os.path.isdir(path):\n _files.extend(list_all_files(path, filekey_list))\n if os.path.isfile(path):\n if (path.find(\"~\") < 0) and (path.find(\".DS_Store\") < 0): # 带~符号表示临时文件,不读取\n if len(filekey) > 0:\n for key in filekey:\n # print(path)\n filename = \"\".join(path.split(\"\\\\\")[-1:])\n # print(\"文件名:\",filename)\n\n key = key.replace(\"!\", \"!\")\n\n if key.find(\"!\") >= 0:\n # print(\"反向选择:\",key)\n if filename.find(key.replace(\"!\", \"\")) >= 0: # 此文件不要读取\n # print(\"{} 不应该包含 {},所以剔除:\".format(filename,key ))\n pass\n elif filename.find(key) > 0: # 只做文件名的过滤\n _files.append(path)\n\n else:\n _files.append(path)\n\n # print(_files)\n return _files\n\n\ndef read_excel(filename):\n # 原代码,备份:\n # print(filename)\n if filename.find(\"xls\") > 0:\n df = pd.read_excel(filename, dtype=str)\n else:\n df = pd.read_csv(filename, dtype=str, encoding=\"gb18030\")\n if filename.find(\"TAOBAO\") > 0:\n plat = \"淘宝\"\n elif filename.find(\"TMALL\") > 0:\n plat = \"天猫\"\n elif filename.find(\"JD\") > 0:\n plat = \"京东\"\n elif filename.find(\"PDD\") > 0:\n plat = \"拼多多\"\n elif filename.find(\"DY\") > 0:\n plat = \"抖音\"\n # elif filename.find(\"快手\") > 0:\n # plat = \"快手\"\n elif filename.find(\"XHS\") > 0:\n plat = \"小红书\"\n elif filename.find(\"KAOLA\") > 0:\n plat = \"网易考拉\"\n elif filename.find(\"YZ\") > 0:\n plat = \"有赞\"\n else:\n plat = \"\"\n df[\"平台\"] = plat\n\n if \"开始时间\" in df.columns:\n df[\"订单数量\"] = df[\"订单数量\"].astype(float)\n df[\"订单数量\"] = df[\"订单数量\"].astype(int)\n df[\"订单金额\"] = df[\"订单金额\"].astype(float)\n df[\"开始时间\"] = df[\"开始时间\"].astype(str).apply(lambda x: x.replace(\".\", \"-\"))\n df[\"开始时间\"] = df[\"开始时间\"].astype(\"datetime64[ns]\")\n df[\"年度\"] = df[\"开始时间\"].apply(lambda x: x.year)\n df[\"月份\"] = df[\"开始时间\"].apply(lambda x: x.month)\n temp_df = df.groupby([\"平台\", \"店铺名称\", \"年度\", \"月份\"]).agg({\"订单数量\": \"sum\", \"订单金额\": \"sum\"})\n temp_df = pd.DataFrame(temp_df).reset_index()\n # temp_df.dropna(subset=[\"店铺名称\"],axis=0,inplace=True)\n return temp_df\n else:\n dict = {\"平台\": \"\", \"店铺名称\": \"\", \"年度\": \"\", \"月份\": \"\", \"订单数量\": \"\", \"订单金额\": \"\"}\n temp_df = pd.DataFrame(dict, index=[0])\n # print(temp_df.dtypes)\n # print(temp_df.to_markdown())\n return temp_df\n\n # temp_df = pd.read_excel(filename,dtype=str)\n # print(temp_df.dtypes)\n # for column_name in 
temp_df.columns:\n # temp_df.rename(columns={column_name:column_name.replace(\" \",\"\").replace(\"\\n\",\"\").strip()},inplace=True)\n # if column_name == \"子订单编号\":\n # temp_df.rename(columns={\"子订单编号\":\"订单号\"},inplace=True)\n # elif column_name == \"订单编号\":\n # temp_df.rename(columns={\"订单编号\": \"订单号\"},inplace=True)\n # elif column_name == \"主订单编号\":\n # temp_df.rename(columns={\"主订单编号\": \"订单号\"},inplace=True)\n # elif column_name == \"主订单编号\":\n # temp_df.rename(columns={\"主订单编号\": \"订单号\"},inplace=True)\n # elif column_name == \"物流单号\":\n # temp_df.rename(columns={\"物流单号\": \"运单号码(HWBNo.)\"},inplace=True)\n # elif column_name == \"单号\":\n # temp_df.rename(columns={\"单号\": \"运单号码(HWBNo.)\"},inplace=True)\n # elif column_name == \"快递单号\":\n # temp_df.rename(columns={\"快递单号\": \"运单号码(HWBNo.)\"},inplace=True)\n # elif column_name == \"总价\":\n # temp_df.rename(columns={\"总价\": \"订单金额\"},inplace=True)\n # elif column_name == \"实收款(到付按此收费)\":\n # temp_df.rename(columns={\"实收款(到付按此收费)\": \"订单金额\"},inplace=True)\n # temp_df = temp_df.loc[:,~temp_df.columns.duplicates()]\n # print(temp_df.columns)\n # temp_df = temp_df[[\"运单号码(HWBNo.)\",\"订单号\",\"订单金额\"]].copy()\n # temp_df = temp_df.loc[:, ~temp_df.columns.duplicated()]\n # temp_df = temp_df[columns].T.drop_duplicates().T\n # print(temp_df.to_markdown())\n # print(temp_df.columns)\n # print(len(temp_df))\n # return temp_df\n\n\ndef get_all_files(rootdir, filekey):\n filelist = list_all_files(rootdir, filekey)\n # print(filelist)\n mySeries = pd.Series(filelist)\n df = pd.DataFrame(mySeries)\n df.columns = [\"filename\"]\n # df[\"filename\"] = df[\"filename\"].apply(lambda x: x.strip().replace(\" \", \"\"))\n\n df = df[~df[\"filename\"].str.contains(\"账单\")]\n df = df[~df[\"filename\"].str.contains(\"订单\")]\n df = df[~df[\"filename\"].str.contains(\"回款\")]\n # df=df[~df[\"filename\"].str.contains(\"账单\")]\n df = df[df[\"filename\"].str.contains(\"总表\")]\n\n # print(df.to_markdown())\n # print(\"抽查是否还有快递!\")\n # print(df[df.filename.str.contains(\"快递\")].to_markdown())\n return df\n\n\n# def read_all_excel(rootdir, filekey):\n# df_files = get_all_files(rootdir, filekey)\n# for index, file in df_files.iterrows():\n# if 'df' in locals().keys(): # 如果变量已经存在\n# dd = read_excel(file[\"filename\"])\n# dd[\"filename\"] = file[\"filename\"]\n# print(\"进度表:{}/{} 文件{},行数{}\".format(index + 1, df_files.shape[0], file[\"filename\"], dd.shape[0]))\n# df = df.append(dd)\n# else:\n# df = read_excel(file[\"filename\"])\n# df[\"filename\"] = file[\"filename\"]\n# print(\"进度表:{}/{} 文件{},行数{}\".format(index + 1, df_files.shape[0], file[\"filename\"], df.shape[0]))\n#\n# return df\n\ndef get_amount(filename):\n df = pd.read_excel(r\"/Users/maclove/PycharmProjects/pythonConda/data/文件分类1.xlsx\", sheet_name=\"Sheet2\")\n # df = pd.read_excel(filename)\n # print(df.to_markdown())\n for index, row in df.iterrows():\n # print(filename)\n # print(index)\n # print(row)\n if filename.find(row[\"平台\"]) >= 0:\n # print(row[\"平台\"])\n try:\n amount_column = row[\"金额字段\"]\n # print(\"文件名:\", filename, \" 金额字段为:\", amount_column)\n if filename.find(\"小红书\") > 0:\n tempdb = pd.read_excel(filename, sheet_name=\"商品销售\")\n else:\n tempdb = pd.read_excel(filename)\n if filename.find(\"快手\") > 0:\n tempdb = tempdb.apply(lambda x: x.astype(str).str.replace(\"¥\", \"\"))\n if \"实付款\" in tempdb.columns:\n tempdb[\"实付款\"] = tempdb[\"实付款\"].astype(float)\n elif \"实付款(元)\" in tempdb.columns:\n tempdb[\"实付款(元)\"] = tempdb[\"实付款(元)\"].astype(float)\n else:\n pass\n # 
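(debug aid) uncomment the next line to eyeball one parsed row when a\n                # platform's amount column fails to resolve:\n                # 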
print(tempdb.head(1).to_markdown())\n if amount_column.find(\",\") > 0:\n # print(amount_column,\" is in \",tempdb.columns)\n amount_columns = amount_column.split(\",\")\n for acl in amount_columns:\n if acl in tempdb.columns:\n return tempdb[[acl]].sum()\n else:\n return tempdb[[amount_column]].sum()\n except Exception as e:\n print(\"没有找到金额字段!\", filename)\n return 0\n print(\"没有找到平台!\", filename)\n return 0\n\n\ndef read_all_excel(rootdir, filekey):\n df_files = get_all_files(rootdir, filekey)\n for index, file in df_files.iterrows():\n if 'df' in locals().keys(): # 如果变量已经存在\n dd = read_excel(file[\"filename\"])\n dd[\"filename\"] = file[\"filename\"]\n print(\"进度表:{}/{} 文件{},行数{}\".format(index + 1, df_files.shape[0], file[\"filename\"], dd.shape[0]))\n\n df = df.append(dd)\n # hangshu= dd.shape[0]\n # df = df.append( file[\"filename\"],hangshu] )\n # print(file[\"filename\"],hangshu)\n\n # print(file[\"filename\"] )\n # amount=get_amount(file[\"filename\"])\n # print(file[\"filename\"], dd.shape[0], amount)\n\n else:\n df = read_excel(file[\"filename\"])\n df[\"filename\"] = file[\"filename\"]\n # print(file[\"filename\"], df.shape[0])\n print(\"进度表:{}/{} 文件{},行数{}\".format(index + 1, df_files.shape[0], file[\"filename\"], df.shape[0]))\n\n return df\n\n\ndef combine_excel():\n print('请选择要合并的文件目录(请确定所有excel的表头都是相同的哦!):')\n # filedir=\"\"\n # filedir = input()\n # myTuple = shell.SHBrowseForFolder(0, None, \"\", 64)\n try:\n # path = shell.SHGetPathFromIDList(myTuple[0])\n filedir = input()\n except:\n print(\"你没有输入任何目录 :(\")\n sys.exit()\n return\n\n # filedir=path.decode('ansi')\n print(\"你选择的路径是:\", filedir)\n\n global default_dir\n default_dir = filedir\n\n if len(filedir) == 0:\n print(\"你没有输入任何目录 :(\")\n sys.exit()\n return\n\n print('请输入要合并的文件名称或者后缀匹配符:(比如都必须��含 序时账 三个字,那么就请输入 \"序时账\" , 不输入就表示所有excel都要合并!)')\n filekey = input()\n\n if len(filedir) == 0:\n print(\"你没有输入任何关键词 :(\")\n filekey = ''\n # sys.exit()\n # return\n\n print(\"你希望在'{}'目录下找到所有的包含“{}”文件,然后合并。\".format(filedir, filekey))\n\n table = read_all_excel(filedir, filekey)\n table.drop_duplicates(inplace=True)\n\n if len(table) > 500000:\n table.to_csv(\"data/导出订单的数量和金额.csv\", index=False)\n else:\n table.to_excel(\"data/导出订单的数量和金额.xlsx\", index=False)\n\n return table\n\n\ndef caiwu_xushizhang():\n df = combine_excel()\n if 'df' in locals().keys(): # 如果变量已经存在\n # print(df.head(10).to_markdown())\n print(df.head(3))\n # df.to_clipboard(index=False)\n print(\"ok1\")\n # if len(df)>500000:\n # df.to_csv(default_dir + r\"\\合并表格.csv\")\n # else:\n # df.to_excel(default_dir + r\"\\合并表格.xlsx\")\n # print(\"生成完毕,现在关闭吗?yes/no\")\n # byebye = input()\n # print('bybye:', byebye)\n else:\n print(\"不好意思,什么也没有做哦 :(\")\n # pyinstaller -p D:\\Anaconda3\\envs\\duizhang -F .\\xushizhang.py\n\n\nif __name__ == \"__main__\":\n print(\"开始:\", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n # print(\"abc\".find(\"x\"))\n\n # caiwu_xushizhang()\n\n combine_excel()\n\n print(\"ok\")\n","repo_name":"jackyxugz/python-data-analysis","sub_path":"导出数据统计订单数和金额.py","file_name":"导出数据统计订单数和金额.py","file_ext":"py","file_size_in_byte":12532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"70410228716","text":"#coding:utf-8\nfrom django.contrib import admin\n\nfrom .models import (EquipmentType,\n Equipment,\n PropertyType,\n Property,\n EA,\n Reserve,\n Contract,\n Period,\n Prices)\nfrom .helpers import get_cache_props, get_cache_type\nfrom .mixins import PropertyMixin\n\nfrom 
collections import Counter\nimport hashlib\n\n\n@admin.register(EquipmentType)\nclass EquipmentTypeAdmin(admin.ModelAdmin):\n pass\n\n\nclass PropertyInline(admin.TabularInline):\n model = Property\n extra = 1\n\n\nclass PricesInline(admin.TabularInline):\n model = Prices\n extra = 1\n\n\n@admin.register(Equipment)\nclass EquipmentAdmin(PropertyMixin, admin.ModelAdmin):\n list_display = ('type', 'article', 'count', '_property')\n inlines = [PropertyInline, PricesInline]\n\n def response_add(self, request, new_object, post_url_continue=None):\n obj = self.after_saving_model_and_related_inlines(new_object)\n return super(EquipmentAdmin, self).response_add(request, obj, post_url_continue)\n\n def response_change(self, request, obj):\n obj = self.after_saving_model_and_related_inlines(obj)\n return super(EquipmentAdmin, self).response_change(request, obj)\n\n def after_saving_model_and_related_inlines(self, obj):\n # вычисляю хеш (тип + основные свойства)\n props = obj.property_set.filter(general=True)\n hash_base = u'_'.join([\n obj.type.name,\n u'_'.join([u'{}_{}'.format(p.type.name, p.value) for p in props])])\n obj.hash = hashlib.md5(hash_base.encode('utf-8')).hexdigest()\n obj.save()\n\n # нормализуем количество\n live_hash = Equipment.objects.values_list('hash', flat=True).distinct()\n for item_hash in live_hash:\n eq_sum = sum(Equipment.objects.filter(hash=item_hash).values_list('count', flat=True))\n try:\n ea = EA.objects.get(hash=item_hash)\n diff = eq_sum - (ea.count_in + ea.count_out)\n if diff:\n ea.count_in += diff\n ea.save()\n except EA.DoesNotExist:\n EA.objects.create(type=obj.type, count_in=obj.count, hash=item_hash)\n\n # удаляем те, у которых невалидный хеш\n # случай, когда у товара меняется хеш на новый\n # и в он был единственным представителем старого хеша\n EA.objects.exclude(hash__in=live_hash).delete()\n\n return obj\n\n\n@admin.register(PropertyType)\nclass PropertyTypeAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(Property)\nclass PropertyAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(EA)\nclass EAAdmin(PropertyMixin, admin.ModelAdmin):\n list_display = ('type', 'count_in', 'count_out', '_property')\n\n\n@admin.register(Reserve)\nclass ReserveAdmin(admin.ModelAdmin):\n list_display = ('user', 'status', '_inventory', '_reserve')\n\n def _inventory(self, obj):\n \"\"\"\n return: (4)Ботинки => Размер: 40 , (1)Сноуборд => Длина: 160, ...\n \"\"\"\n eqs = [u'{} => {}'.format(get_cache_type(item.hash),\n get_cache_props(item.hash))\n for item in obj.equipments.all()]\n return u' , '.join([u'({}){}'.format(v, k)\n for k, v in Counter(eqs).items()])\n _inventory.short_description = u'Инвентарь на руках'\n\n def _reserve(self, obj):\n \"\"\"\n return: (4)Ботинки => Размер: 40 , (1)Сноуборд => Длина: 160, ...\n \"\"\"\n return u' , '.join([u'({}){} => {}'.format(item.count,\n get_cache_type(item.ea.hash),\n get_cache_props(item.ea.hash))\n for item in obj.reserveea_set\n .select_related('ea__hash')\n .order_by('-count')])\n _reserve.short_description = u'Забронировано'\n\n\n@admin.register(Period)\nclass PeriodAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(Contract)\nclass ContractAdmin(admin.ModelAdmin):\n list_display = ('reserve', 'period', 'total', 'deposit', 'zip', 'active')\n","repo_name":"cephey/rent","sub_path":"src/inventory/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18916657665","text":"import cv2\r\nimport 
numpy as np\r\ncap=cv2.VideoCapture(0)\r\nwhile(cap.isOpened()):\r\n ret,frame=cap.read()\r\n frame=cv2.flip(frame,1)\r\n roi=frame[100:900, 100:900]\r\n cv2.rectangle(frame,(100,100),(300,300),(0,255,0),0)\r\n hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\r\n cv2.imshow('frame',hsv)\r\n cap.get(3)\r\n cap.get(4)\r\n lower_lim = np.array([0,20,70], dtype=np.uint8)\r\n upper_lim = np.array([20,255,255], dtype=np.uint8)\r\n mask = cv2.inRange(hsv, lower_lim, upper_lim)\r\n mask = cv2.dilate(mask,kernel,iterations = 4)\r\n mask = cv2.GaussianBlur(mask,(5,5),100)\r\n if cv2.waitKey(1000) & 0xFF == ord('q'):\r\n break\r\n #cv2.imshow('frame',frame)\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"Shashank-Shukla/Devsoc19","sub_path":"newtry.py","file_name":"newtry.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71858001195","text":"# 求一个3*3矩阵主对角线元素之和\n\nimport numpy as np\n\narr_list = []\n\nfor i in range(9):\n num = float(input('请输入:'))\n arr_list.append(num)\n\narr = np.array(arr_list)\narr.resize([3,3])\nsum = arr[0,0] + arr[1,1] + arr[2,2]\nprint('主对角线元素之和为:',sum)\n\n","repo_name":"hululy/100-example-python","sub_path":"ex38.0.py","file_name":"ex38.0.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24720606894","text":"import sys\nimport time\nimport torch\nimport copy\n\ndef train(model, dataLoaders, criterion, optimizer, device, nEpochs=25):\n since = time.time()\n trainHistory, valHistory = [], []\n bestWeights = copy.deepcopy(model.state_dict())\n bestAccuracy = 0.0\n bestEpoch = 0\n\n print('Training')\n\n for epoch in range(nEpochs):\n print('Epoch {}/{}'.format(epoch, nEpochs - 1))\n print('-' * 10)\n\n for phase in ['training', 'validation']:\n if phase == 'training':\n model.train()\n else:\n model.eval()\n\n runningLoss, runningCorrects = 0.0, 0\n\n batch = 0\n print('{} batch'.format(phase), end=' ')\n for inputs, labels in dataLoaders[phase]:\n if batch % 10 == 0:\n print('{}/{}'.format(batch, len(dataLoaders[phase])), end=' ')\n sys.stdout.flush()\n batch += 1\n if batch % 200 == 0:\n print()\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n # Forward pass\n with torch.set_grad_enabled(phase == 'training'):\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n _, predictions = torch.max(outputs, 1)\n\n # Backward pass if needed\n if phase == 'training':\n loss.backward()\n optimizer.step()\n\n runningLoss += loss.item() * inputs.size(0)\n runningCorrects += torch.sum(predictions == labels.data)\n\n nSamples = len(dataLoaders[phase].sampler.indices)\n epochLoss = runningLoss / nSamples\n epochAccuracy = float(runningCorrects) / nSamples\n\n print('\\n{} loss: {:.4f}; accuracy: {:.4f}'.format(phase, epochLoss, epochAccuracy), end='')\n\n if phase == 'training':\n trainHistory.append(epochAccuracy)\n print()\n elif phase == 'validation':\n if epochAccuracy > bestAccuracy:\n bestAccuracy = epochAccuracy\n bestEpoch = epoch\n bestWeights = copy.deepcopy(model.state_dict())\n valHistory.append(epochAccuracy)\n print(' (best validation accuracy so far is {:.4f} after epoch {})'.format(bestAccuracy, bestEpoch))\n\n print()\n\n timeElapsed = time.time() - since\n print('Training time {:.0f}m {:.0f}s'.format(timeElapsed // 60, timeElapsed % 60))\n print('Best validation accuracy: {:4f}'.format(bestAccuracy))\n\n 
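# reload the weights captured at the best validation epoch, so the caller\n    # gets the best model seen during training rather than the last-epoch one\n    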
model.load_state_dict(bestWeights)\n return model, trainHistory, valHistory\n\n\ndef test(model, testLoader, device):\n print('Testing')\n model.eval()\n corrects = 0\n\n allLabels, allPredictions = [], []\n batch = 0\n for inputs, labels in testLoader:\n if batch % 10 == 0:\n print('{}/{}'.format(batch, len(testLoader)), end=' ')\n sys.stdout.flush()\n batch += 1\n if batch % 200 == 0:\n print()\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n allLabels.extend(labels.tolist())\n\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n _, predictions = torch.max(outputs, 1)\n allPredictions.extend(predictions.tolist())\n\n corrects += torch.sum(predictions == labels.data)\n\n nSamples = len(testLoader.sampler.indices)\n accuracy = float(corrects) / nSamples\n\n print('\\nTest accuracy on {} samples: {:.4f}'.format(nSamples, accuracy))\n\n return accuracy, allLabels, allPredictions\n","repo_name":"Oliver-ss/Applying-machine-learning-to-investigate-long-term-insect-plant-interactions","sub_path":"Classification/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"8960747090","text":"import discord\nfrom discord.ext import commands\nimport random\nfrom insults import get_long_insult\nfrom annoy_brad_logic import annoy_brad\nimport os\nimport sys\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(SCRIPT_DIR))\n\nfrom insults import get_long_insult, get_short_insult\n\nclass roll_dice(commands.Cog): # create a class for our cog that inherits from commands.Cog\n # this class is used to create a cog, which is a module that can be added to the bot\n\n def __init__(self, bot): # this is a special method that is called when the cog is loaded\n self.bot = bot\n\n @commands.slash_command(description=\"roll some dice\", name= \"roll\")\n # pycord will figure out the types for you\n async def roll_dice(\n self,\n ctx, \n number_of_dice: discord.Option(int, max_value= 99,\n description=\"the number of dice you want to roll\"), \n dice_size: discord.Option(int,description=\"the number of sides to the dice\"), \n is_distinct: discord.Option(bool, description=\"Do you want this set of dice to be distinct?\", default=False, required=False)\n ):\n\n if (number_of_dice > dice_size) & is_distinct:\n await ctx.respond(f\"OI! 
You {get_long_insult()}, You cant choose more dice then possible distinct options\")\n return\n rolls = get_dice_rolls(number_of_dice, dice_size, is_distinct)\n \n rolls.sort()\n\n embed = discord.Embed(\n title=\"YOUR FUCKING DICE\",\n description=f\"Here you go you **{get_long_insult().upper()}**\",\n color=discord.Colour.dark_gold(), # Pycord provides a class with default colors you can choose from\n )\n embed.add_field(name=\"ROLLS:\", value=f\"`{rolls}`\", inline=False)\n embed.set_thumbnail(url=\"https://scontent.xx.fbcdn.net/v/t1.15752-9/278403172_399692048829552_6640220989778099445_n.jpg?stp=dst-jpg_s403x403&_nc_cat=101&ccb=1-5&_nc_sid=aee45a&_nc_ohc=fp1v8cyJAJwAX8OItsD&_nc_ad=z-m&_nc_cid=0&_nc_ht=scontent.xx&oh=03_AVJGPV02ajRAuuVrZxJxjwaIpQNKrbd1MTu_QNLywsnqsw&oe=6289995B\")\n embed.set_author(name=\"Hive Helper Regina\", icon_url=\"https://scontent.xx.fbcdn.net/v/t1.15752-9/278403172_399692048829552_6640220989778099445_n.jpg?stp=dst-jpg_s403x403&_nc_cat=101&ccb=1-5&_nc_sid=aee45a&_nc_ohc=fp1v8cyJAJwAX8OItsD&_nc_ad=z-m&_nc_cid=0&_nc_ht=scontent.xx&oh=03_AVJGPV02ajRAuuVrZxJxjwaIpQNKrbd1MTu_QNLywsnqsw&oe=6289995B\")\n brad = await annoy_brad(ctx)\n if not brad:\n await ctx.respond(embed=embed)\n \ndef get_dice_rolls(number_of_dice, dice_size, is_distinct):\n roll_number = 0\n rolls = []\n while roll_number < number_of_dice:\n roll = random.randint(1, dice_size)\n if is_distinct:\n if roll in rolls:\n continue\n else:\n rolls.append(roll)\n roll_number += 1\n continue\n rolls.append(roll)\n roll_number+=1\n return rolls\n\ndef setup(bot): # this is called by Pycord to setup the cog\n bot.add_cog(roll_dice(bot)) # add the cog to the bot\n\n","repo_name":"jackWilliamFreeman/hive-helper-regina","sub_path":"cogs/roll_dice.py","file_name":"roll_dice.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"25587801024","text":"try:\n from aerisapisdk import aerframesdk\n from aerisapisdk import aerisconfig\n from aerisapisdk.exceptions import ApiException\n print('Using the aerisapisdk installed from pip')\nexcept ModuleNotFoundError:\n print('Using the currently-checked-out aerisapisdk')\n import os\n import inspect\n import sys\n current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n parent_dir = os.path.dirname(current_dir)\n sys.path.insert(0, parent_dir)\n from aerisapisdk import aerframesdk\n from aerisapisdk import aerisconfig\n from aerisapisdk.exceptions import ApiException\n\nimport argparse\nimport json\nimport logging\nimport sched\nimport time\n\n# only query for location once an hour\nLOCATION_REQUEST_PERIOD_SECONDS = 60*60\nlogger = None\n\n\ndef begin_loop(account_id, api_key, device_id, device_id_type):\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(1, 1, get_location_and_make_noise,\n (account_id, api_key, device_id, device_id_type, scheduler, None))\n scheduler.run()\n\n\ndef get_location_and_make_noise(account_id, api_key, device_id, device_id_type, scheduler, original_location):\n try:\n new_location = aerframesdk.get_location(account_id, api_key, device_id_type, device_id)\n logger.debug(f'Latest location = {new_location}')\n # if there actually is a current location (instead of it being unknown...)\n if is_location_present(new_location):\n # set the original location to the current location\n if original_location is None:\n original_location = new_location\n logger.info(f'The original \"stay-put\" location of 
the device is {original_location}')\n\n        # check to see if the location changed\n        if location_changed(new_location, original_location):\n            logger.warning(f'The device moved!')\n\n    except ApiException as e:\n        logger.error(f'There was a problem calling the API', exc_info=e)\n    except BaseException as e:\n        logger.error(f'Something else went horribly wrong', exc_info=e)\n\n    # run this function again after some delay\n    scheduler.enter(LOCATION_REQUEST_PERIOD_SECONDS, 1, get_location_and_make_noise,\n                    (account_id, api_key, device_id, device_id_type, scheduler, original_location))\n\n\ndef is_location_present(loc):\n    \"\"\"\n    Checks to see if a location result has actual data, or if it is the \"no location available\" response.\n\n    Parameters\n    ----------\n    loc: dict\n\n    Returns\n    -------\n    True if there is actually some location data in there.\n    \"\"\"\n    if loc['mcc'] == 0:\n        return False\n\n    return True\n\n\ndef location_changed(new_loc, prev_loc):\n    \"\"\"\n    Examines device locations to determine if a device has moved.\n    Parameters\n    ----------\n    new_loc: dict\n    prev_loc: dict\n\n    Returns\n    -------\n    bool\n        True if the device has moved.\n    \"\"\"\n    result = False\n    if prev_loc is None:\n        return False\n    for attribute in ('mcc', 'mnc', 'lac', 'cellId'):\n        if new_loc[attribute] != prev_loc[attribute]:\n            logger.warning(f'Device has moved from {attribute} {prev_loc[attribute]} to {new_loc[attribute]}')\n            result = True\n    return result\n\n\ndef configure_logging(level):\n    global logger\n\n    date_format_string = '%Y-%m-%dT%H:%M:%S%z'\n    formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s', datefmt=date_format_string)\n    formatter.converter = time.gmtime\n\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.INFO)\n    ch.setFormatter(formatter)\n\n    # root logger is good enough\n    logger = logging.getLogger('aerframe_budget_geofence')\n    logger.setLevel(level)\n    logger.addHandler(ch)\n\n\nif __name__ == '__main__':\n    argparser = argparse.ArgumentParser()\n    argparser.add_argument('--config-file', required=True,\n                           help='path to a configuration file to use, like the one generated by aeriscli config')\n    argparser.add_argument('--imsi', required=True, help='the IMSI you want to try to geofence')\n\n    args = argparser.parse_args()\n\n    # point aerisconfig at our configuration file\n    aerisconfig.load_config(args.config_file)\n    # load api key and account ID from the same configuration file\n    with open(args.config_file, 'r') as f:\n        config_dict = json.load(f)\n    api_key = config_dict['apiKey']\n    account_id = config_dict['accountId']\n    del config_dict\n\n    device_id = args.imsi\n    device_id_type = 'IMSI'\n\n    configure_logging(logging.INFO)\n    logger.info('Starting...')\n\n    # start the loop\n    begin_loop(account_id, api_key, device_id, device_id_type)\n","repo_name":"aeristhings/aeris-apisdk-py","sub_path":"sample/aerframe_budget_geofence.py","file_name":"aerframe_budget_geofence.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"28558618650","text":"class Solution:\n    @staticmethod\n    def isValid(s: str) -> bool:\n        stack = []\n        pairs = {\"(\" : \")\", \"{\" : \"}\", \"[\" : \"]\"}  # opener -> expected closer (renamed from 'dict' to avoid shadowing the builtin)\n        for char in s:\n            if char in pairs:\n                stack.append(pairs[char])\n            elif not stack or stack[-1] != char:\n                # a closer with nothing open, or the wrong closer, is invalid\n                return False\n            else:\n                stack.pop()\n        return len(stack) == 0\n\n\n
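# quick sanity checks (examples; expected values follow LeetCode problem 20,\n# which the test string below comes from):\n#   Solution.isValid(\"()[]{}\")  -> True\n#   Solution.isValid(\"(]\")      -> False  (wrong closer)\n#   Solution.isValid(\"([)]\")    -> False  (interleaved pairs never match)\n\ns = \"([)]\" #cases from leet 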
code\nprint(Solution.isValid(s))\n\n\n","repo_name":"raj-hegde/problem-set","sub_path":"leetcode/valid_parantheses.py","file_name":"valid_parantheses.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71355054999","text":"from typing import Any, List, Dict\n\nfrom anytree import Node, RenderTree, ContRoundStyle\n\n\nclass RenderState:\n \" Render state class. View shablbot module active in tree style. \"\n def __init__(self, modules: Dict[str, Any], main_root: Node = None):\n self.modules = modules\n self.main_root = main_root\n\n self.node: Node = self.__create_node()\n\n def __create_node(self) -> Node:\n root = self.main_root\n\n for name_module, object in self.modules.items():\n if not root: subroot = Node(name_module)\n else: subroot = Node(name_module, parent=root)\n\n if isinstance(object, Dict):\n [Node(str(v), parent=subroot) for _, v in object.items()]\n elif isinstance(object, List):\n [Node(str(item), parent=subroot) for item in object]\n\n return root if root else subroot\n\n def render(self, style = ContRoundStyle) -> None:\n \"\"\" Render tree with state bot modules.\n\n Args:\n style ([type], optional): Style how need rendered items. Defaults to ContRoundStyle.\n \"\"\"\n print(RenderTree(self.node, style=style()).by_attr())\n\n\ndef render_state(name_module: str, module: Any) -> None:\n \"\"\" Render state module bot. Use tree.\n\n Args:\n name_module (str): Modules name\n module (Any): Object for check node\n \"\"\"\n render_state = RenderState({ name_module: module })\n render_state.render()\n\n\ndef render_state_all_components(list_components: List[Any]) -> None:\n \"\"\" Render state all modules bot in tree style.\n\n Args:\n list_components (List[Any]): components bot for need rebder state. All componnets have 'get_main_data_object()' function\"\n \"\"\"\n render_state = RenderState(\n modules={\n comp.__class__.__name__ : comp.get_main_data_object()\n for comp in list_components\n },\n main_root=Node(\"Shablbot\")\n )\n render_state.render()\n","repo_name":"Blackgard/shablbot","sub_path":"shablbot/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"85"} +{"seq_id":"74687599637","text":"'''\nThis is the main script for the different plotting routines. It defines \nthe common style (pyTeX) and imports parent moduls\n\nif final is set to True, pgf is used as the backend and a copy is saved\nin the folder img\n'''\n\nfinal = False \n\nfrom pathlib import Path\nimport numpy as np\n\ncwd = Path(__file__).parent\n\n\ndef figsize(scale=1):\n '''Create nicely proportioned figure\n\n This function calculates the optimal figuresize for any given scale\n (the ratio between figuresize and textwidth. A figure with scale 1\n covers the entire writing area). Therefor it is important to know \n the textwidth of your target document. 
This can be obtained by using\n the command \"\\the\\textwidth\" somewhere inside your document.\n '''\n\n width_pt = 355.6595 # textwidth from latex\n in_per_pt = 1.0/72.27 # Convert pt to inch\n golden = 1.61803398875 # Aesthetic ratio \n width = width_pt * in_per_pt * scale # width in inches\n height = width / golden # height in inches\n return [width,height]\n\nimport matplotlib as mpl\n\n\nimport matplotlib.pyplot as plt\n\nif final:\n plt.style.use(str(cwd / 'TeX.mplstyle'))\n\n# create dictionary for colors\n#names = ['blue','orange','red','cyan','green','yellow','purple','pink','brown','gray']\n#colors = dict(zip(names,plt.rcParams['axes.prop_cycle'].by_key()['color']))\n\ndef newfig(scale=1,ratio=None):\n '''Create a new figure object\n\n We use the function figsize to create a figure of corresponding size.\n If the option ratio is choosen, the width of the plot is still taken\n from figsize but the ratio of the figure is determined by ratio.\n '''\n\n # we using jupyter this is required to close open plots\n #plt.clf()\n if not final:\n scale*=2\n\n size = figsize(scale)\n if not ratio:\n fig = plt.figure(figsize=size)\n else:\n fig = plt.figure(figsize=(size[0],ratio*size[0]))\n\n return fig\n\n\n\n","repo_name":"fschmnn/pnlf","sub_path":"src/pnlf/plot/style.py","file_name":"style.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"} +{"seq_id":"27959340359","text":"# Rating: ~ 3.0 / 10\n# Link: https://open.kattis.com/problems/listgame\n# Complexity: O(K) where K is the number of prime factors of a given number\n# Memory: O(1)\n\nfrom math import ceil\n\ndef main():\n n = int(input())\n num_factors = 0\n factor = 2\n # check if num is greater than square root of number\n while (factor**2 <= n):\n # divide if possible\n if (n % factor == 0):\n n /= factor\n num_factors += 1\n else:\n factor += 1\n\n # exactly one factor will always be left over\n num_factors += 1\n print(num_factors)\n\nif __name__ == '__main__':\n main()\n","repo_name":"andrewjmcgehee/kattis","sub_path":"Python/listgame/listgame.py","file_name":"listgame.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"11830406944","text":"import sys\ninput = sys.stdin.readline\n\nstair_num = int(input())\nstair = [int(input()) for _ in range(stair_num)]\ndp = [0] * stair_num\n\nif len(stair) <= 2:\n print(sum(stair))\nelse:\n dp[0] = stair[0]\n dp[1] = stair[0] + stair[1]\n\n for i in range(2, stair_num):\n dp[i] = max(dp[i-3] + stair[i-1] + stair[i], dp[i-2] + stair[i])\n \n print(dp[-1])","repo_name":"pokavv/backjoon","sub_path":"2579_230522.py","file_name":"2579_230522.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"388623755","text":"# Import the dependencies\nimport sqlalchemy\nfrom flask import Flask, jsonify, send_file\nfrom flask_cors import CORS\n\n# Python SQL toolkit and Object Relational Mapper\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, inspect\nimport pandas as pd\n\n#######################################################\n# Database Setup\n#######################################################\n\nengine = create_engine(\"sqlite:///../data/fastfood_obesity.sqlite\")\n\n# reflect an existing database into a new model\nBase = 
automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nobesity = Base.classes.obesity\nfastfood = Base.classes.fastfood\n\n# Get column names for metadata\ninspector = inspect(engine)\n\n#######################################################\n# Flask Setup\n#######################################################\n\napp = Flask(__name__)\nCORS(app)\n\n#######################################################\n# Flask Routes\n#######################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
\"\n f\"/api/v1.0/metadata
\"\n f\"/api/v1.0/aff_ob
\"\n f\"/api/v1.0/geojson\"\n )\n\n@app.route(\"/api/v1.0/metadata\")\ndef get_metadata():\n# Create our session (link) from Python to the DB\n session = Session(engine)\n conn = engine.connect()\n #query \n Fastfood = pd.read_sql(\"SELECT * FROM Fastfood\", conn)\n Obesity = pd.read_sql(\"SELECT * FROM Obesity\", conn)\n joined_table = pd.read_sql('Select fastfood.State, fastfood.\"All fast food restaurants\",fastfood.\"Full-service restaurants\", fastfood.Subway, fastfood.Starbucks, fastfood.McDonalds, fastfood.\"Dunkin Donut\", fastfood.\"Burger King\", fastfood.\"Taco Bell\", fastfood.Dominos, fastfood.Wendys, fastfood.\"Dairy Queen\", obesity.Prevalence, obesity.\"95% CI\" from Fastfood INNER JOIN Obesity ON (Fastfood.State = Obesity.State)', conn)\n js = joined_table.to_json(orient='records')\n session.close()\n\n return js\n\n@app.route(\"/api/v1.0/aff_ob\")\ndef get_data():\n session = Session(engine)\n data = session.query(\n fastfood.State, \n obesity.Prevalence, \n getattr(fastfood, \"All fast food restaurants\")\n ).filter(\n fastfood.State == obesity.State\n ).order_by(\n getattr(fastfood, \"All fast food restaurants\")\n ).all()\n session.close()\n\n # Convert data to a list of dictionaries\n data_dicts = [dict(zip([\"State\", \"Prevalence\", \"All fast food restaurants\"], row)) for row in data]\n\n # Convert to JSON\n return jsonify(data_dicts)\n\n@app.route(\"/api/v1.0/geojson\")\ndef get_geojson():\n return send_file(\"../data/us-states-obesity.geojson\", mimetype='application/geo+json')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"JamesBeckmeyer/Fast-Food-Establishments-per-Capita-Compared-to-State-Obesity-Rates","sub_path":"Database and ETL/code/obesity_app.py","file_name":"obesity_app.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"40765488776","text":"# notes... sometimes I need to configure a proxy to do external calls to ws. 
\n# for that: \n\n\nimport json\n# import urllib2\n# people = json.load(urllib2.urlopen(url_oData_people))\n\n\nimport requests\nurl_oData_people = \"http://services.odata.org/TripPinRESTierService/(S(pk4yy1pao5a2nngmm2ecx0hy))/People\"\n\n# response = requests.get( url_oData_people )\n# people = response.json()\n# print(people)\n\n\n# CONVERT JOSN into object -> Pandas or dictionary array.7\n\nmovie_json = \"\"\"\n{\n\"Title\":\"Johnny 5\",\n\"Year\":\"2001\",\n\"Runtime\":\"119 min\",\n\"Country\":\"USA\"\n}\n\"\"\"\n\nmovie_data = json.loads(movie_json)\nprint(type(movie_data), movie_data)\n \nprint(\"The title is {}\".format(movie_data.get('Title')))\nmovie_json_text_2 = json.dumps(movie_data)\nprint(type(movie_json_text_2), movie_json_text_2)\n","repo_name":"davidvela/MyFirstPythonProject","sub_path":"zPythonLearn/pn_readJSON.py","file_name":"pn_readJSON.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"70084501078","text":"import struct\n\nfrom miasm2.jitter.loader.elf import vm_load_elf\nfrom miasm2.analysis.machine import Machine\nfrom miasm2.jitter.csts import PAGE_READ, PAGE_WRITE, EXCEPT_ACCESS_VIOL, EXCEPT_DIV_BY_ZERO, EXCEPT_PRIV_INSN\n\nfrom sibyl.config import config\n\n\nclass Replay(object):\n '''\n Class used to run a snapshot and check that it recognize or not a given function code\n Potential replay errors are stored in self.learnexception\n '''\n\n def __init__(self, testcreator, replayed_snapshot):\n '''\n @testcreator: TestCreator instance with associated information\n @replayed_snapshot: snapshot to be used\n '''\n self.isFuncFound = False\n self.filename = testcreator.program\n self.learned_addr = testcreator.address\n self.snapshot = replayed_snapshot\n self.replayexception = []\n self.abicls = testcreator.abicls\n self.machine = Machine(testcreator.machine)\n self.trace = testcreator.trace\n self.logger = testcreator.logger\n self.ira = self.machine.ira()\n self.ptr_size = self.ira.sizeof_pointer()/8\n\n def use_snapshot(self, jitter):\n '''Initilize the VM with the snapshot informations'''\n for reg, value in self.snapshot.input_reg.iteritems():\n setattr(jitter.cpu, reg, value)\n\n # Set values for input memory\n for addr, mem in self.snapshot.in_memory.iteritems():\n assert mem.access != 0\n if not jitter.vm.is_mapped(addr, mem.size):\n jitter.vm.add_memory_page(addr, mem.access, mem.data)\n else:\n if jitter.vm.get_mem_access(addr) & 0b11 == mem.access & 0b11:\n jitter.vm.set_mem(addr, mem.data)\n else:\n # TODO memory page is already set but have not the\n # same access right. 
However delete page does not\n # exist\n jitter.vm.set_mem(addr, mem.data)\n\n def compare_snapshot(self, jitter):\n '''Compare the expected result with the real one to determine if the function is recognize or not'''\n func_found = True\n\n for reg, value in self.snapshot.output_reg.iteritems():\n if value != getattr(jitter.cpu, reg):\n self.replayexception += [\"output register %s wrong : %i expected, %i found\" % (reg, value, getattr(jitter.cpu, reg))]\n func_found = False\n\n for addr, mem in self.snapshot.out_memory.iteritems():\n self.logger.debug(\"Check @%s, %s bytes: %r\", hex(addr), hex(mem.size), mem.data[:0x10])\n if mem.data != jitter.vm.get_mem(addr, mem.size):\n self.replayexception += [\"output memory wrong at 0x%x: %s expected, %s found\" % (addr + offset, repr(mem.data), repr(jitter.vm.get_mem(addr + offset, mem.size)))]\n func_found = False\n\n return func_found\n\n def end_func(self, jitter):\n if jitter.vm.is_mapped(getattr(jitter.cpu, self.ira.ret_reg.name), 1):\n self.replayexception += [\"return value might be a pointer\"]\n\n self.isFuncFound = self.compare_snapshot(jitter)\n\n jitter.run = False\n return False\n\n def run(self):\n '''Main function that is in charge of running the test and return the result:\n true if the snapshot has recognized the function, false else.'''\n\n # Retrieve miasm tools\n jitter = self.machine.jitter(config.miasm_engine)\n\n vm_load_elf(jitter.vm, open(self.filename, \"rb\").read())\n\n # Init segment\n jitter.ir_arch.do_stk_segm = True\n jitter.ir_arch.do_ds_segm = True\n jitter.ir_arch.do_str_segm = True\n jitter.ir_arch.do_all_segm = True\n\n FS_0_ADDR = 0x7ff70000\n jitter.cpu.FS = 0x4\n jitter.cpu.set_segm_base(jitter.cpu.FS, FS_0_ADDR)\n jitter.vm.add_memory_page(\n FS_0_ADDR + 0x28, PAGE_READ, \"\\x42\\x42\\x42\\x42\\x42\\x42\\x42\\x42\", \"Stack canary FS[0x28]\")\n\n # Init the jitter with the snapshot\n self.use_snapshot(jitter)\n\n # Get the return address for our breakpoint\n return_addr = struct.unpack(\"P\", jitter.vm.get_mem(jitter.cpu.RSP,\n 0x8))[0]\n jitter.add_breakpoint(return_addr, self.end_func)\n\n # Run the execution\n jitter.init_run(self.learned_addr)\n\n try:\n jitter.continue_run()\n assert jitter.run == False\n except AssertionError:\n # set the replayexception to the correct error\n if jitter.vm.get_exception() & EXCEPT_ACCESS_VIOL:\n self.replayexception += [\"access violation\"]\n elif jitter.vm.get_exception() & EXCEPT_DIV_BY_ZERO:\n self.replayexception += [\"division by zero\"]\n elif jitter.vm.get_exception() & EXCEPT_PRIV_INSN:\n self.replayexception += [\"execution of private instruction\"]\n else:\n self.replayexception += [\"exception no %i\" % (jitter.vm.get_exception())]\n self.isFuncFound = False\n\n return self.isFuncFound\n","repo_name":"cea-sec/Sibyl","sub_path":"sibyl/learn/replay.py","file_name":"replay.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","stars":512,"dataset":"github-code","pt":"85"} +{"seq_id":"12738271185","text":"from flask import Blueprint, render_template, request, send_file, send_from_directory\n\nbp = Blueprint('configure_css', __name__, url_prefix='/configure')\n\n@bp.route('/', methods=['GET', 'POST'])\ndef configure():\n \n args = request.args\n\n file_path = args['file']\n\n print('FILE PATH:', file_path)\n\n from aws_s3.create_presigned_url import create_presigned_url\n image_url = create_presigned_url(file_path)\n\n return render_template('file_upload/optimise_for_web_background.html', file_path=file_path, 
image_url=image_url)\n","repo_name":"vchapandrews/Seamlessly","sub_path":"app/blueprints/configure_css_background.py","file_name":"configure_css_background.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30984323294","text":"#####################################################################\n# This file is part of the 4D Light Field Benchmark. #\n# #\n# This work is licensed under the Creative Commons #\n# Attribution-NonCommercial-ShareAlike 4.0 International License. #\n# To view a copy of this license, #\n# visit http://creativecommons.org/licenses/by-nc-sa/4.0/. #\n#####################################################################\n\nimport os\nimport sys\nimport torch\nimport numpy as np\n\n\ndef write_pfm(data, fpath, scale=1, file_identifier=b\"Pf\", dtype=\"float32\"):\n # PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html\n\n data = np.flipud(data)\n height, width = np.shape(data)[:2]\n values = np.ndarray.flatten(np.asarray(data, dtype=dtype))\n endianess = data.dtype.byteorder\n # print(endianess)\n\n if endianess == '<' or (endianess == '=' and sys.byteorder == 'little'):\n scale *= -1\n\n with open(fpath, 'wb') as file:\n # print(file_identifier + b'\\n')\n file.write(file_identifier + b'\\n')\n file.write(b'%d %d\\n' % (width, height))\n file.write(b'%d\\n' % scale)\n file.write(values)\n\n\ndef read_pfm(fpath, expected_identifier=b\"Pf\"):\n # PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html\n\n with open(fpath, 'rb') as f:\n # header\n identifier = _get_next_line(f)\n if identifier != expected_identifier:\n raise Exception('Unknown identifier. Expected: \"%s\", got: \"%s\".' % (\n expected_identifier, identifier))\n\n try:\n line_dimensions = _get_next_line(f).decode('ascii')\n # print(line_dimensions)\n dimensions = line_dimensions.split(' ')\n width = int(dimensions[0].strip())\n height = int(dimensions[1].strip())\n except:\n raise Exception('Could not parse dimensions: \"%s\". '\n 'Expected \"width height\", e.g. \"512 512\".' % line_dimensions)\n\n try:\n line_scale = _get_next_line(f)\n scale = float(line_scale)\n assert scale != 0\n if scale < 0:\n endianness = \"<\"\n else:\n endianness = \">\"\n except:\n raise Exception('Could not parse max value / endianess information: \"%s\". '\n 'Should be a non-zero number.' % line_scale)\n\n try:\n data = np.fromfile(f, \"%sf\" % endianness)\n data = np.reshape(data, (height, width))\n data = np.flipud(data)\n with np.errstate(invalid=\"ignore\"):\n data *= abs(scale)\n except:\n raise Exception(\n 'Invalid binary values. Could not create %dx%d array from input.' 
% (height, width))\n\n return data\n\n\ndef _get_next_line(f):\n next_line = f.readline().rstrip()\n # ignore comments\n while next_line.startswith(b'#'):\n next_line = f.readline().rstrip()\n return next_line\n\n\nif __name__ == '__main__':\n data = torch.rand(256, 256).mul(10000.0)\n data_np = data.numpy()\n write_pfm(data_np, 'temp.pfm')\n data_est_np = torch.from_numpy(read_pfm('temp.pfm').copy()).float()\n print(torch.min(torch.abs(data_est_np - data)))\n print(torch.max(torch.abs(data_est_np - data)))\n print(torch.mean(torch.abs(data_est_np - data)))\n print(data_est_np.min(), data.min())\n print(data_est_np.max(), data.max())\n","repo_name":"tedyhabtegebrial/monocular_view_synthesis","sub_path":"monocular/src/datasets/carla/pfm_rw.py","file_name":"pfm_rw.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"35796351307","text":"# set connection string\n# get the connection string from sas azure portal\n\n# export AZURE_STORAGE_CONNECTION_STRING=\"\"\nimport os\n\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\n\n# Retrieve the connection string for use with the application. The storage\n# connection string is stored in an environment variable on the machine\n# running the application called AZURE_STORAGE_CONNECTION_STRING. If the environment variable is\n# created after the application is launched in a console or with Visual Studio,\n# the shell or application needs to be closed and reloaded to take the\n# environment variable into account.\nconnect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')\nairflow_home = os.environ.get(\"AIRFLOW_HOME\")\n\nprint('connection started.')\n\nblob_service_client = BlobServiceClient.from_connection_string(connect_str)\n\nupload_file_path = f\"{airflow_home}/data/yellow_tripdata_2019-01.parquet\"\n\n# Create a blob client using the local file name as the name for the blob\nblob_client = blob_service_client.get_blob_client(\n container='datastores', blob=\"yellow_tripdata_2019-01.parquet\")\n\nprint('blob created.')\n\n\nwith open(upload_file_path, \"rb\") as data:\n blob_client.upload_blob(data)\n\nprint('complteed.')\n","repo_name":"anil-chhetri/airflow-practise","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"32736871912","text":"import csv\nimport pandas as pd\nimport numpy as np\n\nshare = pd.read_csv('/Users/jinjun/project/python/600833.SH.csv', index_col=0)\n\n\n\n\ncount = 0 #统计阿龙指标\nup_index = 70 # 阿龙指标 up值\ndown_index = 40 # 阿龙指标 down值\n\n# 阿龙指标策略 1:符合策略,-1:不符合策略,0:不符合策略\ndef strategy_one(up,down):\n if up > up_index and down < down_index:\n return 1\n elif up < down_index and up_index < down:\n return -1\n else:\n return 0\n\n# 顺势指标策略\ndef cal_z_status(row):\n if row['顺势指标x'] == row['顺势指标x-shift']:\n return 0\n if (row['顺势指标x'] == 1 and row['顺势指标x-shift'] == 0) or (row['顺势指标x'] == 1 and row['顺势指标x-shift'] == -1) or (row['顺势指标x'] == 0 and row['顺势指标x-shift'] == -1):\n return 1\n elif (row['顺势指标x'] == 0 and row['顺势指标x-shift'] == 1) or (row['顺势指标x'] == -1 and row['顺势指标x-shift'] == 0) or (row['顺势指标x'] == -1 and row['顺势指标x-shift'] == 1):\n return -1\n else:\n return 100\n \n\n#统计阿龙指标在某一个周期持续的时间\ndef count_position(row):\n if row['aroon_up_dates'] == row['aloon_shift-1']:\n global count\n count = count + 1\n return count\n else:\n result = count + 1\n count = 0\n 
return result\n\n\n# 阿隆指标的最大值\ndef aloon_max(row):\n if row['aroon_up_dates'] == 1 and row['count_aloon_position'] == 1:\n try:\n result = share.loc[row.name + 30:row.name, 'close']\n print(result.max(),share.loc[row.name, 'close'])\n except:\n pass\n \n else:\n return 0\n\n#计算 X 位置\ndef cal_x_position(row):\n if row['顺势指标'] > 100:\n return 1\n elif row['顺势指标'] > -100 and row['顺势指标'] < 100:\n return 0\n else:\n return -1\n \n\n#计算 Y 位置\ndef cal_y_position(row):\n if row['顺势指标x'] > row['顺势指标shift'] :\n return 1\n elif row['顺势指标x'] < row['顺势指标shift']:\n return -1\n else:\n return 0\n\n\n# 阿龙指数 up越大越好,down越小越好; 我们这里设置阿龙指数的up和down为0.5,即up和down的范围为-0.5~0.5\ndef aroon_position():\n share['aroon_up_dates'] = share.apply(lambda x: strategy_one(x['阿隆指标up'],x['阿隆指标down']),axis=1) #阿隆指标是否符合策略 1:符合策略,-1:不符合策略,0:不符合策略\n share['aloon_shift-1'] = share['aroon_up_dates'].shift(1) # 前一天周期的结果\n share['count_aloon_position'] = share.apply(count_position,axis=1) # 统计阿龙指标在某一个周期持续的时间\n\n share['aloon_max'] = share.apply(aloon_max,axis=1) #阿隆指标的最大值\n share['顺势指标shift'] = share['顺势指标'].shift(1) # 前一天周期的结果\n share['顺势指标x'] = share.apply(cal_x_position,axis=1) # 顺势指标策略\n share['顺势指标x-shift'] = share['顺势指标x'].shift(1) # 前一天周期的结果\n share['顺势指标y'] = share.apply(cal_y_position,axis=1) # 前一天周期的结果\n share['顺势指标z_status'] = share.apply(cal_z_status,axis=1) # 前一天周期的结果\n share.to_csv('123.csv')\n # position = share.loc[:, '阿隆指标down']\n # for item in position:\n # print(item)\n\n\n\naroon_position()\n\n\n\n#pandas取最后10个数据\n# result = share.tail(10) # 取最后10个数据\n\n\n","repo_name":"jinjunnn/python","sub_path":"analyzedata.py","file_name":"analyzedata.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7314986467","text":"# -*- coding: UTF-8 -*-\n'''\n@Project :week02 \n@File :1493.py\n@IDE :PyCharm \n@Author : Hwang\n@Date :2021-11-17 오후 4:06 \n'''\n\nimport sys\n\nlength, width, height = map(int, sys.stdin.readline().split())\ntype_cube = int(sys.stdin.readline())\ncube_arr = sorted(list(list(map(int,sys.stdin.readline().split())) for _ in range(type_cube)),reverse=True)\n\n# total_vol = length*width*height\n#\n# for i in range(len(cube_arr)-1,-1,-1):\n# cube_size = 2 ** cube_arr[i][0]\n# cube_num = cube_arr[i][1]\n# cube_vol = cube_size ** 3\n#\n# min_dist = min(length, width, height)\n#\n# if cube_size > min_dist:\n# continue\n# else:\n# # print(min_dist)\n# # print(cube_size)\n# temp = 0\n# if total_vol // cube_vol > cube_num:\n# temp = cube_vol*cube_num\n# else:\n# temp= total_vol//cube_vol\n#\n#\n# def find_cube(l,w,h, n):\n# if n == -1:\n# return 0\n# min_dist = min(l,w,h)\n# cube_size = 2**n\n#\n# if cube_size > min_dist:\n# return 0\n#\n# find_cube(l-cube_size, w, h,n-1)\n# find_cube(l-cube_size,w-cube_size,h,n-1)\n# find_cube(cube_size,cube_size,h-cube_size,n-1)\n#\n\nvolume = length*width*height\nans = 0\nbefore = 0\n\nfor w, cnt in cube_arr:\n before <<=3\n v = 2**w # cube length\n maxCnt = (length//v) * (width//v) * (height//v) - before\n maxCnt = min(cnt, maxCnt)\n ans+= maxCnt\n before+=maxCnt\n\nif before == volume:\n print(ans)\nelse:\n print(-1)\n\n","repo_name":"DongGeun974/computingThinking","sub_path":"week02/1493.py","file_name":"1493.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"71069450519","text":"import pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport streamlit as 
st\nimport plotly.graph_objects as go\nimport plotly.figure_factory as ff\n\n# Configurando a página\nst.set_page_config(\n page_title=\"Dashboard MUNIC\",\n page_icon=\"dashboard_adidas/logoadidas.png\",\n layout=\"wide\",\n)\n\npaleta = [\"#63A3B2\", \"#EF7158\"]\namarelo = [\"#FBD178\"]\nazul = [\"#63A3B2\"]\nlaranja = ['#EF7158']\n\n# Função para calcular frequência relativa\ndef freq_rel(coluna):\n contagem = np.sum(coluna)\n calc = round((coluna / contagem) * 100, 2)\n return calc\n\n# Importando dados formatados\ndf = pd.ExcelFile(\"Base_MUNIC_2021 - Atualizada.xlsx\")\n\n# Nome das tabelas atuais\ndf_planilhas = df.sheet_names\n# Importando cada planilha separadamente\ndf_pre = pd.read_excel(df, sheet_name=df_planilhas[0]) # Informações atuais do prefeito\ndf_edu = pd.read_excel(df, sheet_name=df_planilhas[1]) # Educação\ndf_cul = pd.read_excel(df, sheet_name=df_planilhas[2]) # Cultura\ndf_esp = pd.read_excel(df, sheet_name=df_planilhas[3]) # Esporte\ndf_sau = pd.read_excel(df, sheet_name=df_planilhas[4]) # Saúde\n\n# ----------------------------------------------------------------------\n\n# Alterando nome das colunas\ndicionario_pre = df_pre.rename(\n columns={\n \"Mun\": \"Municipio\",\n \"Mpeg02\": \"Mandato 2020\",\n \"Mpeg03\": \"Sexo\",\n \"Mpeg04\": \"Idade\",\n \"Mpeg05\": \"Raça\",\n \"Mpeg051\": \"Autodeclara\",\n \"Mpeg06\": \"Escolaridade\",\n \"Pop estimada 2021\": \"Pop\",\n },\n inplace=True,\n)\n# Alterando valores da variável 'Regiao'\nmapeamento_pre = {\n \"1 - Norte\": \"Norte\",\n \"2 - Nordeste\": \"Nordeste\",\n \"3 - Sudeste\": \"Sudeste\",\n \"4 - Sul\": \"Sul\",\n \"5 - Centro-Oeste\": \"Centro-Oeste\",\n}\ndf_pre[\"Regiao\"] = df_pre[\"Regiao\"].replace(mapeamento_pre)\n\n\n\n# Alterando nome das colunas\ndicionario_edu = df_edu.rename (columns={ 'Mun': 'Municipio',\n 'Medu01': 'Orgao Gestor',\n 'Medu03': 'Sexo',\n 'Medu04': 'Idade',\n 'Medu05': 'Raça',\n 'Medu051': 'Autodeclara',\n 'Medu06': 'Escolaridade'\n}, inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_edu = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_edu['Regiao'] = df_edu['Regiao'].replace(mapeamento_edu)\n\n\n\n# Alterando nome das colunas\ndicionario_cul = df_cul.rename (columns={ 'Mun': 'Municipio',\n 'Mcul01': 'Orgao Gestor',\n 'Mcul03': 'Sexo',\n 'Mcul04': 'Idade',\n 'Mcul05': 'Raça',\n 'Mcul051': 'Autodeclara',\n 'Mcul06': 'Escolaridade'\n}, inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_cul = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_cul['Regiao'] = df_cul['Regiao'].replace(mapeamento_cul)\n\n\n# Alterando nome das colunas\ndicionario_esp = df_esp.rename (columns={ 'Mun': 'Municipio',\n 'Mesp01': 'Orgao Gestor',\n 'Mesp03': 'Sexo',\n 'Mesp04': 'Idade',\n 'Mesp05': 'Raça',\n 'Mesp051': 'Autodeclara',\n 'Mesp06': 'Escolaridade'\n}, inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_esp = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_esp['Regiao'] = df_esp['Regiao'].replace(mapeamento_esp)\n\n# Alterando nome das colunas\ndicionario_sau = df_sau.rename (columns={ 'Mun': 'Municipio',\n 'Msau01': 'Orgao Gestor',\n 'Msau03': 'Sexo',\n 'Msau04': 'Idade',\n 'Msau05': 'Raça',\n 'Msau051': 'Autodeclara',\n 'Msau06': 'Escolaridade'\n}, 
inplace=True)\n\n# Alterando valores da coluna 'Regiao'\nmapeamento_sau = {\n '1 - Norte': 'Norte',\n '2 - Nordeste': 'Nordeste',\n '3 - Sudeste': 'Sudeste',\n '4 - Sul': 'Sul',\n '5 - Centro-Oeste': 'Centro-Oeste',\n}\ndf_sau['Regiao'] = df_sau['Regiao'].replace(mapeamento_sau)\n\n\n# ----------------------------------------------------------------------\n\n\nst.title('Perfil de mulheres em cargos públicos municipais')\n\n# Usando guias para navegar entre as páginas\ntabs = st.tabs([\"Geral\", \"Prefeituras\", \"Educação\", \"Cultura\", \"Esporte\", \"Saúde\"])\n\n# ----------------------------------------------------------------------\n\nwith tabs[0]:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n colge1,colge2 = st.columns([1, 2])\n\n with colge1:\n\n # Separando os sexos\n pre_fem = pd.DataFrame(df_pre[df_pre['Sexo'] == 'Feminino'])\n pre_mas = pd.DataFrame(df_pre[df_pre['Sexo'] == 'Masculino'])\n # Contando quantos prefeitos por sexo\n contagem_pre_fem = len(pre_fem)\n contagem_pre_mas = len(pre_mas)\n\n # Separando os sexos\n edu_fem = pd.DataFrame(df_edu[df_edu['Sexo'] == 'Feminino'])\n edu_mas = pd.DataFrame(df_edu[df_edu['Sexo'] == 'Masculino'])\n # Contagem\n contagem_edu_fem = len(edu_fem)\n contagem_edu_mas = len(edu_mas)\n\n # Separando os sexos\n cul_fem = pd.DataFrame(df_cul[df_cul['Sexo'] == 'Feminino'])\n cul_mas = pd.DataFrame(df_cul[df_cul['Sexo'] == 'Masculino'])\n # Contagem\n contagem_cul_fem = len(cul_fem)\n contagem_cul_mas = len(cul_mas)\n\n # Separando os sexos\n esp_fem = pd.DataFrame(df_esp[df_esp['Sexo'] == 'Feminino'])\n esp_mas = pd.DataFrame(df_esp[df_esp['Sexo'] == 'Masculino'])\n # Contagem\n contagem_esp_fem = len(esp_fem)\n contagem_esp_mas = len(esp_mas)\n\n # Separando os sexos\n sau_fem = pd.DataFrame(df_sau[df_sau['Sexo'] == 'Feminino'])\n sau_mas = pd.DataFrame(df_sau[df_sau['Sexo'] == 'Masculino'])\n # Contagem\n contagem_sau_fem = len(sau_fem)\n contagem_sau_mas = len(sau_mas)\n \n # Feminino\n geral_fem = (contagem_pre_fem, contagem_edu_fem, contagem_cul_fem, contagem_esp_fem, contagem_sau_fem)\n contagem_geral_fem = np.sum(geral_fem)\n\n # Masculino\n geral_mas = (contagem_pre_mas, contagem_edu_mas, contagem_cul_mas, contagem_esp_mas, contagem_sau_mas)\n contagem_geral_mas = np.sum(geral_mas)\n\n # Dados\n labels = [\"Feminino\", \"Masculino\"]\n valores = [contagem_geral_fem, contagem_geral_mas]\n\n # Criar a figura de pizza\n st.write(\"Distribuição de Gênero nos Orgãos Públicos Municipais\")\n fig_pizza = px.pie(\n names=labels,\n values=valores,\n # title=\"Distribuição de Gênero Geral\",\n width=400,\n hole=0.5\n )\n\n # Exibir a figura no Streamlit\n st.plotly_chart(fig_pizza, use_container_width=False)\n\n with colge2:\n pre_fem = pd.DataFrame(df_pre[df_pre[\"Sexo\"] == \"Feminino\"])\n edu_fem = pd.DataFrame(df_edu[df_edu[\"Sexo\"] == \"Feminino\"])\n cul_fem = pd.DataFrame(df_cul[df_cul[\"Sexo\"] == \"Feminino\"])\n esp_fem = pd.DataFrame(df_esp[df_esp[\"Sexo\"] == \"Feminino\"])\n sau_fem = pd.DataFrame(df_sau[df_sau[\"Sexo\"] == \"Feminino\"])\n geral_fem = pd.concat([pre_fem, edu_fem, cul_fem, esp_fem, sau_fem])\n geral_fem.drop(columns=['Faixa_pop'], inplace=True)\n\n dados = geral_fem['Pop']\n\n intervalos = [\n (0, 5000),\n (5001, 10000),\n (10001, 20000),\n (20001, 50000),\n (50001, 100000),\n (100001, 500000),\n (500001, max(dados))\n ]\n\n def atribuir_faixa_populacional(valor):\n for i, (min_intervalo, max_intervalo) in enumerate(intervalos):\n if min_intervalo <= valor <= max_intervalo:\n return f\"{min_intervalo} - {max_intervalo}\"\n\n # Calcula a contagem da faixa populacional\n geral_fem['Faixa Populacional'] = geral_fem['Pop'].apply(atribuir_faixa_populacional)\n contagem_faixa_populacional = geral_fem['Faixa Populacional'].value_counts().reset_index()\n contagem_faixa_populacional.columns = ['Faixa Populacional', 'Contagem']\n\n # Calcula a frequência relativa\n contagem_faixa_populacional['Frequencia_Relativa'] = contagem_faixa_populacional['Contagem'] / contagem_faixa_populacional['Contagem'].sum()\n\n # Cria um gráfico de barras empilhadas com go.Figure()\n st.write('Faixa Populacional nas Cidades com Liderança Feminina')\n fig = go.Figure()\n\n # Adiciona barras empilhadas ao 
gráfico\n        for i, row in contagem_faixa_populacional.iterrows():\n            faixa = row['Faixa Populacional']\n            freq_relativa = row['Frequencia_Relativa']\n            contagem = row['Contagem']\n            hovertext = f'Faixa: {faixa}<br>Contagem: {contagem}<br>
Frequência Relativa: {freq_relativa:.2%}'\n fig.add_trace(go.Bar(\n x=[faixa],\n y=[freq_relativa], # Use a frequência relativa como altura da barra\n text=[contagem],\n textposition='outside',\n name=faixa,\n hoverinfo='text',\n hovertext=hovertext,\n marker_color=laranja[i % len(laranja)],\n width=1, # Define a largura da barra como 1 (1 unidade da frequência relativa)\n ))\n\n # Atualiza layout do gráfico\n fig.update_layout(\n xaxis_title='Faixa Populacional',\n yaxis_title='Frequência Relativa',\n # title='Faixa Populacional nas Cidades com Liderança Feminina',\n width=930,\n height=450,\n barmode='stack', # Empilha as barras\n )\n\n # Exibe o gráfico no Streamlit\n st.plotly_chart(fig, use_container_width=False)\n\n # Substituir \"Nao informou\" por NaN\n geral_fem['Idade'].replace('Nao informou', pd.NA, inplace=True)\n\n # Excluir as linhas onde a coluna \"Idade\" é NaN\n geral_fem.dropna(subset=['Idade'], inplace=True)\n\n # Transformar as variáveis de object para int\n geral_fem['Idade'] = geral_fem['Idade'].astype(int)\n\n # Definir as faixas etárias\n faixas_etarias = [(0, 30), (30, 60), (60, 100)]\n faixas_etarias_labels = ['19-29', '30-59', '60+']\n\n # Inicialize o Streamlit com três colunas\n col1, col2, col3 = st.columns(3)\n\n for i, faixa in enumerate(faixas_etarias):\n min_age, max_age = faixa\n faixa_label = faixas_etarias_labels[i]\n\n # Filtrar dados para a faixa etária\n filtered_data = geral_fem[(geral_fem['Idade'] >= min_age) & (geral_fem['Idade'] <= max_age)]\n\n # Calcular a frequência absoluta\n freq_absoluta = len(filtered_data)\n\n # Calcular a frequência relativa\n freq_relativa = freq_absoluta / len(geral_fem)\n\n # Criar um gráfico de distplot para a faixa etária atual\n fig = ff.create_distplot([filtered_data['Idade']], [faixa_label], bin_size=5, show_curve=True)\n\n # Configurar layout do gráfico\n fig.update_layout(\n xaxis_title='Idade',\n yaxis_title='Densidade',\n width=800,\n height=500,\n )\n\n # Exibir o título acima do gráfico\n with col1 if i == 0 else col2 if i == 1 else col3:\n st.write(f'Distribuição de Idades - {faixa_label}')\n st.plotly_chart(fig, use_container_width=True)\n\n# ----------------------------------------------------------------------\n\nwith tabs[1]:\n colpre1, colpre2 = st.columns([1,2])\n\n with colpre1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n pre_fem = pd.DataFrame(df_pre[df_pre[\"Sexo\"] == \"Feminino\"])\n\n contagem_pre_fem = len(pre_fem)\n\n valores_pre = [contagem_pre_fem, len(df_pre) - contagem_pre_fem]\n rotulos_pre = [\"Feminino\", \"Masculino\"]\n\n st.write(\"Distribuição de Gênero nas Prefeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_pre[0], valores_pre[0])\n card2.metric(rotulos_pre[1], valores_pre[1])\n\n figpie1 = px.pie(\n values=valores_pre,\n names=rotulos_pre,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_pre_fem = pre_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_pre_fem['Percentagem'] = freq_rel(escolaridade_pre_fem['Frequência acumulada'])\n escolaridade_pre_fem = escolaridade_pre_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_pre_fem['Percentagem'] = escolaridade_pre_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_pre_fem = escolaridade_pre_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das Prefeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_pre_fem[['Escolaridade', 'Percentagem']])\n with colpre2: \n colpre = st.columns(2)\n # Agrupando por estados\n uf_pre = pre_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_pre['Frequência relativa'] = freq_rel(uf_pre['Frequência acumulada'])\n uf_pre = uf_pre.rename(columns={'UF': 'Estados'})\n uf_pre = uf_pre.sort_values(by='Frequência acumulada', ascending=False)\n uf_pre = uf_pre.reset_index()\n uf_pre = uf_pre.drop('index', axis=1)\n\n uf_pre_grafico = uf_pre[['Estados', 'Frequência acumulada']]\n uf_pre_rotulo = uf_pre['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_pre = pre_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_pre['Percentagem'] = freq_rel(rg_pre['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_pre = rg_pre.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres Prefeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_pre['Regiao'][0], rg_pre['Percentagem'][0])\n rg2.metric(rg_pre['Regiao'][1], rg_pre['Percentagem'][1])\n rg3.metric(rg_pre['Regiao'][2], rg_pre['Percentagem'][2])\n rg4.metric(rg_pre['Regiao'][3], rg_pre['Percentagem'][3])\n rg5.metric(rg_pre['Regiao'][4], rg_pre['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres Prefeitas por Estado')\n figbar1 = px.bar(uf_pre,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_pre_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_pre = pre_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_pre.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_pre = pre_fem[pre_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_pre = autodeclara_sim_pre['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_pre.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das Prefeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_pre, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_pre['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_pre['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('Prefeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_pre, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_pre['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[2]:\n coledu1, coledu2 = st.columns([1,2])\n\n with coledu1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n edu_fem = pd.DataFrame(df_edu[df_edu[\"Sexo\"] == \"Feminino\"])\n\n contagem_edu_fem = len(edu_fem)\n\n valores_edu = [contagem_edu_fem, len(df_edu) - contagem_edu_fem]\n rotulos_edu = [\"Feminino\", \"Masculino\"]\n\n st.write(\"Distribuição de Gênero nas edufeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_edu[0], valores_edu[0])\n card2.metric(rotulos_edu[1], valores_edu[1])\n\n figpie1 = px.pie(\n values=valores_edu,\n names=rotulos_edu,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_edu_fem = edu_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_edu_fem['Percentagem'] = freq_rel(escolaridade_edu_fem['Frequência acumulada'])\n escolaridade_edu_fem = escolaridade_edu_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_edu_fem['Percentagem'] = escolaridade_edu_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_edu_fem = escolaridade_edu_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das edufeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_edu_fem[['Escolaridade', 'Percentagem']])\n with coledu2: \n coledu = st.columns(2)\n # Agrupando por estados\n uf_edu = edu_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_edu['Frequência relativa'] = freq_rel(uf_edu['Frequência acumulada'])\n uf_edu = uf_edu.rename(columns={'UF': 'Estados'})\n uf_edu = uf_edu.sort_values(by='Frequência acumulada', ascending=False)\n uf_edu = uf_edu.reset_index()\n uf_edu = uf_edu.drop('index', axis=1)\n\n uf_edu_grafico = uf_edu[['Estados', 'Frequência acumulada']]\n uf_edu_rotulo = uf_edu['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_edu = edu_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_edu['Percentagem'] = freq_rel(rg_edu['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_edu = rg_edu.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres edufeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_edu['Regiao'][0], rg_edu['Percentagem'][0])\n rg2.metric(rg_edu['Regiao'][1], rg_edu['Percentagem'][1])\n rg3.metric(rg_edu['Regiao'][2], rg_edu['Percentagem'][2])\n rg4.metric(rg_edu['Regiao'][3], rg_edu['Percentagem'][3])\n rg5.metric(rg_edu['Regiao'][4], rg_edu['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres edufeitas por Estado')\n figbar1 = px.bar(uf_edu,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_edu_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_edu = edu_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_edu.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_edu = edu_fem[edu_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_edu = autodeclara_sim_edu['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_edu.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das edufeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_edu, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_edu['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_edu['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('edufeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_edu, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_edu['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[3]:\n colcul1, colcul2 = st.columns([1,2])\n\n with colcul1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n cul_fem = pd.DataFrame(df_cul[df_cul[\"Sexo\"] == \"Feminino\"])\n\n contagem_cul_fem = len(cul_fem)\n\n valores_cul = [contagem_cul_fem, len(df_cul) - contagem_cul_fem]\n rotulos_cul = [\"Feminino\", \"Masculino\"]\n\n st.write(\"Distribuição de Gênero nas culfeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_cul[0], valores_cul[0])\n card2.metric(rotulos_cul[1], valores_cul[1])\n\n figpie1 = px.pie(\n values=valores_cul,\n names=rotulos_cul,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_cul_fem = cul_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_cul_fem['Percentagem'] = freq_rel(escolaridade_cul_fem['Frequência acumulada'])\n escolaridade_cul_fem = escolaridade_cul_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_cul_fem['Percentagem'] = escolaridade_cul_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_cul_fem = escolaridade_cul_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das culfeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_cul_fem[['Escolaridade', 'Percentagem']])\n with colcul2: \n colcul = st.columns(2)\n # Agrupando por estados\n uf_cul = cul_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_cul['Frequência relativa'] = freq_rel(uf_cul['Frequência acumulada'])\n uf_cul = uf_cul.rename(columns={'UF': 'Estados'})\n uf_cul = uf_cul.sort_values(by='Frequência acumulada', ascending=False)\n uf_cul = uf_cul.reset_index()\n uf_cul = uf_cul.drop('index', axis=1)\n\n uf_cul_grafico = uf_cul[['Estados', 'Frequência acumulada']]\n uf_cul_rotulo = uf_cul['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_cul = cul_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_cul['Percentagem'] = freq_rel(rg_cul['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_cul = rg_cul.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres culfeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_cul['Regiao'][0], rg_cul['Percentagem'][0])\n rg2.metric(rg_cul['Regiao'][1], rg_cul['Percentagem'][1])\n rg3.metric(rg_cul['Regiao'][2], rg_cul['Percentagem'][2])\n rg4.metric(rg_cul['Regiao'][3], rg_cul['Percentagem'][3])\n rg5.metric(rg_cul['Regiao'][4], rg_cul['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres culfeitas por Estado')\n figbar1 = px.bar(uf_cul,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_cul_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_cul = cul_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_cul.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_cul = cul_fem[cul_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_cul = autodeclara_sim_cul['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_cul.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das culfeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_cul, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_cul['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_cul['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('culfeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_cul, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_cul['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[4]:\n colesp1, colesp2 = st.columns([1,2])\n\n with colesp1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n esp_fem = pd.DataFrame(df_esp[df_esp[\"Sexo\"] == \"Feminino\"])\n\n contagem_esp_fem = len(esp_fem)\n\n valores_esp = [contagem_esp_fem, len(df_esp) - contagem_esp_fem]\n rotulos_esp = [\"Feminino\", \"Masespino\"]\n\n st.write(\"Distribuição de Gênero nas espfeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_esp[0], valores_esp[0])\n card2.metric(rotulos_esp[1], valores_esp[1])\n\n figpie1 = px.pie(\n values=valores_esp,\n names=rotulos_esp,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_esp_fem = esp_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_esp_fem['Percentagem'] = freq_rel(escolaridade_esp_fem['Frequência acumulada'])\n escolaridade_esp_fem = escolaridade_esp_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_esp_fem['Percentagem'] = escolaridade_esp_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_esp_fem = escolaridade_esp_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das espfeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_esp_fem[['Escolaridade', 'Percentagem']])\n with colesp2: \n colesp = st.columns(2)\n # Agrupando por estados\n uf_esp = esp_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_esp['Frequência relativa'] = freq_rel(uf_esp['Frequência acumulada'])\n uf_esp = uf_esp.rename(columns={'UF': 'Estados'})\n uf_esp = uf_esp.sort_values(by='Frequência acumulada', ascending=False)\n uf_esp = uf_esp.reset_index()\n uf_esp = uf_esp.drop('index', axis=1)\n\n uf_esp_grafico = uf_esp[['Estados', 'Frequência acumulada']]\n uf_esp_rotulo = uf_esp['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_esp = esp_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_esp['Percentagem'] = freq_rel(rg_esp['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_esp = rg_esp.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres espfeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_esp['Regiao'][0], rg_esp['Percentagem'][0])\n rg2.metric(rg_esp['Regiao'][1], rg_esp['Percentagem'][1])\n rg3.metric(rg_esp['Regiao'][2], rg_esp['Percentagem'][2])\n rg4.metric(rg_esp['Regiao'][3], rg_esp['Percentagem'][3])\n rg5.metric(rg_esp['Regiao'][4], rg_esp['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres espfeitas por Estado')\n figbar1 = px.bar(uf_esp,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_esp_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_esp = esp_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_esp.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_esp = esp_fem[esp_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_esp = autodeclara_sim_esp['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_esp.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das espfeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_esp, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_esp['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_esp['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('espfeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_esp, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_esp['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------\n\nwith tabs[5]:\n colsau1, colsau2 = st.columns([1,2])\n\n with colsau1:\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n sau_fem = pd.DataFrame(df_sau[df_sau[\"Sexo\"] == \"Feminino\"])\n\n contagem_sau_fem = len(sau_fem)\n\n valores_sau = [contagem_sau_fem, len(df_sau) - contagem_sau_fem]\n rotulos_sau = [\"Feminino\", \"Massauino\"]\n\n st.write(\"Distribuição de Gênero nas saufeituras\")\n card1, card2 = st.columns([1, 2])\n\n card1.metric(rotulos_sau[0], valores_sau[0])\n card2.metric(rotulos_sau[1], valores_sau[1])\n\n figpie1 = px.pie(\n values=valores_sau,\n names=rotulos_sau,\n color_discrete_sequence=paleta,\n width=400,\n hole=0.5\n )\n st.plotly_chart(figpie1, use_container_width=False)\n\n escolaridade_sau_fem = sau_fem.groupby('Escolaridade')['Escolaridade'].count().reset_index(name='Frequência acumulada')\n escolaridade_sau_fem['Percentagem'] = freq_rel(escolaridade_sau_fem['Frequência acumulada'])\n escolaridade_sau_fem = escolaridade_sau_fem.reset_index().drop('index', axis=1).sort_values(by='Percentagem', ascending=False)\n escolaridade_sau_fem['Percentagem'] = escolaridade_sau_fem['Percentagem'].apply(lambda x: f'{x:.2f}%')\n escolaridade_sau_fem = escolaridade_sau_fem.reset_index()\n\n st.write('Frequência do Nível Escolar das saufeitas')\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.dataframe(escolaridade_sau_fem[['Escolaridade', 'Percentagem']])\n with colsau2: \n colsau = st.columns(2)\n # Agrupando por estados\n uf_sau = sau_fem.groupby('UF').size().reset_index(name='Frequência acumulada')\n uf_sau['Frequência relativa'] = freq_rel(uf_sau['Frequência acumulada'])\n uf_sau = uf_sau.rename(columns={'UF': 'Estados'})\n uf_sau = uf_sau.sort_values(by='Frequência acumulada', ascending=False)\n uf_sau = uf_sau.reset_index()\n uf_sau = uf_sau.drop('index', axis=1)\n\n uf_sau_grafico = uf_sau[['Estados', 'Frequência acumulada']]\n uf_sau_rotulo = uf_sau['Frequência relativa'].apply(lambda x: f'{x:.2f}%')\n\n\n # Agrupando por região\n rg_sau = sau_fem.groupby('Regiao')['Regiao'].count().reset_index(name='Contagem')\n rg_sau['Percentagem'] = freq_rel(rg_sau['Contagem']).apply(lambda x: f'{x:.2f}%')\n rg_sau = rg_sau.reset_index().drop('index', axis=1).sort_values(by='Contagem', ascending=False)\n\n st.write('Frequência de Mulheres saufeitas por Região', size=16)\n rg1, rg2, rg3, rg4, rg5 = st.columns(5)\n\n rg1.metric(rg_sau['Regiao'][0], rg_sau['Percentagem'][0])\n rg2.metric(rg_sau['Regiao'][1], rg_sau['Percentagem'][1])\n rg3.metric(rg_sau['Regiao'][2], rg_sau['Percentagem'][2])\n rg4.metric(rg_sau['Regiao'][3], rg_sau['Percentagem'][3])\n rg5.metric(rg_sau['Regiao'][4], rg_sau['Percentagem'][4])\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n\n st.write('Frequência de Mulheres saufeitas por Estado')\n figbar1 = px.bar(uf_sau,\n x='Estados',\n y='Frequência acumulada',\n #labels={'Frequência acumulada': 'Frequência Acumulada'},\n color_discrete_sequence=amarelo,\n width=900,\n height=390,\n )\n # Adicionando rótulos às barras\n figbar1.update_traces(\n text=uf_sau_rotulo, \n textposition='outside', \n )\n\n # Exibir o gráfico no Streamlit\n st.plotly_chart(figbar1, use_container_width=False)\n\n graf1, graf2 = st.columns(2)\n \n # Dados de contagem de raça/paleta\n contagem_raca_paleta_sau = sau_fem['Raça'].value_counts().reset_index()\n contagem_raca_paleta_sau.columns = ['Raça/paleta', 'Contagem']\n\n # Dados de autodeclarações \"Sim\" por raça/paleta\n autodeclara_sim_sau = sau_fem[sau_fem['Autodeclara'] == 'Sim']\n contagem_autodeclara_sim_sau = autodeclara_sim_sau['Raça'].value_counts().reset_index()\n contagem_autodeclara_sim_sau.columns = ['Raça/paleta', 'Contagem']\n\n graf1.write('Classificação de Raça/Cor das saufeitas')\n # Criar o gráfico de barras lado a lado\n figbar2 = px.bar(contagem_raca_paleta_sau, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=laranja\n )\n rotulo1 = contagem_raca_paleta_sau['Contagem']\n figbar2.update_traces(text=contagem_raca_paleta_sau['Contagem'], textposition='outside', showlegend=False)\n \n \n # Exibir o gráfico no Streamlit\n graf1.plotly_chart(figbar2, use_container_width=False)\n\n graf2.write('saufeitas que falaram \"Sim\" para a classificação Étnico Racial')\n # Criar o gráfico de barras lado a lado\n figbar3 = px.bar(contagem_autodeclara_sim_sau, x='Raça/paleta', y='Contagem',\n labels={'Raça/paleta': 'Raça/Cor', 'value': 'Contagem'},\n width=450,\n height=450,\n color_discrete_sequence=azul\n )\n figbar3.update_traces(text=contagem_autodeclara_sim_sau['Contagem'], textposition='outside', showlegend=False)\n\n \n # Exibir o gráfico no Streamlit\n graf2.plotly_chart(figbar3, use_container_width=False)\n\n# ----------------------------------------------------------------------","repo_name":"mayalajesus/datascience","sub_path":"analise_basemunic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":39734,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"72212814998","text":"'''In the last mission, we worked with just one data set, the 2015 World Happiness Report, to explore data aggregation. However, it's very common in practice to work with more than one data set at a time.\nOften, you'll find that you need additional data to perform analysis or you'll find that you have the data, but need to pull it from mulitiple sources. In this mission, we'll learn a couple of different techniques for combining data using pandas to easily handle situations like these.\nWe'll use what we learned in the last mission to analyze the 2015, 2016, and 2017 World Happiness Reports. 
Specifically, we'll look to answer the following question:\nDid world happiness increase, decrease, or stay about the same from 2015 to 2017?\nAs a reminder, these reports assign each country a happiness score based on a poll question that asks respondents to rank their life on a scale of 0 - 10, so \"world happiness\" refers to this definition specifically.\nBelow is a preview of the 2015 report:\nCountry\tRegion\tHappiness Rank\tHappiness Score\tStandard Error\tEconomy (GDP per Capita)\tFamily\tHealth (Life Expectancy)\tFreedom\tTrust (Government Corruption)\tGenerosity\tDystopia Residual\n0\tSwitzerland\tWestern Europe\t1\t7.587\t0.03411\t1.39651\t1.34951\t0.94143\t0.66557\t0.41978\t0.29678\t2.51738\n1\tIceland\tWestern Europe\t2\t7.561\t0.04884\t1.30232\t1.40223\t0.94784\t0.62877\t0.14145\t0.43630\t2.70201\n2\tDenmark\tWestern Europe\t3\t7.527\t0.03328\t1.32548\t1.36058\t0.87464\t0.64938\t0.48357\t0.34139\t2.49204\n3\tNorway\tWestern Europe\t4\t7.522\t0.03880\t1.45900\t1.33095\t0.88521\t0.66973\t0.36503\t0.34699\t2.46531\n4\tCanada\tNorth America\t5\t7.427\t0.03553\t1.32629\t1.32261\t0.90563\t0.63297\t0.32957\t0.45811\t2.45176\nBelow are descriptions for some of the columns:\nCountry - Name of the country\nRegion - Name of the region the country belongs to\nHappiness Rank - The rank of the country, as determined by its happiness score\nHappiness Score - A score assigned to each country based on the answers to a poll question that asks respondents to rate their happiness on a scale of 0-10\nLet's start by reading the 2015, 2016, and 2017 reports into a pandas dataframe and adding a Year column to each to make it easier to distinguish between them.\n\nInstructions\n\nWe've already read the World_Happiness_2015.csv file into a dataframe called happiness2015.\nUse the pandas.read_csv() function to read the World_Happiness_2016.csv file into a dataframe called happiness2016 and the World_Happiness_2017.csv file into a dataframe called happiness2017.\nAdd a column called Year to each dataframe with the corresponding year. For example, the Year column in happiness2015 should contain the value 2015 for each row.'''\nimport pandas as pd\nimport numpy as np\n\nhappiness_2015 = pd.read_csv(\"World_Happiness_2015.csv\")\nhappiness_2016 = pd.read_csv(\"World_Happiness_2016.csv\")\nhappiness_2017 = pd.read_csv(\"World_Happiness_2017.csv\")\n\n'''Añadimos columnas a los df'''\n\nhappiness_2015['Year'] = 2015\nhappiness_2016['Year'] = 2016\nhappiness_2017['Year'] = 2017\n\n'''Let's start by exploring the pd.concat() function. 
The concat() function combines dataframes one of two ways:\nStacked: Axis = 0 (This is the default option.)\nConcat_Updated\nSide by Side: Axis = 1\nConcat_Axis1\nSince concat is a function, not a method, we use the syntax below:\nConcat_syntax\nIn the next exercise, we'll use the concat() function to combine subsets of happiness2015 and happiness2016 and then debrief the results on the following screen.\nBelow are the subsets we'll be working with:\nhead_2015 = happiness2015[['Country','Happiness Score', 'Year']].head(3)\nCountry\tHappiness Score\tYear\n0\tSwitzerland\t7.587\t2015\n1\tIceland\t7.561\t2015\n2\tDenmark\t7.527\t2015\nhead_2016 = happiness2016[['Country','Happiness Score', 'Year']].head(3)\nCountry\tHappiness Score\tYear\n0\tDenmark\t7.526\t2016\n1\tSwitzerland\t7.509\t2016\n2\tIceland\t7.501\t2016\nLet's use the concat() function to combine head_2015 and head_2016 next.\n\nInstructions\n\nWe've already saved the subsets from happiness2015 and happiness2016 to the variables head_2015 and head_2016.\nUse the pd.concat() function to combine head_2015 and head_2016 along axis = 0. Remember to pass the head_2015 and head_2016 into the function as a list. Assign the result to concat_axis0.\nUse the pd.concat() function to combine head_2015 and head_2016 along axis = 1. Remember to pass head_2015 and head_2016 into the function as a list and set the axis parameter equal to 1. Assign the result to concat_axis1.\nUse the variable inspector to view concat_axis0 and concat_axis1.\nAssign the number of rows in concat_axis0 to a variable called question1.\nAssign the number of rows in concat_axis1 to a variable called question2.'''\n\nhead_2015 = happiness_2015[['Country','Happiness Score', 'Year']].head(3)\nhead_2016 = happiness_2016[['Country','Happiness Score', 'Year']].head(3)\n\nconcat_axis0 = pd.concat([head_2015, head_2016], axis = 0)\nconcat_axis1 = pd.concat([head_2015, head_2016], axis = 1)\nquestion1 = 6\nquestion2 = 3\n\nhead_2015 = happiness_2015[['Year','Country','Happiness Score', 'Standard Error']].head(4)\nhead_2016 = happiness_2016[['Country','Happiness Score', 'Year']].head(3)\nconcat_axis0 = pd.concat([head_2015, head_2016])\n'''Note that because the Standard Error column didn't exist in head_2016, NaN values were created to signify those values are missing. By default, the concat function will keep ALL of the data, no matter if missing values are created.\nAlso, notice again the indexes of the original dataframes didn't change. If the indexes aren't meaningful, it can be better to reset them. This is especially true when we create duplicate indexes, because they could cause errors as we perform other data cleaning tasks.\nLuckily, the concat function has a parameter, ignore_index, that can be used to clear the existing index and reset it in the result. Let's practice using it next.'''\n\nconcat_update_index = pd.concat([head_2015, head_2016], ignore_index = True)\n\n'''Next, we'll explore the pd.merge() function - a function that can execute high performance database-style joins. Note that unlike the concat function, the merge function only combines dataframes horizontally (axis=1) and can only combine two dataframes at a time. However, it can be valuable when we need to combine very large dataframes quickly and provides more flexibility in terms of how data can be combined, as we'll see in the next couple screens.\nWith the merge() function, we'll combine dataframes on a key, a shared index or column. 
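A minimal sketch of such a key-based join, assuming two toy three-row frames that share only a Country column (Norway is the lone common value, mirroring the subsets used below):

import pandas as pd

# Toy stand-ins for the 2015/2016 subsets; only 'Norway' appears in both
left = pd.DataFrame({'Country': ['Denmark', 'Norway', 'Canada'], 'Year': [2015] * 3})
right = pd.DataFrame({'Country': ['Iceland', 'Norway', 'Finland'], 'Year': [2016] * 3})

# merge() links rows whose 'Country' values match in both frames, so the
# default join returns a single row; shared columns get _x/_y suffixes
print(pd.merge(left, right, on='Country'))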
When choosing a key, it's good practice to use keys with unique values to avoid duplicating data.\nYou can think of keys as creating a link from one dataframe to another using the common values or indexes. For example, in the diagram below, we linked the dataframes using common values in the Country columns.\nMerge_link\nIn the diagram below, we use those common country values to join or merge the dataframes.\nMerge\nWe'll explore the merge function in the next exercise using just three rows from happiness2015 and happiness2016:\nhappiness2015[['Country','Happiness Rank','Year']].iloc[2:5]\nCountry\tHappiness Rank\tYear\n2\tDenmark\t3\t2015\n3\tNorway\t4\t2015\n4\tCanada\t5\t2015\nhappiness2016[['Country','Happiness Rank','Year']].iloc[2:5]\nCountry\tHappiness Rank\tYear\n2\tIceland\t3\t2016\n3\tNorway\t4\t2016\n4\tFinland\t5\t2016\nWe'll use the following syntax:\nMerge_syntax\nLet's practice using the merge() function next.\n\nInstructions\nWe've already saved three rows from happiness2015 and happiness2016 to variables named three_2015 and three_2016.\nUse the pd.merge() function to join three_2015 and three_2016 on the Country column. Assign the result to merged.'''\n\nthree_2015 = happiness_2015[['Country','Happiness Rank','Year']].iloc[2:5]\nthree_2016 = happiness_2016[['Country','Happiness Rank','Year']].iloc[2:5]\n\nmerged = pd.merge(three_2015, three_2016, on = 'Country')\n\n'''Joining three_2015 and three_2016 in the last exercise resulted in a dataframe with just one row:\npd.merge(left=three_2015, right=three_2016, on='Country')\nCountry\tHappiness Rank_x\tYear_x\tHappiness Rank_y\tYear_y\n0\tNorway\t4\t2015\t4\t2016\nLet's look back to three_2015 and three_2016 to understand why. Since we joined the dataframes on the Country column, or used it as the key, the merge() function looked to match elements in the Country column in BOTH dataframes.\nJoin_columns\nThe one country returned in merged was \"Norway\", the only element that appeared in the Country column in BOTH three_2015 and three_2016.\nThis way of combining, or joining, data is called an inner join. An inner join returns only the intersection of the keys, or the elements that appear in both dataframes with a common key.\nThe term \"join\" originates from SQL (or structured query language), a language used to work with databases. If you're a SQL user, you'll recognize the following concepts. If you've never used SQL, don't worry! No prior knowledge is necessary for this mission, but we will learn SQL later in this path.\nThere are actually four different types of joins:\nInner: only includes elements that appear in both dataframes with a common key\nOuter: includes all data from both dataframes\nLeft: includes all of the rows from the \"left\" dataframe along with any rows from the \"right\" dataframe with a common key; the result retains all columns from both of the original dataframes\nRight: includes all of the rows from the \"right\" dataframe along with any rows from the \"left\" dataframe with a common key; the result retains all columns from both of the original dataframes\nIf the definition for outer joins sounds familiar, it's because we've already seen examples of outer joins! 
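\nTo make those four join types concrete, here is a minimal sketch using hypothetical toy frames (not the mission's data; it assumes pandas is already imported as pd):\ndemo_a = pd.DataFrame({'Country': ['Norway', 'Denmark'], 'Rank_2015': [4, 3]})\ndemo_b = pd.DataFrame({'Country': ['Norway', 'Iceland'], 'Rank_2016': [4, 3]})\npd.merge(demo_a, demo_b, on='Country')               # inner (the default): Norway only\npd.merge(demo_a, demo_b, on='Country', how='outer')  # outer: Norway, Denmark and Iceland, with NaN where a side is missing\npd.merge(demo_a, demo_b, on='Country', how='left')   # left: Norway and Denmark\npd.merge(demo_a, demo_b, on='Country', how='right')  # right: Norway and Iceland\n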
Recall that when we combined data using the concat function, it kept all of the data from all dataframes, even if missing values were created.\n\nSince it's much more common to use inner and left joins for database-style joins, we'll focus on these join types for the remainder of the mission, but encourage you to explore the other options on your own.\nLet's experiment with changing the join type next.\n\nInstructions\n\nUpdate merged to use a left join instead of an inner join. Set the how parameter to 'left' in merge(). Assign the result to merged_left.\nUpdate merged_left so that the left parameter equals three_2016 and the right parameter equals three_2015. Assign the result to merged_left_updated.\nBased on the results of this exercise, when using a left join, does changing the dataframe assigned to the left and right parameters change the result? Try to answer this question before moving on to the next screen.'''\n\nmerged = pd.merge(left=three_2015, right=three_2016, on='Country')\nmerged_left = pd.merge(three_2015, three_2016, how = 'left', on = 'Country')\nmerged_left_updated = pd.merge(three_2016, three_2015, how = 'left', on = 'Country')\n\n'''Instructions\n\nUpdate merged to use the suffixes _2015 and _2016. Set the suffixes parameter to ('_2015', '_2016') in merge(). Assign the result to merged_suffixes.\nUpdate merged_updated to use the suffixes _2015 and _2016. Notice that the \"left\" dataframe is three_2016 and the \"right\" dataframe is three_2015. Assign the result to merged_updated_suffixes.'''\n\nmerged_updated = pd.merge(left=three_2016, right=three_2015, how = 'left', on='Country')\nmerged_suffixes = pd.merge(left=three_2015, right=three_2016, how = 'left', on='Country', suffixes = ('_2015', '_2016'))\nmerged_updated_suffixes = pd.merge(left=three_2016, right=three_2015, how = 'left', on='Country', suffixes = ('_2016', '_2015'))\n\n'''Instructions\n\nWe've already saved four_2015 and three_2016. In this exercise, we'll use a left join to combine four_2015 and three_2016.\nPredict the number of rows and columns the resulting dataframe will have. Assign the number of rows to a variable called rows and the number of columns to a variable called columns.\nTo change the join type used in merge_index to a left join, set the how parameter equal to 'left'. 
Save the result to merge_index_left.\nUpdate rows and columns so that each contains the correct number of rows and columns in merge_index_left.'''\n\nfour_2015 = happiness_2015[['Country','Happiness Rank','Year']].iloc[2:6]\nthree_2016 = happiness_2016[['Country','Happiness Rank','Year']].iloc[2:5]\nmerge_index = pd.merge(left = four_2015, right = three_2016, left_index = True, right_index = True, suffixes = ('_2015','_2016'))\nrows = 4\ncolumns = 6\nmerge_index_left = pd.merge(left = four_2015, right = three_2016, how = 'left', left_index = True, right_index = True, suffixes = ('_2015', '_2016'))\n\n'''Learn\nLet's summarize what we learned in this mission:\n\tpd.concat()\tpd.merge()\nDefault Join Type\tOuter\tInner\nCan Combine More Than Two Dataframes at a Time?\tYes\tNo\nCan Combine Dataframes Vertically (axis=0) or Horizontally (axis=1)?\tBoth\tHorizontally\nSyntax:\nConcat (Vertically): concat([df1,df2,df3])\nConcat (Horizontally): concat([df1,df2,df3], axis = 1)\nMerge (Join on Columns): merge(left = df1, right = df2, how = 'join_type', on = 'Col')\nMerge (Join on Index): merge(left = df1, right = df2, how = 'join_type', left_index = True, right_index = True)\nYou may still be wondering about when to use each of the functions and methods we've learned. This table can help you understand the limitations of each, but that decision will mostly depend on the problem you're trying to solve.\nLet's review our original question next:\nDid world happiness increase, decrease, or stay about the same from 2015 to 2017?\nTo answer this question, we'll create a bar plot in which each bar represents the mean happiness score for one of the years. We completed a similar task in the last mission when we calculated the mean happiness score for each region using the df.pivot_table() method:\n#Use a pivot table to aggregate the data according to Region.\nmeans = happiness2015.pivot_table('Happiness Score', 'Region', aggfunc=np.mean)\nHappiness Score\nRegion\t\nAustralia and New Zealand\t7.285000\nCentral and Eastern Europe\t5.332931\nEastern Asia\t5.626167\nLatin America and Caribbean\t6.144682\nMiddle East and Northern Africa\t5.406900\nNorth America\t7.273000\nSoutheastern Asia\t5.317444\nSouthern Asia\t4.580857\nSub-Saharan Africa\t4.202800\nWestern Europe\t6.689619\nThen, we plotted the results with the df.plot() method.\n\n#Use the `df.plot()` method to plot the results.\nmeans.plot(kind='barh', title='Mean Happiness Scores by Region', xlim=(0,10))\nMean_Happiness\nLet's repeat the same steps, but this time, we'll group the data by the Year column. First, let's rename the Happiness.Score column so it matches the column name in happiness2015 and happiness2016:\nhappiness2017.rename(columns={'Happiness.Score': 'Happiness Score'}, inplace=True)\nNext, we need to combine happiness2015, happiness2016, and happiness2017, so that we can group the result by the Year column.\nLet's use what we learned in this mission to combine the dataframes.\n\nInstructions\n\nWe've already created a Year column in happiness2017 and renamed the Happiness.Score column to Happiness Score.\nUse either the pd.concat() function or the pd.merge() function to combine happiness2015, happiness2016, and happiness2017. 
Assign the result to combined.\nThink about whether you need to combine the data horizontally or vertically in order to create a dataframe that can be grouped by year, and decide which function (pd.concat() or pd.merge()) you can use to combine the data.\nUse the df.pivot_table() method to create a pivot table from the combined dataframe. Set Year as the index and Happiness Score as the values. Assign the result to pivot_table_combined.\nUse the df.plot() method to create a bar chart of the results. Set the kind parameter to barh, the title to 'Mean Happiness Scores by Year', and the xlim parameter to (0,10).\nTry to answer the following question based on the results of this exercise: Did world happiness increase, decrease, or stay about the same from 2015 to 2017?'''\n\ncombined = pd.concat([happiness_2015, happiness_2016, happiness_2017])\npivot_table_combined = combined.pivot_table('Happiness Score', 'Year', aggfunc = np.mean)\npivot_table_combined.plot(kind = 'barh', title = 'Mean Happiness Scores by Year', xlim = (0, 10))\n","repo_name":"adely1509/Data-Cleaning-and-Analysis","sub_path":"Combining Data With Pandas.py","file_name":"Combining Data With Pandas.py","file_ext":"py","file_size_in_byte":16069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"73214229077","text":"import asyncio as aio\nfrom utils import *\nfrom pl import PerfectLink\nfrom node_config import current_node, nodes, timeout\n\nclass BestEffortBroadcast():\n    def __init__(self, upper_layer):\n        self.pl = PerfectLink(self)\n        self.upper_layer = upper_layer\n\n    async def broadcast(self, message: Message):\n        beb_m = Message(MessageType.BEB, current_node(), message)\n        calls = []\n        for node in nodes():\n            calls.append(self.pl.send(beb_m, node, timeout))\n        await aio.gather(*calls)\n\n    async def deliver(self, message: Message):\n        if message.type == MessageType.ACK:\n            return await self.upper_layer.deliver(message) # if message is of type ACK let upper_layer handle it\n        elif message.type != MessageType.BEB:\n            raise ValueError('Best Effort Broadcast handles only messages with type ACK or BEB')\n        await self.upper_layer.deliver(message.body)\n\n    async def run(self):\n        return await self.pl.run()\n","repo_name":"bsmietanka/dist_algs","sub_path":"beb.py","file_name":"beb.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"29125401483","text":"from common.stuff import *\n\nimport os\nimport tty\nimport fcntl\nimport select\nimport socket\nimport termios\nimport threading\n\nclass TextConsole:\n    DEF_CTRL_SOCKET = \"/tmp/trendcar-ctrl.sock\"\n    DEF_MAX_IDLE_TAKING_OVER = 3\n    DEF_MAX_IDLE_DEACTIVATING = 1.5\n\n    SUCCESS = 0\n    ERR_CONNECTION_NOT_READY = 1\n    ERR_SEND_COMMAND = 2\n    ERR_READ_SCRIPT = 3\n\n    STATE_INIT = 0\n    STATE_STARTING = 1\n    STATE_STARTED = 2\n    STATE_STOPPING = 3\n    STATE_STOPPED = 4\n    _serving_mutex = threading.Lock()\n    _serving_event = threading.Condition(_serving_mutex)\n    _serving_thread = None\n    _serving_state = STATE_INIT\n\n    _driving_mutex = threading.Lock()\n    _driving_event = threading.Condition(_driving_mutex)\n    _driving_thread = None\n    _driving_state = STATE_INIT\n    _driving_last_activated = None\n    _max_idle_deactivating = DEF_MAX_IDLE_DEACTIVATING\n    _steering = 0.0\n    _throttle = 0.0\n\n    _control = None\n    _taking_over = False\n    _taking_over_started = None\n    _max_idle_taking_over = DEF_MAX_IDLE_TAKING_OVER\n    _dashboard = {}\n\n    _ctrl_socket_path = None\n    
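# Unix-domain control socket that serves the interactive text console; the path comes from the TEXTCONSOLE/ctrl_socket config key\n    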
_ctrl_socket = None\n _client_context = {}\n\n\n @staticmethod\n def attach_control(control):\n TextConsole.detach_control()\n if control:\n control.register_dashboard_observer(TextConsole._on_pre_observe_dashboard , priority = Control.DASHBOARD_PRIORITY_HIGH)\n control.register_dashboard_observer(TextConsole._on_post_observe_dashboard, priority = Control.DASHBOARD_PRIORITY_LOW )\n TextConsole._control = control\n return True\n\n\n @staticmethod\n def detach_control():\n if TextConsole._control is not None:\n TextConsole._control.unregister_dashboard_observer(TextConsole._on_pre_observe_dashboard)\n TextConsole._control.unregister_dashboard_observer(TextConsole._on_post_observe_dashboard)\n TextConsole._control = None\n return True\n return False\n\n @staticmethod\n def start(control = None):\n with TextConsole._serving_mutex:\n while TextConsole._serving_state in (TextConsole.STATE_STARTING, TextConsole.STATE_STOPPING):\n TextConsole._serving_event.wait()\n\n if TextConsole._serving_state == TextConsole.STATE_STARTED:\n return True\n\n if TextConsole._serving_state in (TextConsole.STATE_STOPPED, TextConsole.STATE_INIT):\n TextConsole._serving_state = TextConsole.STATE_STARTING\n TextConsole._serving_event.notify_all()\n\n try:\n if control is not None:\n TextConsole.attach_control(control)\n\n TextConsole._serving_thread = threading.Thread(target = TextConsole.serving_loop, name = \"tc.serving_loop\")\n TextConsole._serving_thread.start()\n\n TextConsole._driving_thread = threading.Thread(target = TextConsole.driving_loop, name = \"tc.driving_loop\")\n TextConsole._driving_thread.start()\n except:\n error_exc(\"Unable to start text console thread\")\n\n if control is not None:\n TextConsole.detach_control()\n\n with TextConsole._driving_mutex:\n TextConsole._driving_state = TextConsole.STATE_STOPPED\n TextConsole._driving_event.notify_all()\n\n with TextConsole._serving_mutex:\n TextConsole._serving_state = TextConsole.STATE_STOPPED\n TextConsole._serving_event.notify_all()\n\n return False\n\n with TextConsole._driving_mutex:\n while TextConsole._driving_state == TextConsole.STATE_STARTING:\n TextConsole._driving_event.wait()\n started = TextConsole._driving_state == TextConsole.STATE_STARTED\n\n with TextConsole._serving_mutex:\n while TextConsole._serving_state == TextConsole.STATE_STARTING:\n TextConsole._serving_event.wait()\n started = TextConsole._serving_state == TextConsole.STATE_STARTED\n\n if not started and control is not None:\n TextConsole.detach_control()\n\n return started\n\n\n @staticmethod\n def stop():\n with TextConsole._driving_mutex:\n while TextConsole._driving_state in (TextConsole.STATE_STARTING, TextConsole.STATE_STOPPING):\n TextConsole._driving_event.wait()\n\n if TextConsole._driving_state not in (TextConsole.STATE_STOPPED, TextConsole.STATE_INIT):\n TextConsole._driving_state = TextConsole.STATE_STOPPING\n TextConsole._driving_event.notify_all()\n\n with TextConsole._serving_mutex:\n while TextConsole._serving_state in (TextConsole.STATE_STARTING, TextConsole.STATE_STOPPING):\n TextConsole._serving_event.wait()\n\n if TextConsole._serving_state not in (TextConsole.STATE_STOPPED, TextConsole.STATE_INIT):\n TextConsole._taking_over = False\n TextConsole._serving_state = TextConsole.STATE_STOPPING\n\n if TextConsole._ctrl_socket:\n try:\n TextConsole._ctrl_socket.close()\n except:\n pass\n\n TextConsole._serving_event.notify_all()\n\n with TextConsole._driving_mutex:\n TextConsole._driving_event.notify_all()\n\n while TextConsole._driving_state == 
TextConsole.STATE_STOPPING:\n TextConsole._driving_event.wait()\n\n driving_thread_stopped = TextConsole._driving_state == TextConsole.STATE_STOPPED\n\n if driving_thread_stopped and TextConsole._driving_thread:\n TextConsole._driving_thread.join()\n TextConsole._driving_thread = None\n\n with TextConsole._serving_mutex:\n TextConsole._serving_event.notify_all()\n\n while TextConsole._serving_state == TextConsole.STATE_STOPPING:\n TextConsole._serving_event.wait()\n\n serving_thread_stopped = TextConsole._serving_state == TextConsole.STATE_STOPPED\n\n if serving_thread_stopped and TextConsole._serving_thread:\n TextConsole._serving_thread.join()\n TextConsole._serving_thread = None\n\n TextConsole.detach_control()\n return driving_thread_stopped and serving_thread_stopped\n\n\n @staticmethod\n def serving_loop():\n set_thread_name(TextConsole._serving_thread.getName())\n try:\n with TextConsole._serving_mutex:\n try:\n TextConsole._ctrl_socket_path = config.get(\"TEXTCONSOLE\", \"ctrl_socket\", TextConsole.DEF_CTRL_SOCKET)\n\n if os.path.exists(TextConsole._ctrl_socket_path):\n os.remove(TextConsole._ctrl_socket_path)\n\n try:\n TextConsole._ctrl_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n TextConsole._ctrl_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n TextConsole._ctrl_socket.bind(TextConsole._ctrl_socket_path)\n TextConsole._ctrl_socket.listen(5)\n except:\n error_exc(\"TextConsole: Failed to bind unix socket %s\" % (TextConsole._ctrl_socket_path))\n return False\n\n TextConsole._serving_state = TextConsole.STATE_STARTED\n TextConsole._serving_event.notify_all()\n except:\n error_exc(\"TextConsole: Failed to start daemon\")\n\n if TextConsole._ctrl_socket:\n info(\"TextConsole: Started, listening on %s\", TextConsole._ctrl_socket_path)\n\n ctrl_socket_fd = TextConsole._ctrl_socket.fileno()\n readfds = [ctrl_socket_fd]\n\n while TextConsole._serving_state == TextConsole.STATE_STARTED:\n try:\n try:\n readable, _, _ = select.select([ctrl_socket_fd], [], [])\n\n if ctrl_socket_fd not in readable:\n continue\n except:\n continue\n\n _client_sock, _ = TextConsole._ctrl_socket.accept()\n\n if _client_sock is None:\n continue\n\n info(\"TextConsole: Client connection accepted - %s\", repr(_client_sock))\n\n try:\n _client_mutex = threading.Lock()\n\n TextConsole._client_context[_client_sock] = {\n \"client_sock\": _client_sock,\n \"mutex\" : _client_mutex,\n \"event\" : threading.Condition(_client_mutex),\n \"state\" : TextConsole.STATE_STARTING,\n \"quiet\" : False,\n \"recvq\" : bytearray(),\n \"sendq\" : bytearray(),\n }\n\n TextConsole._client_context[_client_sock][\"thread\"] = threading.Thread(\n target = TextConsole.process_command,\n args = (_client_sock, TextConsole._client_context[_client_sock]),\n name = \"tc.process_cmd\"\n )\n TextConsole._client_context[_client_sock][\"thread\"].start()\n except:\n _client_sock.close()\n warn_exc(\"TextConsole: Client connection dropped - %s\", repr(_client_sock))\n\n except KeyboardInterrupt:\n info(\"TextConsole: Daemon loop interrupted\")\n break\n except:\n warn_exc(\"TextConsole: Exception occurred in daemon loop\")\n\n debug(\"TextConsole: Notifying clients\")\n for ctx in TextConsole._client_context.values():\n with ctx[\"mutex\"]:\n if ctx[\"state\"] != TextConsole.STATE_STOPPED:\n ctx[\"state\"] = TextConsole.STATE_STOPPING\n ctx[\"event\"].notify_all()\n\n try:\n ctx[\"client_sock\"].close()\n except:\n pass\n\n\n debug(\"TextConsole: Stopping clients\")\n for ctx in 
TextConsole._client_context.values():\n with ctx[\"mutex\"]:\n while ctx[\"state\"] != TextConsole.STATE_STOPPED:\n ctx[\"event\"].wait()\n\n ctx[\"thread\"].join()\n del TextConsole._client_context[ctx[\"client_sock\"]]\n\n finally:\n if TextConsole._ctrl_socket:\n try:\n TextConsole._ctrl_socket.close()\n except:\n pass\n\n if TextConsole._ctrl_socket_path and os.path.exists(TextConsole._ctrl_socket_path):\n try:\n os.remove(TextConsole._ctrl_socket_path)\n except:\n pass\n\n with TextConsole._serving_mutex:\n TextConsole._serving_state = TextConsole.STATE_STOPPED\n TextConsole._serving_event.notify_all()\n info(\"TextConsole: Stopped\")\n\n\n @staticmethod\n def _add_throttle(delta, limit = None):\n if delta != 0:\n with TextConsole._driving_mutex:\n if delta > 0:\n TextConsole._throttle = min(TextConsole._throttle + delta, 1.0 if limit is None else limit)\n else:\n TextConsole._throttle = max(TextConsole._throttle + delta, -1.0 if limit is None else limit)\n\n TextConsole._driving_last_activated = monotonic()\n TextConsole._driving_event.notify_all()\n sys.stdout.flush()\n\n return TextConsole._throttle\n\n\n @staticmethod\n def _set_throttle(value):\n if value != TextConsole._throttle:\n with TextConsole._driving_mutex:\n TextConsole._throttle = min(max(value, -1.0), 1.0)\n TextConsole._driving_last_activated = monotonic()\n TextConsole._driving_event.notify_all()\n sys.stdout.flush()\n\n return TextConsole._throttle\n\n\n @staticmethod\n def _add_steering(delta, limit = None):\n if delta != 0:\n with TextConsole._driving_mutex:\n if delta > 0:\n TextConsole._steering = min(TextConsole._steering + delta, 90.0 if limit is None else limit)\n elif delta < 0:\n TextConsole._steering = max(TextConsole._steering + delta, -90.0 if limit is None else limit)\n\n TextConsole._driving_last_activated = monotonic()\n TextConsole._driving_event.notify_all()\n\n return TextConsole._steering\n\n\n @staticmethod\n def _set_steering(value):\n if value != TextConsole._steering:\n with TextConsole._driving_mutex:\n TextConsole._steering = min(max(value, -90.0), 90.0)\n TextConsole._driving_last_activated = monotonic()\n TextConsole._driving_event.notify_all()\n\n return TextConsole._steering\n\n\n @staticmethod\n def driving_loop():\n set_thread_name(TextConsole._driving_thread.getName())\n try:\n with TextConsole._driving_mutex:\n if TextConsole._control is None:\n error(\"TextConsole: Driving requires valid control\")\n return False\n\n TextConsole._driving_state = TextConsole.STATE_STARTED\n TextConsole._driving_event.notify_all()\n info(\"TextConsole: Driving loop started\")\n\n while TextConsole._driving_state == TextConsole.STATE_STARTED:\n flipped = TextConsole._dashboard.get(\"flipped\", False)\n steering = TextConsole._steering\n throttle = TextConsole._throttle\n activated = TextConsole._driving_last_activated\n idle = monotonic() - activated if activated else -1\n\n if idle < 0 or idle > TextConsole._max_idle_deactivating:\n if steering > 0.0:\n steering = TextConsole._add_steering(-2.0 * throttle, 0.0)\n elif steering < 0.0:\n steering = TextConsole._add_steering( 2.0 * throttle, 0.0)\n\n if steering != 0.0:\n throttle = TextConsole._add_throttle(-0.01, 0.0)\n else:\n throttle = TextConsole._add_throttle(-0.05, 0.0)\n\n if TextConsole._control.drive(steering, throttle, flipped = flipped):\n debug(\"TextConsole: Drive with steering = %0.2f, throttle = %0.2f\", steering, throttle)\n TextConsole.set_taking_over(True)\n\n if TextConsole._driving_last_activated != activated:\n continue\n\n with 
TextConsole._driving_mutex:\n if TextConsole._steering == 0.0 and TextConsole._throttle == 0.0:\n TextConsole._driving_event.wait()\n else:\n if idle < 0 or idle > TextConsole._max_idle_deactivating:\n interval = 0.1\n else:\n interval = TextConsole._max_idle_deactivating\n\n TextConsole._driving_event.wait(timeout = interval)\n\n finally:\n with TextConsole._driving_mutex:\n TextConsole._driving_state = TextConsole.STATE_STOPPED\n TextConsole._driving_event.notify_all()\n info(\"TextConsole: Driving loop stopped\")\n\n\n @staticmethod\n def set_taking_over(taking_over):\n TextConsole._taking_over = taking_over\n TextConsole._taking_over_started = monotonic() if taking_over else None\n\n\n @staticmethod\n def get_taking_over():\n return TextConsole._taking_over\n\n\n @staticmethod\n def _on_pre_observe_dashboard(dashboard):\n TextConsole._dashboard = dashboard\n\n if TextConsole.get_taking_over():\n start = TextConsole._taking_over_started\n if start is not None and monotonic() - start >= TextConsole._max_idle_taking_over:\n TextConsole.set_taking_over(False)\n\n return TextConsole.get_taking_over()\n\n\n @staticmethod\n def _on_post_observe_dashboard(dashboard):\n TextConsole._dashboard = dashboard\n return False\n\n\n @staticmethod\n def _send_to_queue(client_context, data):\n with client_context[\"mutex\"]:\n client_context[\"sendq\"].extend(data)\n client_context[\"event\"].notify_all()\n\n\n @staticmethod\n def _cmd_help(key, value, client_sock, client_context):\n TextConsole._send_to_queue(client_context,\n b\"\"\"# -- Trend Car Text Console --\\n\"\"\" \\\n b\"\"\"#\\n\"\"\" \\\n b\"\"\"# Supported key strokes/keywords:\\n\"\"\" \\\n b\"\"\"# ?, h Display this help message\\n\"\"\" \\\n b\"\"\"# m Mute/Unmute the response\\n\"\"\" \\\n b\"\"\"# ; Report status\\n\"\"\" \\\n b\"\"\"# w, i, Throttle up\\n\"\"\" \\\n b\"\"\"# s, k, Throttle down\\n\"\"\" \\\n b\"\"\"# a, j, Steer left\\n\"\"\" \\\n b\"\"\"# d, l, Steer right\\n\"\"\" \\\n b\"\"\"# Brake\\n\"\"\" \\\n b\"\"\"# Toggle autodrive mode\\n\"\"\" \\\n b\"\"\"# ~ Toggle remote control mode\\n\"\"\" \\\n b\"\"\"# q, , Exit the text console\\n\"\"\" \\\n b\"\"\"#\\n\"\"\"\n )\n\n\n @staticmethod\n def _cmd_mute(key, value, client_sock, client_context):\n client_context[\"quiet\"] = not client_context[\"quiet\"]\n\n\n @staticmethod\n def _send_state_updates(client_sock, client_context):\n if not client_context[\"quiet\"]:\n TextConsole._send_to_queue(client_context, b\"STATE: steering=%d, throttle=%0.2f, autodrive=%s, remotecontrol=%s\\n\" % (\n TextConsole._steering,\n TextConsole._throttle,\n b\"started\" if AutoPilot.get_autodrive_started() else b\"stopped\",\n b\"enabled\" if AutoPilot.get_remote_control_enabled() else b\"disabled\",\n ))\n\n\n @staticmethod\n def _cmd_status(key, value, client_sock, client_context):\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_up(key, value, client_sock, client_context):\n TextConsole.set_taking_over(True)\n\n if value is None:\n TextConsole._add_throttle(0.1)\n else:\n TextConsole._set_throttle(abs(value))\n\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_down(key, value, client_sock, client_context):\n TextConsole.set_taking_over(True)\n\n if value is None:\n TextConsole._add_throttle(-0.1)\n else:\n TextConsole._set_throttle(-abs(value))\n\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_right(key, value, client_sock, client_context):\n 
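# Steer right: +10 degrees per keypress, or jump to an absolute angle when a numeric value preceded the key\n        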
TextConsole.set_taking_over(True)\n\n if value is None:\n TextConsole._add_steering(10.0)\n else:\n TextConsole._set_steering(abs(value))\n\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_left(key, value, client_sock, client_context):\n TextConsole.set_taking_over(True)\n\n if value is None:\n TextConsole._add_steering(-10.0)\n else:\n TextConsole._set_steering(-abs(value))\n\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_brake(key, value, client_sock, client_context):\n TextConsole.set_taking_over(True)\n\n throttle = TextConsole._throttle\n\n if value is None:\n throttle = TextConsole._set_throttle(throttle / 4)\n else:\n throttle = TextConsole._set_throttle(throttle * (1.0 - max(min(abs(value), 1.0), 0.0)))\n\n if throttle == 0.0:\n TextConsole._set_steering(0.0)\n elif -0.1 <= throttle <= 0.1:\n TextConsole._set_throttle(0.0)\n\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_autodrive(key, value, client_sock, client_context):\n if not AutoPilot.get_remote_control_enabled():\n started = AutoPilot.get_autodrive_started()\n\n if value is not None:\n if bool(value) == started:\n return\n\n required_to_start = bool(value)\n else:\n required_to_start = not started\n\n if required_to_start:\n AutoPilot.start_autodrive()\n else:\n AutoPilot.stop_autodrive()\n TextConsole._set_steering(0.0)\n TextConsole._set_throttle(0.0)\n\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_remotecontrol(key, value, client_sock, client_context):\n enabled = AutoPilot.get_remote_control_enabled()\n\n if value is not None:\n if bool(value) == enabled:\n return\n\n required_to_enable = bool(value)\n else:\n required_to_enable = not enabled\n\n if required_to_enable:\n AutoPilot.enable_remote_control()\n else:\n AutoPilot.disable_remote_control()\n\n TextConsole._set_steering(0.0)\n TextConsole._set_throttle(0.0)\n\n TextConsole._send_state_updates(client_sock, client_context)\n\n\n @staticmethod\n def _cmd_quit(key, value, client_sock, client_context):\n client_context[\"state\"] = TextConsole.STATE_STOPPING\n\n\n _command_map = {\n \"?\" : \"help\",\n \"h\" : \"help\",\n 'm' : 'mute',\n ';' : 'status',\n \"\\x1b[A\": \"up\",\n \"w\" : \"up\",\n \"i\" : \"up\",\n \"\\x1b[B\": \"down\",\n \"s\" : \"down\",\n \"k\" : \"down\",\n \"\\x1b[C\": \"right\",\n \"d\" : \"right\",\n \"l\" : \"right\",\n \"\\x1b[D\": \"left\",\n \"a\" : \"left\",\n \"j\" : \"left\",\n \" \" : \"brake\",\n \"\\t\" : \"autodrive\",\n \"~\" : \"remotecontrol\",\n \"q\" : \"quit\",\n }\n\n _command_value = None\n\n\n @staticmethod\n def _get_next_command(cmdbuf):\n while len(cmdbuf) > 0:\n key = cmdbuf[0: 1].decode('iso8859-1')\n\n if key.isdigit() or key in ['+', '-', '.']:\n if TextConsole._command_value is None:\n TextConsole._command_value = []\n TextConsole._command_value.append(key)\n del cmdbuf[0:1]\n continue\n\n value = None\n\n if TextConsole._command_value is not None:\n try:\n debug(\"parsing %s\", repr(TextConsole._command_value))\n\n value = float(\"\".join(TextConsole._command_value))\n TextConsole._command_value = None\n except:\n debug_exc(\"exception occurred while parsing %s\", repr(TextConsole._command_value))\n\n\n if ord(key) == 0x1b: #ESC\n n = len(cmdbuf)\n\n if n == 1:\n break\n\n if cmdbuf[1: 2].decode('iso8859-1') == '[':\n if n >= 3:\n key = cmdbuf[0: 3].decode('iso8859-1')\n del cmdbuf[0: 3]\n if key in TextConsole._command_map:\n return 
TextConsole._command_map[key], key, value\n break\n\n del cmdbuf[0:1]\n\n if key in TextConsole._command_map:\n return TextConsole._command_map[key], key, value\n\n return None, None, None\n\n\n @staticmethod\n def _sending_thread(client_sock, client_context):\n set_thread_name(\"tc.sending_thread\")\n\n while client_context[\"state\"] == TextConsole.STATE_STARTED:\n with client_context[\"mutex\"]:\n if len(client_context[\"sendq\"]) == 0:\n client_context[\"event\"].wait()\n continue\n\n data = client_context[\"sendq\"]\n client_context[\"sendq\"] = bytearray()\n\n client_sock.send(data)\n\n\n @staticmethod\n def _receiving_thread(client_sock, client_context):\n set_thread_name(\"tc.receiving_thread\")\n\n while client_context[\"state\"] == TextConsole.STATE_STARTED:\n data = client_sock.recv(4096)\n\n if len(data) == 0:\n break\n\n with client_context[\"mutex\"]:\n client_context[\"recvq\"].extend(data)\n client_context[\"event\"].notify_all()\n\n\n @staticmethod\n def process_command(client_sock, client_context):\n set_thread_name(client_context[\"thread\"].getName())\n\n sending_thread = None\n receiving_thread = None\n\n try:\n info(\"TextConsole: Started processing commands - %s\", repr(client_sock))\n\n with client_context[\"mutex\"]:\n if client_context[\"state\"] != TextConsole.STATE_STARTING:\n return\n\n client_context[\"state\"] = TextConsole.STATE_STARTED\n client_context[\"event\"].notify_all()\n\n\n sending_thread = threading.Thread(target = TextConsole._sending_thread , args = (client_sock, client_context), name = \"tc.sending_thread\" )\n receiving_thread = threading.Thread(target = TextConsole._receiving_thread, args = (client_sock, client_context), name = \"tc.receiving_thread\")\n sending_thread.start()\n receiving_thread.start()\n\n try:\n cmdbuf = bytearray()\n TextConsole._cmd_help(None, None, client_sock, client_context)\n\n while client_context[\"state\"] == TextConsole.STATE_STARTED:\n with client_context[\"mutex\"]:\n if len(client_context[\"recvq\"]) == 0:\n client_context[\"event\"].wait()\n continue\n\n data = client_context[\"recvq\"]\n client_context[\"recvq\"] = bytearray()\n\n if len(data) == 0:\n break\n\n cmdbuf.extend(data)\n cmd, key, value = TextConsole._get_next_command(cmdbuf)\n\n if cmd is not None:\n try:\n func = eval(\"TextConsole._cmd_%s\" % cmd)\n func(key, value, client_sock, client_context)\n except:\n error_exc(\"TextConsole: Exception occurred while executing command %s - %s\", cmd, repr(client_sock))\n continue\n\n except:\n error_exc(\"TextConsole: Exception occurred - %s\", repr(client_sock))\n\n finally:\n info(\"TextConsole: Client connection closed - %s\", repr(client_sock))\n\n try:\n client_sock.close()\n except:\n pass\n\n with client_context[\"mutex\"]:\n client_context[\"state\"] = TextConsole.STATE_STOPPED\n client_context[\"event\"].notify_all()\n\n if sending_thread:\n sending_thread.join()\n\n if receiving_thread:\n receiving_thread.join()\n\n\n @staticmethod\n def console(cmd, script, interactive, quiet):\n _connected = False\n _ctrl_socket = None\n _ctrl_socket_path = config.get(\"TEXTCONSOLE\", \"ctrl_socket\", TextConsole.DEF_CTRL_SOCKET)\n\n if not os.path.exists(_ctrl_socket_path):\n error(\"TextConsole: Control socket was not ready: %s\", _ctrl_socket_path)\n return TextConsole.ERR_CONNECTION_NOT_READY \n\n console_fd = sys.stdin.fileno()\n\n if sys.stdin.isatty():\n old_tty_attrs = termios.tcgetattr(console_fd)\n\n fcntl.fcntl(console_fd, fcntl.F_SETFL, os.O_NONBLOCK | fcntl.fcntl(console_fd, fcntl.F_GETFL))\n\n 
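# stdin is now non-blocking, so the select() loop below can forward single keystrokes as they arrive\n        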
try:\n try:\n _ctrl_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\n if not _ctrl_socket:\n error(\"TextConsole: Unable to create control socket\")\n return TextConsole.ERR_CONNECTION_NOT_READY \n\n for tries in range(20):\n try:\n _ctrl_socket.connect(_ctrl_socket_path)\n _connected = True\n break\n except:\n debug_exc(\"TextConsole: Waiting for socket %s becoming ready\", _ctrl_socket_path)\n\n time.sleep(1)\n\n if not _connected:\n error(\"TextConsole: Unable to connect to socket %s\", _ctrl_socket_path)\n return TextConsole.ERR_CONNECTION_NOT_READY\n\n except:\n error_exc(\"TextConsole: Exception occurred while connecting to control socket: %s\", _ctrl_socket_path)\n return TextConsole.ERR_CONNECTION_NOT_READY \n\n\n commands = []\n\n if quiet:\n _ctrl_socket.send(b'm')\n\n if script:\n try:\n with open(script, \"r\") as f:\n commands.append(f.read())\n except:\n error_exc(\"TextConsole: Exception occurred while sending command to control socket: %s\", _ctrl_socket_path)\n return TextConsole.ERR_READ_SCRIPT\n\n if cmd:\n commands.append(cmd)\n\n if len(commands) > 0:\n for c in commands:\n _ctrl_socket.send(c.encode('iso8859-1'))\n\n if not interactive:\n return TextConsole.SUCCESS\n\n try:\n ctrl_socket_fd = _ctrl_socket.fileno()\n readfds = [console_fd, ctrl_socket_fd]\n\n if sys.stdin.isatty():\n tty.setcbreak(console_fd)\n\n while True:\n readable, _, _ = select.select(readfds, [], [])\n\n if console_fd in readable:\n ch = os.read(console_fd, 1)\n\n if len(ch) == 0: # EOF\n break\n if sys.stdin.isatty() and ord(ch) in (3, 4): # EOF/Ctrl-D, or Ctrl-C\n break\n\n _ctrl_socket.send(ch)\n\n if ctrl_socket_fd in readable:\n buf = _ctrl_socket.recv(4096)\n\n if len(buf) == 0:\n break\n\n if not quiet:\n sys.stdout.write(buf.decode('iso8859-1'))\n sys.stdout.flush()\n\n except KeyboardInterrupt:\n pass\n\n finally:\n if _ctrl_socket:\n _ctrl_socket.close()\n\n sys.stdout.flush()\n\n if sys.stdin.isatty():\n termios.tcsetattr(console_fd, termios.TCSADRAIN, old_tty_attrs)\n\n return TextConsole.SUCCESS\n\n","repo_name":"TrendMicro-Volunteer-Club/formula-trend-toi","sub_path":"trendcar/trendcar/driver/textconsole.py","file_name":"textconsole.py","file_ext":"py","file_size_in_byte":32327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"73348154196","text":"# -*- coding: utf-8 -*-\nfrom re import UNICODE\nimport ssl\nfrom .parsers import AnchorHTMLParser, URLParser\nfrom .sitemap import SiteMapXML\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass WebCrawler(object):\n \"\"\"\n URL of the website\n Maximum recursion depth allowed (defaulted to 3)\n \"\"\"\n\n def __init__(self, url, max_depth=3):\n self.url = url\n self.max_depth = max_depth\n self.website_content = {}\n\n def get_url_info(self):\n \"\"\"\n Extract information from the parsed URL\n \"\"\"\n self.parsed_url = URLParser(self.url)\n self.domain = self.parsed_url.get_domain()\n self.prefix = self.parsed_url.get_prefix()\n self.root_path = self.parsed_url.get_path()\n\n def is_argument_valid(self):\n \"\"\"\n Verify valid URL\n \"\"\"\n parsed_url = URLParser(self.url)\n print('parsed url',parsed_url)\n test_request, error = self.test_http_get_request(self.url)\n if not parsed_url.get_domain() or not test_request:\n print (error)\n return False\n return True\n\n def crawl_it(self):\n \"\"\"\n Set URL metadata\n Initialize crawling execution\n Generate XML\n \"\"\"\n if not self.is_argument_valid():\n raise Exception('%s is not a valid URL' % 
self.url)\n        self.get_url_info()\n        urlset = self.perform_crawling([self.root_path], self.max_depth)\n        return urlset\n        # sitemap_xml = SiteMapXML(self.website_content, self.prefix, self.domain)\n        # sitemap_xml.generate()\n\n    def perform_crawling(self, urls_set, max_depth):\n        \"\"\"\n        Navigate through urls (GET info, SET info, search for links, add new links)\n        Respect some constraints (visited page, max depth recursion)\n        \"\"\"\n        # create a set instead of a list because we want unique values\n        new_urls_set = set()\n        # infinite loop protection\n        if max_depth:\n            # make sure we just hit the url once\n            gen = (url for url in urls_set if url not in self.website_content)\n            for url in gen:\n                # get response from url\n                response, lastmod = self.get(url)\n                print('----111111----------',response)\n                # set url info\n                self.set(url, response, lastmod)\n                print('----2222----------')\n\n                # get all links inside the response\n                links_from_response = self.get_links_from_response(response)\n                print('----3333----------',links_from_response)\n\n                # put new_urls_set and links_from_response together\n                new_urls_set = new_urls_set.union(links_from_response)\n                print('----4444----------',new_urls_set)\n\n                # recursion call (making sure max_depth gets decremented)\n                self.perform_crawling(new_urls_set, max_depth-1)\n        return new_urls_set\n\n    def get_links_from_response(self, response):\n        \"\"\"\n        Extract links from the response using a parser\n        https://docs.python.org/2/library/htmlparser.html#HTMLParser.HTMLParser.feed\n        \"\"\"\n        links = set()\n\n        soup = BeautifulSoup(response, \"html.parser\")\n\n        # parse the fetched HTML and collect the href of every valid anchor\n        print('count link',soup.find_all('a', href=True))\n        for link in soup.find_all('a', href=True):\n\n            is_valid = self.is_this_link_valid(link['href'])\n            print('isvalid',link['href'])\n            if is_valid:\n                links.add(link['href'])  # store the href string, not the bs4 tag, so perform_crawling can fetch it\n        return links\n\n        # anchor_parser = AnchorHTMLParser()\n        # anchor_parser.feed(response)\n        # links = set()\n        # for link in anchor_parser.handle_starttag():\n        #     is_valid = self.is_this_link_valid(link)\n        #     if is_valid:\n        #         links.add(link)\n        # return links\n\n    def is_this_link_valid(self, link):\n        if not isinstance(link, str):\n            return False\n        if link.startswith('/') or link.startswith(self.domain) or link.startswith('http://' + self.domain) or link.startswith('https://' + self.domain):\n            return True\n        return False\n\n    def set(self, current_url, response, lastmod):\n        \"\"\"\n        SET URL information\n        \"\"\"\n        # print 'Setting URL: ' + current_url\n        self.website_content[current_url] = {'response': response, 'lastmod': lastmod}\n\n    def get(self, current_url):\n        \"\"\"\n        Get URL via HTTP\n        \"\"\"\n        print('Fetching URL: ' + current_url)\n        response_raw, lastmod = self.http_get_request(current_url)\n        return (response_raw, lastmod)\n\n    def http_get_request(self, url):\n        \"\"\"\n        HTTP request using requests\n        \"\"\"\n        try:\n            # Check url contains the domain already\n            if not self.domain in url:\n                complete_url = \"%s://%s%s\" % (self.prefix, self.domain, url)\n            else:\n                complete_url = url\n            print('complete url',complete_url)\n            # This packages the request (it doesn't make it)\n            response = requests.get(complete_url)\n            # Sends the request and catches the response\n            # response = urllib.urlopen(request)\n            # print(response.content)\n            response_raw = response.content\n            try:\n                lastmod = response.headers['last-modified'] or response.headers['date']\n            except:\n                lastmod = None\n            print(lastmod)\n        except:\n            print('Something went wrong for this URL: [%s]' % (url))\n            response_raw = str()\n            lastmod = None\n\n        return (response_raw, lastmod)\n\n    def 
test_http_get_request(self, url):\n        \"\"\"\n        Test HTTP request using requests (given url)\n        \"\"\"\n        try:\n            # This packages the request (it doesn't make it)\n            print('test url connection',url)\n            response = requests.head(url)\n            # Sends the request and catches the response\n        except Exception as e:\n            return (False, e)\n        return (True, None)\n","repo_name":"wanghaisheng/hongsedianyingzimu","sub_path":"app/webcrawler.py","file_name":"webcrawler.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"}
{"seq_id":"6342677990","text":"# Understanding OOP with freecodecamp\n# Day 11 of 100DaysOfCode [Part 2]\n\n# Inheritance\nfrom oops_with_freecodecamp import Item\n\nclass Phone(Item):\n    all = []\n    def __init__(self, name: str, price: float, quantity = 1, broken_phones=0):\n        # Call to super function to have access to all attributes/methods\n        super().__init__(name, price, quantity)\n\n        # Run validations to received arguments\n        assert broken_phones >= 0, f\"Broken Phone {broken_phones} is not greater than or equal to zero!\"\n\n        # Assign to self object\n        self.broken_phones = broken_phones\n\n        # add instance to list\n        Phone.all.append(self)\n\nphone1 = Phone(\"OnePlus\", 50000, 1, 2)\nphone1.name = \"Apple\"\nprint(phone1.calculate_total_price())\nprint(phone1.name)\nprint(Item.all)\nprint(Phone.all)","repo_name":"curious-rishabh/100DaysOfCode","sub_path":"30 Days of Python/Day 11/oops_2.py","file_name":"oops_2.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"85"}
{"seq_id":"11330446402","text":"from rest_framework import serializers\nfrom .models import Board, TODO, Reminder\n\n\nclass BoardSerializer(serializers.HyperlinkedModelSerializer):\n    todos = serializers.HyperlinkedRelatedField(\n        view_name='todo-detail',\n        many=True,\n        read_only=True,\n    )\n    uncompleted = serializers.HyperlinkedIdentityField(\n        view_name='board-uncompleted'\n    )\n\n    class Meta:\n        model = Board\n        fields = ('url', 'name', 'todos', 'uncompleted')\n\n\nclass TODOSerializer(serializers.HyperlinkedModelSerializer):\n\n    class Meta:\n        model = TODO\n        fields = ('url', 'title', 'done', 'board', 'created_at', 'updated_at')\n\n\nclass ReminderSerializer(serializers.HyperlinkedModelSerializer):\n\n    class Meta:\n        model = Reminder\n        fields = ('url', 'post_url', 'text', 'delay')\n\n\nclass ReminderSimpleSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Reminder\n        fields = '__all__'\n","repo_name":"POD666/scalors-test-task","sub_path":"myproject/myproject/todo_app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"41826793385","text":"#Determine the encoding of the input file. This will be either utf-8 or latin-1 (or one of its synonyms). Do this by attempting to open the file and read a line assuming utf-8 encoding; if this fails, close the file and reopen with latin-1. If this fails, return an error and exit. 
This may change as file encoding tools are incorporated (which may allow a greater range of input encodings.)\n\nfrom sys import argv\n\nscript, infile = argv\n\n\ndef find_encoding(infile):\n\ttry:\n\t\twith open(infile) as source:\n\t\t\tline = source.readline()\n\t\t\tfile_type = 'utf-8'\n\texcept UnicodeDecodeError:\n\t\ttry:\n\t\t\twith open(infile, encoding='latin-1') as source:\n\t\t\t\tline = source.readline()\n\t\t\t\tfile_type = 'latin-1'\n\t\texcept UnicodeDecodeError:\n\t\t\tprint(\"Unable to determine file type.\")\n\t\t\tfile_type = 'unknown'\n\t# the with-statements close the file automatically; report the detected encoding before returning\n\tprint(\"File type is: %s.\" % file_type)\n\treturn file_type\n\n","repo_name":"lammyp/ref-man","sub_path":"function_definitions/find_encoding.py","file_name":"find_encoding.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"}
{"seq_id":"26766185569","text":"import discord\nimport os\n#from keep_alive import keep_alive\nfrom markov_update import createMarkovJSONFull\nfrom markov_update import updateMarkovJSONFull\nfrom markov_update import createMarkovJSONUser\nfrom markov_update import updateMarkovJSONUser\nfrom markov_chain import getMarkovJSONDict\nfrom markov_chain import createMarkovChain\n\ntoken = open(\"token.txt\", \"r\").read()\n\nclient = discord.Client()\n\ndef getUser(message, userstring):\n    user = \"\"\n    for foruser in message.guild.members:\n        if str(userstring).lower() == str(foruser).lower():\n            user = foruser\n    if user == \"\":\n        for foruser in message.guild.members:\n            if str(userstring).lower() in str(foruser.display_name).lower():\n                user = foruser\n    if user == \"\":\n        user = message.author\n    return user\n\ndef sendHelpMessage(message):\n    embed=discord.Embed(title=\"markov-bot\", url=\"https://github.com/whambulance/markov-bot\", description=\"Talks based on how others type. He's like you, but better! \\nType !markov to generate a message for you \\n \", color=0x4bb4f1)\n    embed.set_thumbnail(url=\"https://upload.wikimedia.org/wikipedia/commons/7/70/AAMarkov.jpg\")\n    embed.add_field(name=\"!mv, !markov [USER]... 
[OPTION]...\",value=\"Basic syntax\", inline=True)\n    embed.add_field(name=\"-s --startswith [STR]\", value=\"Chain starting word\", inline=False)\n    embed.add_field(name=\"-l, --length [INT]\", value=\"Chain length (def: rand 1 - 14) (max: 100)\", inline=True)\n    embed.add_field(name=\"!mkjson\",value=\"Update your Markov JSON Dictionary\", inline=True)\n    return embed\n\n@client.event\nasync def on_message(message):\n\n    if message.author.bot:\n        return  # ignore messages from other bots (and ourselves)\n\n    elif (\"help\" in str(message.content).lower() or \"aid\" in str(message.content).lower() or \"hand\" in str(message.content).lower() or \"assist\" in str(message.content).lower()) and (\"markov\" in str(message.content).lower() or \"mk\" in str(message.content).lower()):\n        embed = sendHelpMessage(message)\n        await message.channel.send(embed=embed)\n    elif message.content.startswith(\"!mk \") or message.content.startswith(\"!markov \") or message.content == \"!mk\" or message.content == \"!markov\":\n        user = \"\"\n        startswith = \"\"\n        length = 0\n\n        splitMessage = message.content.split()\n        for index, i in enumerate(splitMessage):\n            if i == \"-s\" or i == \"--startswith\":\n                startswith = splitMessage[index+1]\n            elif index == 1 and \"-\" not in i:\n                user = i\n            elif i == \"-l\" or i == \"--length\":\n                length = int(splitMessage[index+1])\n        \n        if user != \"\":\n            messageUser = getUser(message, user)\n        else:\n            messageUser = message.author\n        if length > 100:\n            length = 100\n\n        print (\"$:makeMarkov -u \" + messageUser.display_name + \" -s \" + startswith + \" -l \" + str(length))\n\n        userJSONDict = getMarkovJSONDict(message, messageUser)\n        markovChain = createMarkovChain(userJSONDict, startswith, length)\n\n        if markovChain != \"\":\n            print (\"Printed: \" + str(markovChain))\n            print (\"\")\n            if messageUser.display_name[0].islower():\n                newnick = messageUser.display_name[0:20] + \" markov\"\n            elif messageUser.display_name.isupper():\n                newnick = messageUser.display_name[0:20] + \" MARKOV\"\n            else:\n                newnick = messageUser.display_name[0:20] + \" Markov\"\n            me = message.guild.me\n            await discord.Member.edit(me, nick=newnick)\n            await message.channel.send(markovChain)\n            await discord.Member.edit(me, nick=\"\")\n    \n    elif (\"!mkjson\" in str(message.content).lower()):\n        splitMessage = message.content.split()\n        print (\"$:\" + message.content[1:999] + \" by \" + message.author.display_name)\n        for index, i in enumerate(splitMessage):\n            msgCount = None\n            msgUser = \"\"\n            if str(message.author.id) == \"120242398176477186\":\n                if i == \"createchannel\":\n                    print(\"$:\")\n                    msgCount = None\n                    await createMarkovJSONFull(message, msgCount)\n                    return\n                elif i == \"createuser\":\n                    print(\"$:\")\n                    msgUser = getUser(message, splitMessage[index+1])\n                    await createMarkovJSONUser(message, msgUser)\n                    return\n                #elif i == \"updatechannel\":\n                    #print(\"$:\")\n                    #msgCount = None\n                    #await updateMarkovJSONFull(message, msgCount)\n                    #return\n                elif i == \"updateuser\":\n                    print(\"$:\")\n                    msgUser = getUser(message, splitMessage[index+1])\n                    await updateMarkovJSONUser(message, msgUser)\n                    return \n        await updateMarkovJSONUser(message, message.author)\n\n    elif (\"!mktest\" in str(message.content).lower()):\n        splitMessage = message.content.split()\n        markovJSONDict = getMarkovJSONDict(message, message.author)\n        createMarkovChain(markovJSONDict, \"\", 
0)\n\n#keep_alive()\nclient.run(token)","repo_name":"whambulance/markov-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16831450502","text":"import spotipy\n#To access authorised Spotify data\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport pandas as pd\nimport re\nimport string\nimport config\n\n'''\n Function Name: get_playlist_tracks\n Description:\n Retrieves the list of songs in a playlist\n Input:\n username (string) - username of track creator\n playlist_id (string) - URI of playlist\n Output:\n List of tracks (dictionaries)\n https://developer.spotify.com/documentation/web-api/reference/#category-tracks\n'''\ndef getPlaylistTracks(username,playlist_id):\n results = sp.user_playlist_tracks(username,playlist_id)\n tracks = results['items']\n while results['next']:\n results = sp.next(results)\n tracks.extend(results['items'])\n return tracks\n\ndef parseTrack(track):\n name = track['track']['name']\n url = track['track']['preview_url']\n uri = track['track']['uri']\n return [name, url, uri]\n\n\nif __name__ == \"__main__\":\n regex = re.compile(\".*?\\((.*?)\\)\")\n\n # client_id and client_secret are defined in hidden config.py\n credentials_manager = SpotifyClientCredentials(client_id=config.client_id,\n client_secret=config.client_secret)\n\n # spotify object to access API\n sp = spotipy.Spotify(client_credentials_manager=credentials_manager)\n \n # get the requested playlists\n playlists = pd.read_csv('csv/playlists.csv')\n\n # generate the song information\n songdf = pd.DataFrame(columns=['songName','songURL', 'songURI'])\n for index, row in playlists.iterrows():\n # get the songs in the playlist\n tracks = getPlaylistTracks(row['Creator'],row['URI'])\n print(int(index/len(playlists)*10000)/100, row['Playlist Name'])\n for track in tracks:\n trackInfo = parseTrack(track)\n if (trackInfo):\n songdf.loc[len(songdf)] = trackInfo\n \n print(\"Number of Songs Before Removing Duplicates: \", len(songdf))\n songdf.drop_duplicates(subset=['songURI'], keep='first', inplace = True)\n print(\"Number of Songs After Removing Duplicates: \", len(songdf))\n songdf.to_csv('csv/songlist.csv', index=None)\n","repo_name":"jason-shoe/dnn-lofi-generation","sub_path":"data/retrieval/songlist-retrieval.py","file_name":"songlist-retrieval.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"39725244338","text":"jogadas = int(input(\"\"))\nganhadores = [0 for i in range(jogadas)]\nfor i in range(jogadas):\n\tjogadores = input(\"\")\n\tjog1, escolha1, jog2, escolha2 = jogadores.split()\n\n\tnums = input(\"\")\n\tnum1 = int(nums.split()[0])\n\tnum2 = int(nums.split()[1])\n\n\tsoma = num1 + num2\n\t\n\tif soma % 2 == 0:\n\t\tif escolha1 == \"PAR\":\n\t\t\tganhadores[i] = jog1\n\t\telse:\n\t\t\tganhadores[i] = jog2\n\telif soma % 2 == 1:\n\t\tif escolha1 == \"IMPAR\":\n\t\t\tganhadores[i] = jog1\n\t\telse:\n\t\t\tganhadores[i] = jog2\n\t\nfor ganhador in ganhadores:\n\tprint(ganhador)","repo_name":"tibetteixeira/gemp","sub_path":"De Quem E a vez.py","file_name":"De Quem E a vez.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"14643808079","text":"import numpy as np\nfrom PIL import ImageGrab\nfrom cv2 import cv2\nimport time\nfrom directkeys 
import PressKey, ReleaseKey, Z, Q, S, D\nfrom grabscreen import grab_screen\nfrom draw_lanes import draw_lanes\n\ndef draw_lines(image, lines):\n for line in lines:\n coords = line[0]\n cv2.line(image, (coords[0], coords[1]), (coords[2], coords[3]), [255,255,255], 5)\n\n\ndef roi(image, vertices):\n # Blank mask\n mask = np.zeros_like(image)\n \n # Fill the pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, 255)\n\n # Extract pixels from image only where mask pixels are nonzero\n masked = cv2.bitwise_and(image, mask)\n\n\n return masked \n\ndef process_img(image):\n original_image = image\n # Convert to gray\n processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Edge detection\n processed_img = cv2.Canny(processed_img, threshold1=120, threshold2=160)\n processed_img = cv2.GaussianBlur(processed_img, (5,5),0)\n #vertices = np.array([[10, 400], [10, 300], [250, 100], [450, 100], [640, 300], [640, 400]], np.int32)\n vertices = np.array([[10, 350], [10, 300], [320, 210], [325, 210], [640, 300], [640, 350]])\n processed_img = roi(processed_img, [vertices])\n\n # edges\n lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, np.array([]), 20, 10)\n m1 = 0\n m2 = 0\n try:\n l1, l2, m1,m2 = draw_lanes(original_image,lines)\n cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0,255,0], 5)\n cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0,255,0], 5)\n except Exception as e:\n print(str(e))\n pass\n try:\n for coords in lines:\n coords = coords[0]\n try:\n cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255,0,0], 5)\n \n \n except Exception as e:\n print(str(e))\n except Exception as e:\n pass\n\n return processed_img, original_image, m1, m2\n\ndef straight():\n ReleaseKey(Q)\n ReleaseKey(D)\n PressKey(Z)\n\ndef left():\n ReleaseKey(Z)\n ReleaseKey(D)\n PressKey(Q)\n \ndef right():\n ReleaseKey(Z)\n ReleaseKey(Q)\n PressKey(D)\n \ndef slow_down():\n ReleaseKey(Z)\n ReleaseKey(Q)\n ReleaseKey(D)\n\nfor i in list(range(4))[::-1]:\n print(i+1)\n time.sleep(1)\n\n'''PressKey(Z)\ntime.sleep(3)\nReleaseKey(Z)'''\n\ndef main():\n last_time = time.time()\n while(True):\n screen = grab_screen(region=(0,40,640,480))\n new_screen, original_image, m1, m2 = process_img(screen)\n print('Loop took {} seconds'.format(time.time() - last_time))\n last_time = time.time()\n # Showing output\n cv2.imshow('window', new_screen)\n cv2.imshow('window2', cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))\n\n '''if m1 < 0 and m2 < 0: \n right()\n elif m1 > 0 and m2 > 0:\n left()\n else:\n straight()'''\n\n # Quitting opencv\n if cv2.waitKey(25) & 0xFF == ord('q'):\n # Destroy\n cv2.destroyAllWindows()\n break\n\nif __name__ == \"__main__\":\n main()","repo_name":"mav3rick177/gtasa_ai_driver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"29753072800","text":"binary = int(input())\nlength = 32 - len(bin(binary)[2:])\ninitial = length * \"0\" + bin(binary)[2:]\ntmp_lst = list(length * \"0\" + bin(binary)[2:])\n\nfor i in range(len(tmp_lst)):\n if tmp_lst[i] == \"0\":\n tmp_lst[i] = \"1\"\n else:\n tmp_lst[i] = \"0\"\n\nresult = bin(int(\"\".join(tmp_lst), 2) + 1)[2:]\n\ncnt = 0\nfor i in range(32):\n if initial[i] != result[i]:\n cnt += 1\n\nprint(cnt)\n","repo_name":"CALKO9611/ALGORITHM","sub_path":"BAEKJOON/1. 
BRONZE/Ⅰ/24389/24389.py","file_name":"24389.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"28706081664","text":"import inspect\nimport unittest\n\nfrom transformers import is_torch_available, is_vision_available\nfrom transformers.models.auto import get_values\nfrom transformers.testing_utils import require_torch, slow, torch_device\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor\nfrom ...test_pipeline_mixin import PipelineTesterMixin\n\n\nif is_torch_available():\n import torch\n\n from transformers import MODEL_MAPPING, PoolFormerConfig, PoolFormerForImageClassification, PoolFormerModel\n from transformers.models.poolformer.modeling_poolformer import POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nif is_vision_available():\n from PIL import Image\n\n from transformers import PoolFormerImageProcessor\n\n\nclass PoolFormerConfigTester(ConfigTester):\n def create_and_test_config_common_properties(self):\n config = self.config_class(**self.inputs_dict)\n self.parent.assertTrue(hasattr(config, \"hidden_sizes\"))\n self.parent.assertTrue(hasattr(config, \"num_encoder_blocks\"))\n\n\nclass PoolFormerModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n image_size=64,\n num_channels=3,\n num_encoder_blocks=4,\n depths=[2, 2, 2, 2],\n sr_ratios=[8, 4, 2, 1],\n hidden_sizes=[16, 32, 64, 128],\n downsampling_rates=[1, 4, 8, 16],\n is_training=False,\n use_labels=True,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n initializer_range=0.02,\n num_labels=3,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.image_size = image_size\n self.num_channels = num_channels\n self.num_encoder_blocks = num_encoder_blocks\n self.sr_ratios = sr_ratios\n self.depths = depths\n self.hidden_sizes = hidden_sizes\n self.downsampling_rates = downsampling_rates\n self.is_training = is_training\n self.use_labels = use_labels\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.scope = scope\n\n def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n labels = None\n if self.use_labels:\n labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)\n\n config = PoolFormerConfig(\n image_size=self.image_size,\n num_channels=self.num_channels,\n num_encoder_blocks=self.num_encoder_blocks,\n depths=self.depths,\n hidden_sizes=self.hidden_sizes,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n initializer_range=self.initializer_range,\n )\n\n return config, pixel_values, labels\n\n def create_and_check_model(self, config, pixel_values, labels):\n model = PoolFormerModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(pixel_values)\n expected_height = expected_width = self.image_size // 32.0\n self.parent.assertEqual(\n result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)\n )\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, pixel_values, labels = config_and_inputs\n inputs_dict = {\"pixel_values\": pixel_values}\n return config, inputs_dict\n\n\n@require_torch\nclass 
PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):\n all_model_classes = (PoolFormerModel, PoolFormerForImageClassification) if is_torch_available() else ()\n pipeline_model_mapping = (\n {\"feature-extraction\": PoolFormerModel, \"image-classification\": PoolFormerForImageClassification}\n if is_torch_available()\n else {}\n )\n\n test_head_masking = False\n test_pruning = False\n test_resize_embeddings = False\n test_torchscript = False\n has_attentions = False\n\n def setUp(self):\n self.model_tester = PoolFormerModelTester(self)\n self.config_tester = PoolFormerConfigTester(self, config_class=PoolFormerConfig)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n @unittest.skip(\"PoolFormer does not use inputs_embeds\")\n def test_inputs_embeds(self):\n pass\n\n @unittest.skip(\"PoolFormer does not have get_input_embeddings method and get_output_embeddings methods\")\n def test_model_common_attributes(self):\n pass\n\n def test_hidden_states_output(self):\n def check_hidden_states_output(inputs_dict, config, model_class):\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n hidden_states = outputs.hidden_states\n\n expected_num_layers = self.model_tester.num_encoder_blocks\n self.assertEqual(len(hidden_states), expected_num_layers)\n\n # verify the first hidden states (first block)\n self.assertListEqual(\n list(hidden_states[0].shape[-3:]),\n [\n self.model_tester.hidden_sizes[0],\n self.model_tester.image_size // 4,\n self.model_tester.image_size // 4,\n ],\n )\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(inputs_dict, config, model_class)\n\n # check that output_hidden_states also work using config\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n\n check_hidden_states_output(inputs_dict, config, model_class)\n\n def test_training(self):\n if not self.model_tester.is_training:\n return\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n for model_class in self.all_model_classes:\n if model_class in get_values(MODEL_MAPPING):\n continue\n model = model_class(config)\n model.to(torch_device)\n model.train()\n inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n loss = model(**inputs).loss\n loss.backward()\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.forward)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = PoolFormerModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n# We will verify our results on an image of cute cats\ndef prepare_img():\n image = 
Image.open(\"./tests/fixtures/tests_samples/COCO/000000039769.png\")\n return image\n\n\n@require_torch\nclass PoolFormerModelIntegrationTest(unittest.TestCase):\n @slow\n def test_inference_image_classification_head(self):\n image_processor = PoolFormerImageProcessor()\n model = PoolFormerForImageClassification.from_pretrained(\"sail/poolformer_s12\").to(torch_device)\n\n inputs = image_processor(images=prepare_img(), return_tensors=\"pt\").to(torch_device)\n\n # forward pass\n with torch.no_grad():\n outputs = model(**inputs)\n\n # verify the logits\n expected_shape = torch.Size((1, 1000))\n self.assertEqual(outputs.logits.shape, expected_shape)\n\n expected_slice = torch.tensor([-0.6113, 0.1685, -0.0492]).to(torch_device)\n self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))\n","repo_name":"huggingface/transformers","sub_path":"tests/models/poolformer/test_modeling_poolformer.py","file_name":"test_modeling_poolformer.py","file_ext":"py","file_size_in_byte":8655,"program_lang":"python","lang":"en","doc_type":"code","stars":115573,"dataset":"github-code","pt":"85"} +{"seq_id":"19779873497","text":"#Loops Homework 1\r\n\r\n# Try to find out if the number you received from the user is perfect.\r\n#A perfect number is a whole number, an integer greater than zero;\r\n# and when you add up all of the factors less than that number, you get that number.\r\n\r\na = int(input(\"Enter a number :\"))\r\nb = list(range(1,a))\r\nc= list()\r\nd= 0\r\nfor i in b:\r\n if a%i == 0:\r\n c.append(i)\r\nfor i in c:\r\n d += i5\r\nif d == a :\r\n print(\"It is a perfect number!!!\")\r\nelse:\r\n print(\"It is not a perfect number!!!\")\r\n\r\n# I have done without going back to notes but i think there will be short way. :)","repo_name":"osmanemresener/Python-Studies","sub_path":"For - While/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"8819683623","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport logging\n\nfrom PySide import QtGui\nfrom PySide import QtCore\n\nimport matplotlib\nmatplotlib.use(\"Qt4Agg\")\nmatplotlib.rcParams['backend.qt4'] = 'PySide'\n\nimport mpl_qt.ui.main as main\nimport mpl_qt.ui.plot as plot\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass QuiverModel(object):\n\n def __init__(self, pxy, vxy):\n self.xy = pxy\n self.xyvalue = vxy\n\n\nclass TableModel(QtCore.QAbstractTableModel):\n\n def __init__(self, parent, pxy, vxy, *args):\n if pxy.shape != vxy.shape:\n raise ValueError(\"pxy and vxy have to be of same shape\")\n super(TableModel, self).__init__(parent, *args)\n self.model = np.concatenate([pxy, vxy], axis=1)\n self.header = [\"x\", \"y\", \"xvalue\", \"yvalue\"]\n\n def rowCount(self, parent):\n return len(self.model)\n\n def columnCount(self, parent):\n return len(self.model[0])\n\n def data(self, index, role):\n if not index.isValid():\n return None\n elif role != QtCore.Qt.DisplayRole:\n return None\n return str(self.model[index.row(), index.column()])\n\n def headerData(self, col, orientation, role):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return self.header[col]\n return None\n\n# def sort(self, col, order):\n # \"\"\"sort table by given column number col\"\"\"\n # self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n # self.mylist = sorted(self.mylist,\n # 
key=operator.itemgetter(col))\n # if order == Qt.DescendingOrder:\n # self.mylist.reverse()\n # self.emit(SIGNAL(\"layoutChanged()\"))\n\n\nclass MainWindow(QtGui.QMainWindow, main.Ui_MainWindow):\n\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n\n # define model\n gx, gy = np.meshgrid(np.linspace(-5, 5, 5), np.linspace(-5, 5, 5))\n pxy = np.array([gx.flatten(), gy.flatten()]).T\n phi = 0.5 * np.pi\n m = np.array([[np.cos(phi), np.sin(phi)],\n [-np.sin(phi), np.cos(phi)]])\n pxy2 = np.dot(pxy, m)\n #pxy2 = pxy + np.array([1, 0])\n vxy = pxy - pxy2\n\n # set up views and signals and slots\n LOGGER.debug(\"set up\")\n self.model = QuiverModel(pxy, vxy)\n self.quiver_plot = plot.QuiverPlotWidget(parent=self, model=self.model)\n self.mesh_plot = plot.MeshplotWidget(parent=self, model=self.model)\n\n self.tabWidget.addTab(self.quiver_plot, \"quiver\")\n self.tabWidget.addTab(self.mesh_plot, \"mesh\")\n\n self.table_view = QtGui.QTableView()\n self.tmodel = TableModel(self, pxy, vxy)\n self.table_view.setModel(self.tmodel)\n self.tabWidget.addTab(self.table_view, \"data\")\n\n self.scaleEdit.setText(str(self.quiver_plot.scale))\n self.scaleEdit.editingFinished.connect(self.on_edit_scale)\n self.keylengthEdit.setText(str(self.quiver_plot.key_length))\n self.keylengthEdit.editingFinished.connect(self.on_edit_key_length)\n\n def on_edit_key_length(self):\n LOGGER.debug(\"on_edit_key_length\")\n self.quiver_plot.key_length = float(self.keylengthEdit.text())\n\n def on_edit_scale(self):\n LOGGER.debug(\"on_edit_scale\")\n scale = float(self.scaleEdit.text())\n self.quiver_plot.scale = scale\n self.mesh_plot.scale = scale\n","repo_name":"micviklui/mpl_qt","sub_path":"mpl_qt/ui/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"16769992145","text":"import urllib\nfrom paypal_settings import settings\nfrom datetime import datetime\nfrom models import paypal_ec_token\nfrom paypal_tools import api_tools\nfrom exceptions import invalid_response_exception\n\nclass direct_payment(object, api_tools):\n \"\"\"\n Communicate with Paypal, a particular merchant services provider.\n \"\"\"\n\n def __init__(self, data, settings = settings()):\n \"\"\"\n Constructor\n \n @param data dictionary of transaction attribute values indexed by name\n @type data dict\n \"\"\"\n\n self.data = data\n\n # Paypal requires a 4-digit year, which is unusual\n current_year = str(datetime.utcnow().year)\n ed = str(self.data['exp_date'])\n new_year = current_year[:2] + ed[2:]\n # That's right, I'm assuming that the expiration is within 100 years.\n # Youwannafightaboutit????\n if int(new_year) < int(current_year):\n new_year = str(int(current_year[:2]) + 1) + ed\n self.data['exp_date'] = ed[:2] + new_year\n\n # Enforce length limits as defined by Paypal\n self.data['invoice_number'] = self.data['invoice_number'][:127]\n self.data['address_label'] = self.data['address_label'][:100]\n self.data['first_name'] = self.data['first_name'][:25]\n self.data['last_name'] = self.data['last_name'][:25]\n self.data['country'] = self.data['country'][:2]\n self.data['state'] = self.data['state'][:40]\n self.data['city'] = self.data['city'][:40]\n self.data['zip'] = self.data['zip'][:20]\n\n self.settings = settings\n \n def translate(self, data):\n \"\"\"\n Convert attribute names from our own schema to Paypal's.\n \n @param data dictionary of attribute names and values 
that will be passed to the gateway\n        @type data dict\n        @return dictionary of attribute names and values with Paypal-compatible names.\n        @rtype dict\n        \"\"\"\n\n        #: Paypal attribute names indexed by corresponding PR names.\n        mapping = {\n            'transaction_id' : 'TRANSACTIONID',\n            'card_type' : 'CREDITCARDTYPE',\n            'invoice_number' : 'INVNUM',\n            'first_name' : 'FIRSTNAME',\n            'address_label' : 'STREET',\n            'country' : 'COUNTRYCODE',\n            'last_name' : 'LASTNAME',\n            'card_number' : 'ACCT',\n            'sales_tax' : 'TAXAMT',\n            'exp_date' : 'EXPDATE',\n            'ip' : 'IPADDRESS',\n            'state' : 'STATE',\n            'amount' : 'AMT',\n            'city' : 'CITY',\n            'cvv2' : 'CVV2',\n            'zip' : 'ZIP',\n        }\n        \n        ret = {}\n        for atr in data:\n            d = data[atr]\n            if d is not None:\n                ret[mapping[atr]] = d\n        return ret\n    \n    \n    def charge(self):\n        \"\"\"\n        stub that calls the right method\n        \"\"\"\n\n        return self.DoDirectPayment()\n\n    def credit(self):\n        \"\"\"\n        stub that calls the right method\n        \"\"\"\n\n        return self.RefundTransaction()\n\n    def DoDirectPayment(self):\n        \"\"\"\n        Execute a transaction.\n        \"\"\"\n\n        # Translate attribute names to what Paypal wants to see.\n        self.data = self.translate(self.data)\n        # Add Paypal-specific transaction attributes.\n        self.data.update({\n            'PAYMENTACTION' : 'Sale',\n            'METHOD' : 'DoDirectPayment',\n        })\n\n        url_data = urllib.urlencode(self.add_common_parameters(self.data))\n        return self.gen_response(self.get_url(url_data))\n\n    def RefundTransaction(self):\n        \"\"\" refund a transaction \"\"\"\n\n        self.data = self.translate(self.data)\n        url_data = urllib.urlencode(\n            self.add_common_parameters({\n                'TRANSACTIONID' : self.data['TRANSACTIONID'],\n                'REFUNDTYPE' : 'Partial',\n                'METHOD' : 'RefundTransaction',\n                'AMT' : self.data['AMT'],\n            }))\n\n        return self.gen_response(self.get_url(url_data))\n\nclass express_checkout(object, api_tools):\n    \"\"\"\n    Use paypal's Express Checkout feature.\n    \"\"\"\n\n    def __init__(self, settings = settings()):\n        self.settings = settings\n        self.credentials = self.add_common_parameters({})\n\n    def get_token_url(self, amount):\n        \"\"\"\n        Get the URL, including token, to use for accessing Paypal.\n        \n        @param amount Amount of sale\n        \n        @return URL\n        \"\"\"\n\n        data = self.credentials.copy()\n        data.update({\n            'METHOD' : 'SetExpressCheckout',\n            'RETURNURL' : self.settings.return_url,\n            'CANCELURL' : self.settings.cancel_url,\n            'AMT' : amount,\n            'NOSHIPPING' : '1',\n        })\n\n        response = self.get_url(urllib.urlencode(data))\n        if 'TOKEN' not in response:\n            raise invalid_response_exception(str(response) + str(data))\n\n        token = paypal_ec_token(token = response['TOKEN'], amount = amount,\n            time=datetime.utcnow())\n        token.save()\n\n        # the 'useraction=commit' makes the paypal site appear to complete the transaction with\n        # something like a \"Pay Now\" button, instead of sending the user back to us to review\n        # the details before submitting a payment\n        return token.token, '%s&useraction=commit&token=%s' % (settings.express_checkout_url,\n            token.token)\n    \n# vim:tabstop=4 shiftwidth=4 expandtab\n","repo_name":"AmericanResearchInstitute/poweru-server","sub_path":"ecommerce/paypal.py","file_name":"paypal.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"71995610517","text":"#\n# Protocol implementation of the 433 MHz OOK protocol known as\n# - KaKu\n# - self-learning Nexa (Arctech?) remote\n#\n# 1. Application protocol: Each message sent over the radio is a 32\n# bit word, sent MSB first. 
The bit definitions are, from MSB to LSB:\n#\n# 26 bits: group address (unique for each remote control)\n# 1 bit: group flag (for addressing all switches of this group)\n# 1 bit: action on/off flag\n# 4 bits: switch id within this group (button number on remote)\n# (4 bits optional: dimmer level)\n#\n# An example from my Nexa remote, decoded, same order as on oscilloscope:\n#\n# 0 0 0 1 1 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 1 0\n# 0x 1 8 7 e 0 8 9 2\n# \\_________________________________________________/ \\/\\/\\_____/\n# group address gr on switch\n#\n# 2. Manchester encoding: The bits are manchester encoded when sent\n# over the air, so one application bit is sent as two bits (symbols)\n# over the air. An application layer 0 bit is sent on the radio as a\n# 0 symbol and a 1 symbol. A 1 is sent as 1-0. (Or the other way\n# around, depending on how you define the radio layer symbols).\n#\n# Dimming: An absolute dimming level can be appended to the message.\n# The on/off bit then takes on a third value is then sent as 00 in\n# the manchester coded representation.\n#\n# The message begins with a START symbol and ends with a STOP symbol.\n#\n# 3. Symbols on the radio layer: The radio uses on off keying (OOK),\n# where the carrier is sent for a high level and no carrier is sent\n# for a low level. The symbols are defined thus:\n#\n# |----|\n# START | |\n# | |_________________________________________________\n# 1T 10T\n#\n# |----|\n# 0 | |\n# | |_____\n# 1T 1T\n#\n# |----|\n# 1 | |\n# | |_________________________\n# 1T 5T\n#\n# |----|\n# STOP | |\n# | |____________________\n# 1T 4T\n#\n# The time unit T is about 260us. A 0 takes 0.52ms and 1 takes 1.56ms\n# to send, so an application layer message takes about 71ms to send\n# in its entirety.\n#\n# The first bits (00011000) of the example message look like this on\n# the modulation input of the radio transmitter (I represents\n# carrier, _ represents no carrier):\n# I__________I_I_____I_I_____I_I_____I_____I_I_____I_I_I_____I_I_____I_I_____...\n#\n# start 0 1 0 1 0 1 1 0 1 0 0 1 0 1 0 1 ...\n#\n\nimport logging\n\nclass KakuProtocol:\n BIT_TIME = 260e-6\n\n @staticmethod\n def encode_bytestream(bits):\n return bytearray(0)\n \n @staticmethod\n def bits(d, width):\n out = \"{0:0{width}b}\".format(d, width=width)\n assert(len(out) == width)\n return list(map(int, out))\n\n @staticmethod\n def bitlist_to_bytearray(bits):\n \"\"\"Return a left-aligned byte representation of the bits, zero-padded\"\"\"\n out = bytearray()\n n = 0\n for i, b in enumerate(bits):\n n = (n << 1) | b\n if i % 8 == 7:\n out.append(n)\n n = 0\n i += 1\n if i % 8:\n out.append(n << (8 - (i % 8)))\n return out\n\n @staticmethod\n def symbol(bit):\n return {\n 0: [ 1, 0, 1, 0, 0, 0, 0, 0 ],\n 1: [ 1, 0, 0, 0, 0, 0, 1, 0 ],\n \"start\": [ 1 ] + [ 0 ] * 10,\n \"stop\": [ 1 ] + [ 0 ] * 10\n }[bit]\n\n @classmethod\n def encode_message(cls, group_address, group_flag, on, switch_id, dim=None):\n log = logging.getLogger(\"proto\")\n log.debug(\"Encoding group=%06x switch=%d on=%d\" % (group_address, switch_id, int(on)))\n bits = cls.symbol(\"start\")\n for b in cls.bits(group_address, 26):\n bits += cls.symbol(b)\n bits += cls.symbol(int(group_flag == True))\n bits += cls.symbol(int(on == True))\n for b in cls.bits(switch_id, 4):\n bits += cls.symbol(b)\n bits += cls.symbol(\"stop\")\n return cls.bitlist_to_bytearray(bits)\n\nif __name__ == \"__main__\":\n tx_data = KakuProtocol.encode_message(0xaaaaaa, False, True, 1)\n 
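# --- Editor's illustration (not part of the original file): sanity-check the\n    # frame size implied by the protocol comments above. START and STOP are 11\n    # bit-times each and every one of the 32 data bits is 8 bit-times, so the\n    # stream is 11 + 32*8 + 11 = 278 bit-times, which bitlist_to_bytearray()\n    # pads to 35 bytes; at BIT_TIME = 260us the whole frame lasts roughly\n    # 72 ms, matching the \"about 71ms\" estimate in the header comment.\n    assert len(tx_data) == (11 + 32 * 8 + 11 + 7) // 8\n    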
print(tx_data.hex())\n","repo_name":"JonasNorling/mqtt_rfm69_ook_switch","sub_path":"protocol_kaku.py","file_name":"protocol_kaku.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"21748197702","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n    Sequence Labeling Model.\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport sys\nsys.path.append('..')\nfrom model.config import conf as conf\nfrom model.modules.bilstm import Bilstm\nfrom model.modules.crf import CRF\nfrom model.modules.feature import CharFeature, WordFeature, PositionFeature\nimport numpy as np\nclass Bilstm_LR_Model(nn.Module):\n\n    def __init__(self, word_embeddings, word_require_grad,\n                 char_embedding_shape, filter_sizes, filter_nums, \n                 target_size, average_batch=True, use_cuda=True):\n        \"\"\"\n        Args:\n            feature_names: list(str), feature names, excluding `label` and `char`\n\n            feature_size_dict: dict({str: int}), vocabulary size of each feature\n            feature_dim_dict: dict({str: int}), input dim of each feature\n            pretrained_embed_dict: dict({str: np.array})\n            require_grad_dict: bool, whether to update the feature embedding weights\n\n            # char parameters\n            use_char: bool, whether to use character features, default is False\n            filter_sizes: list(int), convolution kernel sizes, default is [3]\n            filter_nums: list(int), numbers of convolution kernels, default is [32]\n\n            # rnn parameters\n            rnn_unit_type: str, options: ['rnn', 'lstm', 'gru']\n            num_rnn_units: int, number of rnn units\n            num_layers: int, number of layers\n            bi_flag: bool, whether bidirectional, default is True\n\n            use_crf: bool, whether to use a crf layer\n\n            dropout_rate: float, dropout rate\n\n            average_batch: bool, whether to average the loss over the batch\n            use_cuda: bool\n        \"\"\"\n        super(Bilstm_LR_Model, self).__init__()\n        word_embedding_shape = (len(word_embeddings),len(word_embeddings[0]))\n        # word level feature layer\n        self.word_feature_layer = WordFeature(word_embedding_shape, \n            word_embeddings, word_require_grad)\n\n        self.char_feature_layer = CharFeature(char_embedding_shape, \n            filter_sizes, filter_nums)\n        trans_input_dim = word_embedding_shape[1] + sum(filter_nums)\n        # feature dropout\n        self.dropout_feature = nn.Dropout(conf.dropout_rate)\n        input_size = trans_input_dim\n        # trans layer\n        self.bilstm_layer = Bilstm(input_size, conf.trans_output_size)\n\n        # trans dropout\n        self.dropout_trans = nn.Dropout(conf.dropout_rate)\n\n        # crf layer\n        self.crf_layer = CRF(target_size, average_batch, use_cuda)\n\n        # dense layer\n        hidden_input_dim = conf.trans_output_size * 3\n        ex_target_size = target_size + 2\n        self.hidden2tag = nn.Linear(hidden_input_dim, ex_target_size)\n\n        # loss\n        self.loss_function = self.crf_layer.neg_log_likelihood_loss\n        self.local_loss_function = nn.CrossEntropyLoss(reduce=False)\n        self.local_loss_function_nil = self.CrossEntropyLoss_nil\n        self.loss_function_ratio = self.crf_layer.neg_log_likelihood_loss_ratio\n        self.loss_function_nil = self.crf_layer.neg_log_likelihood_loss_nil\n        self.average_batch = average_batch\n        self.begins = [0. for i in range(ex_target_size)]\n        for i in conf.begin:\n            self.begins[i] = 1.\n        if conf.only_nil:\n            self.begins[0] = 1.\n        self.begins = torch.tensor(self.begins, dtype=torch.float32).cuda().view(1,ex_target_size)\n        self.insides = [0. 
for i in range(ex_target_size)]\n for i in conf.inside:\n self.insides[i] = 1.\n if conf.only_nil:\n self.insides[0] = 1.\n self.insides = torch.tensor(self.insides, dtype=torch.float32).cuda().view(1,ex_target_size)\n \n def CrossEntropyLoss_nil(self, feats, tags):\n isBNil = (tags == conf.fuzzy2id['B-Nil'])\n isINil = (tags == conf.fuzzy2id['I-Nil'])\n notfuzzy = 1 - (isBNil + isINil)\n todiv = float(len(conf.begin))\n if conf.only_nil:\n todiv += 1\n loss = torch.sum(-feats*self.begins*(isBNil.float().unsqueeze(1))/todiv, dim=1)\n loss = loss+torch.sum(-feats*self.insides*(isINil.float().unsqueeze(1))/todiv, dim=1)\n temp = tags*notfuzzy.long()\n temp = temp.unsqueeze(1)\n\n temp = -torch.gather(feats,1,temp)\n temp = temp.squeeze()\n temp = temp*notfuzzy.float()\n loss = loss+temp\n loss = loss+ torch.log(torch.sum(torch.exp(feats), dim=1))\n return loss\n\n def loss(self, feats, mask, tags):\n \"\"\"\n Args:\n feats: size=(batch_size, seq_len, tag_size)\n mask: size=(batch_size, seq_len)\n tags: size=(batch_size, seq_len)\n \"\"\"\n loss_value = self.loss_function(feats, mask, tags)\n if self.average_batch:\n batch_size = feats.size(0)\n loss_value = loss_value/float(batch_size)\n return loss_value\n \n def fuzzy_loss(self, feats, mask, tags, locations, ratio = False):\n batch_size = tags.size(0)\n seq_len = tags.size(1)\n confirmed = np.zeros((batch_size,seq_len))\n for l in range(len(locations)):\n confirmed[l][locations[l]] = 1\n tags2 = tags*(torch.tensor(confirmed,dtype=torch.long).cuda())+torch.tensor(conf.NOT_CONFIRM_IDX*(1-confirmed),dtype=torch.long).cuda()\n \n if ratio:\n loss_value = self.loss_function_ratio(feats,mask,tags2,tags)\n else:\n loss_value = self.loss_function(feats,mask,tags2, fuzzy=True)\n if self.average_batch:\n loss_value = loss_value/float(batch_size)\n return loss_value\n\n def fuzzy_loss_nil(self, feats, mask, tags, locations):\n batch_size = tags.size(0)\n seq_len = tags.size(1)\n confirmed = np.zeros((batch_size,seq_len))\n for l in range(len(locations)):\n confirmed[l][locations[l]] = 1\n confirmed = torch.tensor(confirmed,dtype=torch.long).cuda()\n tags_notconf = tags*confirmed+conf.fuzzy2id['not_conf']*(1-confirmed)\n not_nil = (tags >= 0).long()\n\n tags_nofuzzy = tags*not_nil\n loss_value = self.loss_function_nil(feats,mask,tags_notconf,tags_nofuzzy)\n if self.average_batch:\n loss_value = loss_value/float(batch_size)\n return loss_value\n def local_loss(self, feats, tags, locations):\n seq_len = feats.size(1)\n flat_feats = feats.view(-1,feats.size(-1))\n flat_tags = tags.view(-1)\n \n if conf.use_nil:\n losses = self.local_loss_function_nil(flat_feats,flat_tags)\n else:\n losses = self.local_loss_function(flat_feats,flat_tags)\n flat_locations = []\n local_mask = torch.zeros(losses.size()).float().cuda()\n location_count = 0.\n for tempi in range(len(locations)):\n start = tempi * seq_len\n location_count = location_count+len(locations[tempi])\n for loc in locations[tempi]:\n local_mask[start + loc] = 1.\n losses = local_mask * losses\n local_losses = torch.sum(losses)/location_count\n return local_losses\n \n def weighted_local_loss(self,feats,tags,tags_np,locations,weight):\n seq_len = feats.size(1)\n flat_feats = feats.view(-1,feats.size(-1))\n flat_tags = tags.view(-1)\n tags_np = tags_np.reshape(-1)\n losses = self.local_loss_function(flat_feats,flat_tags)\n flat_locations = []\n local_mask = torch.zeros(losses.size()).float().cuda()\n location_count = 0.\n for tempi in range(len(locations)):\n start = tempi * seq_len\n location_count 
=location_count+ len(locations[tempi])\n for loc in locations[tempi]:\n local_mask[start + loc] = weight[tags_np[start+loc]]\n losses = local_mask * losses\n local_losses = torch.sum(losses)/location_count\n return local_losses\n def forward(self, word_input, char_input, mask, length):\n \"\"\"\n Args:\n inputs: list\n \"\"\"\n batch_size = word_input.size(0)\n max_len = word_input.size(1)\n\n # word level feature\n word_feature = self.word_feature_layer(word_input)\n\n # char level feature\n char_feature = self.char_feature_layer(char_input)\n\n try:\n word_feature = torch.cat([word_feature, char_feature], 2)\n except:\n print (word_feature.shape)\n print (char_feature.shape)\n print (word_input.shape)\n print (char_input.shape)\n print (mask.shape)\n print (word_input)\n print (char_input)\n print (mask)\n exit(0)\n word_feature = self.dropout_feature(word_feature)\n # transformer layer\n bilstm_outputs = self.bilstm_layer(word_feature, length)\n lefts = [torch.zeros(bilstm_outputs.size(0),1,bilstm_outputs.size(-1)).cuda()]\n rights = [torch.zeros(bilstm_outputs.size(0),1,bilstm_outputs.size(-1)).cuda()]\n for tempi in range(bilstm_outputs.size(1)-1):\n lefts.append(torch.max(lefts[-1],bilstm_outputs[:,tempi:tempi+1,:]))\n for tempi in range(bilstm_outputs.size(1)-1,0,-1):\n rights.append(torch.max(rights[-1],bilstm_outputs[:,tempi:tempi+1,:]))\n rights.reverse()\n trans_outputs_lr = torch.cat([torch.cat(lefts,dim=1),bilstm_outputs,torch.cat(rights,dim=1)],dim=2)\n trans_outputs_lr = self.dropout_trans(trans_outputs_lr.view(-1, trans_outputs_lr.size(-1)))\n trans_feats = self.hidden2tag(trans_outputs_lr)\n return trans_feats.view(batch_size, max_len, trans_feats.size(-1))\n\n def predict(self, bilstm_outputs, actual_lens, mask=None):\n batch_size = bilstm_outputs.size(0)\n tags_list = []\n path_score, best_paths = self.crf_layer(bilstm_outputs, mask)\n return best_paths.cpu().data.numpy()\n def local_predict(self, logits):\n return torch.argmax(logits,dim=2).view(-1)\n","repo_name":"zig-kwin-hu/Low-Resource-Name-Tagging","sub_path":"code/model/modules/bilstm_model.py","file_name":"bilstm_model.py","file_ext":"py","file_size_in_byte":9900,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"85"} +{"seq_id":"5413062316","text":"# import os\nimport time\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom multiprocessing import Process, Pipe, Manager\nfrom flask import Flask, request\nfrom flask_classy import FlaskView, route\nimport subprocess\nimport threading\nimport json\n\n# --- This is a nice implementation of a simple timer I found online -DW --- #\n_tm = 0\n\n\ndef stopwatch(msg=''):\n tm = time.time()\n global _tm\n if _tm == 0:\n _tm = tm\n return\n print(\"%s: %.2f ms\" % (msg, 1000.0 * (tm - _tm)))\n _tm = tm\n# ------------------------------------------------------------------------- #\n\n\ndef serial_watchdog(com_pipe, debug):\n \"\"\"\n Function to be called as a process. 
Watches the serial ports and looks for devices plugged in\n or removed.\n Underscore at beginning prevents flask_classy from making it a route in the Flask server.\n :param com_pipe: this end of the pipe to communicate with main server process.\n :param debug: Flag whether or not debugging should be turned on.\n :return:\n \"\"\"\n _keep_communicating2 = True\n _com_freq = 2.0 # (Hz)\n _com_period = 1.0 / _com_freq # (s)\n _debug = debug\n\n _current_ports_by_ids = {}\n _new_ports_by_ids = {}\n\n while _keep_communicating2:\n\n # Do the timing of this process:\n _thread_start_time = time.time()\n\n if com_pipe.poll():\n\n _in_message = com_pipe.recv()\n\n if _in_message[0] == \"com_period\":\n _com_period = _in_message[1]\n\n if _in_message[0] == \"shutdown\":\n break\n\n # Find the serial ports and whether they belong to an arduino\n # TODO: This takes a very long time, is there a faster way?\n proc = subprocess.Popen('/home/mist-1/Work/ControlSystem/usb.sh',\n stdout=subprocess.PIPE, shell=True)\n\n output = proc.stdout.read().strip()\n\n _device_added = False\n _device_removed = False\n _device_ids = _current_ports_by_ids.keys()\n\n # Loop through all found devices and add them to a new list, remove them from the current list\n for line in output.split(\"\\n\"):\n\n if \"Arduino\" in line:\n\n port, raw_info = line.split(\" - \")\n serial_number = raw_info.split(\"_\")[-1]\n\n _new_ports_by_ids[serial_number] = port\n\n if serial_number not in _device_ids:\n _device_added = True\n else:\n del _current_ports_by_ids[serial_number]\n\n # Now, let's check if there are any devices still left in the current dict\n if len(_current_ports_by_ids) > 0:\n _device_removed = True\n\n _current_ports_by_ids = _new_ports_by_ids\n _new_ports_by_ids = {}\n\n if _debug:\n if _device_removed:\n print(\"Arduino(s) were removed\")\n if _device_added:\n print(\"Arduino(s) were added\")\n\n if _device_added or _device_removed:\n\n if _debug:\n print(\"Updated List:\")\n for key, item in _current_ports_by_ids.items():\n print (\"Arduino {} at port {}\".format(key, item))\n\n # Reverse ports_by_ids:\n _current_ids_by_ports = {}\n for myid, myport in _current_ports_by_ids.items():\n _current_ids_by_ports[myport] = myid\n\n pipe_message = [\"updated_list\", _current_ports_by_ids, _current_ids_by_ports]\n com_pipe.send(pipe_message)\n # _ports_by_ids = _current_ports_by_ids\n # _ids_by_ports = _current_ids_by_ports\n\n # Do the timing of this process:\n _sleepy_time = _com_period - time.time() + _thread_start_time\n\n if _sleepy_time > 0.0:\n\n if _debug:\n print(\"Watchdog alive, sleeping for {} s.\".format(_sleepy_time))\n\n time.sleep(_sleepy_time)\n\n\nclass ServerView(FlaskView):\n def __init__(self):\n self._debug = False\n\n # A pool of threads to communicate with the arduinos\n self._threadpool = ThreadPool(10)\n\n self._welcome_message = \"Hi, this is the MIST-1 Control System server running on a RasPi.\"\n\n self._pipe_server, pipe_serial_watcher = Pipe()\n\n # self._manager = Manager()\n # self._ports_by_ids = self._manager.dict()\n # self._ids_by_ports = self._manager.dict()\n\n self._watch_proc = Process(target=serial_watchdog, args=(pipe_serial_watcher,\n self._debug,))\n self._watch_proc.daemon = True\n\n # self._watchdog_thread = None\n\n self._keep_communicating = False\n self._initialized = False\n\n # TODO: Add another Pipe/Process combo for displaying stuff on our new display\n\n def kill(self):\n \"\"\"\n Shutdown routine.\n :return:\n \"\"\"\n print(\"Shutting Down!\")\n\n 
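# --- Editor's note (not part of the original file): serial_watchdog also\n        # understands a [\"shutdown\"] message on its pipe, so a gentler\n        # (hypothetical) alternative to terminate() below would be:\n        #     self._pipe_server.send([\"shutdown\"])\n        #     self._watch_proc.join(timeout=5)\n\n        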
self._keep_communicating = False\n\n self._threadpool.terminate()\n self._threadpool.join()\n\n if self._watch_proc.is_alive():\n\n self._watch_proc.terminate()\n self._watch_proc.join()\n\n else:\n\n print(\"Watchdog already dead.\")\n\n func = request.environ.get('werkzeug.server.shutdown')\n\n if func is None:\n\n raise RuntimeError('Not running with the Werkzeug Server')\n\n func()\n\n return \"Shutting down...\"\n\n @route(\"/device/all/\")\n def all_devices(self):\n\n return json.dumps(self._ports_by_ids.copy())\n\n def initialize(self):\n\n if self._initialized:\n\n return \"Server has already been initialized\"\n\n else:\n\n self._keep_communicating = True\n\n threading.Timer(0.1, self._listen_to_pipe).start()\n # self._watchdog_thread = threading.Thread(target=self._listen_to_pipe)\n # self._watchdog_thread.start()\n\n time.sleep(0.2)\n\n response = self._start_watchdog()\n\n self._initialized = True\n\n return \"Initializing Control System RasPi server services...{}\".format(response)\n\n def hello(self):\n\n return self._welcome_message\n\n def _start_watchdog(self):\n\n if not self._watch_proc.is_alive():\n\n self._watch_proc.start()\n\n return \"Started the watchdog process.\"\n\n else:\n\n return \"There was already a watchdog process running!\"\n\n def _listen_to_pipe(self):\n\n if self._pipe_server.poll():\n\n gui_message = self._pipe_server.recv()\n\n if gui_message[0] == \"updated_list\":\n\n if self._debug:\n print(\"Updating ports/ids in main server\")\n\n self._ports_by_ids = gui_message[1]\n self._ids_by_ports = gui_message[2]\n\n print(self._ports_by_ids)\n print(self._ids_by_ports)\n\n if self._keep_communicating:\n threading.Timer(1.0, self._listen_to_pipe).start()\n\n\nif __name__ == \"__main__\":\n\n app = Flask(__name__)\n # server = ServerView()\n ServerView.register(app)\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"DanielWinklehner/Ion-Source-Control-System","sub_path":"Software/RasPiServer_v2/RasPiServer_v2.py","file_name":"RasPiServer_v2.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"24584479895","text":"from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict\nfrom collections import OrderedDict\nfrom torch_geometric.nn.conv.gen_conv import GENConv\nfrom torch_geometric.nn.conv.graph_conv import GraphConv\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor, device, dtype\n\nfrom torch_geometric.nn.conv import GCNConv, SAGEConv, GCN2Conv, SGConv\nfrom .layers import QBatchNorm1d, QLinear, QReLU, QGCNConv, QDropout, QDropout2, QSAGEConv, QGENConv, QGraphConv, QGCN2Conv\nfrom .conf import config\nfrom .gatconv import CustomGATConv, QCustomGATConv\n\n\nclass QModule(nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n QModule.convert_layers(model)\n\n @staticmethod\n def convert_layers(module):\n for name, child in module.named_children():\n # Do not convert layers that are already quantized\n if isinstance(child, (QBatchNorm1d, QLinear, QReLU, QGCNConv, QGCN2Conv, QDropout, QSAGEConv, QGENConv, QCustomGATConv)):\n continue\n if isinstance(child, nn.BatchNorm1d) and config.enable_quantized_bn:\n setattr(module, name, QBatchNorm1d(child.num_features, child.eps, child.momentum,\n child.affine, child.track_running_stats))\n elif isinstance(child, nn.Linear):\n setattr(module, name, 
QLinear(child.in_features, child.out_features,\n child.bias is not None))\n elif isinstance(child, nn.ReLU):\n setattr(module, name, QReLU())\n elif isinstance(child, nn.Dropout):\n if config.dropout2:\n setattr(module, name, QDropout2(child.p))\n else:\n setattr(module, name, QDropout(child.p))\n elif isinstance(child, GCNConv):\n setattr(module, name, QGCNConv(child.in_channels, child.out_channels, child.improved, child.cached,\n child.add_self_loops, child.normalize, child.bias is not None,\n aggr=child.aggr))\n elif isinstance(child, GCN2Conv):\n beta = child.beta\n shared_weights = child.weight2 is None\n setattr(module, name, QGCN2Conv(child.channels, alpha=child.alpha, theta=None, layer=None, shared_weights=shared_weights,\n cached=child.cached, add_self_loops=child.add_self_loops, normalize=child.normalize))\n curconv = getattr(module, name)\n curconv.beta = child.beta\n elif isinstance(child, SAGEConv):\n setattr(module, name, QSAGEConv(child.in_channels, child.out_channels, child.normalize, child.root_weight,\n child.lin_l.bias is not None))\n elif isinstance(child, GraphConv):\n setattr(module, name, QGraphConv(child.in_channels, child.out_channels, child.aggr, child.lin_l.bias is not None))\n elif isinstance(child, CustomGATConv):\n setattr(module, name, QCustomGATConv(child.in_channels, child.out_channels, child.heads, child.concat, \n child.negative_slope, child.dropout, child.add_self_loops, child.bias is not None, \n child.residual, child.use_attn_dst))\n elif isinstance(child, GENConv):\n msg_norm = child.msg_norm is not None\n learn_msg_scale = True if (msg_norm and child.msg_norm.scale.requires_grad) else False\n learn_p = isinstance(child.p, torch.nn.Parameter)\n is_softmax = child.aggr == 'softmax'\n if is_softmax and isinstance(child.t, torch.nn.Parameter):\n learn_t = True\n else:\n learn_t = False\n num_layers = 0\n norm = 'batch'\n for m in child.mlp:\n if isinstance(m, torch.nn.Linear):\n num_layers += 1\n if isinstance(m, (nn.BatchNorm1d, nn.LayerNorm, nn.InstanceNorm1d)):\n if isinstance(m, nn.BatchNorm1d):\n pass\n elif isinstance(m, nn.LayerNorm):\n norm = 'layer'\n elif isinstance(m, nn.InstanceNorm1d):\n norm = 'instance'\n\n setattr(module, name, QGENConv(child.in_channels, child.out_channels, child.aggr, child.initial_t, learn_t, child.initial_p, learn_p, \n msg_norm, learn_msg_scale, norm, num_layers, child.eps))\n else:\n QModule.convert_layers(child)\n\n def forward(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def load_state_dict(self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]],\n strict: bool = True):\n # remove the prefix \"model.\" added by this wrapper\n new_state_dict = OrderedDict([(\"model.\" + k, v) for k, v in state_dict.items()])\n return super().load_state_dict(new_state_dict, strict)\n\n def state_dict(self, destination=None, prefix='', keep_vars=False):\n ret = super().state_dict(destination, prefix, keep_vars)\n\n # remove the prefix \"model.\" added by this wrapper\n ret = OrderedDict([(k[6:], v) for k, v in ret.items()])\n return ret\n\n def reset_parameters(self):\n self.model.reset_parameters()\n\n\n @torch.no_grad()\n def mini_inference(self, x_all, loader):\n return self.model.mini_inference(x_all, loader)","repo_name":"warai-0toko/Exact","sub_path":"exact/exact/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":5823,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"85"} +{"seq_id":"17870716030","text":"from PyQt5 import QtWidgets, 
QtCore, QtGui\nimport pyqtgraph as pg\nimport sys\nimport traceback\nimport csv, os, time, math\n\n\nclass MonitoringData(QtWidgets.QMainWindow):\n    def __init__(self, pkg, device_name):\n        super().__init__()\n        self.pkg = pkg # package name\n        self.device_name = device_name # device serial\n        self.timer_interval(2000) # data refresh interval (ms)\n        self.data_list = []\n        self.cpu_data = []\n        self.memory_data = []\n        self.fps_data = []\n        # self.cpucsvfile = open('./CPU_' + time.strftime(\"%Y_%m_%d_%H_%M_%S\") + '.csv', 'w', encoding='utf8', newline='')\n        # self.save_data('cpu', [('timestamp', 'CPU(%)')]) # define the CPU CSV header\n        # self.memcsvfile = open('./Memory_' + time.strftime(\"%Y_%m_%d_%H_%M_%S\") + '.csv', 'w', encoding='utf8', newline='')\n        # self.save_data('mem', [('timestamp', 'Memory(MB)')]) # define the Memory CSV header\n        # self.fpscsvfile = open('./FPS_' + time.strftime(\"%Y_%m_%d_%H_%M_%S\") + '.csv', 'w', encoding='utf8', newline='')\n        # self.save_data('fps', [('timestamp', 'FPS')]) # define the FPS CSV header\n        # create the monitoring window\n        self.setWindowTitle(\"App性能数据显示\")\n        self.App_monitoring_data = QtWidgets.QWidget() # create the main widget\n        self.setCentralWidget(self.App_monitoring_data) # set the window's central widget\n        self.resize(1200, 900) # set the window size\n        # create the CPU monitoring plot\n        self.cpu_image = QtWidgets.QGridLayout() # create the CPU grid layout\n        self.App_monitoring_data.setLayout(self.cpu_image) # use the grid as the main widget's layout\n        self.cpu_plot_widget = QtWidgets.QWidget() # widget hosting the CPU chart\n        self.plot_layout = QtWidgets.QGridLayout() # grid layout for the CPU chart\n        self.cpu_plot_widget.setLayout(self.plot_layout) # set the chart widget's layout\n        self.cpu_plot_plt = pg.PlotWidget(title='CPU', left='CPU(%)') # CPU plotting widget\n        self.cpu_plot_plt.showGrid(x=True, y=True) # show the CPU plot grid\n        self.plot_layout.addWidget(self.cpu_plot_plt) # add the plotting widget to the chart widget's grid layout\n        self.cpu_image.addWidget(self.cpu_plot_widget, 1, 0, 3, 3) # add the widget above to the layout\n        self.cpu_plot_plt.setYRange(max=120, min=0) # set the CPU plot's y-axis range\n        # create the Memory monitoring plot\n        self.mem_image = QtWidgets.QGridLayout() # create the Memory grid layout\n        self.App_monitoring_data.setLayout(self.mem_image) # use the grid as the main widget's layout\n        self.mem_plot_widget = QtWidgets.QWidget() # widget hosting the Memory chart\n        self.mem_plot_layout = QtWidgets.QGridLayout() # grid layout for the Memory chart\n        self.mem_plot_widget.setLayout(self.mem_plot_layout) # set the chart widget's layout\n        self.mem_plot_plt = pg.PlotWidget(title='Memory', left='Pss Total(MB)') # Memory plotting widget\n        self.mem_plot_plt.showGrid(x=True, y=True) # show the Memory plot grid\n        self.plot_layout.addWidget(self.mem_plot_plt) # add the plotting widget to the chart widget's grid layout\n        self.mem_image.addWidget(self.mem_plot_widget, 1, 0, 3, 3) # add the widget above to the layout\n        self.mem_plot_plt.setYRange(max=600, min=0) # set the Memory plot's y-axis range\n        # create the FPS monitoring plot\n        self.fps_image = QtWidgets.QGridLayout() # create the FPS grid layout\n        self.App_monitoring_data.setLayout(self.fps_image) # use the grid as the main widget's layout\n        self.fps_plot_widget = QtWidgets.QWidget() # widget hosting the FPS chart\n        self.fps_plot_layout = QtWidgets.QGridLayout() # grid layout for the FPS chart\n        self.fps_plot_widget.setLayout(self.fps_plot_layout) # set the chart widget's layout\n        self.fps_plot_plt = pg.PlotWidget(title='FPS', left='FPS') # FPS plotting widget\n        self.fps_plot_plt.showGrid(x=True, y=True) # show the FPS plot grid\n        self.plot_layout.addWidget(self.fps_plot_plt) # add the plotting widget to the chart widget's grid layout\n        self.fps_image.addWidget(self.fps_plot_widget, 1, 0, 3, 3) # add the widget above to the layout\n        self.fps_plot_plt.setYRange(max=70, min=0) # set the FPS plot's y-axis range\n\n    def timer_interval(self, timeinterval):\n        \"\"\"Start the refresh timer; the interval is in milliseconds\"\"\"\n        self.timer = QtCore.QTimer(self)\n        self.timer.timeout.connect(self.get_cpu_info)\n        self.timer.timeout.connect(self.get_memory_info)\n        self.timer.timeout.connect(self.get_fps_info)\n        self.timer.start(timeinterval)\n\n    def get_current_time(self):\n        \"\"\"Get the current time\"\"\"\n        
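# --- Editor's note (not part of the original file): time.strftime(\"%H:%M:%S\")\n        # formats the local time as e.g. \"14:03:27\"; these strings are what the\n        # (currently stubbed) save_data() would write as the CSV timestamp column.\n        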
currenttime = time.strftime(\"%H:%M:%S\", time.localtime())\n        return currenttime\n\n    def get_cpu_info(self):\n        \"\"\"Get CPU data\"\"\"\n        try:\n            result = os.popen(\"adb -s {} shell dumpsys cpuinfo | findstr {}\".format(self.device_name, self.pkg))\n            print('-------')\n            print(result)\n            # result = os.popen(\"adb -s {} shell top -m 100 -n 1 -d 1 | findstr {}\".format(self.device_name, self.pkg)) # run the adb command\n            res = result.readline().split(\" \") # split the fetched line on spaces\n            print(res)\n            if res == ['']: # handle the no-data case\n                print('no data')\n                pass\n            else:\n                cpuvalue1 = list(filter(None, res))[2] # take the CPU field\n                cpuvalue = cpuvalue1.strip('%') # strip the % sign\n                current_time = self.get_current_time()\n                if cpuvalue == 'R': # skip when the CPU field is 'R'\n                    pass\n                else:\n                    cpu = float(cpuvalue)\n                    print(\"CPU:\", cpu)\n                    # self.save_data('cpu', [(current_time, cpuvalue)]) # save the data to Excel\n                    self.data_list.append(cpu) # append the value to the list\n                    self.cpu_plot_plt.plot().setData(self.data_list, pen='g') # feed the data into the plot\n        except Exception as e:\n            print(traceback.print_exc())\n\n    def get_memory_info(self):\n        \"\"\"Get memory data\"\"\"\n        try:\n            result = os.popen(\"adb -s {} shell dumpsys meminfo {}\".format(self.device_name, self.pkg)) # run the adb command\n            res = result.readlines()\n            for line in res:\n                if \"TOTAL:\" in line: # adb shell dumpsys meminfo <package> labels Pss Total differently across phones: some print TOTAL:, others TOTAL PSS:, so both are handled here\n                    print(line)\n                    pss_total1 = line.split(\" \")[18] # split the line on spaces and take element 18\n                elif 'TOTAL PSS:' in line:\n                    print(line)\n                    pss_total1 = line.split(\" \")[15] # split the line on spaces and take element 15\n                else:\n                    continue\n            pss_total = round(float(pss_total1) / 1024, 2) # convert to MB, keep 2 decimal places\n            current_time = self.get_current_time()\n            print(\"Memory:\", pss_total)\n            # self.save_data('mem', [(current_time, pss_total)]) # save the data to Excel\n            self.memory_data.append(pss_total) # append to the list\n            self.mem_plot_plt.plot().setData(self.memory_data, pen='y') # feed the data into the plot\n        except Exception as e:\n            print(traceback.print_exc())\n\n    def get_fps_info(self):\n        \"\"\"Get FPS data\"\"\"\n        try:\n            result = os.popen(\"adb -s {} shell dumpsys gfxinfo {}\".format(self.device_name, self.pkg)) # run the adb command\n            res = result.readlines() # read all lines\n            frame_count = 0 # initialize frame_count\n            vsync_overtime_s = [] # initialize the vsync_overtime_s list\n            jank_num = 0 # initialize jank_num\n            for line in res: # iterate over the lines\n                if '\\t' in line: # keep only lines containing \\t\n                    if '\\tcom.kmxs.reader' in line: # skip \\tcom.kmxs.reader lines\n                        r = False\n                    elif '\\tDraw' in line: # skip \\tDraw header lines\n                        r = False\n                    elif '/android.view' in line:\n                        r = False\n                    else:\n                        frame_count = frame_count + 1 # count the frame\n                        fps = line.split('\\t') # split the fields\n                        # print(fps)\n                        Draw = float(fps[1]) # read the value\n                        Prepare = float(fps[2]) # read the value\n                        Process = float(fps[3]) # read the value\n                        Execute = float(fps[4].replace('\\n', '')) # read the value\n                        render_time = Draw + Prepare + Process + Execute # compute render_time\n                        # print(render_time)\n                        # print('Native Heap is ', Native_Heap_mem)\n                        if render_time > 16.67: # more than 16.67 ms counts as one jank\n                            jank_num += 1 # count the jank\n                            vsync_overtime = math.ceil(render_time / 16.67) - 1 # round up\n                            vsync_overtime_s.append(vsync_overtime) # append to the list\n                else:\n                    continue\n\n            vsync_overtime_sum = sum(vsync_overtime_s) # sum the list\n            fps_sum = frame_count + vsync_overtime_sum\n            if fps_sum == 0:\n                fps = 0\n                print(\"手机屏幕静止\")\n            else:\n                fps = round(frame_count * 60 / fps_sum, 2) # compute FPS, keep 2 decimal places\n            current_time = self.get_current_time()\n            # self.save_data('fps', [(current_time, fps)]) # save the data to Excel\n            print(\"FPS:\", fps)\n            self.fps_data.append(fps) # append to the list\n            self.fps_plot_plt.plot().setData(self.fps_data, pen='m') # feed the data into the plot\n        except Exception as e:\n            print(traceback.print_exc())\n\n    def 
save_data(self, data_type, cpudata):\n        pass\n        # \"\"\"Save data to Excel\"\"\"\n        # if data_type == 'cpu':\n        #     writer = csv.writer(self.cpucsvfile) # Excel writer\n        #     writer.writerows(cpudata) # write the rows to Excel\n        # elif data_type == 'mem':\n        #     writer = csv.writer(self.memcsvfile)\n        #     writer.writerows(cpudata)\n        # elif data_type == 'fps':\n        #     writer = csv.writer(self.fpscsvfile)\n        #     writer.writerows(cpudata)\n        # else:\n        #     print('data_type error!')\n\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(sys.argv)\n    # data = MonitoringData('com.kmxs.reader', '154030353600A5G') # change the package name and device serial here\n    data = MonitoringData('com.eg.android.AlipayGphone', '154030353600A5G') # change the package name and device serial here\n    data.show()\n    sys.exit(app.exec_())\n","repo_name":"gaoaolei/yufa","sub_path":"曲线绘制/性能曲线.py","file_name":"性能曲线.py","file_ext":"py","file_size_in_byte":11038,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"85"} +{"seq_id":"26857868747","text":"from pathlib import Path\nfrom unittest.mock import Mock\n\nfrom ton_engine.analysis.sql_analysis import SQLAnalysis\n\n\ndef test_run():\n    datasource_mock = Mock()\n    datasource_mock.run_query = Mock(return_value=['1', '2'])\n\n    sql_analysis = SQLAnalysis(\n        slug='abc',\n        datasource=datasource_mock,\n        sql_file_path=Path(__file__).parent / Path('data/sample.sql'),\n    )\n\n    result = sql_analysis.run()\n\n    datasource_mock.run_query.assert_called_with(query=\"SELECT *\\nFROM Students;\")\n\n    assert result == ['1', '2']\n","repo_name":"monireh-yousefi/ton-engine","sub_path":"test/ton_engine/analysis/test_sql_analysis.py","file_name":"test_sql_analysis.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"85"} +{"seq_id":"32686835217","text":"# method 1:\n# Time Complexity: O(nm)\n# Space Complexity: O(m+n)\nclass Solution:\n    def setZeroes(self, matrix: List[List[int]]) -> None:\n        \"\"\"\n        Do not return anything, modify matrix in-place instead.\n        \"\"\"\n        r_ind = set()\n        c_ind = set()\n        nrow = len(matrix)\n        ncol = len(matrix[0])\n        for i in range(nrow):\n            for j in range(ncol):\n                if matrix[i][j] == 0:\n                    r_ind.add(i)\n                    c_ind.add(j)\n        \n        for i in r_ind:\n            for j in range(ncol):\n                matrix[i][j] = 0\n        \n        for j in c_ind:\n            for i in range(nrow):\n                matrix[i][j] = 0\n        \n\n\n# method 2:\n# we can use the first cell of every row and column as a flag: \n# matrix[0][0] can be the flag for either the first row or the first col\n# we let matrix[0][0] be the flag for the first row, \n# and use an additional boolean to be the flag for the first column\n# Time Complexity: O(nm)\n# Space Complexity: O(1) \nclass Solution:\n    def setZeroes(self, matrix: List[List[int]]) -> None:\n        \n        nrow = len(matrix)\n        ncol = len(matrix[0])\n        is_col = False\n        for i in range(nrow):\n            if matrix[i][0] == 0:\n                is_col = True\n            for j in range(1, ncol):\n                if matrix[i][j] == 0:\n                    matrix[i][0] = 0\n                    matrix[0][j] = 0\n        \n        for i in range(1, nrow):\n            for j in range(1, ncol):\n                if not matrix[i][0] or not matrix[0][j]:\n                    matrix[i][j] = 0\n        \n        if matrix[0][0] == 0:\n            for j in range(ncol):\n                matrix[0][j] = 0\n        \n        if is_col:\n            for i in range(nrow):\n                matrix[i][0] = 0\n\n\n    \n\n","repo_name":"stellapeng/Data-Scientist-Algo-Problems-in-Python","sub_path":"Matrix/73. 
Set Matrix Zeroes.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23825679223","text":"class Solution:\n def isHappy(self, n):\n s = set()\n return self.helper(n, s)\n \n def helper(self, n, s):\n if n == 1: \n return True \n n_str = str(n)\n sum = 0\n for c in n_str: \n sum += int(c) * int(c) \n if sum in s: \n return False \n s.add(sum)\n return self.helper(sum, s)","repo_name":"juhan-tarn/WallBreaker","sub_path":"week2/happy-number.py","file_name":"happy-number.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"74331397079","text":"\"\"\"Play games.\"\"\"\n\nimport prompt\n\nCOUNT_ROUND = 3\nWELCOME_QUESTION = 'May I have your name? '\n\n\ndef ask_question(text_question):\n \"\"\"Ask question of user.\n\n Parameters:\n text_question: text of question\n\n Returns:\n answer: answer of user.\n\n \"\"\"\n return prompt.string(text_question)\n\n\ndef start_game(game):\n \"\"\"Play games.\n\n Parameters:\n game: module game of Brain\n\n \"\"\"\n print('Welcome to the Brain Games!')\n print()\n name = ask_question(WELCOME_QUESTION)\n print('Hello, {0}!'.format(name))\n print(game.DESCRIPTION)\n counter = 0\n while counter < COUNT_ROUND:\n question, answer_correct = game.get_question_and_answer()\n print('Question: {0}'.format(question))\n answer_of_user = ask_question('Your answer: ')\n if answer_of_user == answer_correct:\n print('Correct!')\n counter += 1\n else:\n print(\"\"\"\\\n'{0}' is wrong answer ;(. Correct answer was '{1}'\nLet's try again, {2}!\\\n \"\"\".format(answer_of_user, answer_correct, name),\n )\n break\n\n else:\n print('Congratulations, {0}!'.format(name))\n","repo_name":"gabady13/python-project-lvl1","sub_path":"brain_games/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"973187727","text":"import os\nimport sys\nimport openai\nimport pysrt\n\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\ntext_input = sys.stdin.read()\nsrt = pysrt.from_string(text_input)\n\nbase_prompt = (\n \"You are going to be an English to Korean translator.\"\n \"I will give you a transcript from a video talking about the difference\"\n \"different levels of software engineers.\"\n \"Please translate the following into polite Korean starting from\"\n \"[START] until [END]:\\n[START]\\n\"\n)\n\n\ndef translate(text):\n prompt = base_prompt + text + \"\\n[END]\"\n#\n res = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=prompt,\n max_tokens=3000,\n temperature=0\n )\n\n raw_translation = res.choices[0].text\n stripped_start = raw_translation.replace('[START]', '').strip()\n translation = stripped_start.replace('[END]', '').strip()\n return translation\n\n\nfor index, subtitle in enumerate(srt):\n subtitle.text = translate(subtitle.text)\n print(subtitle, flush=True)\n","repo_name":"jerhage/gpt-vid-translator","sub_path":"src/lib/server/scripts/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"9079559145","text":"from sklearn.datasets import load_digits\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom IPython.display import 
clear_output\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nmnist = load_digits(n_class=2)\r\n\r\nX,y = mnist.data, mnist.target\r\n\r\nprint(\"y [shape - %s]:\" % (str(y.shape)), y[:10])\r\nprint(\"X [shape - %s]:\" % (str(X.shape)))\r\n\r\nprint('X:\\n',X[:3,:10])\r\nprint('y:\\n',y[:10])\r\nplt.imshow(X[0].reshape([8,8]))\r\nplt.show()\r\n\r\n# one weight per input pixel; the optimizer updates this variable\r\nweights = tf.Variable(initial_value=np.zeros((X.shape[1], 1), dtype=\"float32\"))\r\ninput_X = tf.placeholder(\"float32\", shape=(None, X.shape[1]))\r\ninput_y = tf.placeholder(\"float32\", shape=(None, ))\r\n\r\n# predicted probability of class 1 and the log-loss it incurs\r\npredicted_y = tf.squeeze(tf.nn.sigmoid(tf.matmul(input_X, weights)))\r\nloss = tf.losses.log_loss(input_y, predicted_y)\r\noptimizer = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss, var_list=[weights])\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y)\r\n\r\nwith tf.Session() as sess:\r\n    sess.run(tf.global_variables_initializer())\r\n    for i in range(5):\r\n        _, loss_i = sess.run([optimizer, loss], {input_X: X_train, input_y: y_train})\r\n\r\n        print(\"loss at iter %i:%.4f\" % (i, loss_i))\r\n\r\n        print(\"train auc:\", roc_auc_score(y_train, sess.run(predicted_y, {input_X: X_train})))\r\n        print(\"test auc:\", roc_auc_score(y_test, sess.run(predicted_y, {input_X: X_test})))\r\n\r\n    print(\"resulting weights:\")\r\n    plt.imshow(sess.run(weights).reshape(8, -1))\r\n    plt.colorbar()\r\n    plt.show()","repo_name":"Gvein/DataMiningInAction","sub_path":"Logistic regression.py","file_name":"Logistic regression.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"30884726211","text":"def solution(id_list, report, k):\n#     answer = {}\n#     result = {}\n#     total = [0] * len(id_list)\n#     for data in report:\n#         a,b = data.split(\" \")\n#         if b not in result:\n#             result[b] = 0\n        \n#         if a not in answer:\n#             answer[a] = [b]\n#             result[b] +=1 \n#         else:\n#             if b not in answer[a]: \n#                 answer[a].append(b)\n#                 result[b] +=1\n                \n#     for myKey, datas in answer.items():\n#         for data in datas:\n#             if result[data] >= k:\n#                 total[id_list.index(myKey)] +=1\n\n    answer = [0] * len(id_list)\n    reports = {}\n    for r in set(report):\n        a,b = r.split(\" \") \n        if b not in reports:\n            reports[b] = 1\n        else: \n            reports[b] += 1\n    \n    for r in set(report):\n        a,b = r.split(\" \")\n        if reports[b] >= k:\n            answer[id_list.index(a)] +=1\n    \n    return answer","repo_name":"hissue/Program-Solution","sub_path":"프로그래머스/lv1/92334. 
신고 결과 받기/신고 결과 받기.py","file_name":"신고 결과 받기.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"4381377373","text":"import numpy as np\nfrom skimage.exposure import rescale_intensity, equalize_hist\nfrom skimage.transform import SimilarityTransform, warp, rotate, rescale, resize\n\n\ndef contrast_stretch_image(img, perc=99.8):\n # Apply it on each channel\n for channel in range(0, img.shape[2]):\n p_lower, p_upper = np.percentile(img[:, :, channel], (100 - perc, perc))\n img[:, :, channel] = rescale_intensity(img[:, :, channel], in_range=(p_lower, p_upper))\n return img\n\n\ndef histo_equalize_image(img):\n for channel in range(0, img.shape[2]):\n img[:, :, channel] = equalize_hist(img[:, :, channel])\n return img\n\n\ndef shifting(image, x_shift, y_shift):\n tform = SimilarityTransform(translation=(x_shift, y_shift))\n shifted_image = warp(image, tform, mode='constant', cval=0)\n return shifted_image\n\n\ndef flipping(image, flip_horizontal, flip_vertical):\n # Do horizontal and/or vertical flipping\n if flip_horizontal:\n image = np.flip(image, axis=1)\n if flip_vertical:\n image = np.flip(image, axis=0)\n return image\n\n\ndef rotation(image, rotation_degree):\n image = rotate(image, rotation_degree)\n return image\n\n\ndef zoom(image, x_dim, y_dim, zooming_factor):\n if len(image.shape) == 3:\n rescaled_image = rescale(image, zooming_factor, mode='reflect', anti_aliasing=True, multichannel=True)\n else:\n rescaled_image = rescale(image, zooming_factor, mode='reflect', anti_aliasing=True, multichannel=False)\n if zooming_factor > 1:\n left = round((rescaled_image.shape[0] - x_dim) / 2)\n right = left + x_dim\n upper = round((rescaled_image.shape[1] - y_dim) / 2)\n lower = upper + y_dim\n cropped_image = rescaled_image[upper:lower, left:right]\n else:\n left = round((x_dim - rescaled_image.shape[0]) / 2)\n right = left + rescaled_image.shape[0]\n upper = round((y_dim - rescaled_image.shape[1]) / 2)\n lower = upper + rescaled_image.shape[1]\n cropped_image = np.zeros(image.shape)\n if len(image.shape) == 2:\n cropped_image[upper:lower, left:right] = rescaled_image\n else:\n cropped_image[upper:lower, left:right, :] = rescaled_image\n return cropped_image\n\n\ndef zoom_resize(image, x_dim, y_dim, zooming_factor):\n if zooming_factor > 1:\n resized_image = resize(image, (round(zooming_factor * x_dim), round(zooming_factor * y_dim)),\n anti_aliasing=True, preserve_range=True)\n # print(round(zooming_factor * x_dim))\n # print(round(zooming_factor * y_dim))\n left = round((round(zooming_factor * x_dim) - x_dim) / 2)\n upper = round((round(zooming_factor * y_dim) - y_dim) / 2)\n right = left + x_dim\n lower = upper + y_dim\n cropped_image = resized_image[upper:lower, left:right]\n else:\n resized_image = resize(image, (round(zooming_factor * x_dim), round(zooming_factor * y_dim)),\n anti_aliasing=True, preserve_range=True)\n # print(round(zooming_factor * x_dim))\n # print(round(zooming_factor * y_dim))\n left = round((x_dim - round(zooming_factor * x_dim)) / 2)\n upper = round((y_dim - round(zooming_factor * y_dim)) / 2)\n right = left + round(zooming_factor * x_dim)\n lower = upper + round(zooming_factor * y_dim)\n # print(upper, lower, left, right)\n cropped_image = np.zeros(image.shape)\n if len(image.shape) == 2:\n cropped_image[upper:lower, left:right] = resized_image\n else:\n cropped_image[upper:lower, left:right, :] = resized_image\n return cropped_image\n\n\ndef 
signal_reduction(image, channel, signal_reduction_factor):\n if channel == 0:\n image[:, :, 0] = signal_reduction_factor * image[:, :, 0]\n if channel == 1:\n image[:, :, 1] = signal_reduction_factor * image[:, :, 1]\n if channel == 2:\n image[:, :, 2] = signal_reduction_factor * image[:, :, 2]\n return image","repo_name":"imsb-uke/podometric_u_net","sub_path":"network/dataset/image_transformations.py","file_name":"image_transformations.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"18087371147","text":"from lib.color_printer import ColorPrinter\nclass SqsDefaults:\n def __init__(self, config, config_data):\n self.config = config\n try:\n self.deadletter = config_data['deadletter']\n self.env = config_data['env']\n self.max_receive_count = config_data['max_receive_count']\n self.message_retention_period = config_data['message_retention_period']\n self.region = config_data['region']\n self.visibility_timeout = config_data['visibility_timeout']\n except KeyError as e:\n print(f\"Required key {e} not found in sqs_config.\")\n if config.verbose:\n print(\"Success loading SqsDefaults.\")\n\n\n def dump(self):\n cp = ColorPrinter(self.config.verbose)\n print(\"SqsDefaults are:\")\n cp.puts(f\" deadletter: {self.deadletter}\")\n cp.puts(f\" env: {self.env}\")\n cp.puts(f\" region: {self.region}\")\n cp.puts(f\" max_receive_count: {self.max_receive_count}\")\n cp.puts(f\" message_retention_period: {self.message_retention_period}\")\n cp.puts(f\" visibility_timeout: {self.visibility_timeout}\")\n\n","repo_name":"mauricionr/jararaca","sub_path":"lib/sqs_defaults.py","file_name":"sqs_defaults.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"7899967410","text":"\nfrom modeling.surface import Surface\nfrom modeling.spectrum import Spectrum\nfrom modeling.retracking import Brown\nfrom modeling import rc\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\nbrown = Brown()\nsurf = Surface()\n \n\ndf = pd.read_excel(\"data/ModelMomentsCWM.xlsx\", header=[0,1,2], index_col=[0])\ndf0 = pd.read_excel(\"data/check.xlsx\", header=[0,1,2], index_col=[0])\n\nu = df[\"U\"]\nprint(u)\ndf = df[\"model\"][\"Ku\"]\ndf0 = df0[\"ryabkova\"][\"Ku\"]\n\n\ntheta = np.deg2rad(np.linspace(-17, 17, 100))\n\n\ncsu = np.zeros(u.size)\ncsu0 = np.zeros(u.size)\nfor i in range(u.size):\n moments = np.zeros(4)\n moments[:-1] = df.iloc[i][:-1]\n csu[i] = 10*np.log10(surf.crossSection(0, moments))\n moments[:-1] = df0.iloc[i][:-1]\n csu0[i] = 10*np.log10(surf.crossSection(0, moments))\n\ndfcs = pd.DataFrame({\"U\": u.values.flatten(), \"default\": csu0.flatten(), \"cwm\": csu.flatten()})\ndfcs.to_csv(\"crosssec_wind.tsv\", sep=\"\\t\", index=False)\n\n\nmoments = np.zeros(4)\nmoments[:-1] = df.iloc[7][:-1]\ncs = 10*np.log10(surf.crossSection(theta, moments))\nmoments[:-1] = df0.iloc[7][:-1]\ncs0 = 10*np.log10(surf.crossSection(theta, moments))\ndfcs = pd.DataFrame({\"theta\": np.rad2deg(theta), \"default\": cs0, \"cwm\": cs})\ndfcs.to_csv(\"crosssec10.tsv\", sep=\"\\t\", index=False)\n\nt = brown.t()\nP0 = brown.pulse(t)\nP0 *= cs0.max()/P0.max()\nP1 = brown.pulse(t, cwm=True)\nP1 *= cs.max()/P1.max()\n\nplt.plot(t, P0)\nplt.plot(t, P1)\nd = pd.DataFrame({\"t\": t, \"linear\": P0, \"cwm\": P1})\nd.to_csv(\"impulse_cwm10.tsv\", sep=\"\\t\", index=False)\n\n\n\nmoments[:-1] = 
df.iloc[0][:-1]\ncs = 10*np.log10(surf.crossSection(theta, moments))\nmoments[:-1] = df0.iloc[0][:-1]\ncs0 = 10*np.log10(surf.crossSection(theta, moments))\n\ndfcs = pd.DataFrame({\"theta\": np.rad2deg(theta), \"default\": cs0, \"cwm\": cs})\ndfcs.to_csv(\"crosssec3.tsv\", sep=\"\\t\", index=False)\n\nt = brown.t()\nP0 = brown.pulse(t)\nP0 *= cs0.max()/P0.max()\nP1 = brown.pulse(t, cwm=True)\nP1 *= cs.max()/P1.max()\n\nplt.plot(t, P0)\nplt.plot(t, P1)\nd = pd.DataFrame({\"t\": t, \"linear\": P0, \"cwm\": P1})\nd.to_csv(\"impulse_cwm3.tsv\", sep=\"\\t\", index=False)\n\n\n\n","repo_name":"kannab98/modeling","sub_path":"crosssec-pulse.py","file_name":"crosssec-pulse.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"85"} +{"seq_id":"23807392394","text":"# Task 1\r\nn = int(input())\r\ns = 0\r\nwhile n != 0:\r\n s = s + n\r\n n = int(input())\r\nprint(s)\r\n\r\n\r\n# Task 2\r\na = int(input())\r\nb = int(input())\r\nd = 0\r\nwhile d <= (a * b):\r\n d += 1\r\n if d % a == 0 and d % b == 0:\r\n print(d)\r\n break\r\n\r\n\r\n# Task 3\r\nwhile True:\r\n n = int(input())\r\n if n > 100:\r\n break\r\n elif n < 10:\r\n continue\r\n else:\r\n print(n)\r\n\r\n\r\n# Task 4\r\na = int(input())\r\nb = int(input())\r\nc = int(input())\r\nd = int(input())\r\nfor i in range(c, d + 1):\r\n print(' ', end='\\t')\r\n print(i, end='')\r\nprint()\r\nfor i in range(a, b + 1):\r\n print(i, end='\\t')\r\n for j in range(c, d + 1):\r\n print(i * j, end='\\t')\r\n print()\r\n\r\n\r\n# Task 5\r\na = int(input())\r\nb = int(input())\r\ns = 0\r\ncount = 0\r\nfor i in range(a, b + 1):\r\n if i % 3 == 0:\r\n count +=1\r\n s += i\r\ns = s / count\r\nprint(s)\r\n\r\n\r\n# Task 6\r\nstring = str(input())\r\ncount = 0\r\nlength = len(string)\r\nfor i in string.lower():\r\n if i == 'g' or i == 'c':\r\n count += 1\r\ngc = (count / length) * 100\r\nprint(gc)\r\n\r\n\r\n# Task 7\r\ns = str(input())\r\nnew_s = ''\r\ncount = 1\r\nnew_s += s[0]\r\nfor i in range(len(s)-1):\r\n if s[i] == s[i+1]:\r\n count += 1\r\n else:\r\n if count >= 1:\r\n new_s += str(count)\r\n new_s += s[i+1]\r\n count = 1\r\nif count >= 1:\r\n new_s += str(count)\r\nprint(new_s)\r\n\r\n\r\n# Task 8\r\ns = str(input())\r\nresult = 0\r\nfor i in s.split():\r\n result = result + int(i)\r\nprint(result)\r\n\r\n\r\n# Task 9\r\nstring = input()\r\ns = string.split()\r\nn = s[-1]\r\nfor i in range(len(s)-1):\r\n if i == 0:\r\n print(str(int(s[-1]) + int(s[1])), end=' ')\r\n else:\r\n print(str(int(s[i-1]) + int(s[i+1])), end=' ')\r\nif len(s) == 1:\r\n print(s[0])\r\nelif n == s[-1] and len(s) > 1:\r\n print(str(int(s[-2]) + int(s[0])), end='')\r\n\r\n\r\n# Task 10\r\ns = [i for i in input().split()]\r\nfor i in set(s):\r\n if s.count(i) > 1:\r\n print(i, end=' ')\r\n\r\n\r\n# Task 11\r\nsum = 0\r\nsum_of_square = 0\r\nwhile True:\r\n number = int(input())\r\n sum += number\r\n sum_of_square += (number**2)\r\n if sum == 0:\r\n print(sum_of_square)\r\n break\r\n\r\n\r\n# Task 12\r\nn = int(input())\r\nnew_s = []\r\nresult = []\r\nif n == 1:\r\n print(n)\r\nelse:\r\n for i in range(1, n+1):\r\n new_s += ([i] * i)\r\n if len(new_s) > n:\r\n print(*new_s[:n])\r\n break\r\n\r\n\r\n# Task 13\r\nlst = [int(i) for i in input().split()]\r\nx = int(input())\r\nfor i in range(len(lst)):\r\n if x == lst[i]:\r\n print(i, end=' ')\r\nif x not in lst:\r\n print('Отсутствует')\r\n\r\n\r\n# Task 14\r\na = []\r\nb = input()\r\nwhile b != 'end':\r\n a.append([int(i) for i in b.split()])\r\n b = 
input()\r\nl = len(a)\r\nfor i in range(l):\r\n li = len(a[i])\r\n for j in range(li):\r\n new_arr = (a[i-l+1][j] + a[i-1][j] + a[i][j-li+1] + a[i][j-1])\r\n print(new_arr, end=' ')\r\n print()\r\n\r\n\r\n# Task 15\r\nn = int(input())\r\nlst = [[0 for i in range(n)] for j in range(n)]\r\na = 1\r\nlow = 0\r\nhigh = n - 1\r\ncount = int((n + 1) / 2)\r\n\r\nfor i in range(count):\r\n for j in range(low, high+1):\r\n lst[i][j] = a\r\n a = a + 1\r\n for j in range(low+1, high+1):\r\n lst[j][high] = a\r\n a = a + 1\r\n for j in range(high-1, low-1, -1):\r\n lst[high][j] = a\r\n a = a + 1\r\n for j in range(high-1, low, -1):\r\n lst[j][low] = a\r\n a = a + 1\r\n low = low + 1\r\n high = high - 1\r\n\r\nfor i in range(n):\r\n for j in range(n):\r\n print(lst[i][j], end=' ')\r\n print() ","repo_name":"PavelValko/stepik","sub_path":"step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"11539415898","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis program creates a quit\nbutton. When we press the button,\nthe application terminates.\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QApplication\nfrom PyQt5.QtCore import QCoreApplication\n\nclass Example(QWidget):\n def __init__(self):\n super(Example, self).__init__()\n\n self.initUI()\n\n def initUI(self):\n qbtn = QPushButton('Quit', self)\n qbtn.clicked.connect(QCoreApplication.instance().quit)\n\n # set the button to its recommended default size\n qbtn.resize(qbtn.sizeHint())\n qbtn.move(50, 50)\n\n # set the position and size of the window\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('Quit button')\n self.show()\n\nif __name__ == \"__main__\":\n # create the application object\n app = QApplication(sys.argv)\n # create the user interface object\n ex = Example()\n sys.exit(app.exec_())\n\n\n","repo_name":"yyHaker/PythonStudy","sub_path":"modules/pyqt5_/basics/quit.py","file_name":"quit.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"}
+{"seq_id":"16896341237","text":"@tf_export(\"io.gfile.walk\")\ndef walk_v2(top, topdown=True, onerror=None):\n \"\"\"Recursive directory tree generator for directories.\n Args:\n top: string, a Directory name\n topdown: bool, Traverse pre order if True, post order if False.\n onerror: optional handler for errors. Should be a function, it will be\n called with the error as argument. Rethrowing the error aborts the walk.\n Errors that happen while listing directories are ignored.\n Yields:\n Each yield is a 3-tuple: the pathname of a directory, followed by lists of\n all its subdirectories and leaf files. 
That is, each yield looks like:\n `(dirname, [subdirname, subdirname, ...], [filename, filename, ...])`.\n Each item is a string.\n \"\"\"\n def _make_full_path(parent, item):\n # Since `join` discards paths before one that starts with the path\n # separator (https://docs.python.org/3/library/os.path.html#join),\n # we have to manually handle that case as `/` is a valid character on GCS.\n if item[0] == os.sep:\n return \"\".join([join(parent, \"\"), item])\n return join(parent, item)\n top = compat.as_str_any(compat.path_to_str(top))\n try:\n listing = list_directory(top)\n except errors.NotFoundError as err:\n if onerror:\n onerror(err)\n else:\n return\n files = []\n subdirs = []\n for item in listing:\n full_path = _make_full_path(top, item)\n if is_directory(full_path):\n subdirs.append(item)\n else:\n files.append(item)\n here = (top, subdirs, files)\n if topdown:\n yield here\n for subdir in subdirs:\n for subitem in walk_v2(\n _make_full_path(top, subdir), topdown, onerror=onerror):\n yield subitem\n if not topdown:\n yield here\n","repo_name":"CookedMelon/GetConstraint","sub_path":"extract_dir/extract/io.gfile.walk.py","file_name":"io.gfile.walk.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"26687502688","text":"import re\nimport rclpy\nfrom rclpy.node import Node\n\nclass Chat(Node):\n\n def __init__(self):\n super().__init__('chat')\n \n self.intent_dict = {\n r\"\\b(?:(?:(?:[Mm]e [Ll]ev[ea])|(?:[Vv][áa]))?\\s?(?:(?:[Pp]ara [oa])|(?:[Aa]t[ée] [oa]))?\\s?(?:[Ll]aboratório|[Ll]aboratorio|[Ll]ab|[Ll]abo))\": 'lab',\n r\"\\b(?:(?:(?:[Mm]e [Ll]ev[ea])|(?:[Vv][áa]))?\\s?(?:(?:[Pp]ara [oa])|(?:[Aa]t[ée] [oa]))?\\s?(?:[Cc]entral|[Gg]rêmio|[Gg]remio))\": 'central',\n r\"\\b(?:(?:(?:[Mm]e [Ll]ev[ea])|(?:[Vv][áa]))?\\s?(?:(?:[Pp]ara [oa])|(?:[Aa]t[ée] [oa]))?\\s?(?:[Ss]ala|[Aa]teliê|[Aa]telie))\": 'sala',\n r\"(?:[Ss]air|[Ee]ncerrar|[Ee]xit)\": 'sair'\n }\n\n self.action_dict = {\n 'lab': self.lab,\n 'central': self.central,\n 'sala': self.sala,\n 'sair': self.sair\n }\n\n def lab(self, _):\n self.get_logger().info('Going to lab')\n\n def central(self, _):\n self.get_logger().info('Going to central')\n\n def sala(self, _):\n self.get_logger().info('Going to sala')\n\n def sair(self, _):\n self.get_logger().info('Exiting')\n rclpy.shutdown()\n exit()\n\n def running(self):\n while rclpy.ok():\n command = input('Enter command: ')\n for key, value in self.intent_dict.items():\n pattern = re.compile(key)\n groups = pattern.findall(command)\n if groups:\n self.action_dict[value](groups[0])\n break \n else:\n self.get_logger().info('Invalid command')\n \ndef main():\n rclpy.init()\n chat = Chat()\n chat.running()\n rclpy.spin(chat)\n chat.destroy_node()\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main()","repo_name":"gio-rodrigues0/ponderadas_m8","sub_path":"ponderada 3/src/chat/chat/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"19425164350","text":"\"\"\"add language to recommend apps\n\nRevision ID: a45f4dfde53b\nRevises: 9f4e3427ea84\nCreate Date: 2023-05-25 17:50:32.052335\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a45f4dfde53b'\ndown_revision = '9f4e3427ea84'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - 
please adjust! ###\n with op.batch_alter_table('recommended_apps', schema=None) as batch_op:\n batch_op.add_column(sa.Column('language', sa.String(length=255), server_default=sa.text(\"'en-US'::character varying\"), nullable=False))\n batch_op.drop_index('recommended_app_is_listed_idx')\n batch_op.create_index('recommended_app_is_listed_idx', ['is_listed', 'language'], unique=False)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('recommended_apps', schema=None) as batch_op:\n batch_op.drop_index('recommended_app_is_listed_idx')\n batch_op.create_index('recommended_app_is_listed_idx', ['is_listed'], unique=False)\n batch_op.drop_column('language')\n\n # ### end Alembic commands ###\n","repo_name":"langgenius/dify","sub_path":"api/migrations/versions/a45f4dfde53b_add_language_to_recommend_apps.py","file_name":"a45f4dfde53b_add_language_to_recommend_apps.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":10925,"dataset":"github-code","pt":"73"}
+{"seq_id":"31143920856","text":"import socket\nimport sys\n\n#address to which the data will be sent\nhost = '192.168.100.255'\n\n#port number on which the server receiving the data is listening\nport = 1234\n\n#create a UDP/IP socket\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n#enable broadcast so the data is sent to multiple receivers\ns.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\nprint('Para sair use CTRL+X e pressione enter\\n')\nmsg = input()\n\nwhile msg != '\\x18':\n\t#send the data\n\ts.sendto(msg.encode(),(host, port))\n\tmsg = input()\n\t\nprint('closing socket')\ns.close()","repo_name":"edullapa/datasciencecoursera","sub_path":"clientBroadcast.py","file_name":"clientBroadcast.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"36964378615","text":"\n\n\"\"\"\n@author: Andrew\n\"\"\"\n\n\nimport pandas as pd\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nurl = 'https://www.indeed.co.uk/data-scientist-jobs-in-London,-Greater-London'\n#url = 'https://https://www.indeed.co.uk/jobs?q=data+scientist&l=United+Kingdom'\n\nhtml = urlopen(url)\n\nsoup = BeautifulSoup(html, 'lxml')\ntype(soup)\n\n\njobs = []\n\ntitles = soup.find_all('h2', {\"class\" : \"title\"})\n\nlocations = soup.find_all('span', {\"class\" : \"location accessible-contrast-color-location\"})\n\nsalaries = soup.find_all('span', {\"class\" : \"salaryText\"})\n\n\ntitle = [span.get_text() for span in titles]\nsalary = [span.get_text() for span in salaries]\nlocation = [span.get_text() for span in locations]\n\nsalary = pd.Series(salary)\n\ntitle = pd.Series(title)\n\nlocation = pd.Series(location) \njobs = pd.DataFrame({'Title': title, 'Salary': salary, 'Location': location })\n\njobs.to_csv('DataFrame', index=False)","repo_name":"Andrew-Storey1/ds_salary_proj","sub_path":"soup_scraper.py","file_name":"soup_scraper.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"21507937277","text":"# Logical operators are either \"true or false\" or \" 1 and 0\" \n# equal to ==\n# less than <\n# greater than >\n# less than and equal to <=\n# greater than and equal to >=\n# not equal to !=\n\n# how to ask python about logical operator?\n# 4 is equal to 4\n# print(4==4) \n# print(4!=4)\n# print(10>=5)\n# # application 
of logical operators\n# dawood_age=4\n# min_age_at_school=5\n# print(dawood_age==min_age_at_school)\n\nmin_age_at_school=5 \ndawood_age=input(\"how old is dawood\") # input function\ndawood_age=int(dawood_age)\nprint(dawood_age==min_age_at_school) #logical operator, keep in mind, it should be integer to execute","repo_name":"786KU/first_github","sub_path":"Python_Chilla/07_conditioal_logics.py","file_name":"07_conditioal_logics.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29624837350","text":"# Converts .jpg images to .png\n# Takes input and output folders as arguments\n\n# Import libraries\nimport sys\nimport os\nfrom PIL import Image\n\n# Error handling\ntry:\n # Get source folder and output folder\n location = str(sys.argv[1])\n new_folder = str(sys.argv[2])\nexcept IndexError:\n print(\"ERROR: Please enter input and output folders.\")\nelse:\n # Create output folder if it doesn't exist\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n\n # Convert .jpg files in source folder to .png in output folder\n for file in os.listdir(location):\n if '.jpg' in file:\n img = Image.open(location + file)\n name = os.path.splitext(file)[0]\n img.save(new_folder + name + '.png', 'png')\n print(\"All done!\")\n","repo_name":"nwferreri/JPG-to-PNG-converter","sub_path":"JPGtoPNGconverter.py","file_name":"JPGtoPNGconverter.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5556202234","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 6 17:01:04 2019\n\nPython script versions of ipython notebooks \nlocated in ProteinPatchAnalysis/AutoEncoderNotebooks.\nReads in mnist.pickle that contains mnist dataset\n(Cannot import MNIST directly on CCNI). 
\nNote that the mnist.pickle file is not included in \ngithub repository because it is too large.\n\n@author: camil\n\"\"\"\n\nimport keras\nimport pickle \nimport numpy as np\nimport time\n\nfrom keras import layers\nfrom keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Lambda\nfrom keras.models import Model\nfrom keras import regularizers\nfrom keras import backend as K\n\ninput_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format\n\ndata = open(\"mnist.pickle\",\"rb\")\n[x_train,x_test] = pickle.load(data)\n\n\n\nx_train = x_train.reshape((60000, 28, 28) + (1,))\nx_test = x_test.reshape((10000, 28, 28) + (1,))\n\nbatch_size = 128\nlatent_dim = 2\n\n\nx = layers.Conv2D(32, 3,padding='same', activation='relu')(input_img)\nx = layers.Conv2D(64, 3,padding='same', activation='relu',strides=(2, 2))(x)\nx = layers.Conv2D(64, 3,padding='same', activation='relu')(x)\nx = layers.Conv2D(64, 3,padding='same', activation='relu')(x)\nshape_before_flattening = K.int_shape(x)\nx = layers.Flatten()(x)\nx = layers.Dense(32, activation='relu')(x)\n\nencoder = Model(input_img, x)\nx_encoded = encoder(input_img)\n\nz_mean = layers.Dense(latent_dim)(x_encoded)\nz_log_var = layers.Dense(latent_dim)(x)\n\n\ndef sampling(args):\n z_mean, z_log_var = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),mean=0., stddev=1.)\n return z_mean + K.exp(z_log_var) * epsilon\n\nz = layers.Lambda(sampling)([z_mean, z_log_var])\n\ndecoder_input = layers.Input(K.int_shape(z)[1:])\nx = layers.Dense(np.prod(shape_before_flattening[1:]),activation='relu')(decoder_input)\nx = layers.Reshape(shape_before_flattening[1:])(x)\nx = layers.Conv2DTranspose(32, 3,padding='same',activation='relu',strides=(2, 2))(x)\nx = layers.Conv2D(1, 3,padding='same',activation='sigmoid')(x)\n\ndecoder = Model(decoder_input, x)\nz_decoded = decoder(z)\n\n\nclass TimeHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.times = []\n\n def on_epoch_begin(self, batch, logs={}):\n self.epoch_time_start = time.time()\n\n def on_epoch_end(self, batch, logs={}):\n self.times.append(time.time() - self.epoch_time_start)\n\n\nclass CustomVariationalLayer(keras.layers.Layer):\n \n def vae_loss(self, x, z_decoded):\n x = K.flatten(x)\n z_decoded = K.flatten(z_decoded)\n xent_loss = keras.metrics.binary_crossentropy(x, z_decoded)\n kl_loss = -5e-4 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n return K.mean(xent_loss + kl_loss)\n \n def call(self, inputs):\n x = inputs[0]\n z_decoded = inputs[1]\n loss = self.vae_loss(x, z_decoded)\n self.add_loss(loss, inputs=inputs)\n return x\n \ny = CustomVariationalLayer()([input_img, z_decoded])\n\nvae = Model(input_img, y)\nvae.compile(optimizer='rmsprop', loss=None)\n\n\ntime_callback = TimeHistory()\n\nhistory = vae.fit(x=x_train, y=None,shuffle=True,epochs=50,batch_size=batch_size,validation_data=(x_test, None),callbacks=[time_callback])\n\n\nhistory_dict = history.history\n\nloss_values = history_dict['loss']\nval_loss_values = history_dict['val_loss']\n\n\npickle_out = open(\"Histories/losses-xxx.pickle\",\"wb\")\npickle.dump([loss_values,val_loss_values], pickle_out)\npickle_out.close()\n\ntimes = time_callback.times\npickle_out = open(\"Histories/times-xxx.pickle\",\"wb\")\npickle.dump(times, pickle_out)\npickle_out.close()\n\n\n\n\n# Save the weights\nvae.save_weights('Histories/model_weights-xxx.h5')\n\n# Save the model architecture\nwith 
open('Histories/model_architecture.json', 'w') as f:\n f.write(vae.to_json())\n\n\n\n\n","repo_name":"garde-group/ProteinPatchAnalysis","sub_path":"DCS_GPU_PracticeRun/MNIST_Autoencoder_Conv_Variational-Script.py","file_name":"MNIST_Autoencoder_Conv_Variational-Script.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"}
+{"seq_id":"343028114","text":"class Solution:\n # @param {integer[]} nums\n # @return {integer}\n def maxSubArray(self, nums):\n maximum=nums[0]\n totsum=[0]*len(nums) # use a list: a range object does not support item assignment\n totsum[0]=maximum\n for i in range(1,len(nums)):\n totsum[i]=max(totsum[i-1]+nums[i],nums[i])\n maximum=max(maximum,totsum[i])\n return maximum","repo_name":"randxie/leetcode","sub_path":"Medium/53_MaximumSubarray.py","file_name":"53_MaximumSubarray.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"22386025905","text":"import pandas as pd\n\ndef timePerNeighborhood(DSTs):\n dstB = [ dstB[['ID', 'NOME']] for dstB in DSTs['dstB']]\n intervals = [ dstD[['ID', 'INTERVAL']] for dstD in DSTs['dstD']]\n\n dfs = [pd.merge(dstB[i], intervals[i], on='ID', how='left') for i in range(len(intervals))]\n\n for df in dfs:\n df['INTERVAL'] = (df['INTERVAL'].dt.total_seconds())\n\n df_somas = []\n for i, df in enumerate(dfs):\n df_soma = df.groupby('NOME')['INTERVAL'].sum().reset_index()\n df_soma['day'] = f'day {i+1}'\n df_somas.append(df_soma)\n \n df_concat = pd.concat(df_somas)\n\n df_media = df_concat.groupby('NOME')['INTERVAL'].mean().reset_index()\n\n # df_final = pd.concat(dfs)\n # df_final = df_final.groupby('NOME')['INTERVAL'].mean().reset_index()\n df_media = df_media.sort_values(by='INTERVAL', ascending=False)\n\n return df_media","repo_name":"thiago1591/analise-poluicao-RJ","sub_path":"src/features/time_per_neighborhood.py","file_name":"time_per_neighborhood.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"24149796965","text":"'''\r\nCreated on 18 Jul 2013\r\n\r\n@author: toke.jepsen\r\n'''\r\n\r\n\r\nimport maya.cmds as cmds\r\nimport maya.mel as mel \r\n\r\n\r\nfrom sgtk import TankError\r\nimport sgtk\r\n\r\n#getting context - THIS NEEDS TO USE THE APP CONTEXT INSTEAD!\r\n#projectPath=cmds.workspace(q=True,fullName=True)\r\n#tk = sgtk.sgtk_from_path(projectPath)\r\n#ctx=tk.context_from_path(projectPath)\r\nctx=self._app.context\r\n \r\n#flush namespaces\r\ncmds.namespace(setNamespace=\"::\")\r\ncurrentNameSpaces = cmds.namespaceInfo(listOnlyNamespaces=True)\r\n \r\ndef removeNamespaces(namespace='::'):\r\n \r\n ignoreNamespaces = ['UI', 'shared']\r\n \r\n cmds.namespace(setNamespace=namespace)\r\n childNamespaces = cmds.namespaceInfo(listOnlyNamespaces=True)\r\n if childNamespaces:\r\n \r\n namespaces=list(set(childNamespaces)-set(ignoreNamespaces))\r\n for n in namespaces:\r\n removeNamespaces(':'+n)\r\n else:\r\n parent=':'+cmds.namespaceInfo(parent=True)\r\n cmds.namespace(setNamespace=parent)\r\n \r\n cmds.namespace(moveNamespace=[namespace,parent],force=True)\r\n \r\n cmds.namespace(removeNamespace=namespace)\r\n \r\n if parent!='::':\r\n removeNamespaces(parent)\r\n \r\nremoveNamespaces()\r\n \r\n#deleting history\r\nfor mesh in cmds.ls(type='mesh'):\r\n \r\n if cmds.objExists(mesh):\r\n \r\n #delete history\r\n cmds.delete(mesh,ch=True)\r\n \r\ninvisibleMeshes=[]\r\n#check scene for invisible 
meshes\r\nfor mesh in cmds.ls(type='mesh'):\r\n \r\n if cmds.objExists(mesh):\r\n \r\n transform=cmds.listRelatives(mesh,parent=True)[0]\r\n \r\n #make visible - need to raise error if some objects are invisible---\r\n if cmds.getAttr(transform+'.v')==0:\r\n invisibleMeshes.append(transform)\r\n \r\n \r\nif invisibleMeshes:\r\n \r\n listString=''\r\n for mesh in invisibleMeshes:\r\n listString+=mesh+','\r\n \r\n #raise sgtk.TankError(\"Unable to perform pre-publish for invisible meshes %s\" % listString)\r\n \r\n#flush any tranforms that arent a mesh\r\nfor transform in cmds.ls(type='transform'):\r\n \r\n if cmds.objExists(transform):\r\n shapes=cmds.listRelatives(transform,shapes=True,fullPath=True)\r\n \r\n #deleting empty transforms\r\n if shapes:\r\n check=False\r\n for shape in shapes:\r\n \r\n #deleting everything but meshes\r\n shapeType=cmds.nodeType(shape)\r\n if shapeType=='mesh':\r\n check=True\r\n if shapeType=='camera':\r\n cams=['front','top','persp','side']\r\n if transform in cams:\r\n check=True\r\n \r\n if not check:\r\n cmds.delete(transform)\r\n else:\r\n cmds.delete(transform)\r\n \r\n#geo group\r\ngeogrp=cmds.group(empty=True,n='geo')\r\n \r\nmeshes=[]\r\n#process meshes\r\nfor mesh in cmds.ls(type='mesh'):\r\n \r\n if cmds.objExists(mesh):\r\n \r\n transform=cmds.listRelatives(mesh,parent=True)[0]\r\n parent=cmds.listRelatives(transform,parent=True)\r\n if parent!=None:\r\n parent=parent[0]\r\n \r\n #make visible - need to raise error if some objects are invisible---\r\n #if cmds.getAttr(transform+'.v')==0:\r\n # raise cmds.error()\r\n \r\n #set pivot to world zero\r\n posGrp=cmds.group(empty=True)\r\n \r\n pivotTranslate = cmds.xform (posGrp, q = True, ws = True, rotatePivot = True)\r\n \r\n cmds.parent(transform, posGrp)\r\n cmds.makeIdentity(transform, a = True, t = True, r = True, s = True)\r\n cmds.xform (transform, ws = True, pivots = pivotTranslate)\r\n \r\n if parent!=None:\r\n cmds.parent(transform,parent)\r\n else:\r\n cmds.parent(transform,w=True)\r\n \r\n cmds.delete(posGrp)\r\n \r\n #deleting any unused nodes\r\n cmd='MLdeleteUnused;'\r\n mel.eval(cmd)\r\n \r\n #adding asset tag\r\n if not cmds.objExists(transform+'.asset'):\r\n cmds.addAttr(transform,ln='asset',dt='string')\r\n cmds.setAttr(transform+'.asset',ctx.entity['name'],type='string')\r\n \r\n #add to group\r\n if parent!=geogrp:\r\n cmds.parent(transform,geogrp)","repo_name":"baitstudio/CodeRepo","sub_path":"temp/validate_maya_scene_pre_publish.py","file_name":"validate_maya_scene_pre_publish.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"73"} +{"seq_id":"31106591729","text":"#\n# @lc app=leetcode.cn id=2125 lang=python3\n#\n# [2125] 银行中的激光束数量\n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def numberOfBeams(self, bank: List[str]) -> int:\n validRows = list(filter(lambda i:i>0, [self.sum(s) for s in bank]))\n rowCount = len(validRows)\n if rowCount < 2:\n return 0\n return sum([validRows[i] * validRows[i+1] for i in range(rowCount - 1)])\n\n def sum(self, s:str):\n return sum(int(i) for i in s)\n# @lc code=end\n\nif __name__ ==\"__main__\":\n s = Solution()\n print(s.numberOfBeams([\"011001\",\"000000\",\"010100\",\"001000\"]))\n\n","repo_name":"ChinYoung/leetcode-solution","sub_path":"2125.银行中的激光束数量.py","file_name":"2125.银行中的激光束数量.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} 
+{"seq_id":"19351953385","text":"\"\"\"\nProvides a pandas dataframe.\n\nOf all contexts as paths and as a pattern of context classes. Used by\nflowlist.py.\n\"\"\"\n\nimport pandas as pd\nfrom materialflowlist.globals import inputpath, as_path, flow_list_specs\n\ncontexts = pd.read_csv(inputpath + 'Contexts.csv', na_values='N/A')\n\n# Get levels for max number of compartment classes\nmax_compartment_classes = len(contexts.columns)\n# Define compartment_classes\ncompartment_classes = flow_list_specs['primary_context_classes'] +\\\n flow_list_specs['secondary_context_classes']\n\n# Create dictionary of context levels\ncontext_levels = {}\ncounter = 0\nfor c in compartment_classes:\n context_levels['c_' + str(counter)] = c\n counter = counter + 1\n\n# Drop duplicates just as a check\ncontexts = contexts.drop_duplicates()\n\n# Describe a pattern of compartment classes used in each context\n# Create a clean list with no NAs\ncontext_patterns = []\ncontext_list_na_removed = list()\nfor index, row in contexts.iterrows():\n pattern = [compartment_classes[x] for x in range(0, max_compartment_classes)\n if str(row[x]) != 'nan']\n pattern = ','.join(pattern)\n context_patterns.append(pattern)\n row_list = row.values\n row_list_na_removed = [x for x in row.values if str(x) != 'nan']\n context_list_na_removed.append(row_list_na_removed)\n\n# Using this clean list, generate context paths\ncontext_paths = list()\nfor r in context_list_na_removed:\n #Pass the uuid function the list as a series of string arguments\n compartment_path = as_path(*r)\n context_paths.append(compartment_path)\n\n# Write the context paths and patterns to a dictionary, then df\nd = {'Context': context_paths, 'Pattern': context_patterns}\nall_contexts = pd.DataFrame(data=d)","repo_name":"hottleta/materialflowlist","sub_path":"materialflowlist/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"73"} +{"seq_id":"6565877666","text":"import os\nimport allure\nimport pytest\nfrom api.advice_predicting import Predicting\nfrom common.get_log import log\nfrom conftest import headers_gl\n\n\nclass TestPredicting():\n \"\"\"\n 用户查询测试类\n 1.参数化存放在特定的yml文件中,用三级目录管理用例、参数数据和ids的数据\n 2.critical的用例等级为完整测试,blocker等级为冒烟测试\n 3.每个用例都配合fixture,完成了不同的前置和后置,实现了不同用例互不干扰的状态\n \"\"\"\n\n # 新���公司事件类实例\n event = Predicting()\n\n # 获取参数化的数据\n para_data = event.load_yaml('testcase/advice_predicting/para_advice_predicting.yml')\n\n # 获取不同用例需要的参数化数据以及ids标题数据\n # add_data = para_data[\"add\"][\"data\"]\n # add_ids = para_data[\"add\"][\"ids\"]\n\n get_data = para_data[\"get\"][\"data\"]\n get_ids = para_data[\"get\"][\"ids\"]\n\n #\n # delete_data = para_data[\"delete\"][\"data\"]\n # delete_ids = para_data[\"delete\"][\"ids\"]\n #\n # edit_data = para_data[\"edit\"][\"data\"]\n # edit_ids = para_data[\"edit\"][\"ids\"]\n\n @pytest.mark.parametrize(\"Authorization, status, expect_http_code, type, amount, target\", get_data, ids=get_ids)\n @allure.severity(allure.severity_level.CRITICAL)\n def test_advice_predicting(self, env, Authorization, status, expect_http_code, type, amount, target):\n algo_domain = env['host']['algo_users']\n # Authorization = headers_gl['get']['headers']['Authorization']\n log.info(\"-------------开始获取公司事件测试---------\")\n\n res = self.event.get_advice_predicting(algo_domain, Authorization, type, amount, target)\n log.info(\"-------------测试结束---------\")\n\n # ******** http协议状态码判断 ********\n print('******** http协议状态码判断... 
')\n print(\"res.status_code: {0}\".format(res.status_code))\n if res.status_code == expect_http_code:\n print('\\n检测点:http status_code 符合预期!')\n assert True\n else:\n print('WARN --> http请求响应status_code={0}'.format(res.status_code))\n assert False\n # ******** 实际测试输出 ********\n print('******** 实际测试输出... ')\n res_json = res.json()\n if res.status_code == 200:\n real_data = res_json['data']\n error = res_json['errors']\n if real_data: # 数据非空\n # print('\\n检测点:接口返回data非空,符合预期!')\n print('\\n检测点:接口返回data 符合预期!返回数据为:{0}'.format(real_data))\n # 检测返回的数据类型\n # if isinstance(real_data, dict):\n # print('\\n检测点:接口返回data type符合预期!返回数据为:{0}'.format(type(real_data)))\n # else:\n # print('WARN --> data 部分类型与预期不符,预期类型:dict,实际类型:{0}.(real_data={1})'.format(type(real_data),\n # real_data))\n # assert False\n # event_keys = ['clientNumber', 'riskType', 'prefRegion', 'prefSector', 'status',\n # 'riskAckStatus']\n orders_data = res_json['data']['orders']\n # print(orders_data)\n iii = []\n for i in orders_data:\n ii = i['weight']\n # print(ii)\n iii.append(ii)\n weight_total = sum(iii)\n if weight_total == 1:\n print(\"\\n检测点:申购试算,接口返回weight和等于1 符合预期!\")\n assert True\n else:\n print(\"\\n检测点:与预期不符!weight和为:\",weight_total)\n print(\"\\n试算返回的各基金weight:\", iii)\n assert False\n return real_data\n elif error[0]['code'] == 100002:\n assert True\n elif error[0]['code'] == 300008:\n msg = error[0]['message']\n error_code = error[0]['code']\n print(\"\\n检测点:起投金额检测\")\n print(str(error_code),msg)\n # print(real_data)\n assert True\n else:\n assert False\n else:\n pass\n\n\n\nif __name__ == '__main__':\n # a = TestEvents\n # print('Test data: {0}, Test ids: {1}'.format(a.get_data, a.get_ids))\n pytest.main(['-v', 'test_advice_predicting.py'])\n # pytest.main(['-v', '--alluredir', '../report/result', 'test_algo_stock_info.py'])\n # os.system('allure generate ../report/result -o ../report/html --clean')\n","repo_name":"ouyangrunqi/test","sub_path":"module/advice_predicting/test_advice_predicting.py","file_name":"test_advice_predicting.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13763524196","text":"import pandas as pd\r\nimport numpy as np\r\nfrom pprint import pprint\r\nimport csv\r\nfrom dicttoxml import dicttoxml\r\nimport sys\r\nimport infoGain\r\nfile_name = sys.argv[1]\r\noutput_file = sys.argv[2]\r\n\r\n\r\nwith open(file_name) as csv_file:\r\n data_list = list(csv.reader(csv_file))\r\n\r\nm = len(data_list)\r\nn = len(data_list[1])\r\n\r\natt = []\r\n\r\nfor attr in range(0,n):\r\n att.append('att'+ str(attr))\r\n\r\n\r\ndataset = pd.read_csv(file_name,names=att)\r\n\r\n\r\n\r\ndef ID3(data, originaldata, features, target_attribute_name, parent_node_class=None):\r\n\r\n\r\n if len(np.unique(data[target_attribute_name])) <= 1:\r\n return np.unique(data[target_attribute_name])[0]\r\n\r\n\r\n elif len(data) == 0:\r\n return np.unique(originaldata[target_attribute_name])[\r\n np.argmax(np.unique(originaldata[target_attribute_name], return_counts=True)[1])]\r\n\r\n\r\n elif len(features) == 0:\r\n return parent_node_class\r\n\r\n\r\n\r\n else:\r\n\r\n parent_node_class = np.unique(data[target_attribute_name])[\r\n np.argmax(np.unique(data[target_attribute_name], return_counts=True)[1])]\r\n\r\n\r\n item_values = [infoGain.InfoGain(data, feature, att[-1]) for feature in features]\r\n entropy_values = []\r\n entropy_values.append([infoGain.entropyList(data, feature, att[-1]) for feature in features])\r\n 
best_feature_index = np.argmax(item_values)\r\n best_feature = features[best_feature_index]\r\n\r\n\r\n tree = {best_feature:{}}\r\n\r\n\r\n features = [i for i in features if i != best_feature]\r\n\r\n for value in np.unique(data[best_feature]):\r\n value = value\r\n\r\n sub_data = data.where(data[best_feature] == value).dropna()\r\n\r\n\r\n subtree = ID3(sub_data, dataset, features, att[-1], parent_node_class)\r\n\r\n\r\n tree[best_feature][value] = subtree\r\n\r\n\r\n return (tree)\r\n\r\n\r\ntree = ID3(dataset,dataset,dataset.columns[:-1],att[-1])\r\n\r\nxml = dicttoxml(tree)\r\n\r\nwith open(output_file, \"wb\") as f:\r\n f.write(xml)\r\n\r\n","repo_name":"nandish21594/machine_learning_algorithms","sub_path":"Decision tree/Decision_tree.py","file_name":"Decision_tree.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1140491908","text":"import unittest\n\nfrom bitfinex.crypto import load_keys\nfrom tests.test_data import get_test_data_path\n\n__author__ = 'Gengyu Shi'\n\n\nclass TestAuthentication(unittest.TestCase):\n def test_load_keys(self):\n key_file = get_test_data_path(\"test_keys\")\n api_key, secret_key = load_keys(key_file)\n\n self.assertEqual(api_key, \"dummy_api_key\")\n self.assertEqual(secret_key, \"dummy_secret_key\")\n","repo_name":"shigengyu/bitfinex-python","sub_path":"tests/tests-unit/bitfinex/test_crypto.py","file_name":"test_crypto.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"11876225420","text":"import io\nimport os\n\nfrom shapely.geometry import Polygon, GeometryCollection, MultiPolygon\nfrom typing import List\n\nfrom wai.common.cli.options import TypedOption, FlagOption\nfrom wai.common.geometry import Polygon as WaiPolygon\nfrom wai.common.geometry import Point as WaiPoint\nfrom wai.common.adams.imaging.locateobjects import LocatedObjects, LocatedObject\nfrom wai.annotations.domain.image import Image\nfrom wai.annotations.domain.image import ImageInstance\nfrom wai.annotations.domain.classification import Classification\nfrom wai.annotations.domain.image.classification import ImageClassificationInstance\nfrom wai.annotations.domain.image.object_detection import ImageObjectDetectionInstance\nfrom wai.annotations.core.component import ProcessorComponent\nfrom wai.annotations.core.stream import ThenFunction, DoneFunction\nfrom wai.annotations.core.stream.util import RequiresNoFinalisation\n\n\nREGION_SORTING_NONE = \"none\"\nREGION_SORTING_XY = \"x-then-y\"\nREGION_SORTING_YX = \"y-then-x\"\nREGION_SORTING = [\n REGION_SORTING_NONE,\n REGION_SORTING_XY,\n REGION_SORTING_YX,\n]\n\n\nclass SubImages(\n RequiresNoFinalisation,\n ProcessorComponent[ImageInstance, ImageInstance]\n):\n \"\"\"\n Stream processor which turns RGB images into fake grayscale ones.\n \"\"\"\n\n regions: List[str] = TypedOption(\n \"-r\", \"--regions\",\n type=str,\n nargs=\"+\",\n help=\"the regions (X,Y,WIDTH,HEIGHT) to crop and forward with their annotations\"\n )\n\n region_sorting: str = TypedOption(\n \"-s\", \"--region-sorting\",\n type=str,\n default=REGION_SORTING_NONE,\n help=\"how to sort the supplied region definitions: %s\" % \"|\".join(REGION_SORTING)\n )\n\n include_partial: bool = FlagOption(\n \"-p\", \"--include-partial\",\n help=\"whether to include only annotations that fit fully into a region or also partial ones\"\n )\n\n skip_empty: bool = FlagOption(\n 
\"-e\", \"--suppress-empty\",\n help=\"suppresses sub-images that have no annotations (object detection)\"\n )\n\n verbose: bool = FlagOption(\n \"--verbose\",\n help=\"for outputting debugging information\"\n )\n\n def _initialize(self):\n \"\"\"\n Parses options.\n \"\"\"\n self._regions_xyxy = []\n self._region_lobjs = []\n for region in self.regions:\n coords = [int(x) for x in region.split(\",\")]\n if len(coords) == 4:\n x, y, w, h = coords\n self._region_lobjs.append(LocatedObject(x=x, y=y, width=w, height=h))\n\n if self.verbose:\n self.logger.info(\"unsorted regions: %s\" % str([str(x) for x in self._region_lobjs]))\n\n if self.region_sorting is not REGION_SORTING_NONE:\n if self.region_sorting == REGION_SORTING_XY:\n def sorting(obj: LocatedObject):\n return \"%06d %06d\" % (obj.x, obj.y)\n elif self.region_sorting == REGION_SORTING_YX:\n def sorting(obj: LocatedObject):\n return \"%06d %06d\" % (obj.y, obj.x)\n else:\n raise Exception(\"Unhandled region sorting: %s\" % self.region_sorting)\n self._region_lobjs.sort(key=sorting)\n if self.verbose:\n self.logger.info(\"sorted regions: %s\" % str([str(x) for x in self._region_lobjs]))\n\n for lobj in self._region_lobjs:\n self._regions_xyxy.append((lobj.x, lobj.y, lobj.x + lobj.width - 1, lobj.y + lobj.height - 1))\n if self.verbose:\n self.logger.info(\"sorted xyxy: %s\" % str(self._regions_xyxy))\n\n def _new_filename(self, filename, index):\n \"\"\"\n Generates a new filename based on the original and the index of the region.\n\n :param filename: the base filename\n :type filename: str\n :param index: the region index\n :type index: int\n :return: the generated filename\n :rtype: str\n \"\"\"\n parts = os.path.splitext(filename)\n pattern = \"-%0\" + str(len(str(len(self._region_lobjs)))) + \"d\"\n return parts[0] + pattern % index + parts[1]\n\n def _bbox_to_shapely(self, lobj: LocatedObject) -> Polygon:\n \"\"\"\n Converts the located object rectangle into a shapely Polygon.\n\n :param lobj: the bbox to convert\n :return: the Polygon\n \"\"\"\n coords = [\n (lobj.x, lobj.y),\n (lobj.x + lobj.width - 1, lobj.y),\n (lobj.x + lobj.width - 1, lobj.y + lobj.height - 1),\n (lobj.x, lobj.y + lobj.height - 1),\n (lobj.x, lobj.y),\n ]\n return Polygon(coords)\n\n def _polygon_to_shapely(self, lobj: LocatedObject) -> Polygon:\n \"\"\"\n Converts the located object polygon into a shapely Polygon.\n\n :param lobj: the polygon to convert\n :return: the Polygon\n \"\"\"\n if not lobj.has_polygon():\n return self._bbox_to_shapely(lobj)\n x_list = lobj.get_polygon_x()\n y_list = lobj.get_polygon_y()\n coords = []\n for x, y in zip(x_list, y_list):\n coords.append((x, y))\n coords.append((x_list[0], y_list[0]))\n return Polygon(coords)\n\n def _fit_annotation(self, index: int, region: LocatedObject, annotation: LocatedObject) -> LocatedObject:\n \"\"\"\n Fits the annotation into the specified region, adjusts size if necessary.\n\n :param index: the index of the region\n :param region: the region to fit the annotation in\n :param annotation: the annotation to fit\n :return: the adjust annotation\n \"\"\"\n sregion = self._bbox_to_shapely(region)\n sbbox = self._bbox_to_shapely(annotation)\n sintersect = sbbox.intersection(sregion)\n minx, miny, maxx, maxy = [int(x) for x in sintersect.bounds]\n result = LocatedObject(x=minx-region.x, y=miny-region.y, width=maxx-minx+1, height=maxy-miny+1, **annotation.metadata)\n result.metadata[\"region_index\"] = index\n result.metadata[\"region_xywh\"] = \"%d,%d,%d,%d\" % (region.x, region.y, region.width, 
region.height)\n\n if annotation.has_polygon():\n spolygon = self._polygon_to_shapely(annotation)\n else:\n spolygon = self._bbox_to_shapely(annotation)\n\n try:\n sintersect = spolygon.intersection(sregion)\n except:\n self.logger.warning(\"Failed to compute intersection!\")\n sintersect = None\n\n if isinstance(sintersect, GeometryCollection):\n for x in sintersect.geoms:\n if isinstance(x, Polygon):\n sintersect = x\n break\n elif isinstance(sintersect, MultiPolygon):\n for x in sintersect.geoms:\n if isinstance(x, Polygon):\n sintersect = x\n break\n\n if isinstance(sintersect, Polygon):\n x_list, y_list = sintersect.exterior.coords.xy\n points = []\n for i in range(len(x_list)):\n points.append(WaiPoint(x=x_list[i]-region.x, y=y_list[i]-region.y))\n result.set_polygon(WaiPolygon(*points))\n else:\n self.logger.warning(\"Unhandled geometry type returned from intersection, skipping: %s\" % str(type(sintersect)))\n\n return result\n\n def process_element(\n self,\n element: ImageInstance,\n then: ThenFunction[ImageInstance],\n done: DoneFunction\n ):\n if not hasattr(self, \"_regions_xyxy\"):\n self._initialize()\n\n img_in = element.data\n\n pil_image = img_in.pil_image\n for region_index, region_xyxy in enumerate(self._regions_xyxy):\n if self.verbose:\n self.logger.info(\"Applying region %d :%s\" % (region_index, str(region_xyxy)))\n # crop image\n sub_image = pil_image.crop(region_xyxy)\n pil_img_bytes = io.BytesIO()\n sub_image.save(pil_img_bytes, format=img_in.format.pil_format_string)\n img_out = Image(self._new_filename(img_in.filename, region_index), pil_img_bytes.getvalue(), img_in.format, img_in.size)\n # crop annotations and forward\n region_lobj = self._region_lobjs[region_index]\n if isinstance(element, ImageClassificationInstance):\n annotations = Classification(label=element.annotations.label)\n new_element = ImageClassificationInstance(data=img_out, annotations=annotations)\n then(new_element)\n elif isinstance(element, ImageObjectDetectionInstance):\n new_objects = []\n for ann_lobj in element.annotations:\n ratio = region_lobj.overlap_ratio(ann_lobj)\n if ((ratio > 0) and self.include_partial) or (ratio >= 1):\n new_objects.append(self._fit_annotation(region_index, region_lobj, ann_lobj))\n if not self.skip_empty or (len(new_objects) > 0):\n new_element = ImageObjectDetectionInstance(data=img_out, annotations=LocatedObjects(new_objects))\n then(new_element)\n else:\n self.logger.warning(\"Unhandled data (%s), skipping!\" % str(type(element)))\n then(element)\n return\n","repo_name":"waikato-ufdl/wai-annotations-imgaug","sub_path":"src/wai/annotations/imgaug/isp/sub_images/component/_SubImages.py","file_name":"_SubImages.py","file_ext":"py","file_size_in_byte":9281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14507158556","text":"import generic as g\nimport re\n\nhello\t=\t[]\ng.fill_list(hello,r'\\w',4)\ny=''\ny=y.join(hello)\nnewString='This is madhu cool guy you know'\nl=re.search(f\"{y}\", newString)\nprint(len(l))\n","repo_name":"bee966561/code_book","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73731307117","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 3 13:16:48 2017\n\n@author: deaxman\n\"\"\"\n\n#%%\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nimport 
numpy as np\nimport pandas as pd\nfrom numpy.linalg import inv\nfrom numpy.linalg import det\n#%%\nS=np.array([1,1,1,2,2,3,3,3,2,4,4,4])-1\n#X=np.array([[0,0],[-1,0],[0,1],[3,3],[3,4],[7,7],[7,8],[8,7],[2,3],[12,10],[10,10],[11,10]])\nX=np.random.rand(50,2)*10\nplt.scatter(X[:,0],X[:,1])\nk=4\ncenters=X[np.random.choice(X.shape[0],size=(4),replace=False),:]\nsigmas=np.array([np.eye(2) for i in range(k)])\nsigmaInvs=np.array([inv(sigmas[i,:,:]) for i in range(k)])\nZ=np.zeros((200,200))\ndef createCovMat(X,w):\n print(np.dot(X.T*w.T,X)/(np.sum(w[0])))\n return np.dot(X.T*w.T,X)/(np.sum(w[0]))\n\ndef gaussian2d(X,mu,sigma):\n return np.exp(-0.5*np.dot(np.dot((X-mu),inv(sigma)),(X-mu).T))/(2*np.pi*det(sigma))\n \nfor i in np.arange(5):\n W=np.exp(-0.5*np.sum((X[:,:,np.newaxis]-centers[:,:,np.newaxis].T).T*np.sum((sigmaInvs[:,:,:,np.newaxis]\n *(X[:,:,np.newaxis]-centers[:,:,np.newaxis].T).T[:,:,:,np.newaxis].transpose((0,1,3,2))),axis=1),axis=1).T)/((2*np.pi*np.array([np.sqrt(det(sigmas[i,:,:])) for i in range(k)]))[:,np.newaxis].T)\n \n #/((2*np.pi*np.array([np.sqrt(det(sigmas[i,:,:])) for i in range(k)]))[:,np.newaxis].T)\n l=np.argmin(W,axis=1)\n fig=plt.figure()\n ax=plt.axes()\n ax.scatter(X[:,0],X[:,1])\n a, b = np.meshgrid(np.arange(-5,15,0.1), np.arange(-5,15,0.1))\n for blob in range(centers.shape[0]):\n for j in range(200):\n for i in range(200):\n Z[j,i]=gaussian2d(np.concatenate((a[:,:,np.newaxis],b[:,:,np.newaxis]),axis=2)[i,j,:],centers[blob,:],sigmas[blob,:])\n ax.contour(a,b,Z)\n ax.scatter(centers[:,0],centers[:,1],marker='x')\n centers=np.array([np.sum(X*(W[:,i])[:,np.newaxis],axis=0)/np.sum(W[:,i],axis=0) for i in range(k)])\n sigmas=np.array([createCovMat(X,(W[:,i])[:,np.newaxis]) for i in range(k)])\n sigmaInvs=np.array([inv(sigmas[i,:,:]) for i in range(k)])\n \n \n \n","repo_name":"dustinaxman/MLPractice","sub_path":"GMMpractice.py","file_name":"GMMpractice.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74389128236","text":"from tkinter import *\nfrom datetime import datetime\n\nfrom constants import DATE_TIME_FORMAT\n\nCELL_MARGIN = 10\nCELL_PADDING = 2\n\n\nclass ShowPercentage(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent)\n\n self.show_efficiency_hangers = StringVar()\n self.show_efficiency_change_colour = StringVar()\n\n self.efficiency_colour_title = Label(self,\n text=\"Rendimiento del último color\",\n anchor=\"center\",\n relief=\"groove\")\n self.efficiency_colour_title.grid(row=0,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n self.efficiency_colour_result = Label(self,\n textvariable=self.show_efficiency_hangers,\n bg=\"white\",\n anchor=\"center\",\n relief=\"groove\")\n self.efficiency_colour_result.grid(row=1,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n self.efficiency_change_colour_title = Label(self,\n text=\"Rendimiento del último cambio de color\",\n anchor=\"center\",\n relief=\"groove\")\n self.efficiency_change_colour_title.grid(row=3,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n self.efficiency_change_colour_result = Label(self,\n textvariable=self.show_efficiency_change_colour,\n bg=\"white\",\n anchor=\"center\",\n relief=\"groove\")\n 
self.efficiency_change_colour_result.grid(row=4,\n column=0,\n padx=(0, CELL_PADDING),\n pady=(0, CELL_PADDING),\n ipadx=CELL_MARGIN,\n ipady=CELL_MARGIN,\n sticky=W + E + N + S)\n\n def update_change_colour_time_efficiency(self, change_times_map, history_as_records):\n last_colour = history_as_records[len(history_as_records) - 1].colour_code\n penultimate_colour = history_as_records[len(history_as_records) - 2].colour_code\n concatenate_two_colours = penultimate_colour + \"-\" + last_colour\n\n change_time_by_colour_combo = change_times_map.get(concatenate_two_colours, None)\n if change_time_by_colour_combo is not None:\n efficiency = self.__calculate_efficiency_change_colour(change_time_by_colour_combo, history_as_records)\n self.show_efficiency_change_colour.set(efficiency)\n else:\n self.show_efficiency_change_colour.set(\"No hay datos\\nanteriores con lo\\nque comparar.\")\n\n def update_colour_time_efficiency(self,\n start_datetime_as_string,\n end_datetime_as_string,\n amount_of_hangers_as_string):\n # Convert the entries into dates so they can be subtracted.\n start_colour = datetime.strptime(start_datetime_as_string, DATE_TIME_FORMAT)\n end_colour = datetime.strptime(end_datetime_as_string, DATE_TIME_FORMAT)\n\n # Convert the hangers to an integer.\n hangers = int(amount_of_hangers_as_string)\n\n # Subtract the two dates and convert the result to seconds.\n time_diff = end_colour - start_colour\n time_colour = time_diff.days * 24 * 3600 + time_diff.seconds\n\n # Compare the number of hangers against how many could pass at 100% efficiency.\n ideal_hanger_passing_time = 10\n max_hangers_in_time_colour = time_colour / ideal_hanger_passing_time\n\n # Efficiency of hanger throughput for this colour.\n efficiency_hangers = hangers / max_hangers_in_time_colour\n\n # round(number, 1) rounds a float to however many decimals we want\n percentage_efficiency_hangers = str(int(efficiency_hangers * 100)) + \" %\"\n\n self.show_efficiency_hangers.set(percentage_efficiency_hangers)\n\n def __calculate_efficiency_change_colour(self, change_time_by_colour_combo, history_as_records):\n average_time_of_colour_change = int(change_time_by_colour_combo)\n time1 = datetime.strptime(history_as_records[len(history_as_records) - 1].change_start_time, DATE_TIME_FORMAT)\n time2 = datetime.strptime(history_as_records[len(history_as_records) - 1].colour_start_time, DATE_TIME_FORMAT)\n\n last_time_change = time2 - time1\n\n last_time_change_in_seconds = last_time_change.days * 24 * 3600 + last_time_change.seconds\n\n efficiency_change_last_colour = \\\n str(int((average_time_of_colour_change * 100) / last_time_change_in_seconds)) + \" %\"\n return efficiency_change_last_colour\n","repo_name":"hector231091/control_colores_lacado_II","sub_path":"percentage.py","file_name":"percentage.py","file_ext":"py","file_size_in_byte":6211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"71542313195","text":"import utils as ut\n\ndef parse1(line):\n\treturn line\n\ndef day13p1():\n print('day 13 part 1')\n lines = ut.get_file('day13_input.txt', parse1)\n # lines = ut.get_file('day13_input_small.txt', parse1)\n\n start_time = int(lines[0])\n schedule = list(map(int, ''.join(lines[1].split('x,')).split(',')))\n departs_in = float('inf')\n bus_id = -1\n for time in schedule:\n new_departs_in = time - start_time % time\n if new_departs_in < departs_in:\n departs_in = new_departs_in\n bus_id = time\n\n print(departs_in, bus_id)\n return departs_in * 
bus_id\n\n# print(day13p1()) #12min\n\ndef parse2(line):\n\treturn line\n\ndef day13p2():\n print('day 13 part 2')\n lines = ut.get_file('day13_input.txt', parse2)\n # lines = ut.get_file('day13_input_small.txt', parse2)\n\n offset = 0\n schedule = []\n for val in lines[1].split(','):\n # if val == 'x':\n # offset += 1\n if val.isnumeric():\n bus_id = int(val)\n schedule.append((bus_id, offset))\n offset+=1\n print(schedule)\n\n max_sync = 1\n for bus_id, offset in schedule:\n max_sync *= bus_id\n\n def validate(t, schedule):\n for bus_id, offset in schedule:\n if not ((t+offset) % bus_id == 0):\n return False\n return True\n\n first_bus = 467 # int(lines[1].split(',')[0])\n t = 100000000000171 # max_sync\n counter = 0\n while True:\n # while t > 0:\n counter += 1\n if counter % 10000000 == 0:\n print(counter, t)\n if validate(t-29, schedule):\n return t\n # t -= first_bus\n t += first_bus\n\n return schedule\n\n# print(day13p2())\n\ndef day13p2v2():\n print('day 13 part 2v2')\n lines = ut.get_file('day13_input.txt', parse2)\n # lines = ut.get_file('day13_input_small.txt', parse2)\n\n offset = 0\n schedule = []\n for val in lines[1].split(','):\n if val.isnumeric():\n bus_id = int(val)\n schedule.append((bus_id, offset))\n offset += 1\n print(schedule)\n\n all_buses = [bus_id for bus_id, offset in schedule]\n\n # find a match with the the first n buses\n # use that match to update the search_offset (=0)\n # update the range operators\n range_top = 1\n range_step = 1\n # prev_bus_id = 1\n search_offset = 0\n for i, (bus_id, offset) in enumerate(schedule):\n range_top *= bus_id\n found = check_match(range_top, range_step, search_offset, schedule[:i+1])\n if found:\n search_offset = found\n print('found', search_offset)\n range_step *= bus_id\n\n return search_offset\n\ndef check_match(range_top, range_step, search_offset, schedule):\n print('every', range_step, 'offset', search_offset, 'sched', schedule)\n for i in range(0, range_top, range_step):\n if i == 0:\n continue\n t = i + search_offset\n match = [False] * len(schedule)\n\n bus_id, offset = schedule[-1]\n if (t + offset) % bus_id == 0:\n return t # short version\n # for j, (bus_id, offset) in enumerate(schedule):\n # if (t + offset) % bus_id == 0:\n # print('one', t)\n # match[j] = True\n # if all(match):\n # return i # return new offset\n return False\n\nprint(day13p2v2()) # 3hr 46min\n","repo_name":"yufengg/adventofcode","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"72850215595","text":"import unittest\nfrom same_frequency import same_frequency\n\n\nclass TestSameFrequency(unittest.TestCase):\n def test_different_length(self):\n # test should return false if the inputs have diff length\n num1 = 22\n num2 = 222\n is_same = same_frequency(num1, num2)\n self.assertFalse(is_same)\n\n def test_one_freq(self):\n # test should return false if the inputs has only one match\n num1 = 34\n num2 = 14\n is_same = same_frequency(num1, num2)\n self.assertFalse(is_same)\n\n def test_match_small_num(self):\n # test should return true when inputs has same frequency\n num1 = 182\n num2 = 281\n is_same = same_frequency(num1, num2)\n self.assertTrue(is_same)\n\n def test_match_big_num(self):\n # test should return true when inputs has same frequency\n num1 = 3589578\n num2 = 5879385\n is_same = same_frequency(num1, num2)\n 
self.assertTrue(is_same)\n","repo_name":"guiaramos/algorithms-data-structures","sub_path":"python/challenges/same_frequency_test.py","file_name":"same_frequency_test.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26773160360","text":"#-*-coding:utf-8-*-\n\nimport requests\nimport bs4\nimport openpyxl\n\ndef parser_url(url):\n    headers = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"}\n    response = requests.get(url,headers=headers)\n    soup = bs4.BeautifulSoup(response.text.replace('[','').replace(']',''),'html.parser')\n    return soup\n\ndef parser_soup(soup):\n    target = soup.find_all('p',style='TEXT-INDENT: 2em')\n    city = []\n    house_price = []\n    money = []\n    proportion = []\n    result = []\n    a = []\n\n    for i in range(0,37*5,5):\n        city.append(target[9+i].text)\n    for i in range(0,37*5,5):\n        house_price.append(target[10+i].text)\n    for i in range(0,37*5,5):\n        money.append(target[11+i].text)\n    for i in range(0,37*5,5):\n        proportion.append(target[12+i].text)\n\n    for i in range(0,36):\n        result.append(city[i])\n        result.append(house_price[i])\n        result.append(money[i])\n        result.append(proportion[i])\n        a.append(result)\n        result = []\n    return a\ndef write_file(content):\n    wb = openpyxl.Workbook()\n    wb.guess_types = True\n    ws = wb.active\n    ws.append(['city','house_price','average_salary','proportion'])\n    for each in content:\n        ws.append(each)\n    wb.save('房价比.xlsx')\n    print ('File generated successfully!')\n\ndef main():\n    url = 'http://news.house.qq.com/a/20170702/003985.htm'\n    soup = parser_url(url)\n    content = parser_soup(soup)\n    write_file(content)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"tao4091/Python","sub_path":"My_python/MaHaiTao/house_money.py","file_name":"house_money.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"72209743277","text":"# import necessary libraries\nimport csv\n\n# define and initialise data structure\nnamelist = []\n\n# input\nwith open(\"names.txt\", \"r\") as infile:\n    names = csv.reader(infile, delimiter = \",\")\n    for i in names:\n        namelist = i\n\n\nnamelist = sorted(namelist, key=str.upper)\nprint(namelist)\n","repo_name":"Leeyp/Euler","sub_path":"22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"32040704901","text":"from django.shortcuts import render\nfrom .models import *\nfrom django_pivot.pivot import pivot\nfrom django_pivot.histogram import histogram\nfrom datetime import date\nimport pandas as pd\nimport numpy as np\nfrom django.db.models import Sum, Count\n\ndef RISDB(request):\n    # This RIS_Project_Objects Should Change With The Filters\n    RIS_Project_Objects = RIS_Project.objects.all()\n\n    # This RIS_Project_Object_Static View Should Not Change With The Filter\n    RIS_Project_Objects_Static = RIS_Project.objects.all()\n    #####################################################################################\n    # For Dependent Filters --- #\n    #####################################################################################\n    # Partner Country is dependent on Sub Region and sub region is dependent on Partner Region\n    Partner_Region_Choices = Partner_Region.objects.all()\n    Sub_Region_Choices = Sub_Region.objects.all()\n    Partner_Country_Choices = 
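# Editor's note: the same_frequency implementation under test above is not
# included in this file; a minimal version that satisfies those cases could
# compare digit counts (name and signature taken from the test's import, the
# body is an assumption):
from collections import Counter

def same_frequency(num1, num2):
    # two numbers match when every digit occurs the same number of times in both
    return Counter(str(num1)) == Counter(str(num2))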
Partner_Country.objects.all()\n\n    # Modalities\n    Modalities_Choices = RIS_Project.objects.values('Modalities').distinct()\n\n    # Sub Modalities\n    SubModalities_Choices = RIS_Project.objects.values('Sub_Modalities').distinct()\n\n\n    # Year\n    Year_Choices = RIS_Project.objects.values('Year').distinct().order_by('Year')\n\n    #####################################################################################\n    # Filters Logic--- #\n    #####################################################################################\n\n    if 'PartnerRegion' in request.GET:\n        PartnerRegion = request.GET.getlist('PartnerRegion')\n        # print(PartnerRegion)\n        if PartnerRegion:\n            RIS_Project_Objects = RIS_Project_Objects.filter(Partner_Region_Code__in=PartnerRegion)\n\n\n    if 'SubRegion' in request.GET:\n        SubRegion = request.GET.getlist('SubRegion')\n        # print(SubRegion)\n        if SubRegion:\n            RIS_Project_Objects = RIS_Project_Objects.filter(Code_of_Sub_Region__in=SubRegion)\n\n\n    if 'PartnerCountry' in request.GET:\n        PartnerCountry = request.GET.getlist('PartnerCountry')\n        # print(PartnerCountry)\n        if PartnerCountry:\n            RIS_Project_Objects = RIS_Project_Objects.filter(Partner_Country_Code__in=PartnerCountry)\n\n\n    if 'Modalities' in request.GET:\n        Modalities = request.GET.getlist('Modalities')\n        # print(Modalities)\n        if Modalities:\n            RIS_Project_Objects = RIS_Project_Objects.filter(Modalities__in=Modalities)\n\n\n    if 'SubModalities' in request.GET:\n        SubModalities = request.GET.getlist('SubModalities')\n        # print(SubModalities)\n        if SubModalities:\n            RIS_Project_Objects = RIS_Project_Objects.filter(Sub_Modalities__in=SubModalities)\n\n\n\n    YearFrom = request.GET.get('YearFrom')\n    if YearFrom:\n        RIS_Project_Objects = RIS_Project_Objects.filter(Year__gte=YearFrom)\n\n\n    YearTo = request.GET.get('YearTo')\n    if YearTo:\n        # Compare the bounds as integers (the GET values are strings) and never\n        # let YearTo fall below YearFrom, which may also be absent.\n        if YearFrom and int(YearTo) < int(YearFrom):\n            YearTo = int(YearFrom) + 1\n        RIS_Project_Objects = RIS_Project_Objects.filter(Year__lte=YearTo)\n\n\n\n    #####################################################################################\n    # For Left Sidebar Graphs - #\n\n    #####################################################################################\n    # For Left Sidebar Cards - #\n    #######################################################################################\n    # 1 Total Country Benefited\n    Total_Country_Benefited_Count = len(RIS_Project_Objects_Static.values('Partner_Country').distinct())-3\n\n    # 2 Concessional Finance Total Disbursement\n    Total_Disbursement_Of_SubModalities = RIS_Project_Objects_Static.values('Sub_Modalities').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('-total')\n\n    # Total_Disbursement_Of_SubModalities.filter('')\n    # 3 Total Disbursement_of_development_assistance_USD_million\n    Country_Wise_Disbursement_Total = RIS_Project_Objects_Static.values('Partner_Country').order_by('Partner_Country').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('-total')\n    Total_Disbursement_of_development_assistance = Country_Wise_Disbursement_Total.aggregate(Sum('total'))['total__sum']\n\n    #4 Grant (Modality) Total Disbursement\n    Total_Disbursement_Of_Modalities = RIS_Project_Objects_Static.values('Modalities').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('-total')\n\n    #5 Total People Trained (using total number of slots utilized in capacity building)\n    
Total_Number_Of_Slots_across_Modalities = RIS_Project_Objects_Static.values('Modalities').annotate(total=Sum('No_of_Slots_Utilized')).order_by('-total')\n\n # Total Trade Concession\n # Get From Total_Disbursement_Of_Modalities\n\n\n # Time-Series Charts (Small left side card)\n # 5 Total Disbursement with Time (Cumulative) - Line Chart\n Total_Disbursement_with_Time_Static_Chart_DF = pd.DataFrame(RIS_Project_Objects_Static.values('Year').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('Year'))\n Total_Disbursement_with_Time_Static_Chart_DF.replace(to_replace=[None], value=0, inplace=True)\n Total_Disbursement_with_Time_Static_Chart_DF['Cumulative_Disbursement_Frequency'] = Total_Disbursement_with_Time_Static_Chart_DF['total'].cumsum()\n Total_Disbursement_with_Time_Static_Chart = Total_Disbursement_with_Time_Static_Chart_DF.to_dict('records')\n\n\n # 6 Sub Modality Wise No_of_Slots_Utilized -Donut Chart\n SubModality_Wise_Number_Of_Slots_Utilized_DF = pd.DataFrame(RIS_Project_Objects_Static.values('Sub_Modalities').annotate(Number_Of_Slots=Sum('No_of_Slots_Utilized')))\n SubModality_Wise_Number_Of_Slots_Utilized_DF.replace(to_replace=[None], value=0, inplace=True)\n SubModality_Wise_Number_Of_Slots_Utilized_Chart = SubModality_Wise_Number_Of_Slots_Utilized_DF.to_dict('records')\n\n\n ###########################################################################################\n # Section For Dynamic Changing Charts With Filters #\n # Here I'm using RIS_Project_Objects That Gets Changed With The Filters\n ###########################################################################################\n\n # 1 Cumulative Disbursement With Time With Partner Region Wise - Bar Chart\n Total_Disbursement_with_Time_Dynamic_Chart_DF = pd.DataFrame(RIS_Project_Objects.values('Year').annotate(total=Sum('Disbursement_of_development_assistance_USD_million')).order_by('Year'))\n Total_Disbursement_with_Time_Dynamic_Chart_DF.replace(to_replace=[None], value=0, inplace=True)\n Total_Disbursement_with_Time_Dynamic_Chart_DF['Cumulative_Disbursement_Frequency'] = Total_Disbursement_with_Time_Dynamic_Chart_DF['total'].cumsum()\n Total_Disbursement_with_Time_Dynamic_Chart = Total_Disbursement_with_Time_Dynamic_Chart_DF.to_dict('records')\n\n\n # 2 Total Disbursement With Modality - Polar Chart\n Total_Disbursement_With_Modality = RIS_Project_Objects.values('Modalities').annotate(total=Sum('Disbursement_of_development_assistance_USD_million'))\n print(Total_Disbursement_With_Modality,'Total_Disbursement_With_Modality')\n\n # 3 For Geography Mapping\n # Region_Wise_Total_Number_Of_Project_Total_Disbursement_And_Total_Commitment\n\n # Region_Wise_Number_Of_Projects_For_Mapping = RIS_Project_Objects_Static.values('Partner_Region').order_by('Partner_Region').annotate(NumberOfProjects=Count('id'))\n\n Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping = RIS_Project_Objects.values('Partner_Country').order_by('Partner_Country').annotate(Disbursement=Sum('Disbursement_of_development_assistance_USD_million'), Commitment=Sum('Commitment_of_development_assistance_USD_million'))\n # print(Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping)\n Partner_Country_and_Modalities = RIS_Project_Objects.values('Partner_Country','Modalities')\n\n context = {\n 'RIS_Project_Objects': RIS_Project_Objects,\n # --------------- Filtering Form ---------------------- 
#\n 'Partner_Region_Choices': Partner_Region_Choices,\n 'Sub_Region_Choices': Sub_Region_Choices,\n 'Partner_Country_Choices': Partner_Country_Choices,\n 'Modalities_Choices': Modalities_Choices,\n 'SubModalities_Choices': SubModalities_Choices,\n\n 'Year_Choices': Year_Choices,\n\n\n #---------------------Left Side Card Stats-----------------#\n 'Total_Country_Benefited_Count': Total_Country_Benefited_Count,\n 'Total_Disbursement_Of_SubModalities': Total_Disbursement_Of_SubModalities,\n 'Total_Disbursement_of_development_assistance': Total_Disbursement_of_development_assistance,\n 'Total_Disbursement_Of_Modalities': Total_Disbursement_Of_Modalities,\n 'Total_Disbursement_with_Time_Static_Chart': Total_Disbursement_with_Time_Static_Chart,\n 'SubModality_Wise_Number_Of_Slots_Utilized_Chart': SubModality_Wise_Number_Of_Slots_Utilized_Chart,\n 'Total_Number_Of_Slots_across_Modalities': Total_Number_Of_Slots_across_Modalities,\n\n #-----------Middle Section Dynamic Changing Charts and Graphs----------------#\n 'Total_Disbursement_with_Time_Dynamic_Chart': Total_Disbursement_with_Time_Dynamic_Chart,\n 'Total_Disbursement_With_Modality': Total_Disbursement_With_Modality,\n 'Partner_Country_and_Modalities' : Partner_Country_and_Modalities,\n\n # Mapping - via Leaflet Bottom Section\n 'Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping': Region_Wise_Disbursement_of_development_assistance_USD_million_Commitment_of_development_assistance_USD_million_For_Mapping,\n\n 'values': request.GET\n }\n return render(request, 'RIS_DB/RIS_DB_Home.html', context)\n\n# AJAX For Dynamically Filtering Dropdown Menu\ndef Load_Dependent_Sub_Region_Filters(request):\n Partner_Region_id = request.GET.getlist('PartnerRegionId[]')\n subRegion = Sub_Region.objects.filter(Partner_Region_Name_id__in=Partner_Region_id).distinct()\n partnerCountry = Partner_Country.objects.filter(Partner_Region_Name_id__in=Partner_Region_id).distinct()\n context = {'Sub_Region': subRegion,\n 'partnerCountry': partnerCountry\n }\n return render(request, 'partials/drill_down_filters/DrillDown_Sub_Region_Filter.html', context)\n\n\ndef Load_Dependent_Partner_Country_Filters(request):\n Sub_Region_id = request.GET.getlist('SubRegionId[]')\n partnerCountry = Partner_Country.objects.filter(Sub_Region_Name_id__in=Sub_Region_id).distinct()\n return render(request, 'partials/drill_down_filters/DrilDown_Partner_Country_Filter.html', {'Partner_Country': partnerCountry})\n\n\n\n\n","repo_name":"rajpaul8/Research_Information_System","sub_path":"RISDatabase/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33619968863","text":"from __future__ import annotations\n\nimport errno\nimport os\nimport shutil\n\nfrom collections import defaultdict\nfrom concurrent.futures import as_completed\nfrom dataclasses import dataclass\nfrom functools import lru_cache\nfrom itertools import chain\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import Mapping\nfrom typing import NewType\nfrom typing import TypedDict\n\nimport yaml\n\nfrom deb_pkg_tools.deps import AbstractRelationship\nfrom deb_pkg_tools.deps import parse_depends\nfrom deb_pkg_tools.package import ArchiveEntry\nfrom deb_pkg_tools.package import PackageFile\nfrom deb_pkg_tools.package import collect_related_packages\nfrom 
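# Editor's sketch: the repeated "getlist -> filter(field__in=values)" blocks in
# the RISDB view above all follow one pattern; a hypothetical helper (not in
# the original code) that applies a multi-select GET parameter to a queryset:
def apply_multiselect_filter(queryset, request, param, field):
    values = request.GET.getlist(param)
    # an empty selection leaves the queryset untouched
    return queryset.filter(**{f"{field}__in": values}) if values else queryset

# e.g. qs = apply_multiselect_filter(qs, request, 'PartnerRegion', 'Partner_Region_Code')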
deb_pkg_tools.package import inspect_package_contents\nfrom deb_pkg_tools.package import parse_filename\nfrom deb_pkg_tools.utils import find_installed_version\n\nfrom rosenv.environment.distro import RosDistribution\nfrom rosenv.environment.distro import parse_distro\nfrom rosenv.environment.locate import locate\nfrom rosenv.environment.run_command import CommandAbortedError\nfrom rosenv.environment.run_command import CommandFailedError\nfrom rosenv.environment.shell import RosEnvShell\nfrom rosenv.ros_package.package import PackageName\nfrom rosenv.rosdep.rosdep import ResolvedPackageName\nfrom rosenv.rosdep.rosdep import Rosdep\nfrom rosenv.util.cancelable_executor import CancelableExecutor\nfrom rosenv.util.cpu_count import get_cpu_count\nfrom rosenv.util.paths import remove_slash_prefix\n\n\n_logger = getLogger(__name__)\n\nDEFAULT_ROSENV_NAME = \"rosenv\"\n\nDebName = NewType(\"DebName\", str)\n\n\n@dataclass()\nclass Installable:\n name: PackageName\n deb_name: DebName\n location: Path\n\n\nclass UnmetDependencyError(Exception):\n def __init__(self, package: str, missing_dependencies: list[AbstractRelationship]) -> None:\n super().__init__(\n f\"{package} dependencies not found: {', '.join([str(dependency) for dependency in missing_dependencies])}\",\n )\n\n\n@lru_cache\ndef _get_installed_files(rosenv_path: Path, deb_path: Path) -> list[Path]:\n contents = inspect_package_contents(str(deb_path))\n return [rosenv_path / remove_slash_prefix(file) for file in contents]\n\n\nclass PackageIsNotInstalledError(Exception):\n def __init__(self, package: str) -> None:\n super().__init__(f\"Package {package} is not installed\")\n\n\nclass RemoveDependencyError(Exception):\n def __init__(self, package: str, dependent_packages: list[PackageName]) -> None:\n super().__init__(\n f\"Removing of Package {package} prohibited. 
{package} is a dependency for: {dependent_packages}\",\n )\n\n\nclass FileAlreadyInstalledError(Exception):\n def __init__(self, file: Path, installed_by_packages: list[PackageName]) -> None:\n self.file = file\n self.installed_by_packages = installed_by_packages\n super().__init__(f\"{file} already installed by {installed_by_packages}\")\n\n\nInstalledPackages = Dict[PackageName, Path]\n\n\nclass SettingsFile(TypedDict):\n installed_packages: dict[PackageName, str]\n ros_distro: str\n\n\nclass RosEnvSettings:\n def __init__(\n self,\n settings_file: Path,\n installed_packages: InstalledPackages,\n ros_distro: RosDistribution,\n ) -> None:\n self._settings_file = settings_file\n self.installed_packages = installed_packages\n self.ros_distro: RosDistribution = ros_distro\n\n @classmethod\n def read(cls, rosenv_path: Path) -> RosEnvSettings:\n settings_file = cls.get_settings_path(rosenv_path)\n settings: SettingsFile = yaml.safe_load(settings_file.read_text())\n installed_packages = {key: Path(value) for key, value in settings[\"installed_packages\"].items()}\n ros_distro = parse_distro(settings[\"ros_distro\"])\n return cls(\n settings_file=settings_file,\n installed_packages=installed_packages,\n ros_distro=ros_distro,\n )\n\n @staticmethod\n def initialize(rosenv_path: Path, ros_distro: RosDistribution) -> None:\n RosEnvSettings(\n settings_file=RosEnvSettings.get_settings_path(rosenv_path),\n installed_packages={},\n ros_distro=ros_distro,\n ).save()\n\n @staticmethod\n def get_settings_path(rosenv_path: Path) -> Path:\n return rosenv_path / \"rosenv/settings.yaml\"\n\n def save(self) -> None:\n if not self._settings_file.exists():\n self._settings_file.parent.mkdir(parents=True, exist_ok=True)\n\n self._settings_file.write_text(yaml.safe_dump(self._as_dict()))\n\n def add_installed(self, name: PackageName, location: Path) -> None:\n self.installed_packages[name] = location.absolute()\n self.save()\n\n def _as_dict(self) -> SettingsFile:\n return {\n \"installed_packages\": {key: str(value) for key, value in self.installed_packages.items()},\n \"ros_distro\": self.ros_distro,\n }\n\n def remove_installed(self, name: PackageName) -> None:\n self.installed_packages[name].unlink()\n del self.installed_packages[name]\n self.save()\n\n\nclass RosEnv:\n def __init__(self) -> None:\n self.path = locate(DEFAULT_ROSENV_NAME)\n # TODO(Moritz): /opt/ros/noetic is only correct if venv was created with default ros-path\n # https://dmz-gitlab.honda-ri.de/SSE/rosenv/-/issues/28\n self._settings = RosEnvSettings.read(self.path)\n self.shell = RosEnvShell(self.path / f\"opt/ros/{self._settings.ros_distro}/setup.sh\")\n self._rosdep: Rosdep | None = None\n\n @property\n def rosdep(self) -> Rosdep:\n if self._rosdep is None:\n self._rosdep = Rosdep(self.path, self.shell)\n return self._rosdep\n\n @property\n def ros_distro(self) -> RosDistribution:\n return self._settings.ros_distro\n\n @property\n def _packages_path(self) -> Path:\n return self.path / \"rosenv/packages/\"\n\n @property\n def _install_path(self) -> Path:\n return self.path\n\n def _copy(self, installable: Installable) -> Path:\n self._packages_path.mkdir(parents=True, exist_ok=True)\n\n saved_package = self._packages_path / installable.deb_name\n shutil.copy(installable.location, saved_package)\n return saved_package.resolve()\n\n def is_installed(self, package_name: PackageName) -> bool:\n return package_name in self._settings.installed_packages\n\n def get_package_deb_path(self, package_name: PackageName) -> Path:\n return 
self._settings.installed_packages[package_name]\n\n def get_installed_packages(self) -> list[PackageName]:\n return list(self._settings.installed_packages.keys())\n\n @staticmethod\n def _is_dependency(resolved_package_name: ResolvedPackageName, dependent: Path) -> bool:\n return any(package.name == resolved_package_name for package in collect_related_packages(dependent))\n\n def _get_dependent_packages(self, dependency_package: PackageName) -> list[PackageName]:\n resolved_package_name = self.rosdep.resolve(package_name=dependency_package)\n\n with CancelableExecutor(max_workers=get_cpu_count(minimum=4)) as pool:\n futures = {\n pool.submit(self._is_dependency, resolved_package_name, path_to_debian): package_name\n for package_name, path_to_debian in self._settings.installed_packages.items()\n }\n\n return [futures[future] for future in as_completed(futures) if future.result()]\n\n @staticmethod\n def _re_init_symlinked_dir(folder: Path) -> None:\n source_path = Path(os.readlink(folder.absolute()))\n folder.unlink()\n folder.mkdir()\n _logger.debug(\"reinit symlinks one level below for folder: %s\", folder)\n for source in source_path.iterdir():\n target = folder / source.name\n _logger.debug(\"creating symlink: %s -> %s\", target, source)\n target.symlink_to(source, target_is_directory=source.is_dir())\n\n def _to_rosenv_root_absolute(self, file: Path | str) -> Path:\n return self._install_path / remove_slash_prefix(file)\n\n def _build_package_file_lookup(self) -> dict[Path, list[PackageName]]:\n lookup = defaultdict(list)\n inspection_items = (\n (file_path, package)\n for package, deb_path in self._settings.installed_packages.items()\n for file_path in _get_installed_files(self._install_path, deb_path)\n )\n for path, package in inspection_items:\n lookup[path].append(package)\n\n return lookup\n\n def _find_installed_by_packages(self, file: Path) -> list[PackageName]:\n files_installed_by_package = self._build_package_file_lookup()\n return files_installed_by_package[file]\n\n def _handle_package_contents(self, installable: Installable, *, overwrite: bool) -> None:\n contents: dict[str, ArchiveEntry] = inspect_package_contents(str(installable.location))\n for package_path in contents:\n installed_file_path = self._to_rosenv_root_absolute(package_path)\n _logger.debug(\n \"Trying to install: installed_file_path=%s package_path=%s symlink:%s\",\n installed_file_path,\n package_path,\n contents[package_path].target,\n )\n if installed_file_path.is_file():\n installed_by = self._find_installed_by_packages(installed_file_path)\n if overwrite:\n _logger.warning(\n \"File exists in rosenv, will be overwritten: %s installed by %s\",\n str(installed_file_path.relative_to(self.path)),\n installed_by,\n )\n else:\n raise FileAlreadyInstalledError(installed_file_path, installed_by)\n elif installed_file_path.is_symlink() and contents[package_path].target == \"\":\n _logger.debug(\"Symlinked dir exists in rosenv: %s\", str(installed_file_path))\n self._re_init_symlinked_dir(installed_file_path)\n\n def _get_dependencies_of(\n self,\n installable: Installable,\n ) -> Iterator[AbstractRelationship]:\n return chain.from_iterable(\n parse_depends(self.shell.run(f\"dpkg-deb -f {installable.location} {field}\"))\n for field in (\"depends\", \"pre-depends\")\n )\n\n @staticmethod\n def _get_system_installed_version(dependency_name: str) -> str | None:\n return find_installed_version(dependency_name) # type: ignore[no-any-return]\n\n def _get_rosenv_installed_debs(self) -> Mapping[str, PackageFile]:\n if 
not self._packages_path.exists():\n return {}\n file_names = (parse_filename(filename) for filename in self._packages_path.iterdir())\n return {package_file.name: package_file for package_file in file_names}\n\n @staticmethod\n def _is_met_via_rosenv(\n name: str,\n dependency: AbstractRelationship,\n rosenv_installed_debs: Mapping[str, PackageFile],\n ) -> bool:\n return name in rosenv_installed_debs and dependency.matches(\n name,\n rosenv_installed_debs[name].version,\n )\n\n @staticmethod\n def _is_met_via_system(\n name: str,\n dependency: AbstractRelationship,\n ) -> bool:\n system_version = RosEnv._get_system_installed_version(name)\n # System dependencies MUST have a version. So if no version can be found, package is not installed\n return system_version is not None and dependency.matches(name, system_version)\n\n @staticmethod\n def _is_dependency_met(\n dependency: AbstractRelationship,\n rosenv_installed_debs: Mapping[str, PackageFile],\n ) -> bool:\n _logger.debug(\"Checking dependency: %s\", dependency)\n\n return any(\n RosEnv._is_met_via_rosenv(alternative, dependency, rosenv_installed_debs)\n or RosEnv._is_met_via_system(\n alternative,\n dependency,\n )\n for alternative in dependency.names\n )\n\n def _check_for_dependencies(self, installable: Installable) -> None:\n _logger.debug(\"Checking dependencies of %s\", installable.name)\n rosenv_installed_debs = self._get_rosenv_installed_debs()\n\n unmet_dependencies = [\n dependency\n for dependency in self._get_dependencies_of(installable)\n if not RosEnv._is_dependency_met(dependency, rosenv_installed_debs)\n ]\n\n if len(unmet_dependencies) != 0:\n raise UnmetDependencyError(installable.name, unmet_dependencies)\n\n def install(self, installable: Installable, *, overwrite: bool, check_dependencies: bool) -> None:\n package_name = installable.name\n\n if check_dependencies:\n self._check_for_dependencies(installable)\n\n if self.is_installed(package_name):\n if overwrite:\n _logger.info(\"Removing already installed package %s\", package_name)\n self.uninstall(package_name, force=True)\n else:\n _logger.info(\"Skipping already installed package %s\", package_name)\n return\n\n self._handle_package_contents(installable, overwrite=overwrite)\n\n package_file = self._copy(installable)\n _logger.debug(\"Installing package at %s\", str(package_file))\n\n try:\n self.shell.run(f\"/usr/bin/dpkg-deb --extract {package_file!s} {self._install_path!s}\", cwd=Path.cwd())\n except (CommandAbortedError, CommandFailedError):\n package_file.unlink()\n raise\n\n self._settings.add_installed(package_name, package_file)\n\n def uninstall(self, package_name: PackageName, *, force: bool = False) -> None:\n if not self.is_installed(package_name):\n raise PackageIsNotInstalledError(package_name)\n\n if not force:\n dependents = self._get_dependent_packages(package_name)\n if len(dependents) > 0:\n raise RemoveDependencyError(package_name, dependents)\n\n _logger.debug(\"Uninstalling package: %s\", package_name)\n\n contents: dict[str, ArchiveEntry] = inspect_package_contents(\n str(self._settings.installed_packages[package_name]),\n )\n _logger.debug(\"Package Content: %s\", contents)\n\n for package_path in reversed(contents):\n # We need to go bottom-up here, as we maybe empty folders which we\n # can then delete; all packages that I've seen so far had the\n # top-down order so reversed should be bottom-up\n sanitized_path = remove_slash_prefix(package_path)\n installed_file_path = self._install_path / sanitized_path\n\n _logger.debug(\n 
\"Trying to delete: installed_file_path=%s sanitized_path=%s package_path=%s\",\n                installed_file_path,\n                sanitized_path,\n                package_path,\n            )\n\n            if not installed_file_path.exists():\n                _logger.warning(\"File doesn't exist but was installed: %s\", str(installed_file_path))\n                continue\n\n            if installed_file_path.is_dir():\n                try:\n                    installed_file_path.rmdir()\n                except OSError as e:\n                    if e.errno != errno.ENOTEMPTY:\n                        # We only expect directories to not be empty, this is another error\n                        raise\n\n                    _logger.debug(\"Directory not empty, leaving it: %s\", str(installed_file_path))\n                continue\n\n            _logger.debug(\"Removing: %s\", installed_file_path)\n            installed_file_path.unlink()\n        self._settings.remove_installed(package_name)\n","repo_name":"HRI-EU/rosenv","sub_path":"src/rosenv/environment/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":15759,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"73"} +{"seq_id":"71465557356","text":"from pico2d import *\nimport math\nimport random\n\nTUK_WIDTH, TUK_HEIGHT = 1280, 1024\nopen_canvas(TUK_WIDTH, TUK_HEIGHT)\ntuk_ground = load_image('TUK_GROUND.png')\ncharacter = load_image('sonic_animation.png')\nhand_arrow = load_image('hand_arrow.png')\n\ndef handle_events():\n    global running\n    global x\n    global y\n\n    # exit on ESC\n    events = get_events()\n    for event in events:\n        if event.type == SDL_QUIT:\n            running = False\n        elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n            running = False\n\n# character sprite drawing function\ndef character_image(z) :\n    character.clip_draw(frame * 100, z, 100, 100, x, y)\n\n# distance calculation function\ndef distance(x1, y1, x2, y2):\n    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\n\nrunning = True\nx = 1280 // 2\ny = 1024 // 2\nframe = 0\n\n# set hand_arrow randomly (0 ~ canvas size)\nhand_x = random.randint(0, TUK_WIDTH)\nhand_y = random.randint(0, TUK_HEIGHT)\n\nwhile running :\n    clear_canvas()\n    tuk_ground.draw(TUK_WIDTH // 2, TUK_HEIGHT // 2)\n\n    # draw the arrow\n    hand_arrow.draw(hand_x, hand_y)\n\n    speed = 10\n    dx = hand_x - x\n    dy = hand_y - y\n\n    move_arrow = distance(x, y, hand_x, hand_y)\n\n    if move_arrow > 0:\n        x += (dx / move_arrow) * speed\n        y += (dy / move_arrow) * speed\n\n    # update hand_arrow\n    if distance(x, y, hand_x, hand_y) < 10:\n        hand_x = random.randint(0, TUK_WIDTH)\n        hand_y = random.randint(0, TUK_HEIGHT)\n\n    if hand_x > x :\n        character_image(100)\n    elif hand_x < x :\n        character_image(0)\n\n\n    update_canvas()\n    handle_events()\n    frame = (frame + 1) % 8\n    delay(0.05)\n\nclose_canvas()","repo_name":"icktae/Drill05","sub_path":"move_character_with_random_arrow.py","file_name":"move_character_with_random_arrow.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71563881835","text":"propensities = {\n    'N': 0.2299, 'P': 0.5523, 'Q':-0.18770, 'A':-0.2615,\n    'R':-0.1766, 'S': 0.1429, 'C':-0.01515, 'T': 0.0089,\n    'D': 0.2276, 'E':-0.2047, 'V':-0.38620, 'F':-0.2256,\n    'W':-0.2434, 'G': 0.4332, 'H':-0.00120, 'Y':-0.2075,\n    'I':-0.4222, 'K':-0.1001, 'L': 0.33793, 'M':-0.2259\n    }\nthreshold = 0.3\ninput_seq = \"IVGGYTCGANTVPYQVSLNSGYHFCGGSLINSQWVVSAAHCYKSG\\\nIQVRLGEDNINVVEGNEQFISASKSIVHPSYNSNTLNNDIMLIKLKSAASLNSR\\\nVASISLPTSCASAGTQCLISGWGNTKSSGTSYPDVLKCLKAPILSDSSCKSAYP\\\nGQITSNMFCAGYLEGGKDSCQGDSGGPVVCSGKLQGIVSWGSGCAQKNKPGVYT\\\nKVCNYVSWIKQTIASN\"\noutput_seq = \"\"\nfor res in input_seq:\n    if res in propensities:\n        if propensities[res] >= threshold:\n            output_seq += res.upper()\n        else:\n            output_seq += 
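# Editor's sketch: the uninstall loop in the rosenv record above deletes a
# package's paths bottom-up and tolerates only "directory not empty" errors;
# the same pattern in isolation (function name and inputs are illustrative):
import errno
from pathlib import Path

def remove_paths_bottom_up(paths):
    for path in reversed(paths):  # children before their parent directories
        if not path.exists():
            continue
        if path.is_dir():
            try:
                path.rmdir()  # only succeeds for empty directories
            except OSError as e:
                if e.errno != errno.ENOTEMPTY:
                    raise  # anything except "not empty" is unexpected
        else:
            path.unlink()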
res.lower()\n    else:\n        print('unrecognized character:', res)\n        break\nprint(output_seq)\n\naa_codes = {\n    'ALA':'A', 'CYS':'C', 'ASP':'D', 'GLU':'E',\n    'PHE':'F', 'GLY':'G', 'HIS':'H', 'LYS':'K',\n    'ILE':'I', 'LEU':'L', 'MET':'M', 'ASN':'N',\n    'PRO':'P', 'GLN':'Q', 'ARG':'R', 'SER':'S',\n    'THR':'T', 'VAL':'V', 'TYR':'Y', 'TRP':'W'}\nseq = ''\nfor line in open(\"1TLD.pdb\"):\n    if line[0:6] == \"SEQRES\":\n        columns = line.split()\n        for resname in columns[4:]:\n            seq = seq + aa_codes[resname]\ni = 0\nprint(\">1TLD\")\nwhile i < len(seq):\n    print(seq[i:i + 64])\n    i = i + 64\n\n","repo_name":"08zhangyi/Some-thing-interesting-for-me","sub_path":"Python生物信息学数据管理/part2/ch05/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"4177310918","text":"from django.http.response import JsonResponse\nfrom django.shortcuts import render,redirect\nfrom lsc_app.forms.md_forms import md\nfrom django.http import HttpResponse\nfrom lsc_app.models import *\nfrom datetime import datetime\nimport json\n\n\ndef load_selected_category_users_ajax(request):\n    \"\"\"\n    :param request:\n    :return: returns an HttpResponse with the list of user names for the selected user category\n    \"\"\"\n    user_category = request.GET['user_category']\n\n    if user_category == 'MD':\n        users = MD.objects.all()\n    \n    elif user_category == 'Doctor':\n        users = Doctor.objects.all()\n\n    elif user_category == 'Therapist':\n        users = Therapist.objects.all()\n\n    elif user_category == 'Receptionist':\n        users = Receptionist.objects.all()\n\n    #print(users)\n    cd = set(users.values_list('name', flat=True).distinct())\n    result = '''{}'''.format(list(cd))\n    return HttpResponse(result) \n\n\ndef suggest_username_ajax(request):\n    \"\"\"\n    :param request:\n    :return: returns an HttpResponse with a valid new user id for the requested username, or 409 if it is taken\n    \"\"\"\n    user_name = request.GET['user_name']\n    print(user_name)\n    users = User.objects.filter(username=user_name)\n    print(users)\n    user_profile = UserProfile.objects.all()\n    last_user = user_profile.latest('user_id_field')\n    print (last_user)\n    new_user_id_field = int(last_user.user_id_field) + 1\n    print(new_user_id_field)\n    if not User.objects.filter(username=user_name).exists():\n        data = new_user_id_field\n        return HttpResponse(data, content_type='text/plain')\n    else:\n        data = 409\n        return HttpResponse(data,content_type='text/plain')\n\n\nfrom django.core.files.storage import FileSystemStorage\nfrom django.views.decorators.csrf import csrf_exempt,csrf_protect\n\n@csrf_exempt\ndef load_blog_images_ajax(request):\n    upload = request.FILES['image']\n    print(upload)\n    fss = FileSystemStorage()\n    # django-summernote/\" + datetime.datetime.now().strftime(\"%Y/%m\")\n    file = fss.save(upload.name, upload)\n    print(file)\n    file_url = fss.url(file)\n    print (file_url)\n    return HttpResponse(file_url)","repo_name":"saranyagovindasamy/Djano_Proj","sub_path":"lakshmisiddhaclinic/lsc_project/lsc_app/views/ajax_views/ajax_views.py","file_name":"ajax_views.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"37620300296","text":"import pandas as pd\r\nfrom sklearn.naive_bayes import BernoulliNB\r\nfrom sklearn.metrics import accuracy_score\r\nmodel = BernoulliNB()\r\n\r\n#Read data (raw strings so the Windows path backslashes are taken literally)\r\nmnist_train = pd.read_csv(r'C:\RPi Server\!Electrical Engineering\!Semester 1\Artificial Intelligence\mnist_train_th128.csv')\r\nmnist_test = 
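# Editor's sketch: exercising suggest_username_ajax above with Django's test
# client; the URL is hypothetical and depends on the project's urls.py:
from django.test import Client

client = Client()
resp = client.get('/ajax/suggest_username/', {'user_name': 'new_user'})
# the body is the next free user_id_field, or 409 when the username is taken
print(resp.content.decode())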
pd.read_csv(r'C:\RPi Server\!Electrical Engineering\!Semester 1\Artificial Intelligence\mnist_test_th128.csv')\r\n\r\ndata_training = mnist_train[0:3001]\r\ndata_testing = mnist_test[0:51]\r\n\r\n#print(data_training)\r\n#print(data_testing)\r\n\r\n#Removing label\r\ndata_training_new = data_training.drop(columns=['label'])\r\ndata_testing_new = data_testing.drop(columns=['label'])\r\n\r\n#print(data_training_new)\r\n#print(data_testing_new)\r\n\r\ndata_training_out = data_training['label']\r\ndata_testing_out = data_testing['label']\r\n#print(data_training_out)\r\n#print(data_testing_out)\r\n\r\nmodel.fit(data_training_new,data_training_out)\r\nexpected = data_testing_out\r\npredicted = model.predict(data_testing_new)\r\nprint(predicted)\r\nprint(\"Error ratio over the 50 test samples is: \",(100-(accuracy_score(expected,predicted)*100)),\"%\")","repo_name":"afafirmansyah/artificial-intelligence","sub_path":"Naive Bayes.py","file_name":"Naive Bayes.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"6497226105","text":"# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n    def removeElements(self, head, val):\n        \"\"\"\n        :type head: ListNode\n        :type val: int\n        :rtype: ListNode\n        \"\"\"\n        #Runtime: 52 ms, faster than 92.16% of Python online submissions for Remove Linked List Elements.\n        #Memory Usage: 18.7 MB, less than 20.69% of Python online submissions for Remove Linked List Elements.\n\n        ## if no node\n        if head is None:\n            return None\n\n        ## if only 1 node\n        if head.next is None:\n            if head.val == val:\n                return None\n            else:\n                return head\n\n        cur = head.next\n        prev = head\n\n        ## remove head.val ==val\n        while head.val == val and cur:\n            head = cur\n            prev = head\n            cur = head.next\n        \n        if head.val == val:\n            return None\n\n        while cur :\n            #print(\"cur = {} prev = {}\".format(cur.val,prev.val))\n            ## if cur.val is equal val\n            if cur.val == val:\n                prev.next = cur.next\n            else:\n                prev = prev.next\n\n            cur = cur.next\n\n\n        return head\n\n    def printAllnode(self,node):\n        while node:\n            print(node.val)\n            node = node.next\n\ndef main():\n    a = Solution()\n    node1 = ListNode(1)\n    node2 = ListNode(1)\n    node3 = ListNode(1)\n    node4 = ListNode(2)\n    node5 = ListNode(3)\n    node1.next = node2\n    node2.next = node3\n    #node3.next = node4\n    #node4.next = node5\n\n    \n\n    \n    \n    ans = a.removeElements(node1,1)\n    print(\"===\")\n    a.printAllnode(ans)\n    \n    \n    \n    \n\n    \n\nif __name__ == '__main__':\n    main() ","repo_name":"xiao-bo/leetcode","sub_path":"easy/removeLinkedListElements.py","file_name":"removeLinkedListElements.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72539962477","text":"import logging\n\nfrom client import Client\nfrom torrent import ActiveTorrent\n\ndef main():\n    logging.basicConfig(filename='bt.log',\n                        filemode='w',\n                        level=logging.DEBUG,\n                        # format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n                        format='%(asctime)s - %(levelname)s - %(message)s')\n\n    log = logging.getLogger('test_driver')\n    log.info('Starting up...')\n\n    client = Client()\n    client.add_torrent('tom.torrent')\n    client.reactor.start()\n\nif __name__ == '__main__':\n    
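# Editor's note: the removeElements solution above special-cases the head; a
# common simplification (not the author's code) is a dummy head node, which
# makes every deletion the same case. ListNode is the class defined above:
def remove_elements_with_dummy(head, val):
    dummy = ListNode(0)
    dummy.next = head
    prev = dummy
    while prev.next:
        if prev.next.val == val:
            prev.next = prev.next.next  # unlink the matching node
        else:
            prev = prev.next
    return dummy.next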
main()\n","repo_name":"angusb/pyrate","sub_path":"test_driver.py","file_name":"test_driver.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"69825853355","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nSupport for the Twitter v2 API.\n\"\"\"\n\nimport re\nimport json\nimport time\nimport logging\nimport datetime\nimport requests\n\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom requests_oauthlib import OAuth1Session, OAuth2Session\n\nfrom twarc import expansions\nfrom twarc.decorators2 import *\nfrom twarc.version import version\n\n\nlog = logging.getLogger(\"twarc\")\n\n\nclass Twarc2:\n \"\"\"\n A client for the Twitter v2 API.\n \"\"\"\n\n def __init__(\n self,\n consumer_key=None,\n consumer_secret=None,\n access_token=None,\n access_token_secret=None,\n bearer_token=None,\n connection_errors=0,\n metadata=True,\n ):\n \"\"\"\n Instantiate a Twarc2 instance to talk to the Twitter V2+ API.\n\n The client can use either App or User authentication, but only one at a\n time. Whether app auth or user auth is used depends on which credentials\n are provided on initialisation:\n\n 1. If a `bearer_token` is passed, app auth is always used.\n 2. If a `consumer_key` and `consumer_secret` are passed without an\n `access_token` and `access_token_secret`, app auth is used.\n 3. If `consumer_key`, `consumer_secret`, `access_token` and\n `access_token_secret` are all passed, then user authentication\n is used instead.\n\n Args:\n consumer_key (str):\n The API key.\n consumer_secret (str):\n The API secret.\n access_token (str):\n The Access Token\n access_token_secret (str):\n The Access Token Secret\n bearer_token (str):\n Bearer Token, can be generated from API keys.\n connection_errors (int):\n Number of retries for GETs\n metadata (bool):\n Append `__twarc` metadata to results.\n \"\"\"\n self.api_version = \"2\"\n self.connection_errors = connection_errors\n self.metadata = metadata\n self.bearer_token = None\n\n if bearer_token:\n self.bearer_token = bearer_token\n self.auth_type = \"application\"\n\n elif consumer_key and consumer_secret:\n if access_token and access_token_secret:\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n self.auth_type = \"user\"\n\n else:\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.auth_type = \"application\"\n\n else:\n raise ValueError(\n \"Must pass either a bearer_token or consumer/access_token keys and secrets\"\n )\n\n self.client = None\n self.last_response = None\n\n self.connect()\n\n def _search(\n self,\n url,\n query,\n since_id,\n until_id,\n start_time,\n end_time,\n max_results,\n granularity=None,\n sleep_between=0,\n ):\n if granularity:\n params = {}\n params[\"granularity\"] = granularity\n else:\n params = expansions.EVERYTHING.copy()\n\n params[\"query\"] = query\n\n if max_results:\n params[\"max_results\"] = max_results\n if since_id:\n params[\"since_id\"] = since_id\n if until_id:\n params[\"until_id\"] = until_id\n if start_time:\n params[\"start_time\"] = _ts(start_time)\n if end_time:\n params[\"end_time\"] = _ts(end_time)\n\n count = 0\n made_call = time.monotonic()\n\n for response in self.get_paginated(url, params=params):\n # can't return without 'data' if there are no results\n if \"data\" in response:\n count += len(response[\"data\"])\n yield response\n\n else:\n 
log.info(f\"Retrieved an empty page of results.\")\n\n # Calculate the amount of time to sleep, accounting for any\n # processing time used by the rest of the application.\n # This is to satisfy the 1 request / 1 second rate limit\n # on the search/all endpoint.\n time.sleep(max(0, sleep_between - (time.monotonic() - made_call)))\n made_call = time.monotonic()\n\n log.info(f\"No more results for search {query}.\")\n\n def search_recent(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n max_results=100,\n ):\n \"\"\"\n Search Twitter for the given query in the last seven days,\n using the `/search/recent` endpoint.\n\n Calls [GET /2/tweets/search/recent](https://developer.twitter.com/en/docs/twitter-api/tweets/search/api-reference/get-tweets-search-recent)\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime).\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n max_results (int):\n The maximum number of results per request. Max is 100.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/recent\"\n return self._search(\n url, query, since_id, until_id, start_time, end_time, max_results\n )\n\n @requires_app_auth\n def search_all(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n max_results=100, # temp fix for #504\n ):\n \"\"\"\n Search Twitter for the given query in the full archive,\n using the `/search/all` endpoint (Requires Academic Access).\n\n Calls [GET /2/tweets/search/all](https://developer.twitter.com/en/docs/twitter-api/tweets/search/api-reference/get-tweets-search-all)\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime). If none of start_time, since_id, or until_id\n are specified, this defaults to 2006-3-21 to search the entire history of Twitter.\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n max_results (int):\n The maximum number of results per request. Max is 500.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/all\"\n\n # start time defaults to the beginning of Twitter to override the\n # default of the last month. 
Only do this if start_time is not already\n # specified and since_id and until_id aren't being used\n if start_time is None and since_id is None and until_id is None:\n start_time = datetime.datetime(2006, 3, 21, tzinfo=datetime.timezone.utc)\n\n return self._search(\n url,\n query,\n since_id,\n until_id,\n start_time,\n end_time,\n max_results,\n sleep_between=1.05,\n )\n\n @requires_app_auth\n def counts_recent(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n granularity=\"hour\",\n ):\n \"\"\"\n Retrieve counts for the given query in the last seven days,\n using the `/counts/recent` endpoint.\n\n Calls [GET /2/tweets/counts/recent]()\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime).\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n granularity (str):\n Count aggregation level: `day`, `hour`, `minute`.\n Default is `hour`.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/counts/recent\"\n return self._search(\n url, query, since_id, until_id, start_time, end_time, None, granularity\n )\n\n @requires_app_auth\n def counts_all(\n self,\n query,\n since_id=None,\n until_id=None,\n start_time=None,\n end_time=None,\n granularity=\"hour\",\n ):\n \"\"\"\n Retrieve counts for the given query in the full archive,\n using the `/search/all` endpoint (Requires Academic Access).\n\n Calls [GET /2/tweets/counts/all]()\n\n Args:\n query (str):\n The query string to be passed directly to the Twitter API.\n since_id (int):\n Return all tweets since this tweet_id.\n until_id (int):\n Return all tweets up to this tweet_id.\n start_time (datetime):\n Return all tweets after this time (UTC datetime).\n end_time (datetime):\n Return all tweets before this time (UTC datetime).\n granularity (str):\n Count aggregation level: `day`, `hour`, `minute`.\n Default is `hour`.\n\n Returns:\n generator[dict]: a generator, dict for each paginated response.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/counts/all\"\n\n return self._search(\n url,\n query,\n since_id,\n until_id,\n start_time,\n end_time,\n None,\n granularity,\n sleep_between=1.05,\n )\n\n def tweet_lookup(self, tweet_ids):\n \"\"\"\n Lookup tweets, taking an iterator of IDs and returning pages of fully\n expanded tweet objects.\n\n This can be used to rehydrate a collection shared as only tweet IDs.\n Yields one page of tweets at a time, in blocks of up to 100.\n\n Calls [GET /2/tweets](https://developer.twitter.com/en/docs/twitter-api/tweets/lookup/api-reference/get-tweets)\n\n Args:\n tweet_ids (iterable): A list of tweet IDs\n\n Returns:\n generator[dict]: a generator, dict for each batch of 100 tweets.\n \"\"\"\n\n def lookup_batch(tweet_id):\n\n url = \"https://api.twitter.com/2/tweets\"\n\n params = expansions.EVERYTHING.copy()\n params[\"ids\"] = \",\".join(tweet_id)\n\n resp = self.get(url, params=params)\n data = resp.json()\n\n if self.metadata:\n data = _append_metadata(data, resp.url)\n\n return data\n\n tweet_id_batch = []\n\n for tweet_id in tweet_ids:\n tweet_id_batch.append(str(int(tweet_id)))\n\n if len(tweet_id_batch) == 100:\n yield lookup_batch(tweet_id_batch)\n tweet_id_batch = []\n\n if tweet_id_batch:\n yield (lookup_batch(tweet_id_batch))\n\n def 
user_lookup(self, users, usernames=False):\n \"\"\"\n Returns fully populated user profiles for the given iterator of\n user_id or usernames. By default user_lookup expects user ids but if\n you want to pass in usernames set usernames = True.\n\n Yields one page of results at a time (in blocks of at most 100 user\n profiles).\n\n Calls [GET /2/users](https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users)\n\n Args:\n users (iterable): User IDs or usernames to lookup.\n usernames (bool): Parse `users` as usernames, not IDs.\n\n Returns:\n generator[dict]: a generator, dict for each batch of 100 users.\n \"\"\"\n\n if usernames:\n url = \"https://api.twitter.com/2/users/by\"\n else:\n url = \"https://api.twitter.com/2/users\"\n\n def lookup_batch(users):\n params = expansions.USER_EVERYTHING.copy()\n if usernames:\n params[\"usernames\"] = \",\".join(users)\n else:\n params[\"ids\"] = \",\".join(users)\n\n resp = self.get(url, params=params)\n data = resp.json()\n\n if self.metadata:\n data = _append_metadata(data, resp.url)\n\n return data\n\n batch = []\n for item in users:\n batch.append(str(item).strip())\n if len(batch) == 100:\n yield lookup_batch(batch)\n batch = []\n\n if batch:\n yield (lookup_batch(batch))\n\n @catch_request_exceptions\n @requires_app_auth\n def sample(self, event=None, record_keepalive=False):\n \"\"\"\n Returns a sample of all publicly posted tweets.\n\n The sample is based on slices of each second, not truly randomised. The\n same tweets are returned for all users of this endpoint.\n\n If a `threading.Event` is provided and the event is set, the\n sample will be interrupted. This can be used for coordination with other\n programs.\n\n Calls [GET /2/tweets/sample/stream](https://developer.twitter.com/en/docs/twitter-api/tweets/sampled-stream/api-reference/get-tweets-sample-stream)\n\n Args:\n event (threading.Event): Manages a flag to stop the process.\n record_keepalive (bool): whether to output keep-alive events.\n\n Returns:\n generator[dict]: a generator, dict for each tweet.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/sample/stream\"\n params = expansions.EVERYTHING.copy()\n yield from self._stream(url, params, event, record_keepalive)\n\n @requires_app_auth\n def add_stream_rules(self, rules):\n \"\"\"\n Adds new rules to the filter stream.\n\n Calls [POST /2/tweets/search/stream/rules](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/post-tweets-search-stream-rules)\n\n Args:\n rules (list[dict]): A list of rules to add.\n\n Returns:\n dict: JSON Response from Twitter API.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream/rules\"\n return self.post(url, {\"add\": rules}).json()\n\n @requires_app_auth\n def get_stream_rules(self):\n \"\"\"\n Returns a list of rules for the filter stream.\n\n Calls [GET /2/tweets/search/stream/rules](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/get-tweets-search-stream-rules)\n\n Returns:\n dict: JSON Response from Twitter API with a list of defined rules.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream/rules\"\n return self.get(url).json()\n\n @requires_app_auth\n def delete_stream_rule_ids(self, rule_ids):\n \"\"\"\n Deletes rules from the filter stream.\n\n Calls [POST /2/tweets/search/stream/rules](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/post-tweets-search-stream-rules)\n\n Args:\n rule_ids (list[int]): A list of rule ids 
to delete.\n\n Returns:\n dict: JSON Response from Twitter API.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream/rules\"\n return self.post(url, {\"delete\": {\"ids\": rule_ids}}).json()\n\n @requires_app_auth\n def stream(self, event=None, record_keepalive=False):\n \"\"\"\n Returns a stream of tweets matching the defined rules.\n\n Rules can be added or removed out-of-band, without disconnecting.\n Tweet results will contain metadata about the rule that matched it.\n\n If event is set with a threading.Event object, the sample stream\n will be interrupted. This can be used for coordination with other\n programs.\n\n Calls [GET /2/tweets/search/stream](https://developer.twitter.com/en/docs/twitter-api/tweets/filtered-stream/api-reference/get-tweets-search-stream)\n\n Args:\n event (threading.Event): Manages a flag to stop the process.\n record_keepalive (bool): whether to output keep-alive events.\n\n Returns:\n generator[dict]: a generator, dict for each tweet.\n \"\"\"\n url = \"https://api.twitter.com/2/tweets/search/stream\"\n params = expansions.EVERYTHING.copy()\n yield from self._stream(url, params, event, record_keepalive)\n\n def _stream(self, url, params, event, record_keepalive, tries=30):\n \"\"\"\n A generator that handles streaming data from a response and catches and\n logs any request exceptions, sleeps (exponential backoff) and restarts\n the stream.\n\n Args:\n url (str): the streaming endpoint URL\n params (dict): any query paramters to use with the url\n event (threading.Event): Manages a flag to stop the process.\n record_keepalive (bool): whether to output keep-alive events.\n tries (int): the number of times to retry connecting after an error\n Returns:\n generator[dict]: A generator of tweet dicts.\n \"\"\"\n errors = 0\n while True:\n log.info(f\"connecting to stream {url}\")\n resp = self.get(url, params=params, stream=True)\n\n try:\n for line in resp.iter_lines():\n errors = 0\n\n # quit & close the stream if the event is set\n if event and event.is_set():\n log.info(\"stopping response stream\")\n resp.close()\n return\n\n # return the JSON data w/ optional keep-alive\n if not line:\n log.info(\"keep-alive\")\n if record_keepalive:\n yield \"keep-alive\"\n continue\n else:\n data = json.loads(line.decode())\n if self.metadata:\n data = _append_metadata(data, resp.url)\n yield data\n if self._check_for_disconnect(data):\n break\n\n except requests.exceptions.RequestException as e:\n log.warn(\"caught exception during streaming: %s\", e)\n errors += 1\n if errors > tries:\n log.error(f\"too many consecutive errors ({tries}). 
stopping\")\n                    return\n                else:\n                    secs = errors ** 2\n                    log.info(\"sleeping %s seconds before reconnecting\", secs)\n                    time.sleep(secs)\n\n    def _timeline(\n        self,\n        user_id,\n        timeline_type,\n        since_id,\n        until_id,\n        start_time,\n        end_time,\n        exclude_retweets,\n        exclude_replies,\n    ):\n        \"\"\"\n        Helper function for user and mention timelines\n\n        Calls [GET /2/users/:id/tweets](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-tweets)\n        or [GET /2/users/:id/mentions](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-mentions)\n\n        Args:\n            user_id (int): ID of the user.\n            timeline_type (str): timeline type: `tweets` or `mentions`\n            since_id (int): results with a Tweet ID greater than (newer) than specified\n            until_id (int): results with a Tweet ID less than (older) than specified\n            start_time (datetime): oldest UTC timestamp from which the Tweets will be provided\n            end_time (datetime): newest UTC timestamp from which the Tweets will be provided\n            exclude_retweets (boolean): remove retweets from timeline\n            exclude_replies (boolean): remove replies from timeline\n        Returns:\n            generator[dict]: A generator, dict for each page of results.\n        \"\"\"\n\n        url = f\"https://api.twitter.com/2/users/{user_id}/{timeline_type}\"\n\n        params = expansions.EVERYTHING.copy()\n        params[\"max_results\"] = 100\n\n        excludes = []\n        if exclude_retweets:\n            excludes.append(\"retweets\")\n        if exclude_replies:\n            excludes.append(\"replies\")\n\n        if since_id:\n            params[\"since_id\"] = since_id\n        if until_id:\n            params[\"until_id\"] = until_id\n        if start_time:\n            params[\"start_time\"] = _ts(start_time)\n        if end_time:\n            params[\"end_time\"] = _ts(end_time)\n        if len(excludes) > 0:\n            params[\"exclude\"] = \",\".join(excludes)\n\n        count = 0\n        for response in self.get_paginated(url, params=params):\n            # can return without 'data' if there are no results\n            if \"data\" in response:\n                count += len(response[\"data\"])\n                yield response\n            else:\n                log.info(f\"Retrieved an empty page of results for timeline {user_id}\")\n\n        log.info(f\"No more results for timeline {user_id}.\")\n\n    def timeline(\n        self,\n        user,\n        since_id=None,\n        until_id=None,\n        start_time=None,\n        end_time=None,\n        exclude_retweets=False,\n        exclude_replies=False,\n    ):\n        \"\"\"\n        Retrieve up to the 3200 most recent tweets made by the given user.\n\n        Calls [GET /2/users/:id/tweets](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-tweets)\n\n        Args:\n            user (int): ID of the user.\n            since_id (int): results with a Tweet ID greater than (newer) than specified\n            until_id (int): results with a Tweet ID less than (older) than specified\n            start_time (datetime): oldest UTC timestamp from which the Tweets will be provided\n            end_time (datetime): newest UTC timestamp from which the Tweets will be provided\n            exclude_retweets (boolean): remove retweets from timeline results\n            exclude_replies (boolean): remove replies from timeline results\n\n        Returns:\n            generator[dict]: A generator, dict for each page of results.\n        \"\"\"\n        user_id = self._ensure_user_id(user)\n        return self._timeline(\n            user_id,\n            \"tweets\",\n            since_id,\n            until_id,\n            start_time,\n            end_time,\n            exclude_retweets,\n            exclude_replies,\n        )\n\n    def mentions(\n        self,\n        user,\n        since_id=None,\n        until_id=None,\n        start_time=None,\n        end_time=None,\n        exclude_retweets=False,\n        exclude_replies=False,\n    ):\n        \"\"\"\n        Retrieve up to the 800 most recent tweets mentioning the given user.\n\n        Calls [GET 
/2/users/:id/mentions](https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-mentions)\n\n Args:\n user (int): ID of the user.\n since_id (int): results with a Tweet ID greater than (newer) than specified\n until_id (int): results with a Tweet ID less than (older) than specified\n start_time (datetime): oldest UTC timestamp from which the Tweets will be provided\n end_time (datetime): newest UTC timestamp from which the Tweets will be provided\n exclude_retweets (boolean): remove retweets from timeline results\n exclude_replies (boolean): remove replies from timeline results\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n user_id = self._ensure_user_id(user)\n return self._timeline(\n user_id,\n \"mentions\",\n since_id,\n until_id,\n start_time,\n end_time,\n exclude_retweets,\n exclude_replies,\n )\n\n def following(self, user, user_id=None):\n \"\"\"\n Retrieve the user profiles of accounts followed by the given user.\n\n Calls [GET /2/users/:id/following](https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/get-users-id-following)\n\n Args:\n user (int): ID of the user.\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n user_id = self._ensure_user_id(user) if not user_id else user_id\n params = expansions.USER_EVERYTHING.copy()\n params[\"max_results\"] = 1000\n url = f\"https://api.twitter.com/2/users/{user_id}/following\"\n return self.get_paginated(url, params=params)\n\n def followers(self, user, user_id=None):\n \"\"\"\n Retrieve the user profiles of accounts following the given user.\n\n Calls [GET /2/users/:id/followers](https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/get-users-id-followers)\n\n Args:\n user (int): ID of the user.\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n user_id = self._ensure_user_id(user) if not user_id else user_id\n params = expansions.USER_EVERYTHING.copy()\n params[\"max_results\"] = 1000\n url = f\"https://api.twitter.com/2/users/{user_id}/followers\"\n return self.get_paginated(url, params=params)\n\n @catch_request_exceptions\n @rate_limit\n def get(self, *args, **kwargs):\n \"\"\"\n Make a GET request to a specified URL.\n\n Args:\n *args: Variable length argument list.\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n requests.Response: Response from Twitter API.\n \"\"\"\n if not self.client:\n self.connect()\n log.info(\"getting %s %s\", args, kwargs)\n r = self.last_response = self.client.get(*args, timeout=(3.05, 31), **kwargs)\n return r\n\n def get_paginated(self, *args, **kwargs):\n \"\"\"\n A wrapper around the `get` method that handles Twitter token based\n pagination.\n\n Yields one page (one API response) at a time.\n\n Args:\n *args: Variable length argument list.\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n generator[dict]: A generator, dict for each page of results.\n \"\"\"\n\n resp = self.get(*args, **kwargs)\n page = resp.json()\n\n url = args[0]\n\n if self.metadata:\n page = _append_metadata(page, resp.url)\n\n yield page\n\n endings = [\"mentions\", \"tweets\", \"following\", \"followers\"]\n\n # The search endpoints only take a next_token, but the timeline\n # endpoints take a pagination_token instead - this is a bit of a hack,\n # but check the URL ending to see which we should use.\n if any(url.endswith(end) for end in endings):\n token_param = \"pagination_token\"\n else:\n token_param = 
\"next_token\"\n\n while \"meta\" in page and \"next_token\" in page[\"meta\"]:\n if \"params\" in kwargs:\n kwargs[\"params\"][token_param] = page[\"meta\"][\"next_token\"]\n else:\n kwargs[\"params\"] = {token_param: page[\"meta\"][\"next_token\"]}\n\n resp = self.get(*args, **kwargs)\n page = resp.json()\n\n if self.metadata:\n page = _append_metadata(page, resp.url)\n\n yield page\n\n @catch_request_exceptions\n @rate_limit\n def post(self, url, json_data):\n \"\"\"\n Make a POST request to the specified URL.\n\n Args:\n url (str): URL to make a POST request\n json_data (dict): JSON data to send.\n\n Returns:\n requests.Response: Response from Twitter API.\n \"\"\"\n if not self.client:\n self.connect()\n return self.client.post(url, json=json_data)\n\n def connect(self):\n \"\"\"\n Sets up the HTTP session to talk to Twitter. If one is active it is\n closed and another one is opened.\n \"\"\"\n if self.last_response:\n self.last_response.close()\n\n if self.client:\n self.client.close()\n\n if self.auth_type == \"application\" and self.bearer_token:\n log.info(\"creating HTTP session headers for app auth.\")\n auth = f\"Bearer {self.bearer_token}\"\n log.debug(\"authorization: %s\", auth)\n self.client = requests.Session()\n self.client.headers.update({\"Authorization\": auth})\n elif self.auth_type == \"application\":\n log.info(\"creating app auth client via OAuth2\")\n log.debug(\"client_id: %s\", self.consumer_key)\n log.debug(\"client_secret: %s\", self.consumer_secret)\n client = BackendApplicationClient(client_id=self.consumer_key)\n self.client = OAuth2Session(client=client)\n self.client.fetch_token(\n token_url=\"https://api.twitter.com/oauth2/token\",\n client_id=self.consumer_key,\n client_secret=self.consumer_secret,\n )\n else:\n log.info(\"creating user auth client\")\n log.debug(\"client_id: %s\", self.consumer_key)\n log.debug(\"client_secret: %s\", self.consumer_secret)\n log.debug(\"resource_owner_key: %s\", self.access_token)\n log.debug(\"resource_owner_secret: %s\", self.access_token_secret)\n self.client = OAuth1Session(\n client_key=self.consumer_key,\n client_secret=self.consumer_secret,\n resource_owner_key=self.access_token,\n resource_owner_secret=self.access_token_secret,\n )\n\n @requires_app_auth\n def compliance_job_list(self, job_type, status):\n \"\"\"\n Returns list of compliance jobs.\n\n Calls [GET /2/compliance/jobs](https://developer.twitter.com/en/docs/twitter-api/compliance/batch-compliance/api-reference/get-compliance-jobs)\n\n Args:\n job_type (str): Filter by job type - either tweets or users.\n status (str): Filter by job status. Only one of 'created', 'in_progress', 'complete', 'failed' can be specified. 
If not set, returns all.\n\n        Returns:\n            list[dict]: A list of jobs.\n        \"\"\"\n        params = {}\n        if job_type:\n            params[\"type\"] = job_type\n        if status:\n            params[\"status\"] = status\n        result = self.client.get(\n            \"https://api.twitter.com/2/compliance/jobs\", params=params\n        ).json()\n        if \"data\" in result or not result:\n            return result\n        else:\n            raise ValueError(f\"Unknown response from twitter: {result}\")\n\n    @requires_app_auth\n    def compliance_job_get(self, job_id):\n        \"\"\"\n        Returns a compliance job.\n\n        Calls [GET /2/compliance/jobs/{job_id}](https://developer.twitter.com/en/docs/twitter-api/compliance/batch-compliance/api-reference/get-compliance-jobs-id)\n\n        Args:\n            job_id (int): The ID of the compliance job.\n\n        Returns:\n            dict: A compliance job.\n        \"\"\"\n        result = self.client.get(\n            \"https://api.twitter.com/2/compliance/jobs/{}\".format(job_id)\n        )\n        if result.status_code == 200:\n            result = result.json()\n        else:\n            raise ValueError(f\"Error from API, response: {result.status_code}\")\n        if \"data\" in result:\n            return result\n        else:\n            raise ValueError(f\"Unknown response from twitter: {result}\")\n\n    @requires_app_auth\n    def compliance_job_create(self, job_type, job_name, resumable=False):\n        \"\"\"\n        Creates a new compliance job.\n\n        Calls [POST /2/compliance/jobs](https://developer.twitter.com/en/docs/twitter-api/compliance/batch-compliance/api-reference/post-compliance-jobs)\n\n        Args:\n            job_type (str): The type of job to create. Either 'tweets' or 'users'.\n            job_name (str): Optional name for the job.\n            resumable (bool): Whether or not the job upload is resumable.\n\n        Returns:\n            dict: The created compliance job.\n        \"\"\"\n        payload = {}\n        payload[\"type\"] = job_type\n        payload[\"resumable\"] = resumable\n        if job_name:\n            payload[\"name\"] = job_name\n\n        result = self.client.post(\n            \"https://api.twitter.com/2/compliance/jobs\", json=payload\n        )\n\n        if result.status_code == 200:\n            result = result.json()\n        else:\n            raise ValueError(f\"Error from API, response: {result.status_code}\")\n        if \"data\" in result:\n            return result\n        else:\n            raise ValueError(f\"Unknown response from twitter: {result}\")\n\n    def _id_exists(self, user):\n        \"\"\"\n        Returns True if the user id exists\n        \"\"\"\n        try:\n            error_name = next(self.user_lookup([user]))[\"errors\"][0][\"title\"]\n            return error_name != \"Not Found Error\"\n        except KeyError:\n            return True\n\n    def _ensure_user_id(self, user):\n        \"\"\"\n        Always return a valid user id, look up if not numeric.\n        \"\"\"\n        user = str(user)\n        is_numeric = re.match(r\"^\\d+$\", user)\n\n        if len(user) > 15 or (is_numeric and self._id_exists(user)):\n            return user\n        else:\n            results = next(self.user_lookup([user], usernames=True))\n            if \"data\" in results and len(results[\"data\"]) > 0:\n                return results[\"data\"][0][\"id\"]\n            elif is_numeric:\n                return user\n            else:\n                raise ValueError(f\"No such user {user}\")\n\n    def _ensure_user(self, user):\n        \"\"\"\n        Always return a valid user object.\n        \"\"\"\n        user = str(user)\n        is_numeric = re.match(r\"^\\d+$\", user)\n\n        lookup = []\n        if len(user) > 15 or (is_numeric and self._id_exists(user)):\n            lookup = expansions.ensure_flattened(list(self.user_lookup([user])))\n        else:\n            lookup = expansions.ensure_flattened(\n                list(self.user_lookup([user], usernames=True))\n            )\n        if lookup:\n            return lookup[-1]\n        else:\n            raise ValueError(f\"No such user {user}\")\n
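\n    # Resolution sketch for _ensure_user_id (illustrative; the handles and ids\n    # below are made up, not from the original source):\n    #\n    #     client._ensure_user_id(\"jack\")   ->  id looked up via user_lookup()\n    #     client._ensure_user_id(\"12345\")  ->  \"12345\" when that id exists,\n    #                                          otherwise the id of @12345\n    #     client._ensure_user_id(\"1234567890123456\")  ->  returned as-is\n    #                                          (over 15 chars, must be an id)\n\n    def _check_for_disconnect(self, data):\n        \"\"\"\n        Look for disconnect errors in a response, and reconnect if found. 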
The\n        function returns True if a disconnect was found and False otherwise.\n        \"\"\"\n        for error in data.get(\"errors\", []):\n            if error.get(\"disconnect_type\") == \"OperationalDisconnect\":\n                log.info(\"Received operational disconnect message, reconnecting\")\n                self.connect()\n                return True\n        return False\n\n\ndef _ts(dt):\n    \"\"\"\n    Return ISO 8601 / RFC 3339 datetime in UTC. If no timezone is specified it\n    is assumed to be in UTC. The Twitter API does not accept microseconds.\n\n    Args:\n        dt (datetime): a `datetime` object to format.\n\n    Returns:\n        str: an ISO 8601 / RFC 3339 datetime in UTC.\n    \"\"\"\n    if dt.tzinfo:\n        dt = dt.astimezone(datetime.timezone.utc)\n    else:\n        dt = dt.replace(tzinfo=datetime.timezone.utc)\n    return dt.isoformat(timespec=\"seconds\")\n\n\ndef _utcnow():\n    \"\"\"\n    Return _now_ in ISO 8601 / RFC 3339 datetime in UTC.\n\n    Returns:\n        str: Current timestamp in UTC as an ISO 8601 string.\n    \"\"\"\n    return datetime.datetime.now(datetime.timezone.utc).isoformat(timespec=\"seconds\")\n\n\ndef _append_metadata(result, url):\n    \"\"\"\n    Appends `__twarc` metadata to the result.\n    Adds the full URL with parameters used, the version\n    and current timestamp in seconds.\n\n    Args:\n        result (dict): API Response to append data to.\n        url (str): URL of the API endpoint called.\n\n    Returns:\n        dict: API Response with appended metadata\n    \"\"\"\n    result[\"__twarc\"] = {\"url\": url, \"version\": version, \"retrieved_at\": _utcnow()}\n    return result\n
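\n\n# Worked example for _ts (illustrative, not part of the original module):\n#\n#     >>> _ts(datetime.datetime(2021, 1, 1, 12, 0, 0))\n#     '2021-01-01T12:00:00+00:00'\n#\n# A naive datetime is treated as already being UTC; an aware one is converted.\n
","repo_name":"CharleoY/MultiModal-Project","sub_path":"Crawllers/TwitterCrawler/twarc/client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":36617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"17505776876","text":"from tcr.wallet import Wallet\nfrom tcr.cardano import Cardano\nfrom tcr.database import Database\nimport logging\nimport argparse\nimport tcr.command\nimport tcr.nftmint\nimport traceback\nimport json\nimport requests\nimport urllib.request\nimport PIL.Image\nimport io\nimport os\nimport datetime\nimport shutil\nimport pathlib\n\nMINT_PAYMENT = 10000000\n\npotency_lut = {\n    'low': 1,\n    'medium-low': 2,\n    'medium': 3,\n    'medium-high': 4,\n    'high': 5,\n}\n\n# Generates the mutant image.\n#\n# @param normie_path Path to the initial image for the AI algorithm.\n# @param mutant_path The image generated by the AI algorithm should be saved to\n#        this location.\n# @param potency A value ranging from low (1) to high (5). The AI algorithm\n#        should produce a more mutated image for higher potency.\n#\n# @return true if successful.\ndef mutate_normie(normie_path: str, mutant_path: str, potency: int) -> bool:\n    logger = logging.getLogger('mainnet')\n    logger.info('Mutate: potency = {}, input = {}'.format(potency, normie_path))\n\n    # Run the AI mutation algorithm\n    shutil.copyfile(normie_path, mutant_path)\n\n    logger.info('Mutate: output = {}'.format(mutant_path))\n    return os.path.exists(mutant_path)\n\n# Process the normies package.\n#\n# Unzips the normies file then processes each request in the file by calling\n# mutate_normie.  Saves the output in a sub directory and zips everything\n# together.\n#\n# @param network Just a name for the logger. 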
The Cardano network isn't used in\n#        this process.\n# @param normies_file zip file package.\n#\ndef process_normies(network: str, normies_file: str) -> None:\n    logger = logging.getLogger(network)\n\n    if not os.path.exists('normies'):\n        os.mkdir('normies')\n\n    if not os.path.exists('mutants'):\n        os.mkdir('mutants')\n\n    normies_subdir = 'normies/{}'.format(pathlib.Path(normies_file).stem)\n    if not os.path.exists(normies_subdir):\n        os.mkdir(normies_subdir)\n\n    mutants_subdir = 'mutants/{}'.format(pathlib.Path(normies_file).stem)\n    if not os.path.exists(mutants_subdir):\n        os.mkdir(mutants_subdir)\n\n    shutil.unpack_archive(normies_file, normies_subdir)\n\n    normies_list = None\n    with open(normies_subdir + '/normies.json', 'r') as file:\n        normies_list = json.load(file)\n    if normies_list == None:\n        logger.error('Unable to parse {}'.format(\"normies.json\"))\n        raise Exception('Unable to parse {}'.format(\"normies.json\"))\n\n    mutants_list = []\n    for normie in normies_list:\n        normie_image = normies_subdir + '/' + normie['normie-image']\n        mutant_image = mutants_subdir + '/' + normie['normie-fingerprint'] + '_mutant.png'\n        if not mutate_normie(normie_image, mutant_image, normie['potency']):\n            logger.error('Error mutating')\n            raise Exception('Error mutating')\n\n        mutant = {\n            'from': normie['from'],\n            'tx': normie['tx'],\n            'normie-fingerprint': normie['normie-fingerprint'],\n            'mutation-fingerprint': normie['mutation-fingerprint'],\n            'mutant-image': pathlib.Path(mutant_image).name\n        }\n        mutants_list.append(mutant)\n\n    with open(mutants_subdir + '/mutants.json', 'w') as file:\n        file.write(json.dumps(mutants_list, indent=4))\n\n    shutil.make_archive(mutants_subdir+'_mutants', 'zip', mutants_subdir)\n    logger.info('Mutants Package: {}.zip'.format(mutants_subdir+'_mutants'))\n\n# Creates the normies package.\n#\n# This process requires cardano-node and cardano-db-sync to be running.\n#\n# @param network \"mainnet\" or \"testnet\"\n# @param wallet_name A previously created wallet to search for incoming UTXOs.\n# @param requests_file JSON file of requests\ndef process_requests(network: str, wallet_name: str, requests_file: str) -> None:\n    if not network in tcr.command.networks:\n        raise Exception('Invalid Network: {}'.format(network))\n\n    # General setup.  The logger must exist before the error paths below use it.\n    logger = logging.getLogger(network)\n\n    # Open the whitelist to make sure only approved projects are mutated.\n    mutate_whitelist = {}\n    with open('mutate_whitelist.json', 'r') as file:\n        mutate_whitelist = json.load(file)\n    if mutate_whitelist == None:\n        logger.error('Unable to parse {}'.format(\"mutate_whitelist.json\"))\n        raise Exception('Unable to parse {}'.format(\"mutate_whitelist.json\"))\n\n    # Open the wallet to monitor for incoming payments and initialize the\n    # payment address if necessary\n    wallet = Wallet(wallet_name, network)\n    if not wallet.exists():\n        logger.error('Wallet: <{}> does not exist'.format(wallet_name))\n        raise Exception('Wallet: <{}> does not exist'.format(wallet_name))\n\n    addr_index = Wallet.ADDRESS_INDEX_MUTATE_REQUEST\n    if wallet.get_payment_address(addr_index) == None:\n        wallet.setup_address(addr_index)\n\n    cardano = Cardano(network, '{}_protocol_parameters.json'.format(network))\n\n    tip = cardano.query_tip()\n    cardano.query_protocol_parameters()\n    tip_slot = tip['slot']\n\n    database = Database('{}.ini'.format(network))\n    database.open()\n    latest_slot = database.query_latest_slot()\n    sync_progress = database.query_sync_progress()\n    logger.info('Cardano Node Tip Slot: {}'.format(tip_slot))\n    logger.info('  Database Latest Slot: 
{}'.format(latest_slot))\n logger.info('Sync Progress: {}'.format(sync_progress))\n\n # Open all the mutation requests\n requests = None\n with open(requests_file, 'r') as file:\n requests = json.load(file)\n if requests == None:\n logger.error('Unable to parse {}'.format(requests_file))\n raise Exception('Unable to parse {}'.format(requests_file))\n\n # Populate UTXOs with the address of the sender and stake address of the\n # sender\n (utxos, total_lovelace) = cardano.query_utxos(wallet,\n [wallet.get_payment_address(addr_index, delegated=True),\n wallet.get_payment_address(addr_index, delegated=False)])\n for utxo in utxos:\n inputs = database.query_utxo_inputs(utxo['tx-hash'])\n utxo['from'] = inputs[0]['address']\n utxo['from_stake'] = database.query_stake_address(utxo['from'])\n\n # Setup directories for output files\n if not os.path.exists('normie_pkg'):\n os.mkdir('normie_pkg')\n\n subdir = 'normie_pkg/{}'.format(datetime.datetime.today().strftime('%Y_%m_%d'))\n if not os.path.exists(subdir):\n os.mkdir(subdir)\n\n normies_pkg = []\n # Process the request and build the mutation package\n logger.info('Mutation Address: {}'.format(wallet.get_payment_address(addr_index)))\n for r in requests['requests']:\n logger.info('Process: {}: {}/{}'.format(r['name'], r['normie'], r['mutation']))\n normie_owner = database.query_owner_by_fingerprint(r['normie'])\n mutation_owner = database.query_owner_by_fingerprint(r['mutation'])\n\n if normie_owner != mutation_owner:\n logger.error('Owner mismatch for {}: {} != {}'.format(r['name'], r['normie'], r['mutation']))\n continue\n\n (normie_policy, normie_md) = database.query_nft_metadata(r['normie'])\n (mutation_policy, mutation_md) = database.query_nft_metadata(r['mutation'])\n\n if mutation_policy != '7135025a3c23035cdcff4ef8ae3849248afd369466ea1abef61a4157':\n logger.error('Invalid mutation policy: {}'.format(mutation_policy))\n continue\n\n if normie_policy not in mutate_whitelist:\n logger.error('Unapproved normie policy: {}'.format(normie_policy))\n continue\n\n # search for a payment that matches the request\n payment = None\n for utxo in utxos:\n if utxo['from_stake'] == normie_owner:\n payment = utxo\n break\n\n if payment == None:\n logger.error('No payment found')\n continue\n\n # remove this one from the list so it doesn't get processed more than\n # once\n utxos.remove(payment)\n\n if payment['amount'] != MINT_PAYMENT or len(payment['assets']) != 0:\n logger.error('Invalid payment: {} / {}'.format(payment['amount'], payment['assets']))\n continue\n\n cid = normie_md['image'][7:]\n #download_url = 'https://ipfs.io/ipfs/{}'.format(cid)\n download_url = 'https://infura-ipfs.io/ipfs/{}'.format(cid)\n logger.info('Download Normie: {}'.format(download_url))\n\n fd = urllib.request.urlopen(download_url)\n if fd.status != 200:\n logger.info('HTTP Error: {}'.format(fd.status))\n continue\n\n image_file = io.BytesIO(fd.read())\n im = PIL.Image.open(image_file)\n im.save(subdir + '/' + r['normie'] + '.png', format='png')\n normie = {\n 'from': payment['from'],\n 'tx': '{}:{}'.format(payment['tx-hash'], payment['tx-ix']),\n 'potency': potency_lut[mutation_md['potency']],\n 'normie-image': r['normie']+'.png',\n 'normie-fingerprint': r['normie'],\n 'mutation-fingerprint': r['mutation']\n }\n normies_pkg.append(normie)\n\n with open('{}/normies.json'.format(subdir), 'w') as f:\n f.write(json.dumps(normies_pkg, indent=4))\n\n shutil.make_archive(subdir, 'zip', subdir)\n logger.info('Normies Package: {}.zip'.format(subdir))\n\ndef main():\n parser = 
argparse.ArgumentParser(add_help=False)\n parser.add_argument('--requests', required=False,\n action='store',\n type=str,\n metavar='FILE',\n help='Filename of mutation request JSON.')\n parser.add_argument('--wallet', required=False,\n action='store',\n type=str,\n metavar='NAME',\n help='Wallet name to check payments.')\n parser.add_argument('--normies', required=False,\n action='store',\n type=str,\n metavar='FILE',\n help='The output file from --requests / input for AI algorithm')\n\n network = 'mainnet'\n tcr.nftmint.setup_logging(network, 'mutate')\n\n args = parser.parse_args()\n if args.requests != None:\n if args.wallet == None:\n raise Exception('--wallet required with --requests')\n process_requests(network, args.wallet, args.requests)\n elif args.normies != None:\n process_normies(network, args.normies)\n else:\n raise Exception('--requests or --normies required')\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n print('')\n print('')\n print('EXCEPTION: {}'.format(e))\n print('')\n traceback.print_exc()\n","repo_name":"kris-76/thecardroom","sub_path":"tcr/mutate.py","file_name":"mutate.py","file_ext":"py","file_size_in_byte":10947,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"73"} +{"seq_id":"43990424581","text":"# -*- mode: python; -*-\r\n\r\nimport os\r\n\r\nImport(\"env\")\r\nImport(\"get_option\")\r\nImport(\"has_option\")\r\n\r\nif not env.TargetOSIs('windows'):\r\n Return()\r\n\r\nimport re\r\nimport subprocess\r\nimport winreg\r\n\r\nenv = env.Clone()\r\n\r\nenv.SConscript(\r\n dirs=[\r\n 'ca',\r\n ],\r\n exports=[\r\n 'env',\r\n ],\r\n)\r\n\r\nenv['WIX'] = os.environ.get('WIX')\r\nenv['WIXPATH'] = r'$WIX\\bin'\r\nenv['WIXHEAT'] = r'$WIXPATH\\heat.exe'\r\nenv['WIXCANDLE'] = r'$WIXPATH\\candle.exe'\r\nenv['WIXLIGHT'] = r'$WIXPATH\\light.exe'\r\nenv['WIXUIEXT'] = r'$WIXPATH\\WixUIExtension.dll'\r\nenv['WIXUTILEXT'] = r'$WIXPATH\\WixUtilExtension.dll'\r\n\r\nif not 'VCREDISTMERGEMODULEPATH' in env['MSVS']:\r\n print(\"SCons tool setup did not configure the path to the vcredist merge modules, disabling MSI installer\")\r\n Return()\r\n\r\nsourcesList = [ \"BinaryFragment.wxs\",\r\n \"FeatureFragment.wxs\",\r\n \"LicensingFragment.wxs\",\r\n \"UIFragment.wxs\",\r\n ]\r\n\r\n# Need to do this in order to get scons to translate path separators into native format\r\nbuildDir = env.Dir(\"$BUILD_DIR\").path\r\ntoolBuildDir = buildDir + r'\\mongo'\r\n\r\nenterprisebase = 'src\\mongo\\db\\modules\\enterprise'\r\nenterpriseToolBuildDir = buildDir + r'\\mongo\\db\\modules\\enterprise'\r\n\r\n# Set up parameters to pass to wix -\r\n#\r\n# msi_edition - \"Enterprise\" or \"Standard\"\r\n# msi_platform - \"x64\" or \"x86\"\r\n# msi_flavor - \"2008R2Plus\" or \"\"\r\n#\r\n\r\nmsi_flavor = '2008R2Plus'\r\nmsi_platform = 'x64'\r\n\r\n# Enterprise\r\nif 'enterprise' in env['MONGO_MODULES']:\r\n msi_edition = 'Enterprise'\r\n upgrade_code = 'ccd97a61-fd75-4645-b6a9-f1cd4b8235c1'\r\n# Community\r\nelse:\r\n if get_option('ssl') == 'on':\r\n msi_edition = 'SSL'\r\n upgrade_code = 'ea0c66de-dc03-4c28-9f2b-60edacc44ed7'\r\n else:\r\n msi_edition = 'Standard'\r\n upgrade_code = '45544685-4981-4f1d-a1fe-302539b110bf'\r\n\r\nsourcesList.append(\"Installer_64.wxs\")\r\n\r\nsources = [\"wxs/\" + file for file in sourcesList]\r\nobjects = [\"$BUILD_DIR/msi/\" + file.replace(\".wxs\", \".wixobj\") for file in sourcesList]\r\n\r\nfull_version = env['MONGO_VERSION'].partition('-')[0]\r\n\r\n# major version is the x.y, not the 
x.y.z\r\nmajor_version = full_version\r\nmv = major_version.split('.')\r\nmajor_version = \"%s.%s\" % (mv[0], mv[1])\r\n\r\n# We must regenerate the upgrade codes for each major release.\r\n# i.e., 3.0, 3.2, 3.4 need new codes but not 3.2.1, 3.2.2, etc\r\n# The build will now fail when the major version is bumped to prevent mistakes.\r\n# When the upgrade codes are bumped, remember to raise the version number to the next major version.\r\n# On each update to the upgrade codes:\r\n# 1. Generate new GUIDs\r\n# 2. Ensure each MSI gets a different GUID. This is used to identify products.\r\n#    It allows upgrade from 3.2.0 to 3.2.1 in place instead of side-by-side.\r\n# 3. Update the check for the next major release below so we bump the GUIDs in the future.\r\n#\r\nif float(major_version) > 4.4:\r\n    # If you are troubleshooting this error, see the comment above\r\n    env.FatalError(\"The upgrade codes are out of date for this release. Please \\n\" +\r\n            \"replace the existing GUIDs listed in this file with new GUIDs so \" +\r\n            \"side-by-side installation of major versions (i.e. 3.2, and 3.4) is \" +\r\n            \"supported.\")\r\n\r\n# Currently, we are planning to key the same upgrade code for each\r\n# (msi_edition, msi_platform, msi_flavor) combination\r\n# and change MSI ProductId on minor updates, 2.6.0 -> 2.6.1; we let Wix do automatic\r\n# GUID generation for us rather than build a database of GUIDs in our build system\r\n# For major updates, we are going to create a new directory/productid/upgrade_code ie, 2.6 -> 3.0\r\n\r\n\r\n# candle: compile .wxs files into .wixobjs\r\nenv.Command(objects,\r\n            sources,\r\n            '\"$WIXCANDLE\" -wx'\r\n            # cannot have anything other than x.x.x.x in version string.\r\n            # we should choose a fourth version number that reflects pre-ness.\r\n            ' -dMongoDBMajorVersion=' + major_version +\r\n            ' -dMongoDBVersion=' + full_version +\r\n            ' -dLicenseSource=distsrc'\r\n            r' -dEnterpriseBase=' + enterprisebase + '\\\\'\r\n            ' -dBinarySource=' + \"\\\"$DESTDIR\\\\$PREFIX_BINDIR\\\"\" +\r\n            ' -dMergeModulesBasePath=' + \"\\\"${MSVS['VCREDISTMERGEMODULEPATH']}\\\"\" +\r\n            ' -dMergeModuleFileCRT=' + env.GetMergeModuleNameForFeature('CRT') +\r\n            ' -dEdition=' + msi_edition +\r\n            ' -d\"ProductId=*\\\"'\r\n            ' -dUpgradeCode=' + upgrade_code +\r\n            ' -dCustomActionDll=' + \"\\\"$DESTDIR\\\\$PREFIX_BINDIR\\\\mongoca.dll\\\"\" +\r\n            ' -dConfiguration=Release'\r\n            ' -dOutDir=' + buildDir + r'\\msi'\r\n            ' -dPlatform=' + msi_platform +\r\n            ' -dFlavor=' + msi_flavor +\r\n            r' -dProjectDir=buildscripts\\packaging\\msi\\\\'\r\n            ' -dProjectName=MongoDB'\r\n            ' -dTargetDir=' + buildDir + r'\\msi'\r\n            ' -dTargetExt=.msi'\r\n            ' -dTargetFileName=${SERVER_ARCHIVE}'\r\n            r' -dSaslSource=c:\\sasl\\bin'\r\n            r' -dSnmpSource=c:\\snmp\\bin'\r\n            r' -dSslSource=' + env['WINDOWS_OPENSSL_BIN'] +\r\n            ' -out ' + buildDir + r'\\msi\\\\'\r\n            ' -arch ' + msi_platform +\r\n            ' -ext \"$WIXUIEXT\"'\r\n            ' -ext \"$WIXUTILEXT\"'\r\n            ' $SOURCES')\r\n\r\n#light: link .objs into an msi\r\npre_msi = \"$BUILD_DIR/msi/${SERVER_DIST_BASENAME}.pre.msi\"\r\n\r\n# Suppress VC140_CRT_CRT.MSM Internal Consistency Errors\r\n# ICE03 - Suppress \"String overflow\"\r\n# -- https://msdn.microsoft.com/en-us/library/windows/desktop/aa369037(v=vs.85).aspx\r\n# ICE82 - Suppress \"duplicate sequence number\"\r\n# -- https://msdn.microsoft.com/en-us/library/windows/desktop/aa368798(v=vs.85).aspx\r\n# ICE30 - Suppress \"different components install same file\"\r\n# -- mongod.exe is installed in two different components but only one is ever used during an 
install\r\n# so this consistency check can be ignored.\r\n# -- https://msdn.microsoft.com/en-us/library/windows/desktop/aa368954(v=vs.85).aspx\r\n\r\npre_msi_cmd = env.Command(pre_msi,\r\n objects,\r\n '\"$WIXLIGHT\" -out ${TARGET} -wx -cultures:null -sice:ICE82 -sice:ICE03 -sice:ICE30'\r\n ' -ext \"$WIXUIEXT\"'\r\n ' -ext \"$WIXUTILEXT\"'\r\n ' ${SOURCES}')\r\nenv.NoCache(pre_msi_cmd)\r\n\r\n# Generated Dependencies\r\nenv.Depends(pre_msi_cmd, '$DESTDIR/$PREFIX_BINDIR/mongo.exe')\r\nenv.Depends(pre_msi_cmd, '$DESTDIR/$PREFIX_BINDIR/mongod.exe')\r\nenv.Depends(pre_msi_cmd, '$DESTDIR/$PREFIX_BINDIR/mongos.exe')\r\nenv.Depends(pre_msi_cmd, '$DESTDIR/$PREFIX_BINDIR/mongoca.dll')\r\nenv.Depends(pre_msi_cmd, '$DESTDIR/$PREFIX_BINDIR/Install-Compass.ps1')\r\n\r\n# Source Dependencies\r\nenv.Depends(pre_msi_cmd, '#buildscripts/packaging/msi/mongod.yaml')\r\n\r\nif 'enterprise' in env['MONGO_MODULES']:\r\n env.Depends(pre_msi_cmd, \"$DESTDIR/$PREFIX_BINDIR/mongodecrypt.exe\")\r\n env.Depends(pre_msi_cmd, \"$DESTDIR/$PREFIX_BINDIR/mongoldap.exe\")\r\n env.Depends(pre_msi_cmd, \"$DESTDIR/$PREFIX_BINDIR/mongocryptd.exe\")\r\n\r\nmsi = \"$BUILD_DIR/msi/${SERVER_DIST_BASENAME}.msi\"\r\nenv.Command(msi,\r\n pre_msi,\r\n r'$PYTHON buildscripts\\msitrim.py ${SOURCES} ${TARGET}')\r\nenv.AlwaysBuild(msi)\r\nenv.NoCache(msi)\r\n\r\nenv.Alias( \"msi\" , msi )\r\n","repo_name":"y123456yz/reading-and-annotate-mongodb-4.4","sub_path":"mongo-r4.4.6/src/mongo/installer/msi/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":7186,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"73"} +{"seq_id":"75027366315","text":"# count vowels\nfrom collections import defaultdict\nwords = 'supercalifragilisticexpialidocious'\n\nwords_dict = defaultdict(int)\nfor e in words:\n if e in ['a', 'e', 'i', 'o', 'u']:\n words_dict[e] += 1\n\nprint(words_dict)\n","repo_name":"ihongChen/Advanced-Computer-Programming-in-Python","sub_path":"src/chapter2_data_structure/count_vowels.py","file_name":"count_vowels.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72050947117","text":"from django.urls import path\nfrom peerToPeerPayment import views\n\nurlpatterns = [\n path('add_user/', views.create_user_view, name=\"add_user\"),\n path('deposit/', views.deposit, name=\"deposit\"),\n path('withdraw/', views.withdraw, name=\"withdraw\"),\n path('transfer/', views.transfer, name=\"transfer\"),\n path('check_balance/', views.check_balance, name=\"check_balance\"),\n]\n","repo_name":"esiebomaj/wafi-test","sub_path":"peerToPeerPayment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23635974680","text":"from more_itertools import quantify\nimport requests\nimport time\nfrom supabase import create_client, Client\n\ndef main():\n print(\"Initializing database...\")\n\n supabase: Client = create_client(\n \"https://jnbnzuyiuuaocbltwewu.supabase.co\", \n \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImpuYm56dXlpdXVhb2NibHR3ZXd1Iiwicm9sZSI6Im\" +\n \"Fub24iLCJpYXQiOjE2NjY3MjExMjEsImV4cCI6MTk4MjI5NzEyMX0.vnmH8LhJevM1ju-l9d0MnRXL6BmGNjOTw5XS0vO6NHY\"\n )\n\n num_pages = 10\n page_size = 10000\n to_dollars = 0.9965\n profit_margin = 1.05\n\n load_categories = False\n\n if load_categories:\n categories_respose = 
requests.get(\"https://api.bigbuy.eu/rest/catalog/categories.json?isoCode=en\",\n        headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n\n        categories_respose.raise_for_status()\n        categories = categories_respose.json()\n\n        print(\"Loading Categories...\")\n        for cat in categories:\n            id = cat[\"id\"]\n            name = cat[\"name\"]\n            parentCategory = cat[\"parentCategory\"]\n            images = cat[\"urlImages\"]\n\n            data = supabase.table(\"Category\").insert({\n                \"id\": id,\n                \"name\": name,\n                \"parentCategory\": parentCategory,\n                \"imageUrl\": images[1]\n            }).execute()\n            print(data)\n    else: print(\"Skipping Categories...\")\n\n    print(\"Loading Products...\")\n    for i in range(0, num_pages):\n        print(\"Loading Page... \" + str(i))\n\n        products_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/productsstockavailable.json?isoCode=en&pageSize=\" + str(page_size) + \"&page=\" + str(i), \n        headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n        products_response.raise_for_status()\n        products = products_response.json()\n\n        for product in products:\n            try:\n                id = product[\"id\"]\n\n                print(\"Loading Product Info... \" + str(id))\n                product_info_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/productinformation/\" + str(id) + \".json?isoCode=en\", \n                headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n                product_info_response.raise_for_status()\n                product_info = product_info_response.json()\n\n                print(\"Loading Product Images... \" + str(id))\n                product_images_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/productimages/\" + str(id) + \".json?isoCode=en\", \n                headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n                product_images_response.raise_for_status()\n                product_images = product_images_response.json()\n\n                print(\"Loading product category info... \" + str(id))\n                product_category_response = requests.get(\"https://api.bigbuy.eu/rest/catalog/product/\" + str(id) + \".json\", \n                headers = {\"Authorization\": \"Bearer NjU5YzM3MzllNjM5YzFiYzNkMTkxZmQ0NTMyNGI4MzU0NzViZDAyOTI3NWZlZDliYzdkNmRjYWM5OTRkNjc1Nw\"})\n                product_category_response.raise_for_status()\n                product_category = product_category_response.json()\n\n                sku = product_category[\"sku\"]\n                category = product_category[\"category\"]\n                wholesale_price = round((product_category[\"wholesalePrice\"] * to_dollars) * profit_margin, 2)\n                retail_price = round((product_category[\"retailPrice\"] * to_dollars) * profit_margin, 2)\n                in_shop_price = round((product_category[\"inShopsPrice\"] * to_dollars) * profit_margin, 2)\n\n                name = product_info[0][\"name\"]\n                description = product_info[0][\"description\"]\n                images = product_images[\"images\"]\n\n                quantity = product[\"stocks\"][0][\"quantity\"]\n\n                data = supabase.table(\"Product\").insert({\n                    \"id\": id,\n                    \"sku\": sku,\n                    \"category\": category,\n                    \"wholesalePrice\": wholesale_price,\n                    \"retailPrice\": retail_price,\n                    \"inShopPrice\": in_shop_price,\n                    \"name\": name, \n                    \"description\": description,\n                    \"images\": images,\n                    \"quantity\": quantity\n                }).execute()\n\n                print(data)\n                time.sleep(5.0)\n\n            except Exception:\n                print(\"Can't load item... 
\" + str(id))\n continue\n\nif __name__ == \"__main__\":\n run_database = True\n\n if run_database:\n main()\n","repo_name":"seiyadragon/rosscostore","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"30092222162","text":"from coffea import hist, processor\nfrom copy import deepcopy\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom tqdm import tqdm\nimport pickle\nimport lz4.frame\nimport numpy\nimport pandas\nimport awkward\nfrom functools import partial\n\nfrom ..executor import _futures_handler\nfrom coffea.nanoevents import NanoEventsFactory, schemas\nfrom coffea.nanoevents.mapping import SimplePreloadedColumnSource\n\nimport pyspark\nimport pyspark.sql.functions as fn\nfrom pyspark.sql.types import BinaryType, StringType, StructType, StructField\n\nfrom jinja2 import Environment, PackageLoader, select_autoescape\nfrom coffea.util import awkward\n\nlz4_clevel = 1\n\n\n# this is a UDF that takes care of summing histograms across\n# various spark results where the outputs are histogram blobs\ndef agg_histos_raw(series, processor_instance, lz4_clevel):\n goodlines = series[series.str.len() > 0]\n if goodlines.size == 1: # short-circuit trivial aggregations\n return goodlines[0]\n outhist = processor_instance.accumulator.identity()\n for line in goodlines:\n outhist.add(pickle.loads(lz4.frame.decompress(line)))\n return lz4.frame.compress(pickle.dumps(outhist), compression_level=lz4_clevel)\n\n\n@fn.pandas_udf(BinaryType(), fn.PandasUDFType.GROUPED_AGG)\ndef agg_histos(series):\n global processor_instance, lz4_clevel\n return agg_histos_raw(series, processor_instance, lz4_clevel)\n\n\ndef reduce_histos_raw(df, processor_instance, lz4_clevel):\n histos = df['histos']\n mask = (histos.str.len() > 0)\n outhist = processor_instance.accumulator.identity()\n for line in histos[mask]:\n outhist.add(pickle.loads(lz4.frame.decompress(line)))\n return pandas.DataFrame(data={'histos': numpy.array([lz4.frame.compress(pickle.dumps(outhist), compression_level=lz4_clevel)], dtype='O')})\n\n\n@fn.pandas_udf(StructType([StructField('histos', BinaryType(), True)]), fn.PandasUDFType.GROUPED_MAP)\ndef reduce_histos(df):\n global processor_instance, lz4_clevel\n return reduce_histos_raw(df, processor_instance, lz4_clevel)\n\n\nclass SparkExecutor(object):\n _template_name = 'spark.py.tmpl'\n\n def __init__(self):\n self._cacheddfs = None\n self._rawresults = None\n self._counts = None\n self._env = Environment(loader=PackageLoader('coffea.processor',\n 'templates'),\n autoescape=select_autoescape(['py'])\n )\n\n @property\n def counts(self):\n return self._counts\n\n def __call__(self, spark, dfslist, theprocessor, output, thread_workers,\n use_df_cache, schema, status=True, unit='datasets', desc='Processing'):\n # processor needs to be a global\n global processor_instance, coffea_udf, nano_schema\n processor_instance = theprocessor\n if schema is None:\n schema = schemas.BaseSchema\n if not issubclass(schema, schemas.BaseSchema):\n raise ValueError(\"Expected schema to derive from BaseSchema (%s)\" % (str(schema.__name__)))\n nano_schema = schema\n # get columns from processor\n columns = processor_instance.columns\n cols_w_ds = ['dataset'] + columns\n # make our udf\n tmpl = self._env.get_template(self._template_name)\n render = tmpl.render(cols=columns)\n exec(render)\n\n # cache the input datasets if it's not already done\n if 
self._counts is None:\n self._counts = {}\n # go through each dataset and thin down to the columns we want\n for ds, (df, counts) in dfslist.items():\n self._counts[ds] = counts\n\n def spex_accumulator(total, result):\n ds, df = result\n total[ds] = df\n\n if self._cacheddfs is None:\n self._cacheddfs = {}\n cachedesc = 'caching' if use_df_cache else 'pruning'\n with ThreadPoolExecutor(max_workers=thread_workers) as executor:\n futures = set()\n for ds, (df, counts) in dfslist.items():\n futures.add(executor.submit(self._pruneandcache_data, ds, df, cols_w_ds, use_df_cache))\n _futures_handler(futures, self._cacheddfs, status, unit, cachedesc, spex_accumulator, None)\n\n with ThreadPoolExecutor(max_workers=thread_workers) as executor:\n futures = set()\n for ds, df in self._cacheddfs.items():\n co_udf = coffea_udf\n futures.add(executor.submit(self._launch_analysis, ds, df, co_udf, cols_w_ds))\n # wait for the spark jobs to come in\n self._rawresults = {}\n _futures_handler(futures, self._rawresults, status, unit, desc, spex_accumulator, None)\n\n for ds, bitstream in self._rawresults.items():\n if bitstream is None:\n raise Exception('No pandas dataframe returned from spark in dataset: %s, something went wrong!' % ds)\n if bitstream.empty:\n raise Exception('The histogram list returned from spark is empty in dataset: %s, something went wrong!' % ds)\n bits = bitstream[bitstream.columns[0]][0]\n output.add(pickle.loads(lz4.frame.decompress(bits)))\n\n def _pruneandcache_data(self, ds, df, columns, cacheit):\n if cacheit:\n return ds, df.select(*columns).cache()\n return ds, df.select(*columns)\n\n def _launch_analysis(self, ds, df, udf, columns):\n histo_map_parts = (df.rdd.getNumPartitions() // 20) + 1\n return ds, df.select(udf(*columns).alias('histos')) \\\n .withColumn('hpid', fn.spark_partition_id() % histo_map_parts) \\\n .repartition(histo_map_parts, 'hpid') \\\n .groupBy('hpid').apply(reduce_histos) \\\n .groupBy().agg(agg_histos('histos')) \\\n .toPandas()\n\n\nspark_executor = SparkExecutor()\n","repo_name":"LBJ-Wade/coffea","sub_path":"coffea/processor/spark/spark_executor.py","file_name":"spark_executor.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12007930590","text":"progresses = [93, 30, 55]\nspeeds = [1, 30, 5]\n\n\ndef solution(progresses, speeds):\n answer = []\n size = len(progresses)\n to_do = [i for i in range(size - 1, -1, -1)]\n while len(to_do) > 0:\n cnt = 0\n for i in range(size):\n progresses[i] += speeds[i]\n\n while progresses[to_do[-1]] >= 100:\n to_do.pop()\n cnt += 1\n if cnt > 0:\n answer.append(cnt)\n\n return answer","repo_name":"dla0510/Algorithm","sub_path":"04-23/2-1)Singly_Linked_List.py","file_name":"2-1)Singly_Linked_List.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13858739436","text":"\"\"\"\nThis file is used to update GT with .csv file created by Yujie\nFormat:\nir_name, frame_name, target_position, screen_index, pupil_coord, purkj_coord\nWe only use target_position and screen_index.\n\"\"\"\nimport numpy as np\nimport argparse\nimport os\nimport datetime\nimport pandas as pd\nimport shutil\nimport cv2\nimport random\n\ndef extract_red_channel(image):\n assert image.shape[2] == 3 # check if this is a color image\n new_image = image[:,:,2]\n return new_image\n\n\ndef preprocess_image(cams, root_path):\n \"\"\" Preprocess images. 
Extract red channel, turn image upside down vertically. \"\"\"\n    for i in cams:\n        dir_path = os.path.join(root_path, 'video%d'%i)\n        dirs = [x for x in os.listdir(dir_path) if x.startswith('rgb_spot')]\n        for directory in dirs:\n            full_path = os.path.join(dir_path, directory)\n            gray_dir = full_path.replace('rgb_', '')\n            if not os.path.exists(gray_dir):\n                os.makedirs(gray_dir)\n            files = os.listdir(full_path)\n            for f in files:\n                image = cv2.imread(os.path.join(full_path, f))\n                red_image = extract_red_channel(image)\n                cv2.imwrite(os.path.join(gray_dir, f), red_image)\n\n\ndef compute_GT(infos):\n    \"\"\" Compute ground truth target positions (x,y,z) in cm from the parsed csv rows, size: (num_images, 3). \"\"\"\n    # TODO: numbers below are not super accurate, need to think about the influence\n    # display settings\n    \"\"\"\n\n                                           -----------------\n                                           -               -\n             -----                         -               -\n             -----            -            -       -       -\n    eye      -   -          ----- -        -               -\n             -----                         -----------------\n\n\n              0                1                   2\n             33cm            50.5cm              119cm    (depth is inaccurate)\n    W x H (w x h in pixel):\n    0: 20 x 15 cm (2048 x 1536)\n    1: 20 x 15 cm (2048 x 1536)\n    2: 70.8 x 39.8 cm (2560 x 1440)\n    3: 20 x 15 cm (2048 x 1536), move 0 5cm further\n    4: 20 x 15 cm (2048 x 1536), move 1 5cm further\n    \"\"\"\n    height_px = [1536, 1536, 1440, 1920, 1536]\n    width_px = [2048, 2048, 2560, 1080, 2048]\n    width_cm = [20, 20, 70.8, 20, 20]\n    height_cm = [x*1.0*y/z for x,y,z in zip(width_cm, height_px, width_px)]\n    cm_to_pix_scale = [x*1.0/y for x,y in zip(width_px, width_cm)]\n\n    # spot settings of reflected display\n    spot_depth = [33, 54.5, 119, 38, 59.5] # cm\n    offset_x = [-0.35, -0.35, -0.35, -0.35, -0.35] # cm, offset of eye center and reflected display center in the x direction\n    offset_y = [1.075, 1.4, 0, 1.075, 1.4] # cm, offset of eye center and reflected display center in the y direction\n\n    # bench type settings\n    # Note: Origin is center of right eye\n    \"\"\"\n    xxxxxxxxxxxxxxxxxxxxx            O is the origin\n    xx     xx     xx    xx\n    xx eye xx     xx 0  xx\n    xxxxxxxxx     xxxxxxxxx\n    \"\"\"\n    new_target_position = np.zeros((len(infos), 3))\n    for i in range(len(infos)):\n        screen_id = infos.iloc[i][3]\n        target_x = float((infos.iloc[i][2]).split(',')[0][1:])\n        target_y = float((infos.iloc[i][2]).split(',')[1][:-1])\n\n        x = target_x / cm_to_pix_scale[screen_id]\n        y = target_y / cm_to_pix_scale[screen_id]\n        ############ upside down cameras ############\n        # the per-screen check below was disabled; y is flipped for all screens\n        #if screen_id < 3: # should reflect 0, 1, 2:\n        y = -y\n        #############################################\n        x = x + offset_x[screen_id]\n        y = y + offset_y[screen_id]\n        z = spot_depth[screen_id]\n\n        new_target_position[i] = np.array([x,y,z])\n    return new_target_position\n
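\n\n# Worked example (illustrative, not from the original file): a target drawn at\n# pixel (1024, 768) on screen 0, where cm_to_pix_scale[0] = 2048/20 = 102.4 px/cm:\n#     x =  1024 / 102.4  - 0.35  =  9.65  cm\n#     y = -(768 / 102.4) + 1.075 = -6.425 cm   (y flipped, then offset applied)\n#     z = 33 cm\n\n\ndef write_csv(filename, target_position, infos, dirname):\n    \"\"\" Write final ground truth data to .csv file. 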
\"\"\"\n \"\"\" Format: (unit is cm)\n name spot frame x y z pupil_x pupil_y pupil_radius purkj1_x purkj1_y purkj3_x purkj3_y\n conny_4_3 1 image_00501.png 5.46 0.0 50.0 23 26 23 12 14 166 277\n \"\"\"\n num_images = len(infos)\n pupils = np.zeros((num_images, 3))\n purkj1 = np.zeros((num_images, 2))\n purkj3 = np.zeros((num_images, 2))\n for i in range(num_images):\n pupils[i, 0] = float(infos.iloc[i,4].split(',')[0][1:])\n pupils[i, 1] = float(infos.iloc[i,4].split(',')[1])\n pupils[i, 2] = float(infos.iloc[i,4].split(',')[2][:-1])\n purkj1[i, 0] = float(infos.iloc[i,5].split(',')[0][2:])\n purkj1[i, 1] = float(infos.iloc[i,5].split(',')[1][:-1])\n purkj3[i, 0] = float(infos.iloc[i,5].split(',')[2][2:])\n try:\n purkj3[i, 1] = float(infos.iloc[i,5].split(',')[3][:-1])\n except:\n purkj3[i, 1] = float(infos.iloc[i,5].split(',')[3][:-2])\n df = pd.DataFrame({\"name\":[dirname]*num_images, \n \"spot\":infos.iloc[:,3],\n \"frame\":infos.iloc[:,1] + '.png',\n \"x\":target_position[:,0],\n \"y\":target_position[:,1],\n \"z\":target_position[:,2],\n \"pupil_x\":pupils[:,0],\n \"pupil_y\":pupils[:,1],\n \"pupil_radius\":pupils[:,2],\n \"purkj1_x\":purkj1[:,0],\n \"purkj1_y\":purkj1[:,1],\n \"purkj3_x\":purkj3[:,0],\n \"purkj3_y\":purkj3[:,1]}) \n df.to_csv(filename, index=False)\n return df\n\n \ndef main(args):\n dirname = os.path.join(args.root, args.dirname)\n csv_file = dirname + '.csv'\n\n infos = pd.read_csv(csv_file)\n print(\"===> Computing ground truth\")\n gt = compute_GT(infos) # (num_images, 3)\n filename = os.path.join('/data/connylu/eye_data/worldnew.csv')\n print(\"===> Writing final .csv file\")\n df = write_csv(filename, gt, infos, args.dirname)\n \n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description='Generate ground truth.')\n parser.add_argument('--root', type=str, default='/data/connylu/eye_data',\n help='root path')\n parser.add_argument('-d', '--dirname', type=str, required=True,\n help='directory name, world or eye, e.g. 
world')\n#    parser.add_argument('-ipd', type=float, required=True,\n#                        help='IPD of user')\n#    parser.add_argument('--num_cam', type=int, default = 8,\n#                        help='number of cameras')\n    args = parser.parse_args()\n    main(args)\n","repo_name":"criminalking/COMP790","sub_path":"eyetracker/code/get_data/extract_images_purkinje.py","file_name":"extract_images_purkinje.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"22828245846","text":"from model.organism import Organism\nfrom pyglet.gl import *\nfrom util.draw import circle\nfrom util.vec import Vec\n\n\nclass Predator(Organism):\n\n    top_speed = 100\n    max_lifespan = 100\n    lifespan_increase = 0.4\n    reproduction_increase = 0.0005\n    increase_limit = 3\n\n    def __init__(self, x, y):\n        super(Predator, self).__init__(x, y)\n        self.lifespan = 3\n        self.reproduction_chance = 0.003\n\n    def draw(self):\n        glColor3f(0.2, 0.4, 0.5)\n        circle(self.p, 10)\n\n    def pathfind(self, prey):\n        if prey:\n            direction = prey.p - self.p\n            absolute = direction.abs()\n\n            if absolute != 0:\n                self.v = Vec(\n                    (self.top_speed * direction.x) / absolute,\n                    (self.top_speed * direction.y) / absolute\n                )\n            else:\n                self.v = Vec(0, 0)\n        else:\n            self.v = Vec(0, 0)\n\n    def eat(self):\n        if (self.age - self.lifespan < self.increase_limit and\n                self.lifespan < self.max_lifespan):\n            self.lifespan += self.lifespan_increase\n            self.reproduction_chance += self.reproduction_increase\n","repo_name":"yanneyanne/stim","sub_path":"stim/model/predator.py","file_name":"predator.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"27025272459","text":"# Write a function that accepts an arbitrary number of arguments of any kind\n# The arguments may be nested lists and tuples containing numbers\n# and other lists and tuples. 
Example function call:\n# foo(1, 2, [3, 4, (5, 6, 0)], a=(10, 11), b=(3, 4, [5, 6, [7, 8], []]))\n# The function must return the product\n# and the sum of all non-zero nested numbers.\n# Cyclic references are possible in the arguments.\n# An example of such an argument: a = [1, 2, 3]; a.append(a)\n# If a cyclic reference is detected, notify the user and\n# return None.\n\n\ndef summ(*args, **kwargs):\n\n    def listSumm(lists):\n        results_summ = 0\n        results_work = 1\n        lists = list(lists)\n        for i in lists:\n            if type(i) == int:\n                results_summ += i\n                if i != 0:\n                    results_work = results_work * i\n            else:\n                lists.extend(i)\n        return results_summ, results_work\n\n    list_kw = kwargs.values()\n    res_kw = listSumm(list_kw)\n    list_ar = list(args)\n    res_ar = listSumm(list_ar)\n    results_summ = res_kw[0] + res_ar[0]\n    results_work = res_kw[1] * res_ar[1]\n\n    return results_summ, results_work\n\n\na = summ(1, 2, [3, 4, (5, 6, 0)], a=(10, 11), b=(3, 4, [5, 6, [7, 8], []]))\nprint('Sum of all numbers =', a[0])\nprint('Product of all numbers =', a[1])\n","repo_name":"VIAlexRus/ArtezioTraining","sub_path":"Lesson 3/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
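A hedged sketch for the nested-sum task above: the sample solution in task_2.py does not implement the cycle detection the task asks for. One minimal approach (the function and variable names here are illustrative, not part of the original file) tracks the ids of the containers on the current traversal path:

def foo(*args, **kwargs):
    """Return (product, sum) of all non-zero nested numbers, or None on a cycle."""
    total = 0
    product = 1

    def walk(item, seen):
        nonlocal total, product
        if isinstance(item, (list, tuple)):
            # A container whose id is already on the current path is a cycle.
            if id(item) in seen:
                raise ValueError('cyclic reference')
            for sub in item:
                walk(sub, seen | {id(item)})
        elif isinstance(item, (int, float)) and item != 0:
            total += item
            product *= item

    try:
        walk(list(args) + list(kwargs.values()), set())
    except ValueError:
        print('Cyclic reference detected')
        return None
    return product, total

+{"seq_id":"69901847915","text":"import tensorflow as tf\nimport numpy as np\ntf.set_random_seed(66)\n\nx_data = [[1,2,1,1],\n          [2,1,3,2],\n          [3,1,3,4],\n          [4,1,5,5],\n          [1,7,5,5],\n          [1,2,5,6],\n          [1,6,6,6],\n          [1,7,6,7]]\n\ny_data = [[0,0,1], #2\n          [0,0,1],\n          [0,0,1],\n          [0,1,0], #1\n          [0,1,0],\n          [0,1,0],\n          [1,0,0], #0\n          [1,0,0]]\n\nx = tf.placeholder('float', [None,4])\ny = tf.placeholder('float', [None,3])\n\nw = tf.Variable(tf.random_normal([4,3]),name='weight')\nb = tf.Variable(tf.random_normal([1,3]), name = 'bias')\n# match the shape of y\n\nhypothesis = tf.nn.softmax(tf.matmul(x,w) + b) # wrap the result and pass it to the next layer\n# (n,4)*(4,3)=(n,3) + (1,3) = (n,3)\n\n# cost = tf.reduce_mean(tf.square(hypothesis - y))\n# the conventional mse\n\n# multi-class classification uses categorical crossentropy\nloss = tf.reduce_mean(-tf.reduce_mean(y*tf.log(hypothesis), axis=1))\n\n# use an optimizer to minimize the loss\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer()) # initialize variables\n\n    for step in range(2001):\n        _, cost_val = sess.run([optimizer, loss], feed_dict={x:x_data, y:y_data})\n        if step % 200 ==0:\n            print(step, cost_val)\n\n    # predict\n    a = sess.run(hypothesis, feed_dict={x:[[1,11,7,9]]})\n    print(a,sess.run(tf.argmax(a,1)))\n\n# 1600 0.23607637\n# 1800 0.2275399\n# 2000 0.22019444\n# [[0.7818437 0.17601936 0.04213692]] [0]","repo_name":"sswwd95/Study","sub_path":"tf114(tensorflow1)/tf12_softmax.py","file_name":"tf12_softmax.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"31517147516","text":"import collections\nimport functools\nimport importlib\nfrom savory_pie.auth import authorization, authorization_adapter\nfrom savory_pie.resources import EmptyParams\nfrom savory_pie.django.validators import validate, ValidationError\nfrom savory_pie.errors import SavoryPieError\n\n\ndef read_only_noop(func):\n    @functools.wraps(func)\n    def inner(self, *args, **kwargs):\n        if not self._read_only:\n            return func(self, *args, **kwargs)\n    return inner\n\n\nclass ResourceClassUser(type):\n    def __new__(cls, name, bases, d):\n\n        def init_resource_class(self, rclass):\n            self._arg_resource_class = rclass\n            if 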
isinstance(rclass, str) or isinstance(rclass, unicode):\n self._real_resource_class = None\n else:\n self._real_resource_class = rclass\n\n def getter(self):\n if self._real_resource_class is None and \\\n self._arg_resource_class is not None:\n rclass = self._arg_resource_class\n n = rclass.rindex('.')\n module = importlib.import_module(rclass[:n])\n self._real_resource_class = getattr(module, rclass[n+1:])\n return self._real_resource_class\n\n def setter(self, value):\n self._real_resource_class = value\n\n deler = None\n d['init_resource_class'] = init_resource_class\n d['_resource_class'] = property(getter, setter, deler, '')\n return type.__new__(cls, name, bases, d)\n\n\nclass Field(object):\n @property\n def name(self):\n name = getattr(self, '_attribute', None) or getattr(self, '_full_attribute', None)\n if not name:\n raise SavoryPieError(u'Unable to determine name for field: {0}'.format(self))\n return name\n\n def schema(self, ctx, **kwargs):\n schema = kwargs.pop('schema', {})\n if getattr(self, '_type', None):\n return dict({'type': self._type.__name__}.items() + schema.items())\n return schema\n\n\nclass AttributeField(Field):\n \"\"\"\n Simple Field that translates an object attribute to/from a dict.\n\n Parameters:\n\n ``attribute``\n attribute on the Model can be a multi-level expression - like\n related_entity.attribute\n\n ``type``\n expecting type of value -- int, bool, etc.\n\n ``published_property``\n optional -- name exposed in the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n ``optional``\n optional -- if missing, will not throw a ValidationError\n\n .. code-block:: python\n\n AttributeField('name', type=str)\n\n .. code-block:: javascript\n\n {'name': obj.name}\n\n .. code-block:: python\n\n AttributeField('other.age', type=int)\n\n .. 
code-block:: javascript\n\n {'age': obj.other.age}\n \"\"\"\n def __init__(self,\n attribute,\n type,\n published_property=None,\n use_prefetch=False,\n read_only=False,\n validator=None,\n optional=False,\n permission=None):\n self._full_attribute = attribute\n self._type = type\n self._published_property = published_property\n self._read_only = read_only\n self._optional = optional\n self.validator = validator or []\n self.permission = permission\n\n def _compute_property(self, ctx):\n if self._published_property is not None:\n return ctx.formatter.convert_to_public_property(self._published_property)\n else:\n return ctx.formatter.convert_to_public_property(self._bare_attribute)\n\n @property\n def _bare_attribute(self):\n return self._full_attribute.split('.')[-1]\n\n @property\n def _attrs(self):\n return self._full_attribute.split('.')\n\n def _get_object(self, root_obj):\n obj = root_obj\n for attr in self._attrs[:-1]:\n obj = getattr(obj, attr)\n if obj is None:\n return None\n return obj\n\n def _get(self, obj):\n obj = self._get_object(obj)\n if obj is None:\n return None\n else:\n return getattr(obj, self._bare_attribute)\n\n def _set(self, obj, value):\n obj = self._get_object(obj)\n # TODO: handle None\n return setattr(obj, self._bare_attribute, value)\n\n @read_only_noop\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n attr = self._compute_property(ctx)\n if attr not in source_dict:\n if self._optional:\n return\n raise ValidationError(self, {'missingField': attr,\n 'target': type(target_obj).__name__})\n with ctx.target(target_obj):\n self._set(\n target_obj,\n self.to_python_value(ctx, source_dict[self._compute_property(ctx)])\n )\n\n def handle_outgoing(self, ctx, source_obj, target_dict):\n target_dict[self._compute_property(ctx)] = self.to_api_value(\n ctx,\n self._get(source_obj)\n )\n\n def to_python_value(self, ctx, api_value):\n return ctx.formatter.to_python_value(self._type, api_value)\n\n def to_api_value(self, ctx, python_value):\n return ctx.formatter.to_api_value(self._type, python_value)\n\n def validate_resource(self, ctx, key, resource, value):\n error_dict = {}\n if isinstance(self.validator, collections.Iterable):\n for validator in self.validator:\n validator.find_errors(error_dict, ctx, key, resource, self, value)\n else:\n self.validator.find_errors(error_dict, ctx, key, resource, self, value)\n return error_dict\n\n\nclass URIResourceField(Field):\n \"\"\"\n Field that exposes just the URI of related entity\n\n\n Parameters:\n\n ``attribute``\n name of the relationship between the parent object and the related\n object may only be single level\n\n ``resource_class``\n a ModelResource -- used to represent the related object needs to be\n fully addressable\n\n ``published_property``\n optional -- name exposed in the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n .. code-block:: python\n\n URIResourceField('other', OtherResource)\n\n .. 
code-block:: javascript\n\n        {'other': '/api/other/{pk}'}\n    \"\"\"\n    __metaclass__ = ResourceClassUser\n\n    def __init__(self,\n                 attribute,\n                 resource_class,\n                 published_property=None,\n                 read_only=False,\n                 validator=None,\n                 permission=None):\n        self._attribute = attribute\n        self.init_resource_class(resource_class)\n        self._published_property = published_property\n        self._read_only = read_only\n        self.validator = validator or []\n        self.permission = permission\n\n    def _compute_property(self, ctx):\n        if self._published_property is not None:\n            return ctx.formatter.convert_to_public_property(self._published_property)\n        else:\n            return ctx.formatter.convert_to_public_property(self._attribute)\n\n    @read_only_noop\n    @authorization(authorization_adapter)\n    def handle_incoming(self, ctx, source_dict, target_obj):\n        uri = source_dict[self._compute_property(ctx)]\n        if uri is not None:\n            resource = ctx.resolve_resource_uri(uri)\n            if resource is None:\n                raise ValueError('invalid URI {0}: '.format(uri))\n\n            setattr(target_obj, self._attribute, resource.model)\n        else:\n            setattr(target_obj, self._attribute, None)\n\n    def handle_outgoing(self, ctx, source_obj, target_dict):\n        sub_model = getattr(source_obj, self._attribute)\n        if sub_model is not None:\n            resource = self._resource_class(sub_model)\n            target_dict[self._compute_property(ctx)] = ctx.build_resource_uri(resource)\n        else:\n            target_dict[self._compute_property(ctx)] = None\n\n    def validate_resource(self, ctx, key, resource, source_dict):\n        error_dict = {}\n        # TODO how do we validate this guy?\n        return error_dict\n\n\nclass CompleteURIResourceField(Field):\n    \"\"\"\n    Field that exposes just the URI of the complete entity of itself.\n    This is useful when a resource explicitly omits resource_uris (for example,\n    to avoid recursive inclusion); this field then links to the URI of the full\n    resource version of itself. It adds a hard-coded 'completeResourceUri' entry\n    to the target dictionary.\n\n    Parameters:\n\n    ``resource_class``\n        a ModelResource -- used to represent the related object needs to be\n        fully addressable\n\n    .. code-block:: python\n\n        CompleteURIResourceField(OtherResource)\n\n    .. code-block:: javascript\n\n        {'completeResourceUri': '/api/other/{pk}'}\n    \"\"\"\n    __metaclass__ = ResourceClassUser\n\n    def __init__(self, resource_class, read_only=False, permission=None):\n        self.init_resource_class(resource_class)\n        self._read_only = read_only\n        self.permission = permission\n\n    @authorization(authorization_adapter)\n    def handle_incoming(self, ctx, source_dict, target_obj):\n        pass\n\n    def handle_outgoing(self, ctx, source_obj, target_dict):\n        resource = self._resource_class(source_obj)\n        property_name = ctx.formatter.convert_to_public_property('complete_resource_uri')\n        target_dict[property_name] = ctx.build_resource_uri(resource)\n\n\nclass URIListResourceField(Field):\n    \"\"\"\n    Field that exposes a list of URIs of related entities, allowing for a\n    many-to-many relationship.\n\n\n    Parameters:\n\n    ``attribute``\n        name of the relationship between the parent object and the related\n        object may only be single level\n\n    ``resource_class``\n        a ModelResource -- used to represent the related object needs to be\n        fully addressable\n\n    ``published_property``\n        optional -- name exposed in the API\n\n    ``read_only``\n        optional -- this api will never try and set this value\n\n    .. code-block:: python\n\n        URIListResourceField('others', OtherResource)\n\n    .. 
code-block:: javascript\n\n {'others': ['/api/other/{pk_1}', '/api/other/{pk_2}']\n \"\"\"\n __metaclass__ = ResourceClassUser\n\n def __init__(self,\n attribute,\n resource_class,\n published_property=None,\n read_only=False,\n validator=None,\n permission=None):\n self._attribute = attribute\n self.init_resource_class(resource_class)\n self._published_property = published_property\n self._read_only = read_only\n self.validator = validator or []\n self.permission = permission\n\n def _compute_property(self, ctx):\n if self._published_property is not None:\n return ctx.formatter.convert_to_public_property(self._published_property)\n else:\n return ctx.formatter.convert_to_public_property(self._attribute)\n\n def get_iterable(self, value):\n return value\n\n @read_only_noop\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n attribute = getattr(target_obj, self._attribute)\n\n db_keys = set()\n db_models = {}\n for model in self.get_iterable(attribute):\n resource = self._resource_class(model)\n db_models[resource.key] = model\n db_keys.add(resource.key)\n\n new_models = []\n request_keys = set()\n\n for resource_uri in source_dict[self._compute_property(ctx)]:\n resource = ctx.resolve_resource_uri(resource_uri)\n if resource:\n request_keys.add(resource.key)\n\n if not resource.key in db_keys:\n new_models.append(resource.model)\n else:\n raise SavoryPieError(u'Unable to resolve resource uri {0}'.format(resource_uri))\n\n # Delete before add to prevent problems with unique constraints\n models_to_remove = [db_models[key] for key in db_keys - request_keys]\n # If the FK is not nullable the attribute will not have a remove\n if hasattr(attribute, 'remove'):\n attribute.remove(*models_to_remove)\n else:\n for model in models_to_remove:\n model.delete()\n\n if hasattr(attribute, 'add'):\n attribute.add(*new_models)\n else:\n for obj in new_models:\n through_parameters = {\n attribute.source_field_name: target_obj,\n attribute.target_field_name: obj\n }\n attribute.through.objects.create(**through_parameters)\n\n def handle_outgoing(self, ctx, source_obj, target_dict):\n attrs = self._attribute.split('.')\n attribute = source_obj\n\n for attr in attrs:\n attribute = getattr(attribute, attr)\n if attribute is None:\n return None\n\n resource_uris = []\n for model in self.get_iterable(attribute):\n model_resource = self._resource_class(model)\n resource_uris.append(ctx.build_resource_uri(model_resource))\n target_dict[self._compute_property(ctx)] = resource_uris\n\n\nclass SubObjectResourceField(Field):\n \"\"\"\n Field that embeds a single related resource into the parent object\n\n Parameters:\n\n ``attribute``\n name of the relationship between the parent object and the related\n object may only be single level\n\n ``resource_class``\n a ModelResource -- used to represent the related object\n\n ``published_property``\n optional -- name exposed in the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n ``validator``\n optional -- a ResourceValidator, or list/tuple of ResourceValidators, to\n validate the data in the related object\n\n .. code-block:: python\n\n SubObjectResourceField('other', OtherResource)\n\n .. 
code-block:: javascript\n\n {'other': {'age': 9}}\n \"\"\"\n __metaclass__ = ResourceClassUser\n\n def __init__(self,\n attribute,\n resource_class,\n published_property=None,\n read_only=False,\n validator=None,\n permission=None):\n self._attribute = attribute\n self.init_resource_class(resource_class)\n self._published_property = published_property\n self._read_only = read_only\n self.validator = validator or []\n self.permission = permission\n\n def _compute_property(self, ctx):\n if self._published_property is not None:\n return ctx.formatter.convert_to_public_property(self._published_property)\n else:\n return ctx.formatter.convert_to_public_property(self._attribute)\n\n def get_subresource(self, ctx, source_dict, target_obj):\n \"\"\"\n Extention point called by :meth:~`savory_pie.fields.handle_incoming` to\n build a resource class around the target attribute or return None if it\n is not found. Can try looking by resource_uri etc.\n \"\"\"\n sub_source_dict = source_dict[self._compute_property(ctx)]\n resource = None\n # TODO: clean up later per bug JRUT-4708\n if sub_source_dict is not None and 'resourceUri' in sub_source_dict:\n resource = ctx.resolve_resource_uri(sub_source_dict['resourceUri'])\n else:\n try:\n attribute = getattr(target_obj, self._attribute)\n except AttributeError:\n return None\n\n resource = self._resource_class(attribute)\n\n return resource\n\n def get_submodel(self, ctx, source_object):\n return getattr(source_object, self._attribute, None)\n\n def pre_save(self, model):\n return True\n\n @read_only_noop\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n if not source_dict:\n setattr(target_obj, self._attribute, None)\n else:\n sub_resource = self.get_subresource(ctx, source_dict, target_obj)\n\n if not sub_resource: # creating a new resource\n sub_resource = self._resource_class.create_resource()\n\n sub_source_dict = source_dict[self._compute_property(ctx)]\n\n # this is to get around django-orm limitations, where in particular\n # if you have a one-to-one field, you can't set it to None since orm doesn't like it\n # so only set the attr to None, if what's coming in is None and what's there is not already None\n if sub_source_dict is None:\n if hasattr(target_obj, self._attribute) \\\n and getattr(target_obj, self._attribute) is not None \\\n and getattr(target_obj, self._attribute).pk:\n setattr(target_obj, self._attribute, None)\n else:\n # Use the pre_save property, to determine whether we need to set the attribute before or after put\n # in the case of a ReverseSingleRelatedObject (pre_save is False), then we need to set the attribute first\n # before calling put. This is to get around the Django ORM restrictions.\n if not self.pre_save(target_obj):\n setattr(target_obj, self._attribute, sub_resource.model)\n\n with ctx.target(target_obj):\n sub_resource.put(\n ctx,\n sub_source_dict,\n skip_validation=getattr(self, '_skip_validation', False)\n )\n\n if self.pre_save(target_obj):\n setattr(target_obj, self._attribute, sub_resource.model)\n\n def handle_outgoing(self, ctx, source_obj, target_dict):\n sub_model = self.get_submodel(ctx, source_obj)\n if sub_model is None:\n target_dict[self._compute_property(ctx)] = None\n else:\n target_dict[self._compute_property(ctx)] =\\\n self._resource_class(sub_model).get(ctx, EmptyParams())\n\n def validate_resource(self, ctx, key, resource, source_dict):\n return validate(ctx, key + '.' 
+ self.name, self._resource_class, source_dict)\n\n\nclass IterableField(Field):\n \"\"\"\n Field that embeds a many relationship into the parent object\n\n Parameters:\n\n ``attribute``\n name of the relationship between the parent object and the related\n objects can be a multi-level expression - like related_entity.many_to_many_field\n\n ``resource_class``\n a ModelResource -- used to represent the related objects\n\n ``published_property``\n optional name exposed through the API\n\n ``read_only``\n optional -- this api will never try and set this value\n\n ``iterable_factory``\n optional -- a callable which is passed the attribute and returns an\n iterable this fields exports\n\n .. code-block:: python\n\n RelatedManagerField('others', OtherResource)\n\n .. code-block:: javascript\n\n {'others': [{'age': 6}, {'age': 1}]}\n \"\"\"\n __metaclass__ = ResourceClassUser\n\n def __init__(self,\n attribute,\n resource_class,\n published_property=None,\n read_only=False,\n iterable_factory=None,\n validator=None,\n permission=None):\n self._attribute = attribute\n self.init_resource_class(resource_class)\n self._published_property = published_property\n self._read_only = read_only\n self._iterable_factory = iterable_factory\n self.validator = validator or []\n self.permission = permission\n\n def _compute_property(self, ctx):\n if self._published_property is not None:\n return ctx.formatter.convert_to_public_property(self._published_property)\n else:\n return ctx.formatter.convert_to_public_property(self._attribute)\n\n def _get_resource(self, ctx, attribute, model_dict):\n resource = None\n if 'resourceUri' in model_dict:\n resource = ctx.resolve_resource_uri(model_dict['resourceUri'])\n elif '_id' in model_dict: # TODO what if you give an id that is not in the db?\n # TODO get key without the extra db lookup\n model = self._resource_class.get_from_queryset(\n attribute.all(),\n model_dict['_id']\n )\n resource = self._resource_class(model)\n return resource\n\n def get_iterable(self, value):\n return value\n\n @property\n def _bare_attribute(self):\n return self._attribute.split('.')[-1]\n\n @read_only_noop\n @authorization(authorization_adapter)\n def handle_incoming(self, ctx, source_dict, target_obj):\n attribute = getattr(target_obj, self._attribute)\n\n # We are doing this outside of get_iterable so that subclasses can not\n # remove this override.\n if self._iterable_factory:\n iterable = self._iterable_factory(attribute)\n else:\n iterable = self.get_iterable(attribute)\n\n db_keys = set()\n db_models = {}\n for model in iterable:\n resource = self._resource_class(model)\n db_models[resource.key] = model\n db_keys.add(resource.key)\n\n new_models = []\n new_put_data = []\n request_keys = set()\n request_models = {}\n for model_dict in source_dict.get(self._compute_property(ctx), []):\n resource = self._get_resource(ctx, attribute, model_dict)\n if resource:\n request_models[resource.key] = resource.model\n request_keys.add(resource.key)\n # Check to see if the resource has already been saved in the DB\n if resource.key in db_keys:\n with ctx.target(resource.model):\n resource.put(ctx, model_dict)\n # If the resource has been saved to the db and the model is\n # a RelatedManager that is a through (existence of add attribute)\n # must add it to the new model since it can create a model based\n # on just the association.\n if not hasattr(attribute, 'add'):\n new_models.append(resource.model)\n else:\n # if the model is not in the database, must save it\n 
new_models.append(resource.model)\n\n            else:\n                # if the resource does not exist then this is a new instance\n                new_put_data.append(model_dict)\n\n        # Delete before add to prevent problems with unique constraints\n        models_to_remove = [db_models[key] for key in db_keys - request_keys]\n        # If the FK is not nullable the attribute will not have a remove\n        if hasattr(attribute, 'remove'):\n            attribute.remove(*models_to_remove)\n        else:\n            for obj in models_to_remove:\n                # ManyRelatedManager\n                if hasattr(attribute, 'through'):\n                    through_params = {\n                        attribute.source_field_name: target_obj,\n                        attribute.target_field_name: obj\n                    }\n                    # only delete intermediary model instance if it already exists\n                    for instance in attribute.through.objects.filter(**through_params):\n                        instance.delete()\n                # RelatedManager\n                else:\n                    obj.delete()\n\n        # Delay all the new creates until after the deletes for unique\n        # constraints again\n        for model_dict in new_put_data:\n            model_resource = self._resource_class.create_resource()\n            with ctx.target(target_obj):\n                model_resource.put(ctx, model_dict, save=True)\n            new_models.append(model_resource.model)\n\n        if hasattr(attribute, 'add'):\n            attribute.add(*new_models)\n        else:\n            for obj in new_models:\n                through_params = {\n                    attribute.source_field_name: target_obj,\n                    attribute.target_field_name: obj\n                }\n                # only create intermediary model instance if it doesn't already exist\n                if not attribute.through.objects.filter(**through_params).exists():\n                    attribute.through.objects.create(**through_params)\n\n    def handle_outgoing(self, ctx, source_obj, target_dict):\n        attrs = self._attribute.split('.')\n        attribute = source_obj\n\n        for attr in attrs:\n            attribute = getattr(attribute, attr, None)\n            if attribute is None:\n                return None\n\n        objects = []\n\n        # We are doing this outside of get_iterable so that subclasses can not\n        # remove this override.\n        if self._iterable_factory:\n            iterable = self._iterable_factory(attribute)\n        else:\n            iterable = self.get_iterable(attribute)\n\n        for model in iterable:\n            model_resource = self._resource_class(model)\n            model_dict = model_resource.get(ctx, EmptyParams())\n            # only add '_id' if there is no 'resourceUri'\n            if 'resourceUri' not in model_dict:\n                model_dict['_id'] = model_resource.key\n            objects.append(model_dict)\n        target_dict[self._compute_property(ctx)] = objects\n\n    def validate_resource(self, ctx, key, resource, source_dict_list):\n        error_dict = {}\n        if self.validator:\n            self.validator.find_errors(error_dict, ctx, key, resource, self, source_dict_list)\n        return error_dict\n\n    def schema(self, ctx, **kwargs):\n        return super(IterableField, self).schema(ctx, **kwargs)\n","repo_name":"armooo/savory-pie","sub_path":"savory_pie/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":26450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35054664377","text":"'''\nNUMBER OF POSSIBLE PATHS \n\nGiven a 2d matrix of M cols and N rows, find the number of\npossible paths to reach a cell (x,y) from starting cell (0,0),\nprovided that moves can only be right or down.\n\nHint:\nIn a DP approach, the number of ways to reach a cell is equal to the\nsum of the number of ways of reaching the upper and left neighbor cells. 
\n\nRecurrence relation:\npaths[i][j] = paths[i-1][j] + paths[i][j-1]\n\nBASE:\nFor all cells in top row and leftmost column, paths = 1.\n'''\n\n\ndef count_possible_paths(M, N, grid, x, y):\n    \"\"\"\n    Solves the number of possible paths problem in a bottom-up DP approach.\n    Receives a 2d array, and destination cell's row and col coordinates x, y.\n    Returns the number of right/down paths from (0, 0) to (x, y).\n    \"\"\"\n\n    paths = [[1 for _ in range(N)] for _ in range(M)]\n\n    # ways to reach (i, j) = ways from the cell above + ways from the cell to the left\n    for i in range(1, M):\n        for j in range(1, N):\n            paths[i][j] = paths[i-1][j] + paths[i][j-1]\n\n    return paths[x][y]\n\n\nif __name__ == \"__main__\":\n    N, M = [int(x) for x in input().split()]\n\n    grid = []\n    for _ in range(M):\n        row = [int(x) for x in input().split()]\n        grid.append(row)\n\n    x, y = [int(x) for x in input().split()]\n\n    print(count_possible_paths(M, N, grid, x, y))\n","repo_name":"ericjardon/python-coding-problems","sub_path":"DP/count_possible_paths.py","file_name":"count_possible_paths.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35588793310","text":"import torch\nimport torchvision.transforms as transforms\nimport torchvision.transforms as T  # second alias: the 'transforms' argument of ANHIRBase.__init__ shadows the module name\nimport torch.utils.data as data\nfrom datasets import SubDataset, AbstractDomainInterface, ExpandRGBChannels\nimport os\nimport os.path as osp\nimport csv\nimport subprocess\nimport pickle\nfrom PIL import Image\nimport numpy as np\n\nCLASSES = [\"breast_ER_patches\", \"breast_HE_patches\", \"kidney_HE_patches\", \"kidney_MAS_patches\"]\n\nclass ANHIRBase(data.Dataset):\n    def __init__(self, source_dir, split, image_path=\"images_96.npy\", label_path=\"labels.pkl\", imsize=224, transforms=None,\n                 to_gray=False, download=False, extract=True):\n        super(ANHIRBase,self).__init__()\n        self.index_cache_path = source_dir\n        self.source_dir = source_dir\n        self.split = split\n        self.imsize = imsize\n        self.image_path = image_path\n        self.to_gray = to_gray\n        if transforms is None:\n            # use the module alias T here: the parameter 'transforms' is None in this branch\n            self.transforms = T.Compose([T.Resize((imsize, imsize)),\n                                         T.ToTensor()])\n        else:\n            self.transforms = transforms\n        assert split in [\"train\", \"valid\", \"test\"]\n        if extract:\n            self.data = np.load(osp.join(source_dir, image_path))\n            self.img_list = np.arange(len(self.data))\n            with open(osp.join(source_dir, label_path), \"rb\") as fp:\n                str_labels = pickle.load(fp)\n            numeric_labels = []\n            for l in str_labels:\n                label = np.zeros(5, dtype=np.int64)\n                label[CLASSES.index(l)] = 1\n                numeric_labels.append(label)\n            labels = np.stack(numeric_labels)\n            self.labels = torch.LongTensor(labels)\n\n            if not (osp.exists(osp.join(self.source_dir, 'valid_split.pt'))\n                    and osp.exists(osp.join(self.source_dir, 'train_split.pt'))\n                    and osp.exists(osp.join(self.source_dir, 'test_split.pt'))):\n                self.generate_split()\n\n            self.split_inds = torch.load(osp.join(self.index_cache_path, \"%s_split.pt\"% self.split))\n\n    def __len__(self):\n        return len(self.split_inds)\n\n    def __getitem__(self, item):\n        index = self.split_inds[item]\n        img = self.data[index]\n        img = Image.fromarray(img)\n        if not self.to_gray:\n            img = self.transforms(img.convert('RGB'))\n        else:\n            img = self.transforms(img.convert('L'))\n        return img, self.labels[index]\n\n    def generate_split(self):\n        n_total = len(self.img_list)\n        train_num = int(0.6*n_total)\n        val_num = int(0.7*n_total)\n        train_inds = np.arange(train_num)\n        val_inds = np.arange(start=train_num, stop=val_num)\n        test_inds = np.arange(start=val_num, stop=n_total)\n\n        torch.save(train_inds, osp.join(self.index_cache_path, \"train_split.pt\"))\n        torch.save(val_inds, 
osp.join(self.index_cache_path, \"valid_split.pt\"))\n torch.save(test_inds, osp.join(self.index_cache_path, \"test_split.pt\"))\n return\n\n\nclass ANHIR(AbstractDomainInterface):\n dataset_path = \"ANHIR\"\n def __init__(self, root_path=\"./workspace/datasets/ANHIR\", downsample=None, shrink_channels=False, test_length=None, download=False,\n extract=True, doubledownsample=None):\n \"\"\"\n :param leave_out_classes: if a sample has ANY class from this list as positive, then it is removed from indices.\n :param keep_in_classes: when specified, if a sample has None of the class from this list as positive, then it\n is removed from indices..\n \"\"\"\n self.name = \"ANHIR\"\n super(ANHIR, self).__init__()\n self.downsample = downsample\n self.shrink_channels=shrink_channels\n self.max_l = test_length\n cache_path = root_path\n source_path = root_path\n if doubledownsample is not None:\n transform_list = [transforms.Resize(doubledownsample),]\n else:\n transform_list = []\n if downsample is not None:\n print(\"downsampling to\", downsample)\n transform_list += [transforms.Resize((downsample, downsample)),\n transforms.ToTensor(),]\n if self.shrink_channels:\n transform_list += [transforms.Grayscale(),]\n #else:\n # transform_list += [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),]\n transform = transforms.Compose(transform_list)\n self.image_size = (downsample, downsample)\n else:\n transform_list += [transforms.Resize((224, 224)),\n transforms.ToTensor(), ]\n if self.shrink_channels:\n transform_list += [transforms.Grayscale(),]\n #else:\n # transform_list += [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),]\n transform = transforms.Compose(transform_list)\n self.image_size = (224, 224)\n\n self.ds_train = ANHIRBase(source_path, \"train\", imsize=self.image_size[0], transforms=transform,\n to_gray=shrink_channels, download=download, extract=extract)\n self.ds_valid = ANHIRBase(source_path, \"valid\", imsize=self.image_size[0], transforms=transform,\n to_gray=shrink_channels, download=download, extract=extract)\n self.ds_test = ANHIRBase(source_path, \"test\", imsize=self.image_size[0], transforms=transform,\n to_gray=shrink_channels, download=download, extract=extract)\n if extract:\n self.D1_train_ind = self.get_filtered_inds(self.ds_train, shuffle=True)\n self.D1_valid_ind = self.get_filtered_inds(self.ds_valid, shuffle=True, max_l=self.max_l)\n self.D1_test_ind = self.get_filtered_inds(self.ds_test, shuffle=True)\n\n self.D2_valid_ind = self.get_filtered_inds(self.ds_train, shuffle=True)\n self.D2_test_ind = self.get_filtered_inds(self.ds_test)\n\n\n def get_filtered_inds(self, basedata: ANHIRBase, shuffle=False, max_l=None):\n output_inds = torch.arange(0, len(basedata)).int()\n if shuffle:\n output_inds = output_inds[torch.randperm(len(output_inds))]\n if max_l is not None:\n if len(output_inds) >max_l:\n output_inds = output_inds[:max_l]\n return output_inds\n\n def get_D1_train(self):\n return SubDataset(self.name, self.ds_train, self.D1_train_ind)\n\n def get_D1_valid(self):\n return SubDataset(self.name, self.ds_valid, self.D1_valid_ind, label=0)\n\n def get_D1_test(self):\n return SubDataset(self.name, self.ds_test, self.D1_test_ind, label=0)\n\n def get_D2_valid(self, D1):\n assert self.is_compatible(D1)\n target_indices = self.D2_valid_ind\n return SubDataset(self.name, self.ds_train, target_indices, label=1, transform=D1.conformity_transform())\n\n def get_D2_test(self, D1):\n assert self.is_compatible(D1)\n 
target_indices = self.D2_test_ind\n return SubDataset(self.name, self.ds_test, target_indices, label=1, transform=D1.conformity_transform())\n\n def conformity_transform(self):\n target = self.image_size[0]\n if self.shrink_channels:\n return transforms.Compose([ExpandRGBChannels(),\n transforms.ToPILImage(),\n transforms.Grayscale(),\n transforms.Resize((target, target)),\n transforms.ToTensor()\n ])\n else:\n return transforms.Compose([\n ExpandRGBChannels(),\n transforms.ToPILImage(),\n transforms.Resize((target, target)),\n transforms.ToTensor(),\n ])\n\nif __name__ == \"__main__\":\n #data1 = ANHIR(\"workspace\\\\datasets\\\\ANHIR\")\n data1 = ANHIR()\n d1 = data1.get_D1_train()\n import matplotlib.pyplot as plt\n\n print(len(d1))\n for i in range(10):\n x, y = d1[i]\n x2 = x * 0.229 + 0.485\n plt.imshow(x2.numpy().transpose((1, 2, 0)))","repo_name":"caotians1/OD-test-master","sub_path":"datasets/ANHIR.py","file_name":"ANHIR.py","file_ext":"py","file_size_in_byte":8365,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"27530755745","text":"from flask import Flask, render_template, request, session, redirect, url_for, flash\nfrom flask_wtf import CSRFProtect\nfrom sqlalchemy import or_\n\nimport forms\nfrom db import *\nfrom models import *\nfrom helper import date_format\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'super_secret_key'\n#csrf = CSRFProtect(app)\nusersesion = False\nformSearch = forms.FrmSearch()\n\n\ndef is_in_session():\n if 'username' in session:\n return True\n else:\n return False\n\n\ndef create_session(username='', user_id=''):\n session['username'] = username\n session['user_id'] = user_id\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@app.before_request\ndef before_request():\n\n if 'username' not in session and request.endpoint in ['new_post', 'comics', 'series', 'anime', 'games', 'tecnologia']:\n return redirect(url_for('login'))\n elif 'username' in session and request.endpoint in ['login', 'signup']:\n return redirect(url_for('index'))\n\n\n@app.after_request\ndef after_request(response):\n return response\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index(): # put application's code here\n title = \"GeekZone\"\n usersesion = is_in_session()\n\n if usersesion == True:\n username = session['username']\n else:\n username = ''\n\n return render_template('index.html', title=title, usersesion=usersesion, username=username, form=formSearch)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = forms.FrmLogin(request.form)\n\n if request.method == 'POST' and form.validate():\n username = form.username.data\n password = form.password.data\n\n user = Usuario.query.filter_by(user=username).first()\n if user is not None and user.verify_password(password):\n session['username'] = username\n session['user_id'] = user.id_usuario\n return redirect(url_for('index'))\n else:\n error_message = 'Usuario o contraseña incorrectos'\n flash(error_message)\n\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('index'))\n\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n form = forms.FrmSignup(request.form)\n\n if request.method == 'POST' and form.validate():\n usuarios(form)\n username = form.username.data\n user = Usuario.query.filter_by(user=username).first()\n session['username'] = username\n 
session['user_id'] = user.id_usuario\n        return render_template('index.html', form=form)\n\n    return render_template('signup.html', form=form)\n\n\n@app.route('/password_reset', methods=['GET', 'POST'])\ndef password_reset():\n    form = forms.FrmPasswordReset(request.form)\n\n    if request.method == 'POST' and form.validate():\n        print(\"Formulario válido\")\n        print(form.email.data)\n    else:\n        print(\"Formulario inválido\")\n\n    return render_template('password_reset.html', form=form)\n\n\n@app.route('/profile', methods=['GET', 'POST'])\ndef profile():\n    return render_template('profile.html')\n\n\n@app.route('/new-post', methods=['GET', 'POST'])\ndef new_post():\n    form = forms.FrmNewPost(request.form)\n\n    if request.method == 'POST' and form.validate():\n        id_usuario = session['user_id']\n        publicaciones(form, id_usuario=id_usuario)\n        return render_template('index.html', form=form)\n\n    return render_template('new_post.html', form=form)\n\n\n@app.route('/games', methods=['GET'])\ndef games():\n    title = \"Juegos\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=1).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('games.html', title=title, date_format=date_format, posts=posts, usersesion=True,\n                           form=formSearch, username=session['username'])\n\n\n@app.route('/anime', methods=['GET'])\ndef anime():\n    title = \"Anime\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=2).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('anime.html', title=title, date_format=date_format, posts=posts, form=formSearch,\n                           usersesion=True, username=session['username'])\n\n\n@app.route('/comics/', methods=['GET'])\n@app.route('/comics/<int:page>', methods=['GET'])\ndef comics(page=1):\n    per_page = 10\n    title = \"Comics\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=3).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('comics.html', title=title, posts=posts, date_format=date_format,\n                           usersesion=True, form=formSearch, username=session['username'])\n\n\n@app.route('/series', methods=['GET'])\ndef series():\n    title = \"Series\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=4).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('series.html', title=title, date_format=date_format, posts=posts,\n                           usersesion=True, form=formSearch, username=session['username'])\n\n\n@app.route('/tecnologia', methods=['GET'])\ndef tecnologia():\n    title = \"Tecnologia\"\n    posts = Publicacion.query.join(Usuario, Categoria).filter_by(id_categoria=5).add_columns(\n        Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n        Publicacion.pictures, Publicacion.topic)\n    return render_template('tecnologia.html', title=title, date_format=date_format, posts=posts,\n                           usersesion=True, form=formSearch, username=session['username'])\n\n\n# Route for searching the database\n@app.route('/search', methods=['GET', 'POST'])\ndef search():\n    title = \"GeekZone\"\n    form = 
forms.FrmSearch(request.form)\n\n if request.method == 'POST' and form.validate():\n search = form.search.data\n posts = Publicacion.query.join(Usuario, Categoria).filter(or_(Publicacion.titulo.like('%' + search + '%'),\n Publicacion.content.like('%' + search + '%'),\n Usuario.user.like('%' + search + '%'),\n Publicacion.topic.like(\n '%' + search + '%'))).add_columns(\n Usuario.user, Publicacion.titulo, Publicacion.fechaPublicacion, Categoria.nombre, Publicacion.content,\n Publicacion.pictures, Publicacion.topic)\n return render_template('search.html', title=title, posts=posts, date_format=date_format,\n usersesion=True, form=form, username=session['username'])\n\n\nif __name__ == '__main__':\n #csrf.init_app(app) # initialize CSRF protection\n\n app.run()\n","repo_name":"Murruco/Geekzone","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5216530549","text":"\nimport json\nimport os\nfrom pathlib import Path\nimport sys\n\nsys.path.insert(0, '../')\nfrom Config import get_data_dir\n\nsys.path.insert(0, '../../Common/')\nfrom COCOWrapper import COCOWrapper, id_from_path\nfrom FormatData import mask_images_parallel\n\nif __name__ == '__main__':\n \n # Configuration\n label1 = sys.argv[1]\n label2 = sys.argv[2]\n spurious = sys.argv[3]\n \n tuple_dir = '{}/{}-{}/{}'.format(get_data_dir(), label1, label2, spurious)\n \n for mode in ['val', 'train']:\n mode_dir = '{}/{}'.format(tuple_dir, mode)\n\n # Setup COCO\n coco = COCOWrapper(mode = mode)\n coco.construct_id2img()\n \n # Load the Splits\n with open('{}/splits.json'.format(mode_dir), 'r') as f:\n splits = json.load(f)\n \n # Get which object is being removed\n chosen_class = spurious\n chosen_id = coco.get_class_id(chosen_class)\n \n class_type = 'spurious'\n unmask = False\n unmask_classes = None\n \n # Create the counterfactual images\n configs = [('1s', 1), ('0s', 0)]\n for config in configs:\n # Get the base images\n name = config[0]\n imgs = coco.get_imgs_by_ids(splits[name])\n\n # Get the label\n label = config[1]\n \n config_dir = '{}/{}-{}'.format(mode_dir, name, class_type)\n for mask_mode in ['box', 'pixel']:\n # Setup the output directory\n save_dir = '{}/{}'.format(config_dir, mask_mode)\n os.system('rm -rf {}'.format(save_dir))\n Path(save_dir).mkdir(parents = True)\n print(save_dir)\n\n # Maks the chosen object\n filenames, _ = mask_images_parallel(imgs, coco, \n save_dir,\n chosen_id = chosen_id, mode = mask_mode,\n unmask = unmask, unmask_classes = None,\n use_png = (mask_mode == 'pixel'))\n \n # Save the output\n images = {}\n for i in range(len(filenames)):\n filename = filenames[i]\n id = id_from_path(filename)\n images[id] = [filename, label]\n\n with open('{}/images.json'.format(save_dir), 'w') as f:\n json.dump(images, f)\n ","repo_name":"GDPlumb/SPIRE","sub_path":"COCO-nuanced/1-GenerateCounterfactuals/Remove.py","file_name":"Remove.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"70462523437","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nclass Plotter:\n\n fig = plt.figure()\n\n\n def __init__(self, width, height,\n boundary=\"A\", bounds=[-2, 2, -2, 2],\n interval=100, type=\"o\", fill='b', environment='none', title=\"The Nature of Code\"):\n \"\"\"\n\n :param width:\n :param height:\n :param boundary:\n 
:param bounds:\n :param interval:\n :param type:\n :param fill:\n :param environment:\n :param title:\n :return: Plotter object\n \"\"\"\n\n self.w = width\n self.h = height\n self.type = type\n self.boundary = boundary\n self.interval = interval\n self.bounds = bounds\n self.size = 0\n self.fill = fill\n self.xdata, self.ydata = [], []\n self.points = []\n self.press = None\n self.environment = environment\n self.initialize()\n\n def initialize(self):\n\n xlim, ylim = self.boundaries(self.boundary)\n\n if self.type in ['o', 's', 'p', '*', '+', 'x', 'd', '|', '_', '-', '--', '-.', ':', '.']:\n\n # self.fig_circle.subplots_adjust(left=0, right=1, bottom=0, top=1)\n self.ax = Plotter.fig.add_subplot(111, aspect='equal', autoscale_on=False, xlim=xlim, ylim=ylim)\n\n # particles holds the locations of the particles\n self.objects, = self.ax.plot([], [], '{}{}'.format(self.fill, self.type), ms=6)\n\n # rect is the box edge\n self.rect = plt.Rectangle(self.bounds[::2],\n self.bounds[1] - self.bounds[0],\n self.bounds[3] - self.bounds[2],\n ec='none', lw=2, fc='none')\n\n if self.environment != \"none\":\n pass #todo\n\n def boundaries(self, quad):\n xlim = ylim = (0, 0)\n if \"A\" in quad and \"B\" in quad and \"C\" in quad and \"D\" in quad: # this is all quadrant\n xlim = (-self.w, self.w)\n ylim = (-self.h, self.h)\n\n elif \"A\" in quad and \"B\" in quad and (\"C\" in quad or \"D\" in quad): # this is not possible hence making all quadrant\n xlim = (-self.w, self.w)\n ylim = (-self.h, self.h)\n\n elif \"A\" in quad and \"B\" in quad: # this is first and second quadrant\n xlim = (-self.w, self.w)\n ylim = (0, self.h)\n\n elif \"A\" in quad and \"D\" in quad: # this is first and fourth quadrant\n xlim = (0, self.w)\n ylim = (-self.h, self.h)\n\n elif \"A\" in quad: # this is first quadrant only\n xlim = (0, self.w)\n ylim = (0, self.h)\n\n elif \"B\" in quad: # this is second quadrant only\n xlim = (-self.w, 0)\n ylim = (0, self.h)\n\n elif \"C\" in quad: # this is third quadrant only\n xlim = (-self.w, 0)\n ylim = (-self.h, 0)\n\n elif \"D\" in quad: # this is fourth quadrant only\n xlim = (0, self.w)\n ylim = (-self.h, 0)\n\n return xlim, ylim\n\n def init(self):\n\n del self.xdata[:]\n del self.ydata[:]\n self.objects.set_data([], [])\n self.rect.set_edgecolor('green')\n return self.objects, self.rect\n\n def animate(self, num, data, object):\n\n points = self.get_data()\n self.xdata.append(points[:, 0].tolist())\n self.ydata.append(points[:, 1].tolist())\n\n print('num = {} data = {} line = {}'.format(num, data, object))\n # print('x={} y={}'.format(self.xdata,self.ydata))\n # print('x ={} y ={}'.format(self.x,self.y))\n # print('t ={} y ={}'.format(points[:,0],points[:,1]))\n\n xmin, xmax = self.ax.get_xlim()\n vec = points[:, 0]\n if vec[vec > xmax]:\n self.ax.set_xlim(xmin, 2 * xmax)\n self.ax.figure.canvas.draw()\n\n ms = int(self.fig.dpi * 2 * self.size * self.fig.get_figwidth()\n / np.diff(self.ax.get_xbound())[0])\n\n # update pieces of the animation\n self.rect.set_edgecolor('k')\n self.objects.set_data(points[:, 0], points[:, 1])\n self.objects.set_markersize(ms)\n return self.objects, self.rect\n\n def set_data(self,func):\n self.func = func\n\n def get_data(self):\n if len(self.points) > 1:\n points, self.size = self.func(self.points[len(self.points)-1])\n else:\n points, self.size = self.func()\n return points\n\n def data(self,t=0):\n return self.func\n\n def plotter(self):\n\n anim = animation.FuncAnimation(self.fig, self.animate, fargs=(self.data, self.objects),frames=500,\n 
interval=self.interval, blit=False, init_func=self.init)\n plt.show()","repo_name":"jkapila/theNatureofCodeProject","sub_path":"plot_wrap.py","file_name":"plot_wrap.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22166554183","text":"import logging\n\nimport analytics\nfrom django.core import exceptions\n\nfrom server.utils.logging.constants import SCHOOL_MEMBER\n\nlogger = logging.getLogger(__name__)\n\nEVENT_SESSION_LOGIN = \"session_login\"\nEVENT_APP_LOGIN = \"app_login\"\nEVENT_INITIAL_PASSWORD_CREATED = \"initial_password_created\"\n\n\ndef identify_track(user, event_name, properties=None):\n traits = {\n \"name\": user.name,\n \"email\": user.email,\n \"user_type\": user.user_type,\n }\n\n if user.user_type in [user.Types.CONSUMER, user.Types.CONSUMER]:\n try:\n traits[\"school\"] = user.school_member.school.name\n except exceptions.ObjectDoesNotExist:\n logger.exception(\n f\"{SCHOOL_MEMBER} user pk: {user.pk} email: {user.email} has no school_member or school\"\n )\n\n analytics.identify(\n user.slug,\n traits,\n )\n\n return analytics.track(user.slug, event_name, properties)\n","repo_name":"connectiveproject/connective","sub_path":"server/server/utils/analytics_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"8916604187","text":"from django.db import models\n\n__all__ = (\n 'Idol',\n 'Group',\n 'Membership',\n)\n\n\n# class IdolManager(models.Manager):\n# pass\n\nclass Idol(models.Model):\n name = models.CharField(max_length=30)\n\n # 오브젝츠 = IdolManager()\n # objects = models.Manager()\n\n def __str__(self):\n return self.name\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=50)\n debut_date = models.DateField()\n members = models.ManyToManyField(\n Idol,\n through='Membership',\n through_fields=('group', 'idol'),\n )\n\n def __str__(self):\n return self.name\n\n\nclass Membership(models.Model):\n idol = models.ForeignKey(\n Idol,\n on_delete=models.CASCADE,\n related_name='membership_set'\n )\n group = models.ForeignKey(Group, on_delete=models.CASCADE)\n # # Idol,\n # # null=True,\n # # on_delete=models.CASCADE,\n # # related_name='recommend_membership_set',\n # )\n recommenders = models.ManyToManyField(\n Idol,\n blank=True,\n related_name='recommend_membership_set',\n )\n joined_date = models.DateField()\n is_active = models.BooleanField()\n\n def __str__(self):\n return f'{self.group.name}' \\\n f'{self.idol.name}' \\\n f'({self.is_active.name})'\n","repo_name":"ehfgk78/Django-Documentation","sub_path":"django_document/model/models/many2many/intermediate.py","file_name":"intermediate.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27405151728","text":"\"\"\"\nCheck the equivalence of two system of equations.\n\nUsage:\n equiv [-q...] 
[-1] [-y SYMMETRIES] [-p] A B\n\nOptions:\n    -q, --quiet             Less output, result is signaled via return value\n    -1, --one-way           Check only if A implies B, not the other way round\n    -y SYM, --symmetry SYM  Symmetry group generators\n    -p, --pretty            Pretty print missing inequalities\n\nThe program's return code signifies whether the two systems are equivalent:\n\n    0   A and B are equivalent\n    1   A does not imply B\n    2   B does not imply A\n    3   neither A implies B nor B implies A\n\nThe values 2 and 3 are only used if --one-way is not in effect.\n\"\"\"\n\nimport sys\n\nfrom docopt import docopt\n\nfrom .core.io import format_ineq, System\nfrom .core.symmetry import group_by_symmetry\n\n\ndef check_implies(sys_a: System, sys_b: System,\n                  name_a: str, name_b: str,\n                  *, symmetries: 'SymmetryGroup', quiet=0, pretty=False):\n    \"\"\"\n    Check if A implies B (system of linear inequalities).\n\n    The amount of output is controlled by the value of ``quiet``:\n\n        quiet=0     Full output, including the list of missing constraints\n        quiet=1     Short output, no list of constraints\n        quiet=2     No output at all\n    \"\"\"\n    lp = sys_a.lp()\n    # take one representative from each category:\n    groups = group_by_symmetry(symmetries, sys_b.matrix)\n    missing = [g for g in groups if not lp.implies(g[0])]\n    if missing:\n        if quiet <= 1:\n            print(\"{} misses {} ({} intrinsic) constraints of {}!\".format(\n                name_a, sum(map(len, missing)), len(missing), name_b))\n        if quiet == 0:\n            print(\"{} misses the following inequalities of {}:\"\n                  .format(name_a, name_b))\n            for constr in missing:\n                print(format_ineq(constr[0], pretty, sys_a.columns))\n        return False\n    else:\n        if quiet <= 1:\n            print(\"{} implies {}!\".format(name_a, name_b))\n        return True\n\n\ndef main(args=None):\n    opts = docopt(__doc__)\n    sys_a = System.load(opts['A'])\n    sys_b = System.load(opts['B'])\n    sys_b, _ = sys_b.slice(sys_a.columns, fill=True)\n\n    sys_a.update_symmetries(opts['--symmetry'])\n\n    status = 0\n    kwd = {'quiet': opts['--quiet'],\n           'pretty': opts['--pretty'],\n           'symmetries': sys_a.symmetry_group()}\n    if not check_implies(sys_a, sys_b, 'A', 'B', **kwd):\n        status |= 1\n    if not opts['--one-way']:\n        if not check_implies(sys_b, sys_a, 'B', 'A', **kwd):\n            status |= 2\n    return status\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"coldfix/pystif","sub_path":"pystif/equiv.py","file_name":"equiv.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"29882762697","text":"#!/usr/bin/python\n# -*- coding: iso-8859-15 -*-\nimport pprint\n\n# SQL definitions\nfrom librerias.datos.base import globales\n\n# General base with basic attributes\nfrom aplicacion.datos.clases.clases_base import base_general\nfrom librerias.datos.sql import sqalchemy_clase_dinamica\nfrom aplicacion.datos.definiciones._comunes import elementos_comunes\n\nfrom . 
import tramite_campos\n\nreferencias = [\n    # Form\n    {\n    \"campoReferencia\" : \"formulario_id\",\n    \"atributosReferencia\": [{\n        \"formulario_nombre\": \"nombre\",\n    }],\n    \"estructuraDestino\": \"formularios_dinamicos\",\n    \"campoDestino\" : \"id\", \n    }\n]\n\ndefinicion = {\n    \"descripcion\" : \"Tramites definición\",\n    \"clase\" : \"config_tramites\",\n    \"estructura\" : \"tramites\", \n    \"campos\" : tramite_campos.campos,\n    \"referencias\" : referencias,\n    \"campoIndice\" : \"id\",\n    \"indexa\" : \"si\",\n    \"indexamiento\": {},\n    \"reporte\" : \"SI\"\n}\n\n# Create the SQLAlchemy class\nCLASE = sqalchemy_clase_dinamica.crea_clase( definicion, (base_general.DB_BASE_SIMPLE, globales.CLASE_BASE_SQL) )\nglobales.carga_clase(definicion[\"clase\"], CLASE)\n\ncamposIndexamiento = {}\ncamposElastic = tramite_campos.campos.copy()\ncamposElastic.update(camposIndexamiento)\n\n# Publish\nelementos_comunes.publicaValidaElastic(definicion, camposElastic)","repo_name":"quirogaco/active_document","sub_path":"aplicacion/datos/definiciones/configuracion/tramites/tramite.py","file_name":"tramite.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20075859288","text":"import PySimpleGUI as sg\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.gridspec import GridSpec\n\ndef create_plot(updated_data):\n    income = 0\n    expense = 0\n    income_category_name = []\n    for i in updated_data:\n        if i[0] == 'Income':\n            income_category_name.append(i[1])\n    income_category_name = list(set(income_category_name))\n    income_category_value = []\n    for i in income_category_name:\n        income_category_value.append(0)\n\n    expense_category_name = []\n    for i in updated_data:\n        if i[0] == 'Expense':\n            expense_category_name.append(i[1])\n    expense_category_name = list(set(expense_category_name))\n    expense_category_value = []\n    for i in expense_category_name:\n        expense_category_value.append(0)\n    for i in updated_data:\n        if i[0] == 'Income':\n            income += int(i[2])\n            # add the amount to its own category's bucket, mirroring the expense branch\n            income_category_value[income_category_name.index(i[1])] += int(i[2])\n        else:\n            expense += int(i[2])\n            expense_category_value[expense_category_name.index(i[1])] += int(i[2])\n    if income_category_value == [] and expense_category_value == []:\n        return 0\n    colors = ['#8ceacd', '#d95c68']\n    fig = plt.figure(constrained_layout=True)\n    fig.suptitle(\"Monthly report\")\n    gs = GridSpec(2, 2, figure=fig)\n    axs = fig.add_subplot(gs[:, 0])\n    axs2 = fig.add_subplot(gs[0, 1])\n    axs3 = fig.add_subplot(gs[1, 1])\n    axs.pie([income, expense], colors=colors, labels=['Income', 'Expense'], autopct='%1.2f%%')\n    axs2.pie(income_category_value, labels=income_category_name, autopct='%1.2f%%')\n    axs3.pie(expense_category_value, labels=expense_category_name, autopct='%1.2f%%')\n\n    layout = [\n        [sg.Canvas(size=(1500, 1000), key='-CANVAS-')]\n    ]\n    window = sg.Window('Monthly summary', layout, finalize=True, element_justification='center')\n    figure_canvas = FigureCanvasTkAgg(fig, window['-CANVAS-'].TKCanvas)\n    figure_canvas.draw()\n    figure_canvas.get_tk_widget().pack(side='top', fill='none', expand=1)\n\n    while True:\n        event, values = window.read()\n        if event == 'Exit' or event == sg.WIN_CLOSED or event == 'Cancel':\n            break\n    window.close()\n","repo_name":"Napasakon/dads5001_quiz_1","sub_path":"show_graph.py","file_name":"show_graph.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} 
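The create_plot routine in the record above keeps per-category totals in parallel name/value lists that must stay aligned by position. A dict keyed by category name expresses the same aggregation without positional bookkeeping; below is a minimal, self-contained sketch (the helper name totals_by_category is hypothetical and not part of any record in this file):

from collections import defaultdict

def totals_by_category(updated_data):
    """Aggregate (kind, category, amount) rows into overall and per-category totals."""
    income_by_cat = defaultdict(int)   # category name -> running income total
    expense_by_cat = defaultdict(int)  # category name -> running expense total
    for kind, category, amount in updated_data:
        if kind == 'Income':
            income_by_cat[category] += int(amount)
        else:
            expense_by_cat[category] += int(amount)
    return sum(income_by_cat.values()), sum(expense_by_cat.values()), income_by_cat, expense_by_cat

# Usage with rows shaped like the ones create_plot consumes:
rows = [('Income', 'Salary', '1000'), ('Expense', 'Food', '200'), ('Expense', 'Rent', '500')]
income, expense, inc_cats, exp_cats = totals_by_category(rows)
assert (income, expense, exp_cats['Food']) == (1000, 700, 200)

The dict's keys and values can feed the pie calls directly via list(inc_cats) and list(inc_cats.values()).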
+{"seq_id":"35054979107","text":"import math\nfrom copy import copy\n# Add any extra import statements you may need here\n\n\n# Add any helper functions you may need here\n\n\ndef findSignatureCounts(arr):\n # Arr is a permutation of 1..n. No repetitions\n \n # every minute, every student i signs the current yearbook, and passes\n # it to student in arr[i-1].\n # when a student receives their yearbook they stop passing it around.\n \n # input guarantees every student eventually receives their own book.\n \n # compute a list where every element i-1 is equal to the number of signatures in a student's yearbook.\n N = len(arr)\n in_circulation = {i+1 for i in range(N)} # yearbooks ids still in circulation\n book_where = [i for i in range(N+1)] # book at index i is at a[i]\n output = [0 for _ in range(N)]\n \n while len(in_circulation):\n # Add a signature at ouptut[i-1] for every element in circulation. Check if it arrived to owner.\n \n for book in copy(in_circulation):\n output[book-1] += 1 # increment signatures\n \n # update holder\n i = book_where[book]\n book_where[book] = arr[i-1]\n \n # remove book from circulation if it arrives to owner\n if book_where[book] == book:\n in_circulation.remove(book)\n \n \n return output\n\n\n# These are the tests we use to determine if the solution is correct.\n# You can add your own at the bottom.\n\ndef printInteger(n):\n print('[', n, ']', sep='', end='')\n\ndef printIntegerList(array):\n size = len(array)\n print('[', end='')\n for i in range(size):\n if i != 0:\n print(', ', end='')\n print(array[i], end='')\n print(']', end='')\n\ntest_case_number = 1\n\ndef check(expected, output):\n global test_case_number\n expected_size = len(expected)\n output_size = len(output)\n result = True\n if expected_size != output_size:\n result = False\n for i in range(min(expected_size, output_size)):\n result &= (output[i] == expected[i])\n rightTick = '\\u2713'\n wrongTick = '\\u2717'\n if result:\n print(rightTick, 'Test #', test_case_number, sep='')\n else:\n print(wrongTick, 'Test #', test_case_number, ': Expected ', sep='', end='')\n printIntegerList(expected)\n print(' Your output: ', end='')\n printIntegerList(output)\n print()\n test_case_number += 1\n\nif __name__ == \"__main__\":\n arr_1 = [2, 1]\n expected_1 = [2, 2]\n output_1 = findSignatureCounts(arr_1)\n check(expected_1, output_1)\n\n arr_2 = [1, 2]\n expected_2 = [1, 1]\n output_2 = findSignatureCounts(arr_2)\n check(expected_2, output_2)\n\n\n # Add your own test cases here\n ","repo_name":"ericjardon/python-coding-problems","sub_path":"challenges/fb/yearbook_passing.py","file_name":"yearbook_passing.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4938780053","text":"# -*- coding: utf-8 -*-\nimport sys\n\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nif sys.version_info <= (3, 5):\n error = 'Requires Python Version 3.6 or above... 
exiting.'\n    print(error, file=sys.stderr)\n    sys.exit(1)\n\nsetup(\n    name='openelevationservice',\n    version='0.2.1',\n    description='Flask app to serve elevation data to GeoJSON queries.',\n    long_description=readme(),\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python :: 3.7',\n    ],\n    keywords='flask elevation GIS GeoJSON ORS SRTM',\n    url='https://github.com/GIScience/openelevationservice',\n    author='Nils Nolde',\n    author_email='nils.nolde@gmail.com',\n    license='MIT',\n    packages=['openelevationservice'],\n    install_requires=[\n        'Flask>=1.0.0',\n        'Flask_Cors>=3.0.0',\n        'Flask-SQLAlchemy>=2.3.0',\n        'Cerberus>=1.2',\n        'beautifulsoup4>=4.6.0',\n        'GeoAlchemy2>=0.5.0',\n        'geojson>=2.4.0',\n        'shapely>=1.6.0',\n        'sqlalchemy>=1.2.0',\n        'werkzeug>=0.14.0',\n        'pyyaml>=4.2b1',\n        'flasgger>=0.9.0',\n        'gunicorn>=19.0.0',\n        'gevent>=1.3.0',\n        'requests>=2.20.0',\n        'psycopg2>2.7.5'\n    ],\n    include_package_data=True,\n    test_suite='nose.collector',\n    tests_require=[\n        'nose>1.3.0',\n        'Flask_Testing>=0.7.0',\n    ],\n    zip_safe=False,\n    project_urls={\n        'Bug Reports': 'https://github.com/GIScience/openelevationservice/issues',\n        'Source': 'https://github.com/GIScience/openelevationservice',\n    }\n)\n","repo_name":"GIScience/openelevationservice","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"73"} +{"seq_id":"18027225230","text":"from sympy.core.singleton import S\n\nfrom sympy.core.numbers import pi\n\nfrom sympy.physics.units import DimensionSystem, hertz, kilogram\nfrom sympy.physics.units.definitions import (\n    G, Hz, J, N, Pa, W, c, g, kg, m, s, meter, gram, second, newton,\n    joule, watt, pascal)\nfrom sympy.physics.units.definitions.dimension_definitions import (\n    acceleration, action, energy, force, frequency, momentum,\n    power, pressure, velocity, length, mass, time)\nfrom sympy.physics.units.prefixes import PREFIXES, prefix_unit\nfrom sympy.physics.units.prefixes import (\n    kibi, mebi, gibi, tebi, pebi, exbi\n)\nfrom sympy.physics.units.definitions import (\n    cd, K, coulomb, volt, ohm, siemens, farad, henry, tesla, weber, dioptre,\n    lux, katal, gray, becquerel, inch, hectare, liter, julian_year,\n    gravitational_constant, speed_of_light, elementary_charge, planck, hbar,\n    electronvolt, avogadro_number, avogadro_constant, boltzmann_constant,\n    stefan_boltzmann_constant, atomic_mass_constant, molar_gas_constant,\n    faraday_constant, josephson_constant, von_klitzing_constant,\n    acceleration_due_to_gravity, magnetic_constant, vacuum_permittivity,\n    vacuum_impedance, coulomb_constant, atmosphere, bar, pound, psi, mmHg,\n    milli_mass_unit, quart, lightyear, astronomical_unit, planck_mass,\n    planck_time, planck_temperature, planck_length, planck_charge,\n    planck_area, planck_volume, planck_momentum, planck_energy, planck_force,\n    planck_power, planck_density, planck_energy_density, planck_intensity,\n    planck_angular_frequency, planck_pressure, planck_current, planck_voltage,\n    planck_impedance, planck_acceleration, bit, byte, kibibyte, mebibyte,\n    gibibyte, tebibyte, pebibyte, exbibyte, curie, rutherford, radian, degree,\n    steradian, angular_mil, atomic_mass_unit, gee, kPa, ampere, u0, kelvin,\n    mol, mole, candela, electric_constant, boltzmann, angstrom\n)\n\n\ndimsys_length_weight_time = DimensionSystem([\n    # Dimensional dependencies for MKS base dimensions\n    length,\n    
mass,\n time,\n], dimensional_dependencies={\n # Dimensional dependencies for derived dimensions\n \"velocity\": {\"length\": 1, \"time\": -1},\n \"acceleration\": {\"length\": 1, \"time\": -2},\n \"momentum\": {\"mass\": 1, \"length\": 1, \"time\": -1},\n \"force\": {\"mass\": 1, \"length\": 1, \"time\": -2},\n \"energy\": {\"mass\": 1, \"length\": 2, \"time\": -2},\n \"power\": {\"length\": 2, \"mass\": 1, \"time\": -3},\n \"pressure\": {\"mass\": 1, \"length\": -1, \"time\": -2},\n \"frequency\": {\"time\": -1},\n \"action\": {\"length\": 2, \"mass\": 1, \"time\": -1},\n \"area\": {\"length\": 2},\n \"volume\": {\"length\": 3},\n})\n\n\nOne = S.One\n\n\n# Base units:\ndimsys_length_weight_time.set_quantity_dimension(meter, length)\ndimsys_length_weight_time.set_quantity_scale_factor(meter, One)\n\n# gram; used to define its prefixed units\ndimsys_length_weight_time.set_quantity_dimension(gram, mass)\ndimsys_length_weight_time.set_quantity_scale_factor(gram, One)\n\ndimsys_length_weight_time.set_quantity_dimension(second, time)\ndimsys_length_weight_time.set_quantity_scale_factor(second, One)\n\n# derived units\n\ndimsys_length_weight_time.set_quantity_dimension(newton, force)\ndimsys_length_weight_time.set_quantity_scale_factor(newton, kilogram*meter/second**2)\n\ndimsys_length_weight_time.set_quantity_dimension(joule, energy)\ndimsys_length_weight_time.set_quantity_scale_factor(joule, newton*meter)\n\ndimsys_length_weight_time.set_quantity_dimension(watt, power)\ndimsys_length_weight_time.set_quantity_scale_factor(watt, joule/second)\n\ndimsys_length_weight_time.set_quantity_dimension(pascal, pressure)\ndimsys_length_weight_time.set_quantity_scale_factor(pascal, newton/meter**2)\n\ndimsys_length_weight_time.set_quantity_dimension(hertz, frequency)\ndimsys_length_weight_time.set_quantity_scale_factor(hertz, One)\n\n# Other derived units:\n\ndimsys_length_weight_time.set_quantity_dimension(dioptre, 1 / length)\ndimsys_length_weight_time.set_quantity_scale_factor(dioptre, 1/meter)\n\n# Common volume and area units\n\ndimsys_length_weight_time.set_quantity_dimension(hectare, length**2)\ndimsys_length_weight_time.set_quantity_scale_factor(hectare, (meter**2)*(10000))\n\ndimsys_length_weight_time.set_quantity_dimension(liter, length**3)\ndimsys_length_weight_time.set_quantity_scale_factor(liter, meter**3/1000)\n\n\n# Newton constant\n# REF: NIST SP 959 (June 2019)\n\ndimsys_length_weight_time.set_quantity_dimension(gravitational_constant, length ** 3 * mass ** -1 * time ** -2)\ndimsys_length_weight_time.set_quantity_scale_factor(gravitational_constant, 6.67430e-11*m**3/(kg*s**2))\n\n# speed of light\n\ndimsys_length_weight_time.set_quantity_dimension(speed_of_light, velocity)\ndimsys_length_weight_time.set_quantity_scale_factor(speed_of_light, 299792458*meter/second)\n\n\n# Planck constant\n# REF: NIST SP 959 (June 2019)\n\ndimsys_length_weight_time.set_quantity_dimension(planck, action)\ndimsys_length_weight_time.set_quantity_scale_factor(planck, 6.62607015e-34*joule*second)\n\n# Reduced Planck constant\n# REF: NIST SP 959 (June 2019)\n\ndimsys_length_weight_time.set_quantity_dimension(hbar, action)\ndimsys_length_weight_time.set_quantity_scale_factor(hbar, planck / (2 * pi))\n\n\n__all__ = [\n 'mmHg', 'atmosphere', 'newton', 'meter', 'vacuum_permittivity', 'pascal',\n 'magnetic_constant', 'angular_mil', 'julian_year', 'weber', 'exbibyte',\n 'liter', 'molar_gas_constant', 'faraday_constant', 'avogadro_constant',\n 'planck_momentum', 'planck_density', 'gee', 'mol', 'bit', 'gray', 'kibi',\n 
'bar', 'curie', 'prefix_unit', 'PREFIXES', 'planck_time', 'gram',\n 'candela', 'force', 'planck_intensity', 'energy', 'becquerel',\n 'planck_acceleration', 'speed_of_light', 'dioptre', 'second', 'frequency',\n 'Hz', 'power', 'lux', 'planck_current', 'momentum', 'tebibyte',\n 'planck_power', 'degree', 'mebi', 'K', 'planck_volume',\n 'quart', 'pressure', 'W', 'joule', 'boltzmann_constant', 'c', 'g',\n 'planck_force', 'exbi', 's', 'watt', 'action', 'hbar', 'gibibyte',\n 'DimensionSystem', 'cd', 'volt', 'planck_charge', 'angstrom',\n 'dimsys_length_weight_time', 'pebi', 'vacuum_impedance', 'planck',\n 'farad', 'gravitational_constant', 'u0', 'hertz', 'tesla', 'steradian',\n 'josephson_constant', 'planck_area', 'stefan_boltzmann_constant',\n 'astronomical_unit', 'J', 'N', 'planck_voltage', 'planck_energy',\n 'atomic_mass_constant', 'rutherford', 'elementary_charge', 'Pa',\n 'planck_mass', 'henry', 'planck_angular_frequency', 'ohm', 'pound',\n 'planck_pressure', 'G', 'avogadro_number', 'psi', 'von_klitzing_constant',\n 'planck_length', 'radian', 'mole', 'acceleration',\n 'planck_energy_density', 'mebibyte', 'length',\n 'acceleration_due_to_gravity', 'planck_temperature', 'tebi', 'inch',\n 'electronvolt', 'coulomb_constant', 'kelvin', 'kPa', 'boltzmann',\n 'milli_mass_unit', 'gibi', 'planck_impedance', 'electric_constant', 'kg',\n 'coulomb', 'siemens', 'byte', 'atomic_mass_unit', 'm', 'kibibyte',\n 'kilogram', 'lightyear', 'mass', 'time', 'pebibyte', 'velocity',\n 'ampere', 'katal',\n]\n","repo_name":"sympy/sympy","sub_path":"sympy/physics/units/systems/length_weight_time.py","file_name":"length_weight_time.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"en","doc_type":"code","stars":11561,"dataset":"github-code","pt":"73"} +{"seq_id":"74592629354","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport numpy as np\r\nimport pandas as pd\r\nimport plotly.graph_objs as go\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\nfrom plotly import tools\r\n\r\n######################################################Data##############################################################\r\n\r\ndf = pd.read_csv('data/table.csv')\r\n\r\nEurope = ['Albania','Armenia','Austria','Azerbaijan','Belarus','Belgium','Bulgaria','Croatia','Cyprus',\r\n 'Czech Republic','Denmark','Estonia','Finland','France','Georgia','Germany','Greece','Hungary',\r\n 'Iceland','Ireland','Italy','Kazakhstan','Latvia','Lithuania','Luxembourg','Malta','Moldova',\r\n 'Netherlands','Norway','Poland','Portugal','Romania','Russian Federation','Slovak Republic',\r\n 'Slovenia','Spain','Sweden','Switzerland','Turkey','Ukraine','United Kingdom']\r\n\r\ncolor_array = [\"#999999\", \"#E69F00\", \"#56B4E9\", \"#009E73\", \"#F0E442\", \"#0072B2\", \"#D55E00\", \"#CC79A7\", '#9a6a00',\r\n '#0047e6','#00523b', '#893c00']\r\n\r\n\r\n######################################################Interactive Components############################################\r\n\r\ncountry_options = [dict(label=country, value=country) for country in Europe]\r\n\r\ncontinent_options = [dict(label=continent, value=continent) for continent in df['Continent_Name'].unique()]\r\n\r\n\r\n##################################################APP###############################################################\r\n\r\napp = dash.Dash(__name__)\r\nserver=app.server\r\napp.layout = html.Div([\r\n\r\n html.Div([\r\n html.H1(\r\n \"European 
Tourism\",\r\n style={\"margin-bottom\": \"0px\"},\r\n ),\r\n\r\n html.H3(\r\n \"An Overview\", style={\"margin-top\": \"0px\"}\r\n )\r\n ], className='Title'),\r\n\r\n html.Div([\r\n\r\n html.Div([\r\n html.Label('Country Choice'),\r\n dcc.Dropdown(\r\n id='country_drop',\r\n options=country_options,\r\n value=['Portugal', 'France'],\r\n multi=True\r\n ),\r\n\r\n html.Br(),\r\n\r\n html.Label('Continent Choice'),\r\n dcc.Dropdown(\r\n id='continent_drop',\r\n options=continent_options,\r\n value=['Europe','World'],\r\n multi=True\r\n ),\r\n\r\n html.Br(),\r\n\r\n html.Label('Year Slider'),\r\n dcc.Slider(\r\n id='year_slider',\r\n min=df['Years'].min(),\r\n max=2016,\r\n marks={str(i): '{}'.format(str(i)) for i in [2000, 2002, 2004, 2006, 2008, 2010, 2012, 2014, 2016]},\r\n value=2016,\r\n step=1,\r\n included=False\r\n ),\r\n\r\n html.Br(),\r\n\r\n dcc.Markdown(\"With this dashboard we wish to tell a story about European \"\r\n \"tourism. We showcase the prominence of tourism on countries' GDP, \"\r\n \"Europe's tourism revenue compared to other continents, the number of \"\r\n \"arrivals and how it's been growing over the years, how each tourism variable \"\r\n \"relates to each other and finally the influence of tourism on jobs.\"\r\n ),\r\n ], className='column2 pretty'),\r\n\r\n html.Div([dcc.Graph(id='bubbles_graph')], className='column1 pretty')\r\n ], className='row'),\r\n\r\n html.Div([\r\n\r\n html.Div([dcc.Graph(id='line_graph')], className='column3 pretty'),\r\n\r\n html.Div([dcc.Graph(id='choropleth')], className='column4 pretty')\r\n\r\n ], className='row'),\r\n\r\n html.Div([\r\n\r\n html.Div([dcc.Graph(id='radar_graph')], className='column3 pretty'),\r\n\r\n html.Div([dcc.Graph(id='subplot_graph')], className='column4 pretty')\r\n\r\n ], className='row'),\r\n\r\n html.Div([\r\n html.H6(\r\n \"Work by: Andreia Antunes [M20190876], Fernanda Zippinotti [M20190232], Lara Neves [20190867]\", style={\"margin-top\": \"0px\"}\r\n )\r\n ], className='Title')\r\n\r\n])\r\n\r\n######################################################Callbacks#########################################################\r\n\r\n@app.callback(\r\n [\r\n Output(\"choropleth\", \"figure\"),\r\n Output(\"line_graph\", \"figure\"),\r\n Output(\"bubbles_graph\", \"figure\"),\r\n Output(\"radar_graph\", \"figure\"),\r\n Output(\"subplot_graph\", \"figure\")\r\n ],\r\n [\r\n Input(\"year_slider\", \"value\"),\r\n Input(\"country_drop\", \"value\"),\r\n Input(\"continent_drop\", \"value\")\r\n ]\r\n)\r\ndef plots(year, countries, continent):\r\n\r\n #############################################First Choropleth######################################################\r\n df_EU = df.loc[df['Country_Name'].isin(Europe)]\r\n df_EU_0 = df_EU.loc[df_EU['Years']== year]\r\n data_choropleth = dict(type='choropleth',\r\n locations=df_EU_0['Country_Name'],\r\n locationmode='country names',\r\n text=df_EU_0['Country_Name'],\r\n colorscale='YlGnBu',\r\n colorbar=dict(title='Number of Arrivals'),\r\n #hovertemplate='Country: %{text}
' + str(gas.replace('_', ' ')) + ': %{z}',\r\n z=df_EU_0['Arrivals'])\r\n\r\n layout_choropleth = dict(geo=dict(scope='europe',\r\n projection={'type': 'equirectangular'},\r\n bgcolor='#f9f9f9',\r\n showframe = False\r\n ),\r\n title=dict(text='Number of Overnight Arrivals',\r\n x=.5,\r\n # Title relative position according to the xaxis, range (0,1)\r\n ),\r\n font=dict(size=12,color=\"#4d4d4d\"),\r\n paper_bgcolor='#f9f9f9')\r\n\r\n ############################################Second Lines Plot######################################################\r\n dataContinents = df[df.Country_Name.isna()]\r\n color_numb2 = 0\r\n data_line = []\r\n for country in continent:\r\n data_line.append( dict(type = 'scatter',\r\n x = dataContinents.loc[dataContinents['Continent_Name'] == country]['Years'],\r\n y = dataContinents.loc[dataContinents['Continent_Name'] == country]['Receipts_PCapita'],\r\n name = country,\r\n line_color= color_array[color_numb2]))\r\n color_numb2 += 1\r\n\r\n layout_line = dict(title = dict(text = 'Tourism Revenue per capita',x=0.5),\r\n xaxis = dict(title = 'Year'),\r\n yaxis = dict(title = 'Tourism Revenue per capita'),\r\n paper_bgcolor = '#f9f9f9',\r\n template='none',\r\n font = dict(size=12,color=\"#4d4d4d\"),\r\n legend = dict(orientation='h',yanchor='top',xanchor='center',y=-0.3,x=0.5))\r\n\r\n ############################################Third Bubbles Plot#####################################################\r\n dataBubble = df.dropna()\r\n dataBubble.sort_values(by ='Years', inplace = True)\r\n data_bubble = px.scatter(dataBubble.loc[dataBubble['Country_Name'].isin(countries)], x=\"GDP\", y=\"Receipts_PCapita\",\r\n animation_frame=\"Years\", animation_group=\"Country_Name\",\r\n size=\"Ratio GDP\", hover_name=\"Country_Name\", color=\"Country_Name\",\r\n log_x=True, size_max=40, range_x=[300, 120000], range_y=[0, 11000])\r\n\r\n layout_bubble = data_bubble.update_layout(title=dict(text='Tourism and GDP per capita', x=0.5),\r\n xaxis=dict(title='GDP per capita'),\r\n yaxis=dict(title='Tourism GDP per capita'),\r\n paper_bgcolor='#f9f9f9',\r\n font=dict(size=12,color=\"#4d4d4d\"),\r\n template='none'\r\n\r\n )\r\n\r\n data_bubble.for_each_trace(lambda t: t.update(name=t.name.replace(\"Country_Name=\", \"\")))\r\n ############################################Forth Radar Plot######################################################\r\n\r\n labels = ['GDP_N', 'Expenditures_N', 'PopTotal_N', 'Arrivals_N', 'Departure_N', 'GDP_N']\r\n data_radar =[]\r\n\r\n color_numb = 0\r\n for country in countries:\r\n dataradar = df[['GDP_N', 'Expenditures_N', 'PopTotal_N', 'Arrivals_N', 'Departure_N']].loc[\r\n (df['Years'] == year) & (df['Country_Name'] == country)]\r\n values = dataradar.values.flatten().tolist()\r\n values += values[:1]\r\n data_radar.append(dict(type='scatterpolar',\r\n r=values,\r\n theta=labels,\r\n fill='toself',\r\n name=country,\r\n line_color= color_array[color_numb],\r\n mode='lines'\r\n ))\r\n color_numb += 1\r\n\r\n layout_radar = dict(\r\n title='Tourism and related metrics',\r\n font=dict(\r\n # family = 'Arial, sans-serif;',\r\n size=12,\r\n color=\"#4d4d4d\"\r\n ),\r\n title_x=0.5,\r\n polar=dict(\r\n radialaxis=dict(\r\n visible=True,\r\n range=[-3, 3]\r\n )),\r\n paper_bgcolor='#f9f9f9',\r\n template = 'none',\r\n showlegend=True\r\n )\r\n\r\n ############################################Fifth Bar Plot##########################################################\r\n titles = ['Jobs per 1k Tourists', 'Expenditure required for one Job']\r\n plot = 
make_subplots(rows=1,\r\n cols=2,\r\n subplot_titles=titles,\r\n specs= [[{}, {}]], shared_xaxes = True,\r\n shared_yaxes=False, vertical_spacing=0.001\r\n )\r\n\r\n\r\n ############################################Fifth Subplot Plot##########################################################\r\n data_bar = []\r\n for country in countries:\r\n df_bar = df.loc[df['Country_Name'] == country]\r\n\r\n x_bar = df_bar['Country_Name']\r\n y_bar = (df_bar.loc[df_bar['Years'] == year]['Jobs_per_tourist'])\r\n plot.append_trace(go.Bar(\r\n x=y_bar,\r\n y=x_bar,\r\n marker=dict(\r\n color='rgba(50, 171, 96, 0.6)',\r\n line=dict(\r\n color='rgba(50, 171, 96, 1.0)',\r\n width=1),\r\n ),\r\n orientation='h',\r\n showlegend= False\r\n ), 1, 1)\r\n\r\n for country in countries:\r\n df_markers = df.loc[(df['Country_Name'] == country)]\r\n\r\n plot.append_trace(go.Bar(\r\n x=df_markers.loc[df_markers['Years']==year]['Cost_of_oneJob'],\r\n y=df_markers['Country_Name'],\r\n marker=dict(\r\n color='rgba(50, 171, 96, 0.6)',\r\n line=dict(\r\n color='rgba(50, 171, 96, 1.0)',\r\n width=1),\r\n ),\r\n orientation='h',\r\n showlegend=False\r\n ), 1, 2)\r\n\r\n\r\n plot.update_layout(\r\n title='Tourism Impact on Jobs',\r\n yaxis=dict(\r\n showgrid=False,\r\n showline=False,\r\n showticklabels=True,\r\n domain=[0, 0.85],\r\n ),\r\n yaxis2=dict(\r\n showgrid=False,\r\n showline=True,\r\n showticklabels=False,\r\n linecolor='rgba(102, 102, 102, 0.8)',\r\n linewidth=2,\r\n domain=[0, 0.85],\r\n ),\r\n xaxis=dict(\r\n zeroline=False,\r\n showline=False,\r\n showticklabels=True,\r\n showgrid=True,\r\n domain=[0, 0.42],\r\n ),\r\n xaxis2=dict(\r\n zeroline=False,\r\n showline=False,\r\n showticklabels=True,\r\n showgrid=True,\r\n domain=[0.47, 1],\r\n side='top',\r\n dtick=25000,\r\n ),\r\n legend=dict(x=0.029, y=1.038, font_size=10),\r\n margin=dict(l=100, r=20, t=70, b=70),\r\n paper_bgcolor='#f9f9f9',\r\n font=dict(size=12, color=\"#4d4d4d\"),\r\n title_x=0.5,\r\n plot_bgcolor='rgb(248, 248, 255)',\r\n )\r\n\r\n return go.Figure(data=data_choropleth, layout=layout_choropleth), \\\r\n go.Figure(data=data_line, layout=layout_line), \\\r\n go.Figure(data=data_bubble, layout=layout_bubble), \\\r\n go.Figure(data=data_radar, layout=layout_radar), \\\r\n plot\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n\r\n","repo_name":"andreiantunes/dv_test","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14746216167","text":"from common_utils.text import Field, Vocab\n\n\nclass Dataset(object):\n PAD_TOKEN = ''\n UNK_TOKEN = ''\n INIT_TOKEN = ''\n EOS_TOKEN = ''\n\n def __init__(self, sentences, fields=None, max_lengths=None, vocab_max_size=0, tokenizer=None):\n super().__init__()\n\n if max_lengths is None:\n max_lengths = 0\n if not isinstance(max_lengths, (tuple, list)):\n max_lengths = [max_lengths, ] * len(sentences)\n\n if fields is None:\n self.vocab = Vocab(\n special_tokens=[Dataset.PAD_TOKEN, Dataset.INIT_TOKEN, Dataset.EOS_TOKEN, Dataset.UNK_TOKEN]\n )\n self.fields = [\n Field(\n init_token=Dataset.INIT_TOKEN, eos_token=Dataset.EOS_TOKEN, pad_token=Dataset.PAD_TOKEN,\n unk_token=Dataset.UNK_TOKEN, padding=True, max_len=max_len, tokenizer=tokenizer, vocab=self.vocab\n )\n for max_len in max_lengths\n ]\n else:\n self.fields = fields\n self.vocab = self.fields[0].vocab\n\n self.sentences = [[field.preprocess(s) for s in sents] for field, sents in 
zip(self.fields, sentences)]\n\n if fields is None:\n for sents in self.sentences:\n self.vocab.add_documents(sents)\n\n if vocab_max_size != 0:\n self.vocab.prune_vocab(min_count=2, max_size=vocab_max_size)\n\n def __len__(self):\n return len(self.sentences[0])\n\n def __getitem__(self, index):\n sents = [self.sentences[k][index] for k in range(len(self.fields))]\n sents = [field.process(sent) for field, sent in zip(self.fields, sents)]\n\n return sents\n","repo_name":"jgc128/common_utils","sub_path":"common_utils/text/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"21343096770","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom rake_nltk import Rake\nfrom textblob import TextBlob\nimport profanity_check\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk import sent_tokenize\nimport re\n\nclass Analytics(object):\n \n def __init__(self):\n pass\n \n def merge_numbers_intext(self, text):\n '''\n merge adjacent numbers occureence into the text which might got \n splited coz of punctuations presence eg. 1,200,345 -> 1 200 345 \n '''\n word_list = text.split()\n new_word_list = []\n number = \"\"\n for word in word_list:\n if word.isnumeric():\n number+=word\n else:\n if len(number):\n new_word_list.append(number)\n number=\"\"\n new_word_list.append(word)\n return \" \".join(new_word_list)\n \n \n #def get_factual_info_0(text):\n # labels=[\"numbers\",\"data\",\"death\",\"recovery\",\"count\",\"discharged\",\"cases\",\"rising\",\"droping\"]\n # words = stopwords.words(\"english\")+[\"covid\",\"corona\",\"covid19\",\"19\"]\n # stemmer = PorterStemmer()\n # cleaning_lambda = lambda x: \" \".join([stemmer.stem(i) \n # for i in re.sub(\"[^a-zA-Z0-9]\", \" \", x.lower()).split()\n # if i not in words])\n # cleaned_text = cleaning_lambda(text)\n # cleaned_text = merge_numbers_intext(cleaned_text)\n # cleaned_labels = [cleaning_lambda(label) for label in labels]\n # numb_exists = re.findall(\"\\d+\",cleaned_text) # count all the numbers fro the text\n # labels_exists = re.findall(r\"({})\".format(\"|\".join(cleaned_labels)),cleaned_text) # getting lables count which might indicate factual information\n # numb_cnt = len(numb_exists)\n # labels_cnt = len(labels_exists)\n # word_cnt = len(cleaned_text.split())\n # return (numb_cnt+labels_cnt)/word_cnt if word_cnt else 0\n \n def get_factual_info(self, text):\n labels=[\"numbers\",\"data\",\"death\",\"recovery\",\"count\",\"discharged\",\"cases\",\"rising\",\"droping\"]\n words = stopwords.words(\"english\")+[\"covid\",\"corona\",\"covid19\",\"19\"]\n sents = sent_tokenize(text)\n stemmer = PorterStemmer()\n cleaning_lambda = lambda x: \" \".join([stemmer.stem(i)\n for i in re.sub(\"[^a-zA-Z0-9]\", \" \", x.lower()).split()\n if i not in words])\n \n cleaned_labels = [stemmer.stem(label.lower()) for label in labels]\n factual_info = 0\n for sent in sents:\n cleaned_text = cleaning_lambda(sent)\n cleaned_text = self.merge_numbers_intext(cleaned_text)\n numb_exists = re.findall(\"\\d+\",cleaned_text) # count all the numbers fro the text\n labels_exists = re.findall(r\"({})\".format(\"|\".join(cleaned_labels)),cleaned_text) # getting lables count which might indicate factual information\n if numb_exists or labels_exists:\n factual_info+=1\n return (factual_info/len(sents))\n \n # KeyWords\n def get_keywords(self, text):\n r = Rake(max_length = 1)\n 
r.extract_keywords_from_text(text)\n return r.get_ranked_phrases_with_scores()[:20]\n \n # KeyPhrases\n def get_keyphrases(self, text):\n r = Rake(min_length = 2, max_length = 10)\n r.extract_keywords_from_text(text)\n return r.get_ranked_phrases_with_scores()[:5]\n \n # Labeles\n def get_labels(self, text):\n labels_list = {\n \"Vaccine\":[\"vaccine\"],\n \"Lockdown\":[\"lockdown\"],\n \"Safety measures\": [\"precaution\", \"social distancing\", \"washing hands\", \"safety measures\", \"ppe\", \"mask\",\"quarantine\"], \n \"Travel\":[\"flight\", \"travel\", \"train\", \"transport\", \"visa\",\"departure\",\"arrival\"],\n \"Testing\":[\"rt-pcr\", \"pcr\", \"antigen\", \"antibody test\", \"serology test\", \"diagnostic\",\"rtpcr\"],\n \"Official announcements/Rules and regulations\":[\"official\", \"rules\", \"regulation\", \"announced\", \"announcement\", \"government\"]}\n \n stemmer = PorterStemmer()\n words = stopwords.words(\"english\")\n cleaning_lambda = lambda x: \" \".join([stemmer.stem(i) \n for i in re.sub(\"[^a-zA-Z]\", \" \", x.lower()).split() \n if i not in words])\n cleaned_text = cleaning_lambda(text)\n cleaned_labels = {key:[cleaning_lambda(word) for word in values] \n for key, values in labels_list.items()} \n labels_count = {key:\n sum([cleaned_text.count(value) for value in values]) \n for key, values in cleaned_labels.items()}\n \n return labels_count\n \n def get_info(self, df):\n new_df = df.copy()\n \n # Polarity\n new_df['polarity'] = new_df['content'].apply(lambda x : \n TextBlob(str(x).lower().replace(\"positive\",\"infected\")).sentiment.polarity)\n print(\"Collected Polarity Information\")\n \n # Subjectivity\n new_df['subjectivity'] = new_df['content'].apply(lambda x : \n TextBlob(str(x).lower().replace(\"positive\",\"infected\")).sentiment.subjectivity)\n print(\"Collected Subjectivity information\")\n \n # profanity\n new_df['profanity'] = new_df['content'].apply(lambda x : \n round(profanity_check.predict_prob([str(x)])[0],2))\n print(\"Collected Profanity information\")\n\n \n # KeyPhrases\n new_df['keywords'] = new_df['content'].apply(self.get_keywords)\n print(\"Collected Keywords\")\n \n # KeyPhrases\n new_df['keyphrases'] = new_df['content'].apply(self.get_keyphrases)\n print(\"Colected KeyPhrases\")\n \n # Factual Info\n new_df[\"factual_info\"] = new_df['content'].apply(self.get_factual_info)\n print(\"Collected Numberic Info\")\n \n # Labels information\n new_df[\"labels_info\"] = new_df['content'].apply(self.get_labels)\n print(\"Collected content info\")\n \n return new_df\n\n\n\n","repo_name":"mayurchhabra89/webscraper","sub_path":"flaskr/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29215712564","text":"from PyQt5.QtWidgets import QApplication, QWidget, QGridLayout, QHBoxLayout, QPushButton, QFileDialog\nimport sys\n\n\nclass ClassificationAI(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle('제목')\n\n self.button1 = QPushButton('open_file')\n self.button1.clicked.connect(self.button1_click)\n\n self.hbox_layout = QHBoxLayout()\n self.hbox_layout.addWidget(self.button1)\n\n self.main_layout = QGridLayout()\n self.main_layout.addLayout(self.hbox_layout, 0, 0, 1, 1)\n\n self.setLayout(self.main_layout)\n\n def button1_click(self):\n # getOpenFileName의 속성들 : self, 창 제목, 초기 이미지 폴더 지정, 필터링(선택 가능한 파일 확장자 지정)\n path, _ = QFileDialog.getOpenFileName(self, '제목', '.', 'Image File (*.*)') 
# path and _ are both variables\n        if path == '':\n            print('취소')\n        else:\n            print('PATH : ', path)\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    classification_ai = ClassificationAI()\n    classification_ai.show()\n    sys.exit(app.exec())","repo_name":"Seojun1/Object-Detection","sub_path":"lab_gui/09. file_dialog.py","file_name":"09. file_dialog.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"5683164926","text":"from django.db import models\n\n# Create your models here.\n\nclass Shift(models.Model):\n\n    months = (\n        ('january','January'),\n        ('february','February'),\n        ('march','March'),\n        ('april','April'),\n        ('may','May'),\n        ('june','June'),\n        ('july','July'),\n        ('august','August'),\n        ('september','September'),\n        ('october','October'),\n        ('november','November'),\n        ('december','December')\n    )\n\n    from employee_management_system.models import Employee\n    s_year = models.CharField(max_length=4 , null=True)\n    s_date = models.DateField(null=False)\n    s_start = models.IntegerField(null=False)\n    s_end = models.IntegerField(null=False)\n    s_month = models.CharField(choices= months , max_length= 200 , null=True)\n    s_employee_shift = models.ForeignKey(Employee, on_delete=models.DO_NOTHING , default=None)","repo_name":"lasith98/wecare","sub_path":"hrms/shift_management_system/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"23954656368","text":"# a) Write a program that, given a number, indicates whether it is an ABUNDANT number.\r\n\r\n\r\nn=int(input(\"Ingresar un numero positivo\"))\r\n\r\ncont=1\r\nsumadivisores=0\r\n\r\nwhile cont<n:\r\n    if n%cont==0:\r\n        sumadivisores=sumadivisores+cont\r\n    cont=cont+1\r\n\r\nif sumadivisores > n:\r\n    print(\"el numero\", n, \"es abundante\")\r\nelse:\r\n    print(\"el numero\", n, \"no es abundante\")","repo_name":"Kidje3/Introduccion_a_la_Programacion","sub_path":"ejercicios 3-Ciclos/cuestionario for/cuestionario for - pregunta 1 a.py","file_name":"cuestionario for - pregunta 1 a.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"9946115454","text":"\"\"\"Training and evaluating the classification model.\"\"\"\n# pyright: reportMissingImports=false\n# pylint: disable=E0401\nimport joblib\nimport typer\nfrom commitcanvas_models.train_model import model as md\nfrom reporover import reporover\n\napp = typer.Typer()\n\n\n@app.callback()\ndef callback():\n    \"\"\"Please see the documentation for acceptable command line options.\"\"\"\n\n\nTYPES = \"chore,docs,feat,fix,refactor,test\"\n\n\n@app.command()\ndef train(url: str, save: str, types: str = TYPES):\n    \"\"\"Train the model for project specific mode.\"\"\"\n    collected_data = reporover.collect(url)\n\n    data = md.data_prep(collected_data, types)\n\n    train_features, train_labels = md.feature_label_split(data)\n\n    pipeline = md.build_pipline()\n    pipeline = pipeline.fit(train_features, train_labels)\n\n    print(\"saving the model\")\n    joblib.dump(pipeline, \"{}/trained_model.pkl\".format(save))\n    print(\"saving model complete\")\n","repo_name":"CommittedTeam/CommitCanvas","sub_path":"commitcanvas/commit_label/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"73"}
+{"seq_id":"19985601639","text":"import pandas as pd\nimport numpy as np\nimport 
matplotlib.pyplot as plt\nfrom scipy.interpolate import splev, splrep\nfrom scipy import interpolate\nfrom sklearn import neighbors, datasets\nimport pickle\nimport math\n\n# def fun(x_inp, y_inp):\n\n# df = pd.read_excel(\"boundaryPoints_2.xlsx\", header=None)\n# x_points = list(df.iloc[:, 0])\n# y_points = list(df.iloc[:, 1])\n# # print(x_points)\n# # print(y_points)\n# tck = splrep(x_points, y_points)\n\n# # df_h = pd.read_csv(\"BohDataN.csv\",header=None)\n# # df_b = pd.read_csv(\"BorDataN.csv\",header=None)\n# # df_c = pd.read_csv(\"CADataN.csv\",header=None)\n\n# # for i in range(0,324):\n# # df_h[i] = df_h[i].fillna(99999)\n# # df_b[i] = df_b[i].fillna(99999)\n# # df_c[i] = df_c[i].fillna(99999)\n\n# # x1 = []\n# # x2 = []\n# # y1 = []\n# # for i in range(0,150):\n# # for j in range(0,324):\n# # if(df_h.iloc[i,j]==99999):\n# # continue\n# # else:\n# # x2.append(df_h.iloc[i,j])\n# # x1.append(df_b.iloc[i,j])\n# # y1.append(df_c.iloc[i,j])\n\n# # x2 ia hstar\n# # x1 is b star\n# # x = np.array(x1)\n# # y = np.array(x2)\n# # xg, yg = np.meshgrid(x, y,indexing='ij', sparse=True)\n# # z = np.array(y1)\n# # g = interpolate.interp2d(x, y, z, kind='quintic')\n\n# g = pickle.load(open('predictContactAngle.pkl', 'rb'))\n# result = g(x_inp, y_inp)\n\n# x_input = x_inp\n# y_input = y_inp\n# y_out = interpolate.splev(x_input, tck)\n# if y_out-y_input < 0:\n# yo = 1\n# #print(\"Unstable region\")\n# # print(y_out,y_input)\n# elif (x_input < 0 and y_input > 0) or (x_input < 0 and y_input < 0) or (x_input > 0 and y_input < 0):\n# # print(\"Unstable region\")\n# # print(y_out,y_input)\n# yoyo = 2\n\n# else:\n# #print(\"stable region\")\n# #print(\"expected output\",y_out)\n# #print(\"given output\",y_input)\n# contactAngle = g(x_input, y_input)\n# ##warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n# # we need to output znew[0]\n# # print(round(contactAngle[0],0))\n# if result[0]>=95:\n# b_star_array = np.arange(0, 1, 0.005)\n# h_star_array = np.arange(0, 2, 0.005)\n# else:\n# b_star_array = np.arange(0, 4, 0.01)\n# h_star_array = np.arange(0, 4, 0.01)\n# # h_input = 1.5\n# c_angle_input = round(contactAngle[0], 0)\n# # print(b_star_array)\n# contour_b = []\n# contour_h = []\n\n# df = pd.read_csv(\"boundaryThinFilmData.csv\", header=None)\n# x_thinFilm = list(df.iloc[1:, 0])\n# y_thinFilm = list(df.iloc[1:, 1])\n# tck1 = interpolate.splrep(x_thinFilm, y_thinFilm)\n\n# for j in h_star_array:\n# for i in b_star_array:\n# y_out1 = interpolate.splev(i, tck1)\n# if j < y_out1:\n# continue\n# y_out = interpolate.splev(i, tck)\n# if y_out-j < 0:\n# continue\n# contactangle = round(g(i, j)[0], 1)\n# if contactangle == c_angle_input:\n# contour_b.append(i)\n# contour_h.append(j)\n\n# # print(contour_b)\n# # print(contour_h)\n# fin_contourA = []\n# fin_contourB = []\n# x = 0\n# y = 0\n# x_an = 0\n# y_an = 0\n# num = 0\n# ln = len(contour_b)\n# while num < ln:\n# mn = float('inf')\n# for i, j in zip(contour_b, contour_h):\n# if (i-x)*(i-x)+(j-y)*(j-y) < mn:\n# mn = (i-x)*(i-x)+(j-y)*(j-y)\n# x_an = i\n# y_an = j\n# contour_b.remove(x_an)\n# contour_h.remove(y_an)\n# fin_contourA.append(x_an)\n# fin_contourB.append(y_an)\n# x = x_an\n# y = y_an\n# num += 1\n\n# # print(fin_contourA)\n# # print(fin_contourB)\n# # plt.plot(fin_contourA,fin_contourB)\n# # plt.show()\n# fin_contourA = [0] + fin_contourA\n# fin_contourB = [0] + fin_contourB\n# return fin_contourA, fin_contourB\n\n\n# fun(0.25, 1)\n# pickle.dump(tck1,open('contourPredict.pkl','wb'))\n# 
loaded_model=pickle.load(open('contourPredict.pkl','rb'))\n# inp=[x_input,y_input]\n# print(loaded_model.predict([inp]))\n\n# print(contour_b)\n# print(contour_h)\n\n\ndef fun(x_inp, y_inp):\n df_h = pd.read_csv(\"BohDataN.csv\",header=None)\n df_b = pd.read_csv(\"BorDataN.csv\",header=None)\n df_s = pd.read_csv(\"CADataN.csv\",header=None)\n\n g = pickle.load(open('predictContactAngle.pkl', 'rb'))\n c_angle = round(g(x_inp,y_inp)[0],0)\n\n y = df_h.to_numpy()\n x = df_b.to_numpy()\n z = df_s.to_numpy()\n\n\n fig, ax = plt.subplots(1, 1) \n # plots contour lines\n cs = ax.contour(x, y, z, [c_angle])\n\n ax.set_title('Contour Plot')\n ax.set_xlabel('b')\n ax.set_ylabel('h')\n plt.xlim([0, 3])\n plt.ylim([0, 3])\n\n\n x_coord = []\n y_coord = []\n # print(np.shape(x_coord))\n for item in cs.collections:\n for i in item.get_paths():\n v = i.vertices\n x = v[:, 0]\n x = np.array(x)\n \n y = v[:, 1]\n y = np.array(y)\n # x_coord = x_coord + x\n # y_coord = y_coord + y\n # for i in x:\n for i in range(len(x)):\n if math.isnan(y[i]) == False and math.isnan(x[i]) == False:\n x_coord.append(x[i])\n y_coord.append(y[i])\n else:\n print(x[i],y[i])\n # print(np.shape(x), np.shape(y))\n # plt.plot(x_coord,y_coord)\n # plt.show()\n # print(x_coord)\n # x_coord_new = []\n # y_coord_new = []\n \n # for i in range(len(x_coord)):\n # if math.isnan(y_coord[i]) == False:\n # x_coord_new.append(x_coord[i])\n # y_coord_new.append(y_coord[i])\n # print(x_coord_new,y_coord)\n return x_coord,y_coord\n plt.show()","repo_name":"Kush223/Engg_Practicum","sub_path":"contourPredict.py","file_name":"contourPredict.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1922483134","text":"from spack.package import *\n\n\nclass Tdengine(CMakePackage):\n \"\"\"An open-source big data platform designed and optimized for the\n Internet of Things (IoT).\"\"\"\n\n homepage = \"https://github.com/taosdata/TDengine\"\n url = \"https://github.com/taosdata/TDengine/archive/ver-2.0.2.2.tar.gz\"\n\n version(\"2.0.3.2\", sha256=\"3eb8df894998d5592cce377b4f7e267972aee8adf9fc1ce60d1af532ffa9c1c6\")\n version(\"2.0.3.1\", sha256=\"69418815afcac8051f1aab600415669003b4aeec4ec2aaf09cab24636edaf51f\")\n\n @when(\"target=aarch64:\")\n def cmake_args(self):\n args = [\"-DCPUTYPE=aarch64\"]\n return args\n\n def install(self, spec, prefix):\n install_tree(self.build_directory + \"/build\", prefix)\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/tdengine/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"17919999058","text":"# 406 - Trapping Rain Water\n\n# Given an array arr[] of N non-negative integers representing height of \n# blocks at index i as Ai where the width of each block is 1. \n# Compute how much water can be trapped in between blocks after raining.\n\n\n\n# ex)\n# Input: [3, 0, 2, 0, 4] \n# Output: 7\n\n# Basic Insight:\n# An element of the array can store water if there are higher bars on left and right. \n# The amount of water to be stored in every element can be found out by finding the heights of bars on the left and right sides. 
\n# The idea is to compute the amount of water that can be stored in every element of the array.\n\n\n# Constraints\n# Advanced\t\t\t Insane\n# Time Complexity:\t\t\t O(N)\t\t\t\tO(N)\n# Auxiliary Space Complexity: \t\tO(N)\t\t\t\tO(1)\n\n# Solution\n# Instantiate an integer total to track rain water.\n# Create an array called max_left to track the maximum height to the left of each index, and populate that array.\n# The example above would be: [0, 3, 3, 3, 3]\n# Also create an array called max_right to track the maximum height to the right of each index, and populate that array\n# The example above would be: [4, 4, 4, 4, 0]\n# Loop through the input array with index i At each index, subtract the current value from the lower of the left_max and right_max If the result is greater than zero, add it to the total Return total at the end.\n# Resources: https://www.geeksforgeeks.org/trapping-rain-water/\n\n\n\n# Method 1: This is a simple solution to the above problem.\n\n# Approach: The idea is to traverse every array element and find the highest bars on left and right sides. Take the smaller of two heights. The difference between the smaller height and height of the current element is the amount of water that can be stored in this array element.\n# Algorithm:\n# 1. Traverse the array from start to end.\n# 2. For every element, traverse the array from start to that index and find the maximum height (a) and traverse the array from the current index to end and find the maximum height (b).\n# 3. The amount of water that will be stored in this column is min(a,b) – array[i], add this value to total amount of water stored\n# 4. Print the total amount of water stored.\n\n# Function to return the maximum \n# water that can be stored \n\n# Complexity Analysis:\n# Time Complexity: O(n2).\n# There are two nested loops traversing the array, So time Complexity is O(n2).\n# Space Complexity: O(1).\n# No extra space required.\n# def maxWater(arr) : \n# # To store the maximum water \n# # that can be stored \n# res = 0; \n# # For every element of the array \n# for i in range(1, len(arr)-1) : \n \n# # Find the maximum element on its left \n# left = arr[i] \n# for j in range(i) : \n# left = max(left, arr[j]); \n# # Find the maximum element on its right \n# right = arr[i] \n# for j in range(i + 1 , len(arr)) : \n# right = max(right, arr[j]) \n# # Update the maximum water \n# res = res + (min(left, right) - arr[i])\n# return res \n# print(maxWater([3,0,2,0,4]))\n\n# Method 2: This is an efficient solution to the above problem.\n\n# Approach: In the previous solution, to find the highest bar on the left and right, array traversal is needed which reduces the efficiency of the solution. To make this efficient one must pre-compute the highest bar on the left and right of every bar in linear time. Then use these pre-computed values to find the amount of water in every array element.\n# Algorithm:\n# 1. Create two array left and right of size n. create a variable max_ = INT_MIN.\n# 2. Run one loop from start to end. In each iteration update max_ as max_ = max(max_, arr[i]) and also assign left[i] = max_\n# 3. Update max_ = INT_MIN.\n# 4. Run another loop from end to start. In each iteration update max_ as max_ = max(max_, arr[i]) and also assign right[i] = max_\n# 5. Traverse the array from start to end.\n# 6. The amount of water that will be stored in this column is min(a,b) – array[i],(where a = left[i] and b = right[i]) add this value to total amount of water stored\n# 7. 
Print the total amount of water stored.\n\n# Complexity Analysis:\n# Time Complexity: O(n).\n# Only one traversal of the array is needed, So time Complexity is O(n).\n# Space Complexity: O(n).\n# Two extra array is needed each of size n.\n\n\n\n# Python program to find maximum amount of water that can \n# be trapped within given set of bars. \n \ndef findWater(arr, n): \n \n # left[i] contains height of tallest bar to the \n # left of i'th bar including itself \n left = [0]*n \n \n # Right [i] contains height of tallest bar to \n # the right of ith bar including itself \n right = [0]*n \n \n # Initialize result \n water = 0\n \n # Fill left array \n left[0] = arr[0] \n for i in range( 1, n): \n left[i] = max(left[i-1], arr[i]) \n \n # Fill right array \n right[n-1] = arr[n-1] \n for i in range(n-2, -1, -1): \n right[i] = max(right[i + 1], arr[i]); \n \n # Calculate the accumulated water element by element \n # consider the amount of water on i'th bar, the \n # amount of water accumulated on this particular \n # bar will be equal to min(left[i], right[i]) - arr[i] . \n for i in range(0, n): \n water += min(left[i], right[i]) - arr[i] \n \n return water \n \n \n# Driver program \n \narr = [3,0,2,0,4] \nn = len(arr) \nprint(\"Maximum water that can be accumulated is\", findWater(arr, n)) \n \n \n# Space Optimization for above Solution: \n# Instead of maintaing two arrays of size n for storing left and right max of each element, \n# maintain two variables to store the maximum till that point. \n# Since water trapped at any element = min(max_left, max_right) – arr[i]. \n# Calculate water trapped on smaller element out of A[lo] and A[hi] first and move the pointers till lo doesn’t cross hi.\n# Implementation:\n\n# Python program to find \n# maximum amount of water that can \n# be trapped within given set of bars. \n# Space Complexity : O(1) \n\n\n# Complexity Analysis:\n# Time Complexity: O(n).\n# Only one traversal of the array is needed.\n# Auxiliary Space: O(1).\n# As no extra space is required.\ndef findWater(arr, n): \n \n # initialize output \n result = 0\n \n # maximum element on left and right \n left_max = 0\n right_max = 0\n \n # indices to traverse the array \n lo = 0\n hi = n-1\n \n while(lo <= hi): \n \n if(arr[lo] < arr[hi]): \n \n if(arr[lo] > left_max): \n \n # update max in left \n left_max = arr[lo] \n else: \n \n # water on curr element = max - curr \n result += left_max - arr[lo] \n lo+= 1\n \n else: \n \n if(arr[hi] > right_max): \n # update right maximum \n right_max = arr[hi] \n else: \n result += right_max - arr[hi] \n hi-= 1\n \n return result \n \n# Driver program \n \narr = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1] \nn = len(arr) \n \nprint(\"Maximum water that can be accumulated is \", \n findWater(arr, n)) \n \n\n\n\n\n \n# Method 3:Here another efficient solution has been shown.\n\n# Approach : The concept here is that if there is a larger wall to the right then the water can be retained with height equal to the smaller wall on the left. If there are no larger walls to the right then start from the left. There must be a larger wall to the left now. Let’s take an example if the heights are {….,3, 2, 1, 4,….}, So here 3 and 4 are boundaries the heights 2 and 1 are submerged and cannot act as boundaries. So at any point or index knowing the previous boundary is sufficient if there is a higher or equal length boundary in the remaining part of the array. 
If not then traverse the array backwards and now must be a larger wall to the left.\n# Algorithm :\n # Loop from index 0 to the end of the given array.\n # If a wall greater than or equal to the previous wall is encountered then make note of the index of that wall in a var called prev_index.\n # Keep adding previous wall’s height minus the current (ith) wall to the variable water.\n # Have a temporary variable that stores the same value as water.\n # If no wall greater than or equal to the previous wall is found then quit.\n # If prev_index < size of the input array then subtract the temp variable from water, and loop from end of the input array to prev_index and find a wall greater than or equal to the previous wall (in this case, the last wall from backwards).\n# Implementation\n\n\n# Pythpn3 implementation of the approach \n \n# Function to return the maximum \n# water that can be stored \ndef maxWater(arr, n): \n size = n - 1\n \n # Let the first element be stored as \n # previous, we shall loop from index 1 \n prev = arr[0] \n \n # To store previous wall's index \n prev_index = 0\n water = 0\n \n # To store the water until a larger wall \n # is found, if there are no larger walls \n # then delete temp value from water \n temp = 0\n for i in range(1, size + 1): \n \n # If the current wall is taller than \n # the previous wall then make current \n # wall as the previous wall and its \n # index as previous wall's index \n # for the subsequent loops \n if (arr[i] >= prev): \n prev = arr[i] \n prev_index = i \n \n # Because larger or same height wall is found \n temp = 0\n else: \n \n # Since current wall is shorter than \n # the previous, we subtract previous \n # wall's height from the current wall's \n # height and add it to the water \n water += prev - arr[i] \n \n # Store the same value in temp as well \n # If we dont find any larger wall then \n # we will subtract temp from water \n temp += prev - arr[i] \n \n # If the last wall was larger than or equal \n # to the previous wall then prev_index would \n # be equal to size of the array (last element) \n # If we didn't find a wall greater than or equal \n # to the previous wall from the left then \n # prev_index must be less than the index \n # of the last element \n if (prev_index < size): \n \n # Temp would've stored the water collected \n # from previous largest wall till the end \n # of array if no larger wall was found then \n # it has excess water and remove that \n # from 'water' var \n water -= temp \n \n # We start from the end of the array, so previous \n # should be assigned to the last element \n prev = arr[size] \n \n # Loop from the end of array up to the 'previous index' \n # which would contain the \"largest wall from the left\" \n for i in range(size, prev_index - 1, -1): \n \n # Right end wall will be definitely smaller \n # than the 'previous index' wall \n if (arr[i] >= prev): \n prev = arr[i] \n else: \n water += prev - arr[i] \n \n # Return the maximum water \n return water \n \n# Driver code \narr = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1] \nn = len(arr) \nprint(maxWater(arr, n)) \n\n# Complexity Analysis:\n# Time Complexity: O(n).\n# As only one traversal of the array is needed.\n# Auxiliary Space: O(1).\n# As no extra space is required.","repo_name":"akimi-yano/algorithm-practice","sub_path":"oc/trappingRainWater.py","file_name":"trappingRainWater.py","file_ext":"py","file_size_in_byte":11455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} 
+{"seq_id":"8667450928","text":"from openerp import tools\nfrom openerp.osv import osv, fields\nfrom openerp.tools.translate import _\n\n\nclass InviteWizard(osv.osv_memory):\n\n \"\"\" Wizard to invite partners and make them followers. \"\"\"\n _inherit = 'mail.wizard.invite'\n _description = 'Invite wizard'\n\n def default_get(self, cr, uid, _fields, context=None):\n '''\n Creating in an smart way the default message with the titles of\n elements to share.\n '''\n\n result = super(InviteWizard, self).default_get(\n cr, uid, _fields, context=context)\n model_obj = self.pool.get(result.get('res_model', False) or\n context.get('active_model'))\n message = '
<div>'\n        if len(context.get('active_ids', [])) >= 1:\n            result.update({'res_model': context.get('active_model')})\n            contex_signup = dict(context, signup_valid=True)\n            for ids in context.get('active_ids', []):\n                document_name = model_obj.name_get(cr, uid, [ids],\n                                                   context=context)[0][1]\n                url = self.pool['res.partner']._get_signup_url_for_action(cr, uid, [ids], action='mail.action_mail_redirect', model=self._name, res_id=ids, context=contex_signup)[ids] # noqa\n                message_new = '''<li><a href=\"{url}\">{name}</a></li>\n                '''.format(url=url, name=document_name)\n                message = message + message_new\n            message = _(\n                '<div><p>You have been invited to follow:.</p><p>%s</p></div>' %\n                message)\n            result['message'] = message\n        elif 'message' in _fields and result.get('res_model') and \\\n                result.get('res_id'):\n            document_name = self.pool.get(result.get('res_model')).name_get(\n                cr, uid, [result.get('res_id')], context=context)[0][1]\n            message = _(\n                '<div><p>You have been invited to follow:.</p>
%s' %\n                document_name)\n            result['message'] = message\n        return result\n\n    _columns = {\n\n        'groups': fields.boolean('Groups', help='Used to add a followers '\n                                 'group from mail group '\n                                 'and not for Users '\n                                 'directly'),\n        'partners': fields.boolean('Partners', help='Used to add a follower '\n                                   'group by users'),\n        'remove': fields.boolean('Remove Partners',\n                                 help='Used to remove followers'),\n        'bring_partners': fields.boolean('Bring Partners',\n                                         help='This field brings all partners '\n                                         'of the records selected'),\n        'p_a_g': fields.boolean('Group and Partner', help='Used to add a '\n                                 'followers for partner '\n                                 'and group at the same '\n                                 'time'),\n\n\n        'mail_groups': fields.many2many('mail.group', string='Mail Groups',\n                                        help='Select the mail.groups that you '\n                                        'want add with followers'),\n\n    }\n\n    def mail_add_followers_multirecord(self, cr, uid, ids, context=None):\n        '''\n        Overwrite the original method to work with many documents at the same\n        time and add followers in each.\n\n        Each id is taken from the context field\n        '''\n        res = {'type': 'ir.actions.act_window_close'}\n        for wizard in self.browse(cr, uid, ids, context=context):\n            if context.get('second', False):\n                for res_id in context.get('active_ids', []):\n                    model_obj = self.pool.get(wizard.res_model)\n                    document = model_obj.browse(cr, uid, res_id,\n                                                context=context)\n                    new_follower_ids = [p.id for p in wizard.partner_ids\n                                        if p.id not in\n                                        document.message_follower_ids]\n\n                    # filter partner_ids to get the new followers, to avoid\n                    # sending email to already following partners\n                    model_obj.message_subscribe(cr, uid, [res_id],\n                                                new_follower_ids,\n                                                context=context)\n\n                    # send an email only if a personal message exists\n                    # when deleting the message, cleditor keeps a <br>\n                    # add signature\n                    if wizard.message and not wizard.message == '<br>
    ':\n user_id = self.pool.get(\"res.users\").\\\n read(cr, uid, [uid],\n fields=[\"signature\"],\n context=context)[0]\n\n signature = user_id and user_id[\"signature\"] or ''\n if signature:\n wizard.message = \\\n tools.\\\n append_content_to_html(wizard.message,\n signature,\n plaintext=True,\n container_tag='div')\n # FIXME 8.0: use notification_email_send, send a wall\n # message and let mail handle email notification +\n # message box\n for follower_id in new_follower_ids:\n mail_mail = self.pool.get('mail.mail')\n # the invite wizard should create a private message\n # not related to any object -> no model, no res_id\n mail_id = mail_mail.\\\n create(cr, uid, {\n 'model': wizard.res_model,\n 'res_id': res_id,\n 'subject': 'Invitation to follow %s' %\n document.name_get()[0][1],\n 'body_html': '%s' % wizard.message,\n 'auto_delete': True},\n context=context)\n mail_mail.send(cr, uid, [mail_id],\n recipient_ids=[follower_id],\n context=context)\n else:\n res = super(InviteWizard, self).\\\n mail_add_followers_multirecord(cr, uid, ids,\n context=context)\n\n return res\n\n def remove_followers(self, cr, uid, ids, context=None):\n '''\n Overwrite the original model work with many documents at the same time\n and add followers in eech.\n\n Each id is get by context field\n '''\n res = {'type': 'ir.actions.act_window_close'}\n for wizard in self.browse(cr, uid, ids, context=context):\n for res_id in context.get('active_ids', []):\n model_obj = self.pool.get(wizard.res_model)\n document = model_obj.browse(cr, uid, res_id, context=context)\n if not wizard.bring_partners:\n new_follower_ids = [p.id for p in wizard.partner_ids]\n follower_ids = [i.id for i in\n document.message_follower_ids]\n remove_ids = list(set(follower_ids) -\n set(new_follower_ids))\n document.write({'message_follower_ids': [(6, 0,\n remove_ids)]})\n else:\n new_follower_ids = [p.id for p in wizard.partner_ids]\n remove_ids = [i.id for i in document.message_follower_ids\n if i.id in new_follower_ids]\n document.write({'message_follower_ids': [(6, 0,\n remove_ids)]})\n\n return res\n\n def load_partners(self, cr, uid, ids, mail_groups, check, check2,\n context=None):\n ''' Used to add all partnes in mail.group selected in the view and\n return it\n '''\n if context is None:\n context = {}\n res = {'value': {}}\n mail_obj = self.pool.get('mail.group')\n partner_ids = []\n\n if check or check2:\n for group in mail_groups:\n group_ids = group and len(group) == 3 and group[2] or []\n for groups in mail_obj.read(cr, uid,\n group_ids,\n ['message_follower_ids'],\n context):\n partner_ids += groups.get('message_follower_ids', [])\n\n if partner_ids:\n res['value'].update({'partner_ids': partner_ids})\n return res\n\n def bring_partner(self, cr, uid, ids, context=None):\n ''' Used to add all partnes in mail.group selected in the view and\n return it\n '''\n if context is None:\n context = {}\n res = {'value': {}}\n context.update({'remove': True})\n model = context.get('active_model')\n model_obj = self.pool.get(model)\n data_obj = self.pool.get('ir.model.data')\n partner_ids = []\n for res_id in context.get('active_ids'):\n partner_ids += [i.id for i in\n model_obj.\n browse(cr, uid, res_id,\n context=context).message_follower_ids\n if i.id not in partner_ids]\n\n partner_ids = list(set(partner_ids))\n\n self.write(cr, uid, ids, {'partner_ids': [(6, 0, partner_ids)],\n 'partners': True,\n 'bring_partners': True},\n context=context)\n\n view_id = data_obj.get_object(cr, uid,\n 'mail_add_followers_multirecord',\n 
'mail_add_followers_multirecord_'\n 'wizard_invite_form')\n if partner_ids:\n res['value'].update({'partner_ids': partner_ids})\n return {\n 'type': 'ir.actions.act_window',\n 'name': \"Remove Partners\",\n 'res_model': self._name,\n 'res_id': ids[0],\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': view_id.id,\n 'target': 'new',\n 'nodestroy': True,\n 'context': context,\n }\n","repo_name":"OpenBusinessSolutions/odoo-karina","sub_path":"addons-vauxoo/mail_add_followers_multirecord/wizard/add_followers.py","file_name":"add_followers.py","file_ext":"py","file_size_in_byte":10975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"9765323242","text":"\n#Mineria de datos\n#POS TAGGER - Formato TF-IDF\n\n#Importaciones:\nimport nltk\n# remove stop words (meaningless words for the language-meaning)\nfrom nltk.corpus import stopwords\nimport os\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nfrom pandas import DataFrame\n\n\n#Funcion que genera el POS tag de una oracion\ndef tag(s):\n\tt = nltk.word_tokenize(s)\n\treturn nltk.pos_tag(t)\n\n#Funcion que abre un archivo y devuelve su contenido\ndef gettext(f):\n\tfl = open(f,'r')\n\ttxt = \"\"\n\tfor line in fl.readlines():\n\t\t# use regular expressions to replace email addresses, URLs, phone numbers, other numbers\n\n\t\t# email addresses with 'email'\n\t\tprocessed = line.replace(r'^.+@[^\\.].*\\.[a-z]{2,}$', 'emailaddress')\n\t\t# webadress\n\t\tprocessed = processed.replace(r'^\\(?[\\d]{3}\\)?[\\s-]?[\\d]{3}[\\s-]?[\\d]{4}$', 'phonenumbr')\n\t\t# phonenumbers\n\t\tprocessed = processed.replace(r'^\\(?[\\d]{3}\\)?[\\s-]?[\\d]{3}[\\s-]?[\\d]{4}$', 'phonenumbr')\n\t\t# numbers \n\t\tprocessed = processed.replace(r'\\d+(\\.\\d+)?', 'numbr')\n\t\t# Remove punctuation\n\t\tprocessed = processed.replace(r'[^\\w\\d\\s]', ' ')\n\t\t# Replace whitespace between terms with a single space\n\t\tprocessed = processed.replace(r'\\s+', ' ')\n\t\t# Remove leading and trailing whitespace\n\t\tprocessed = processed.replace(r'^\\s+|\\s+?$', '')\n\t\t# change words to lower case - Hello, HELLO, hello are all the same word\n\t\tprocessed = processed.lower()\n\t\t# remove stopwords\n\t\tstop_words = set(stopwords.words('english'))\n\t\t#processed = processed.apply(lambda x: ' '.join(term for term in x.split() if term not in stop_words))\n\t\ttxt = txt + processed + \"\\n\"\n\n\treturn txt\n\n\n#Funcion que escribe un archivo con el resultado del pos tag\ndef wpos(f,s):\n\tfl = open(f,'w')\n\tfl.write(s)\n\n\n\ndef getCode(thelist):\n\t#returnlist\n\treturnlist = []\n\t# first define a dictionary to get the code of every POS\n\tcode_dict = {'CC':1., 'CD': 2., 'DT': 3., 'EX': 4., 'FW': 5.,\n\t\t\t\t 'IN': 6., 'JJ': 7., 'JJR': 8., 'JJS': 9., 'LS': 10.,\n\t\t\t\t 'MD': 11., 'NN': 12., 'NNS': 13., 'NNP': 14., 'NNPS': 15.,\n\t\t\t\t 'PDT': 16., 'POS': 17., 'PRP': 18., 'PRP$': 19.,\n\t\t\t\t 'RB': 20., 'RBR': 21., 'RBS': 22., 'RP': 23., 'SYM': 24.,\n\t\t\t\t 'TO': 25., 'UH': 26., 'VB': 27., 'VBD': 28., 'VBG': 29.,\n\t\t\t\t 'VBN': 30., 'VBP':31., 'VBZ': 32., 'WDT': 33., 'WP': 34., \n\t\t\t\t 'WP$': 35., 'WRB': 36.}\n\tfor pair in thelist:\n\t\ttry:\n\t\t\telement = pair[1]\n\t\t\treturnlist.append(code_dict[element])\t\t\n\t\texcept:\n\t\t\telement = 0\n\t\t\n\treturn returnlist\n\ndef padding(listoflist, list1, list2, list3):\n\treturnlist = []\n\tmaxlongitude = 0\n\n\t# this will determine the list's maximum size\n\tfor i in list1:\n\t\tnewlongitude = len(i)\n\t\tif newlongitude > 
maxlongitude:\n\t\t\tmaxlongitude = newlongitude\n\n\tfor i in list2:\n\t\tnewlongitude = len(i)\n\t\tif newlongitude > maxlongitude:\n\t\t\tmaxlongitude = newlongitude\n\n\tfor i in list3:\n\t\tnewlongitude = len(i)\n\t\tif newlongitude > maxlongitude:\n\t\t\tmaxlongitude = newlongitude\n\n\tfor j in listoflist:\n\t\tsize = len(j)\n\t\twhile(size < maxlongitude):\n\t\t\tj.append(0.0)\n\t\t\tsize = size + 1\n\t\treturnlist.append(j)\n\n\treturn returnlist\n\ndef addLabelAtTheEnd(listOfList, label):\n\n\tfor sublist in listOfList:\n\t\tsublist.append(label)\n\n\treturn listOfList\n\n\ndef transformToDataFrame(listOfList):\n\n\tdf = DataFrame(listOfList)\n\treturn df\n\ndef concatenateLists(list0, list1, list2):\n\tnewlist = []\n\tfor i in list0:\n\t\tnewlist.append(i)\n\tfor i in list1:\n\t\tnewlist.append(i)\n\tfor i in list2:\n\t\tnewlist.append(i)\n\n\treturn newlist\n\n\ndef normalize(listOfList):\n\t\n\treturnlist = []\n\n\t\n\tfor sublist in listOfList:\n\t\tnewlist = []\n\t\tfor i in sublist:\n\t\t\telem = i / 36.0\n\t\t\tnewlist.append(elem)\n\n\t\treturnlist.append(newlist)\n\n\treturn returnlist\n\n\ndef main():\n\t#script. Main?\n\t#Escribimos la primera linea del csv\n\tst = \"Doc_ID,TAGS,CLASS\\n\"\n\n\t#Genera una lista con los archivos de la carpeta 1\n\tdirname = \"corp/1\"\n\tfls = os.listdir(dirname)\n\ti = 0\n\tlistoflist = []\n\tlist0 = []\n\tfor f in fls:\n\t\t#Guardamos el id del texto\n\t\t#i = i + 1\n\t\t#Obtenemos los tags para f\n\t\ttags = tag(gettext(\"corp/1/\" + f))\n\t\t#Creamos un dicionario con los tags obtenidos\n\t\td = dict(tags)\n\t\t#Creamos un string auxiliar con los tags que contenia el texto\n\t\tlists = list(gettext(\"corp/1/\" + f).split(\" \"))\n\t\tlistoflist.append(lists)\n\t\t# procesar los textos separados\n\tfor li in listoflist:\n\t\tll = []\n\t\tfor j in li:\n\t\t\tt = nltk.word_tokenize(j)\n\t\t\tpair = nltk.pos_tag(t)\n\t\t\tll.append(next(iter(pair), None))\n\t\n\t\t#print(ll)\n\t\tlist0.append(ll)\n\t#corpus 2\n\tdirname = \"corp/2\"\n\tfls = os.listdir(dirname)\n\ti = 0\n\tlistoflist = []\n\tlist1 = []\n\tfor f in fls:\n\t\t#Guardamos el id del texto\n\t\t#i = i + 1\n\t\t#Obtenemos los tags para f\n\t\ttags = tag(gettext(\"corp/2/\" + f))\n\t\t#Creamos un dicionario con los tags obtenidos\n\t\td = dict(tags)\n\t\t#Creamos un string auxiliar con los tags que contenia el texto\n\t\tlists = list(gettext(\"corp/2/\" + f).split(\" \"))\n\t\tlistoflist.append(lists)\n\t# procesar los textos separados\n\tfor li in listoflist:\n\t\tll = []\n\t\tfor j in li:\n\t\t\tt = nltk.word_tokenize(j)\n\t\t\tpair = nltk.pos_tag(t)\n\t\t\tll.append(next(iter(pair), None))\n\t\n\t\t#print(ll)\n\t\tlist1.append(ll)\n\n\n\t#corpus 2\n\tdirname = \"corp/3\"\n\tfls = os.listdir(dirname)\n\ti = 0\n\tlistoflist = []\n\tlist2 = []\n\tfor f in fls:\n\t\t#Guardamos el id del texto\n\t\t#i = i + 1\n\t\t#Obtenemos los tags para f\n\t\ttags = tag(gettext(\"corp/3/\" + f))\n\t\t#Creamos un dicionario con los tags obtenidos\n\t\td = dict(tags)\n\t\t#Creamos un string auxiliar con los tags que contenia el texto\n\t\tlists = list(gettext(\"corp/3/\" + f).split(\" \"))\n\t\tlistoflist.append(lists)\n\t# procesar los textos separados\n\tfor li in listoflist:\n\t\tll = []\n\t\tfor j in li:\n\t\t\tt = nltk.word_tokenize(j)\n\t\t\tpair = nltk.pos_tag(t)\n\t\t\tll.append(next(iter(pair), None))\n\t\n\t\t#print(ll)\n\t\tlist2.append(ll)\n\n\n\tcodifiedList0 = []\n\tcodifiedList1 = []\n\tcodifiedList2 = []\n\tfor li in list0:\n\t\tnewlist = 
getCode(li)\n\t\tcodifiedList0.append(newlist)\n\tfor li in list1:\n\t\tnewlist = getCode(li)\n\t\tcodifiedList1.append(newlist)\n\tfor li in list2:\n\t\tnewlist = getCode(li)\n\t\tcodifiedList2.append(newlist)\n\n\n\n\t# padding\n\tcodifiedList0 = padding(codifiedList0, codifiedList0, codifiedList1, codifiedList2)\n\tcodifiedList1 = padding(codifiedList1, codifiedList0, codifiedList1, codifiedList2)\n\tcodifiedList2 = padding(codifiedList2, codifiedList0, codifiedList1, codifiedList2)\n\t# normalize using minmax\n\tcodifiedList0 = normalize(codifiedList0)\n\tcodifiedList1 = normalize(codifiedList1)\n\tcodifiedList2 = normalize(codifiedList2)\n\n\n\t# Add label at the end.\n\tcodifiedList0 = addLabelAtTheEnd(codifiedList0, 1.0)#\n\tcodifiedList1 = addLabelAtTheEnd(codifiedList1, 2.0)#\n\tcodifiedList2 = addLabelAtTheEnd(codifiedList2, 3.0)#\n\t\n\t\n\t# Concatenate all the lists in one big list\n\tconcatenated = concatenateLists(codifiedList0, codifiedList1, codifiedList2)\n\n\t\n\n\t\n\t\n\t\n\t\n\n\t#transform to dataframes:\n\tdf \t\t\t= transformToDataFrame(concatenated)\n\n\tdf_shuffled = df.sample(frac = 1)\n\n\tdf_shuffled.to_csv(\"outcome.csv\") \n\n\n\tprint(df_shuffled)\n\n\n\t\n\nif __name__ ==\"__main__\":\n\tmain()\n\n\n\n","repo_name":"flovera1/complejidad","sub_path":"Proyecto_complejidadTexto/tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"35515515024","text":"def findContentChildren(g: [int], s: [int]):\n g.sort()\n s.sort()\n i, j = 0, 0\n while i < len(g) and j < len(s):\n if s[j] >= g[i]:\n i += 1\n j += 1\n else:\n j += 1\n return i\n\na = [5, 4]\nb = [1, 2, 3]\nc = findContentChildren(a, b)\nprint(c)","repo_name":"zhuzhu18/leetcode","sub_path":"455分发饼干.py","file_name":"455分发饼干.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38314094332","text":"#Uses python3\n\nimport sys\n\n# MY CODE STARTS HERE\nclass Graph:\n def __init__(self, adj_graph):\n self.adj = adj_graph\n self.reset()\n def reset(self):\n self.visited = [False] * len(self.adj)\n def isConnected(self, x, y):\n self.reset()\n self.Explore(x)\n return self.visited[y]\n def Explore(self, v):\n self.visited[v] = True\n for edge in self.adj[v]:\n if not self.visited[edge]:\n self.Explore(edge)\n \n# MY CODE ENDS HERE\n\ndef reach(adj, x, y):\n #write your code here\n graph = Graph(adj)\n if graph.isConnected(x, y):\n return 1\n return 0\n\nif __name__ == '__main__':\n given_input = sys.stdin.read()\n data = list(map(int, given_input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))\n x, y = data[2 * m:]\n adj = [[] for _ in range(n)]\n x, y = x - 1, y - 1\n for (a, b) in edges:\n adj[a - 1].append(b - 1)\n adj[b - 1].append(a - 1)\n print(reach(adj, x, y))\n","repo_name":"mhornbacher/coursera","sub_path":"data-structures-and-algorithms/graphs/reachability.py","file_name":"reachability.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"7139315654","text":"import sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import animation\n\n\ndef rho_red_light(nx, rho_max, rho_in):\n rho = rho_max * np.ones(nx)\n rho[:(nx - 1) * 3.0 / 4.0] = rho_in\n return rho\n\n\ndef computeF(u_max, rho_max, rho):\n 
return u_max * rho * (1 - rho / rho_max)\n\n\ndef godunov(rho, nt, dt, dx, rho_max, V_max):\n rho_n = np.zeros((nt, len(rho)))\n rho_n[:, :] = rho.copy()\n\n rho_plus = np.zeros_like(rho)\n rho_minus = np.zeros_like(rho)\n flux = np.zeros_like(rho)\n\n for t in xrange(1, nt):\n rho_plus[:-1] = rho[1:]\n rho_minus = rho.copy()\n flux = 0.5 * (computeF(V_max, rho_max, rho_minus) + computeF(V_max, rho_max, rho_plus) + (dx / dt) * (rho_minus - rho_plus))\n rho_n[t, 1:-1] = rho[1:-1] + (dt / dx) * (flux[:-2] - flux[1:-1])\n rho_n[t, 0] = rho[0]\n rho_n[t, -1] = rho[-1]\n rho = rho_n[t].copy()\n\n return rho_n\n\n\ndef muscl(rho, nt, dt, dx, rho_max, V_max):\n rho_n = np.zeros((nt, len(rho)))\n rho_n[:, :] = rho.copy()\n\n rho_plus = np.zeros_like(rho)\n rho_minus = np.zeros_like(rho)\n flux = np.zeros_like(rho)\n rho_star = np.zeros_like(rho)\n\n for t in xrange(1, nt):\n sigma = minmod(rho, dx)\n rho_left = rho + sigma * dx / 2.0\n rho_right = rho - sigma * dx / 2.0\n \n flux_left = computeF(V_max, rho_max, rho_left) \n flux_right = computeF(V_max, rho_max, rho_right)\n flux[:-1] = 0.5 * (flux_right[1:] + flux_left[:-1] - (dx / dt) * (rho_right[1:] - rho_left[:-1]))\n rho_star[1:-1] = rho[1:-1] + (dt / dx) * (flux[:-2] - flux[1:-1])\n\n\n rho_star[0] = rho[0]\n rho_star[-1] = rho[-1]\n\n sigma = minmod(rho_star, dx)\n rho_left = rho_star + sigma * dx / 2.0\n rho_right = rho_star - sigma * dx / 2.0\n\n flux_left = computeF(V_max, rho_max, rho_left)\n flux_right = computeF(V_max, rho_max, rho_right)\n flux[:-1] = 0.5 * (flux_right[1:] + flux_left[:-1] - (dx / dt) * (rho_right[1:] - rho_left[:-1]))\n rho_n[t, 1:-1] = 0.5 * (rho[1:-1] + rho_star[1:-1] + (dt / dx) * (flux[:-2] - flux[1:-1]))\n\n rho_n[t, 0] = rho[0]\n rho_n[t, -1] = rho[-1]\n rho = rho_n[t].copy()\n\n return rho_n\n\n\ndef minmod(e, dx):\n sigma = np.zeros_like(e)\n de_minus = np.ones_like(e)\n de_plus = np.ones_like(e)\n\n de_minus[1:] = (e[1:] - e[:-1]) / dx\n de_plus[:-1] = (e[1:] - e[:-1]) / dx\n \n for i in xrange(1, len(e) - 1):\n if de_minus[i] * de_plus[i] < 0.0:\n sigma[i] = 0.0\n elif np.abs(de_minus[i]) < np.abs(de_plus[i]):\n sigma[i] = de_minus[i]\n else:\n sigma[i] = de_plus[i]\n\n return sigma\n\n\ndef plot(x, rho, filename):\n plt.clf()\n plt.plot(x, rho, color = '#003366', ls = '-', lw = 3)\n plt.ylabel('Traffic density')\n plt.xlabel('Distance')\n plt.ylim(-0.5, 11.0)\n plt.savefig('./src/module3/images/' + filename, format = 'png')\n plt.close()\n\n\ndef main(argv):\n sigma = 1.0\n nx = 101\n nt = 30\n dx = 4.0 / (nx - 2)\n x = np.linspace(0, 4, nx - 1)\n\n rho_in = 5.0\n rho_max = 10.0\n V_max = 1.0\n\n dt = sigma * dx / V_max\n\n rho = rho_red_light(nx - 1, rho_max, rho_in)\n plot(x, rho, 'traffic_04.png')\n\n def animate(data):\n x = np.linspace(0, 4, nx - 1)\n y = data\n line.set_data(x, y)\n return line\n\n\n rho_n = godunov(rho, nt, dt, dx, rho_max, V_max)\n\n fig = plt.figure(facecolor = 'w')\n ax = plt.axes(xlim = (0, 4), ylim = (4.5, 11), xlabel = ('Distance'), ylabel = ('Traffic density'))\n line, = ax.plot([],[], color = '#003366', lw = 2)\n anim = animation.FuncAnimation(fig, animate, frames = rho_n, interval = 50)\n plt.show()\n\n\n rho_n = muscl(rho, nt, dt, dx, rho_max, V_max)\n\n fig = plt.figure(facecolor = 'w')\n ax = plt.axes(xlim = (0, 4), ylim = (4.5, 11), xlabel = ('Distance'), ylabel = ('Traffic density'))\n line, = ax.plot([],[], color = '#003366', lw = 2)\n anim = animation.FuncAnimation(fig, animate, frames = rho_n, interval = 50)\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n 
main(sys.argv[1:])\n","repo_name":"cowboysmall-moocs/numerical-mooc","sub_path":"src/module3/traffic_04.py","file_name":"traffic_04.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20893092129","text":"from collections import deque\ndef bfs():\n cnt = 1\n for i in range(n):\n for j in range(m):\n if not land[i][j] and arr[i][j]:\n land[i][j] = cnt\n cnt += 1\n q = deque([])\n q.append((i,j))\n while q:\n x,y = q.popleft()\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < n and 0 <= ny < m and not land[nx][ny] and arr[nx][ny]:\n land[nx][ny] = land[x][y]\n q.append((nx,ny))\n return cnt-1\n\ndef connect(x,y,d):\n move = 1\n target = land[x][y]\n while True:\n x = x + dx[d]\n y = y + dy[d]\n if x < 0 or x >= n or y < 0 or y >= m: return -1,-1\n if land[x][y] == target: return -1,-1\n if land[x][y] == 0: move += 1; continue\n if land[x][y] != target: return move-1,land[x][y]\n\ndef make_graph():\n for x in range(n):\n for y in range(m):\n if land[x][y] != 0:\n for k in range(4):\n a = land[x][y]\n move, b = connect(x,y,k)\n if move <= 1: continue\n edges.append((move,a,b))\n\ndef find(x):\n if parents[x] != x:\n parents[x] = find(parents[x])\n return parents[x]\n\ndef union(a,b):\n a = find(a)\n b = find(b)\n if a < b:\n parents[b] = a\n else:\n parents[a] = b\ndef kruskal():\n ans = 0\n cnt = 0\n for edge in edges:\n move,a,b = edge\n if find(a) != find(b):\n union(a,b)\n ans += move\n cnt += 1\n return ans,cnt\n\ndef go():\n cnt = bfs()\n make_graph()\n edges.sort()\n ans,edge = kruskal()\n if edge == cnt-1:\n return ans\n else:\n return -1\n\ndx = [-1,0,1,0]\ndy = [0,1,0,-1]\nn,m = list(map(int,input().split()))\narr = []\nland = [[0]*m for i in range(n)]\nedges = []\nparents = [0]*10\nfor i in range(10):\n parents[i] = i\nfor i in range(n):\n arr.append(list(map(int,input().split())))\nprint(go())\n\n","repo_name":"17wook2/Algorithm","sub_path":"백준/17472.py","file_name":"17472.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31858271877","text":"from tkinter import *\nfrom tkinter import messagebox as mb\n\nfrom library_load import *\nfrom cpy_functions import *\nfrom cpy_data_structures import *\n\ndef iget_data(e1): \n\n global file # :3\n\n file = pfile_open(lib, e1.get())\n\n if file is None:\n mb.showerror(\"Ошибка\", \"Неправильно введено название файла\")\n return\n\n vector1.len = pget_data_size(lib, file)\n vector1.vector = pvector_alloc(lib, vector1.vector, vector1.len)\n\n pfile_rewind(lib, file)\n pfill_data(lib, vector1, file)\n\n vector2.vector = pvector_alloc(lib, vector2.vector, vector1.len)\n\n pprint_vector(lib, vector1)\n\ndef ipleft_cyclic_shift(e2):\n\n if vector1.vector is None:\n mb.showerror(\"Ошибка\", \"Для начала введите файл\")\n return\n\n try:\n int(e2.get())\n except:\n mb.showerror(\"Ошибка\", \"Введите корректное количество смещений (>= 0)\")\n return\n \n if int(e2.get()) < 0:\n mb.showerror(\"Ошибка\", \"Введите корректное количество смещений (>= 0)\")\n return\n\n pleft_cyclic_shift(lib, vector1, int(e2.get()))\n pprint_vector(lib, vector1)\n\ndef ipparse_full_squares():\n\n if vector1.vector is None:\n mb.showerror(\"Ошибка\", \"Для начала введите файл\")\n return\n\n pparse_full_squares(lib, vector1, vector2)\n pprint_vector(lib, vector2)\n\ndef iexit():\n\n if vector1.vector is None:\n exit()\n\n pvector_free(lib, 
vector1.vector)\n pvector_free(lib, vector2.vector)\n\n file_close(lib, file)\n\n exit()\n\ndef main():\n\n window = Tk()\n window.title(\"From Artemev Ilya with love <3\") \n window.geometry('600x200') \n\n lbl = Label(window, text=\"Функции\\n\")\n lbl.grid(column=0, row=0)\n\n lb1 = Label(window, text=\"Ввод названия файла\")\n lb1.grid(column=0, row=4)\n lb2 = Label(window, text=\"Циклический сдвиг массива на k позиций влево\")\n lb2.grid(column=0, row=5)\n lb3 = Label(window, text=\"Заполнение второго массива отфильтрованными данными\") \n lb3.grid(column=0, row=6)\n lb4 = Label(window, text=\"Выход\") \n lb4.grid(column=0, row=7) \n\n e1 = Entry(window,width=10) \n e1.grid(column=1, row=4)\n e2 = Entry(window,width=10) \n e2.grid(column=1, row=5)\n\n b1 = Button(window, text=\"Выполнить\", command=lambda: iget_data(e1))\n b1.grid(column=2, row=4)\n b2 = Button(window, text=\"Выполнить\", command=lambda: ipleft_cyclic_shift(e2))\n b2.grid(column=2, row=5)\n b3 = Button(window, text=\"Выполнить\", command=ipparse_full_squares)\n b3.grid(column=1, row=6)\n b4 = Button(window, text=\"Выполнить\", command=iexit)\n b4.grid(column=1, row=7)\n\n window.mainloop()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Untouchabl3Pineapple/iu7-c","sub_path":"lab_12_1_2/src/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"73434360557","text":"import asyncio\n\nfrom storage_facade import StorageFacade\n\n\nclass QueueHandler(object):\n\n parser = None\n storage_facade = None\n threads = 10\n\n def __init__(self, loop, parser, storage_facade: StorageFacade):\n self.loop = loop\n self.parser = parser\n self.parser.set_loop(loop)\n self.storage_facade = storage_facade\n print(\"Queue Handler inited\")\n\n def run(self):\n workers = []\n for _ in range(self.threads):\n workers.append(asyncio.ensure_future(self.handle()))\n self.loop.run_until_complete(asyncio.wait(workers))\n\n async def handle(self):\n while not self.storage_facade.queue_empty():\n queue_item = self.storage_facade.get_url_from_queue()\n try:\n await self.handle_queue_message(queue_item)\n except:\n print(\"Error during processing: \", queue_item)\n\n async def handle_queue_message(self, message):\n print(\"Message received: \", message)\n await self.parser.parse(message)\n\n def close(self):\n pass\n","repo_name":"dima-kov/parser","sub_path":"qhandler.py","file_name":"qhandler.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5869503999","text":"from machine import Pin\nfrom time import sleep, time\nimport network\nimport machine\n\nwlan = network.WLAN(network.AP_IF)\nwlan.active(True)\nwlan.config(essid=\"\\U0001F4A9\", password=\"atpisies1337\")\n\nstawlan = network.WLAN(network.STA_IF)\nstawlan.active(False)\n\ncolumns = [Pin(16, Pin.OUT), Pin(14, Pin.OUT), Pin(12, Pin.OUT), Pin(13, Pin.OUT)]\n\nclock = Pin(5, Pin.OUT)\nlatch = Pin(4, Pin.OUT)\ndata = Pin(15, Pin.OUT)\n\ncountdown_minutes = 80\nbutton = Pin(0, Pin.IN, Pin.PULL_UP)\n\n\"\"\"\n@micropython.viper\ndef shiftOut(data: int):\n GPIO_OUT = ptr32(0x60000300) # GPIO base register\n GPIO_OUT[2] = 0x10 # set latch pin to low\n for i in range(8):\n value = (data & 1<<i)>>i #1 if i'th bit is set, 0 if not set\n reg = 2-(value) #Selecting set or unset register - unset reg is 2, set reg is 1\n GPIO_OUT[reg] = 0x8000 #set or unset pin 
15\n GPIO_OUT[1] = 0x20 # set clock pin\n GPIO_OUT[2] = 0x20 # unset clock pin\n\"\"\"\n\ndef shiftOut(byte):\n latch.off()\n for i in range(8):\n value = byte & 1< 1:\n m[0] = mapping[m_s[0]]\n m[1] = mapping[m_s[1]]\n else:\n m[0] = mapping['0']\n m[1] = mapping[m_s[0]]\n s_s = str(seconds)\n if len(s_s) > 1:\n m[2] = mapping[s_s[0]]\n m[3] = mapping[s_s[1]]\n else:\n m[2] = mapping['0']\n m[3] = mapping[s_s[0]]\n #print([bin(byte) for byte in digit_bytes])\n m[1] = m[1] | mapping['.']\n return m\n\n# 1) get time\n# 2) format time\n# 3) check button\n\nminutes = 0\nseconds = 0\n\ndef run(sleep_time=0.0001, tupdate_counter = 300, tformat_counter = 1000, buttoncheck_counter = 100):\n global minutes, seconds, total_seconds\n print(\"Hello!\")\n prev_i = 3\n run_counter = 0\n RELEASED = 0; DEBOUNCED = 1; PRESSED = 2;\n button_state = RELEASED\n button_debounce_time = 0\n digit_bytes = generate_digit_bytes(minutes, seconds)\n dbl = list(enumerate(digit_bytes))\n while True:\n for i, digit_byte in dbl:\n #isr = machine.disable_irq()\n shiftOut(digit_byte)\n columns[prev_i].on()\n latch.on()\n columns[i].off()\n #machine.enable_irq(isr)\n run_counter += 1\n if run_counter >= tformat_counter:\n #print(\"formatting time\")\n new_digit_bytes = generate_digit_bytes(minutes, seconds)\n if new_digit_bytes != digit_bytes:\n digit_bytes = new_digit_bytes\n dbl = list(enumerate(digit_bytes))\n run_counter = 0\n elif run_counter % tupdate_counter == 0:\n #print(\"getting time\")\n t = time()\n diff = t - start_time\n remainder = total_seconds - diff\n minutes, seconds = divmod(remainder, 60)\n elif run_counter % buttoncheck_counter == 0:\n #print(\"getting time\")\n new_button_state = not button.value()\n # True - pressed, False - released\n if button_state == DEBOUNCED:\n if time() - button_debounce_time > 1:\n # Second passed, should be enough for debounce\n button_state = PRESSED if new_button_state else RELEASED\n #print(\"Button finished debouncing\")\n elif new_button_state and button_state == RELEASED:\n #print(\"Button press detected, state: {}, new state: {}\".format(button_state, new_button_state))\n # button got pressed\n button_debounce_time = time()\n button_state = DEBOUNCED\n # Removing 10 seconds from the time\n total_seconds -= 10*60\n elif not new_button_state and button_state == PRESSED:\n #print(\"Button press released, state: {}, new state: {}\".format(button_state, new_button_state))\n button_debounce_time = time()\n # button got released\n button_state = DEBOUNCED\n if sleep_time:\n sleep(sleep_time)\n prev_i = i\n\ntotal_seconds = countdown_minutes * 60\n\nstart_time = time()\nrun()\n","repo_name":"CRImier/IMTAIDKW","sub_path":"software/countdown.py","file_name":"countdown.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"20826129876","text":"import cherrypy\nimport os.path\nimport json\nimport time\nfrom cherrypy.lib.static import serve_file\nfrom crawler_dao import CrawlerDAO\nfrom crawler import Crawler\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\npublic_dir = os.path.join(current_dir, 'public/dist')\n\nclass Root():\n\n\tdef __init__(self):\n\t\tself.dao = CrawlerDAO()\n\t\t\n\t@cherrypy.expose\n\tdef index(self):\n\t\t#return serve_file(os.path.join(current_dir, 'index.html'), content_type='text/html')\n\t\treturn serve_file(os.path.join(public_dir , 'index.html'), content_type='text/html')\n\n\t@cherrypy.expose\n\tdef start(self, 
site):\n\n\t\tcherrypy.request.headers[\"Content-Type\"] = 'utf-8'\n\n\t\tdao = CrawlerDAO()\n\t\tprint(cherrypy.url())\n\n\t\tcrawler = Crawler(dao, 'http://' + site)\n\t\tcrawler.run()\n \n\t\t#FINISHED CONNECTION\n\t\tdao.connection.close()\n\n\t@cherrypy.expose\n\tdef yieldResource(self):\n\n\t\tcherrypy.response.headers[\"Content-Type\"] = \"text/event-stream\"\n\n\t\tdef content():\n\n\t\t\t# #Server time poooling\n\t\t\t# for pooling in xrange(0, 5):\n\t\t\t# \ttime.sleep(1)\n\n\t\t\treturn \"event: time\\n\" + \"data: \" + str(self.dao.select()) + \"\\n\\n\";\n\n\t\treturn content()\n\n\tyieldResource._cp_config = {'response.stream' : True, 'tools.encode.encoding' : 'utf-8'}\n\nif __name__ == '__main__':\n\n\tdao = CrawlerDAO()\t\n\tdao.reset()\n\t\t\n\tpageroot = Root()\n\n\tconf = {\n\t\t\t'/' : {\n\t\t\t \t'tools.encode.encoding': 'utf-8',\n\t\t\t\t'response.timeout' : 1000000,\n\t\t\t\t'tools.staticdir.root': current_dir\n\t\t\t},\n \n\t\t\t'/feed': {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(current_dir, 'public'),\n\t\t\t\t'tools.staticdir.content_types': {\n\t\t\t\t\t'rss' : 'application/json',\n\t\t\t\t\t'atom': 'application/json'\n\t\t\t\t},\n\t\t\t\t\n\t\t\t},\n\n\t\t\t'/scripts' : {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(public_dir,'scripts' )\n\t\t\t},\n\n\t\t\t'/styles' : {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(public_dir,'styles' )\n\t\t\t},\n\n\t\t\t'/views' : {\n\t\t\t\t'tools.staticdir.on' : True,\n\t\t\t\t'tools.staticdir.dir': os.path.join(public_dir, 'views' )\n\t\t\t}\n\t\t}\n\n\n\n\tcherrypy.quickstart(pageroot, config=conf)\n","repo_name":"carloshpds/BCC-2s13-PI4-web-crawler","sub_path":"server-sse.py","file_name":"server-sse.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71288724075","text":"import pytest\nfrom click.testing import CliRunner\nfrom anydo_cli.commands.cli import entry_point, version\n\n\nclass TestCli(object):\n def test_entrypoint(self):\n runner = CliRunner()\n result = runner.invoke(entry_point)\n assert result.output != ''\n\n # not in love with having to supply a subcommand. 
http://click.pocoo.org/5/testing/\n @pytest.mark.parametrize('args, expected_output', [\n (['--version', 'list'], version),\n ])\n def test_correct_version_is_printed(self, args, expected_output):\n runner = CliRunner()\n result = runner.invoke(entry_point, args=args)\n assert expected_output == result.output.strip()\n","repo_name":"dustinbrown/anydo_cli","sub_path":"tests/test_commands/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"17764067830","text":"from django.core.paginator import Paginator\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import JsonResponse, HttpResponse, Http404\nimport json\nimport datetime\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nimport requests\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, CreateView\n\nfrom .forms import CreateUserForm, AddApplicationCustomer\nfrom .models import *\nfrom .utils import cookieCart, cartData, guestOrder #AddApplicationUtils\nfrom django.contrib.messages.views import SuccessMessageMixin\n\nmenu = [{'title': \"Головна сторінка\", 'url_name': 'index'},\n {'title': \"Купити насіння\", 'url_name': 'store'},\n {'title': \"Послуга очистки насіння\", 'url_name': 'clean_seed'},\n {'title': \"Про нас\", 'url_name': 'about'},\n {'title': \"Місцезнаходження\", 'url_name': 'we_on_map'},\n {'title': \"Доставка та оплата\", 'url_name': 'delivery'},\n {'title': \"Контакти\", 'url_name': 'contact'}\n]\n\n\nclass AgroHome(ListView):\n model = Product\n template_name = 'seed/index.html'\n context_object_name = 'products'\n #extra_context = {'title': 'Головна сторінка'}\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n contex = super().get_context_data(**kwargs)\n data = cartData(self.request)\n contex['cartItems'] = data['cartItems']\n contex['menu'] = menu\n return contex\n\n\n def get_queryset(self):\n return Product.objects.filter(is_published = True)\n\n\n\ndef main(request):\n\n context = {}\n return render(request, 'seed/main.html', context)\n\nclass Store(ListView):\n paginate_by = 6\n model = Product\n template_name = 'seed/store.html'\n context_object_name = 'products'\n products = Product.objects.all().select_related('cat')\n categories = Category.objects.all()\n\n def get_queryset(self):\n serch_query = self.request.GET.get('search', '')\n products_2 = Product.objects.filter(Q(name_prod__icontains=serch_query) | Q(content__icontains=serch_query)).select_related('cat')\n return products_2\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n contex = super().get_context_data(**kwargs)\n data = cartData(self.request)\n contex['cartItems'] = data['cartItems']\n contex['menu'] = menu\n contex['products_2'] = self.get_queryset\n contex['cat_selected'] = 0\n contex['categories'] = self.categories\n contex['products'] = self.products\n\n return contex\n\n\ndef cart(request):\n data = cartData(request)\n cartItems = data['cartItems']\n order = data['order']\n items = data['items']\n\n context = {'items': items,\n 'order': order,\n 'cartItems': cartItems,\n 'menu': menu,\n }\n return render(request, 'seed/cart.html', context)\n\n\ndef checkout(request):\n data = cartData(request)\n cartItems = data['cartItems']\n order = data['order']\n items = data['items']\n\n context = {'items': items,\n 'order': order,\n 
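               # cartData(request) (from .utils) resolves the cart whether the
               # visitor is an authenticated customer or a cookie-based guest,
               # so 'items', 'order' and 'cartItems' are populated either way.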
'cartItems': cartItems,\n 'menu': menu,\n }\n return render(request, 'seed/checkout.html', context)\n\n\ndef updateItem(request):\n data = json.loads(request.body)\n productId = data['productId']\n action = data['action']\n\n print('action:', action)\n print('productId:', productId)\n\n customer = request.user.customer\n product = Product.objects.get(pk=productId)\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n orderItem, create = OrderItem.objects.get_or_create(order=order, product=product)\n if action == \"add\":\n orderItem.quantity = (orderItem.quantity + 1)\n elif action == \"add2\":\n orderItem.quantity = (orderItem.quantity + 10)\n elif action == \"add3\":\n orderItem.quantity = (orderItem.quantity + 100)\n elif action == \"add4\":\n orderItem.quantity = (orderItem.quantity + 5)\n elif action == \"remove\":\n orderItem.quantity = (orderItem.quantity - 1)\n elif action == \"remove2\":\n orderItem.quantity = (orderItem.quantity - 10)\n elif action == \"remove3\":\n orderItem.quantity = (orderItem.quantity - 100)\n elif action == \"remove4\":\n orderItem.quantity = (orderItem.quantity - 5)\n orderItem.save()\n if orderItem.quantity <= 0:\n orderItem.delete()\n\n return JsonResponse('Item was added', safe=False)\n\ndef multi(a,b):\n if a and b:\n return a * b or print(f'These {a} and {b} aren`t numbers!')\n\n\ndef show_post(request, prod_slug):\n pub = get_object_or_404(Product, slug=prod_slug)\n data = cartData(request)\n cartItems = data['cartItems']\n categories = Category.objects.all()\n products = Product.objects.all().select_related('cat')\n if request.method == 'POST':\n try:\n acre_val = int(request.POST.get('acre_val', False))\n rate_val = int(request.POST.get('rate_val', False))\n res = multi(acre_val, rate_val)\n except:\n return redirect('store')\n else:\n res = 0\n context = {'pub': pub,\n 'products': products,\n 'categories': categories,\n 'menu': menu,\n 'cartItems': cartItems,\n 'cat_selected': 0,\n \"res\": res\n\n }\n return render(request, 'seed/product.html', context)\n\n\ndef show_category(request, cat_slug):\n products_2 = Product.objects.filter(cat__slug_cat=cat_slug).select_related('cat')\n categories = Category.objects.all()\n\n data = cartData(request)\n cartItems = data['cartItems']\n\n if len(products_2) == 0:\n raise Http404()\n\n paginator = Paginator(products_2, 6)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n products = Product.objects.all().select_related('cat')\n\n\n context = {\n 'products': products,\n 'products_2': products_2,\n 'categories': categories,\n 'menu': menu,\n 'cartItems': cartItems,\n 'cat_selected': cat_slug,\n 'page_obj': page_obj,\n\n }\n\n return render(request, 'seed/store.html', context=context)\n\nclass CleanSeed(SuccessMessageMixin, CreateView):\n form_class = AddApplicationCustomer\n template_name = 'seed/clean_seed.html'\n success_url = reverse_lazy('index')\n success_message = \"Заявка успішно створена, очікуйте на дзвінок від менеджера найближчим часом.\"\n\n\n\n def get_context_data(self, *, object_list=None, **kwargs):\n contex = super().get_context_data(**kwargs)\n data = cartData(self.request)\n contex['cartItems'] = data['cartItems']\n contex['menu'] = menu\n return contex\n\n\n\ndef about(request):\n data = cartData(request)\n cartItems = data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems\n }\n return render(request, 'seed/about.html', context)\n\n\ndef contact(request):\n data = cartData(request)\n cartItems = 
data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems\n }\n\n return render(request, 'seed/contact.html', context)\n\n\ndef delivery(request):\n data = cartData(request)\n cartItems = data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems\n }\n\n return render(request, 'seed/delivery.html', context)\n\n\ndef we_on_map(request):\n data = cartData(request)\n cartItems = data['cartItems']\n\n context = {'menu': menu,\n 'cartItems': cartItems,\n }\n return render(request, 'seed/we_on_map.html', context)\n\n\ndef processOrder(request):\n transaction_id = datetime.datetime.now().timestamp()\n data = json.loads(request.body)\n cookieData = cookieCart(request)\n items = cookieData['items']\n\n if request.user.is_authenticated:\n name = data['form']['name']\n last_name = data['form']['last_name']\n phone = data['form']['phone']\n email = data['form']['email']\n customer = Customer.objects.get(user=request.user)\n customer.name = name\n customer.last_name = last_name\n customer.phone = phone\n customer.email = email\n customer.save()\n #order = Order.objects.filter(customer=customer, complete=False).first()\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n order.date_orderd = datetime.datetime.now()\n order.save()\n for item in items:\n product = Product.objects.get(pk=item['product']['pk'])\n\n orderItem = OrderItem.objects.create(\n product=product,\n order=order,\n quantity=item['quantity']\n )\n\n\n else:\n customer, order = guestOrder(request, data)\n\n\n total = float(data['form']['total'])\n order.transaction_id = transaction_id\n\n if total == float(order.get_cart_total):\n order.complete = True\n order.save()\n\n if order.shipping == True:\n ShippingAddress.objects.create(\n customer=customer,\n order=order,\n region=data['shipping']['region'],\n city=data['shipping']['city'],\n mail=data['shipping']['mail'],\n mail_number=data['shipping']['mail_number'],\n zipcode=data['shipping']['zipcode'],\n pay=data['shipping']['pay'],\n comment=data['shipping']['comment'],\n )\n return JsonResponse('Payment complete!', safe=False)\n\ndef register(request):\n form = CreateUserForm()\n\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n\n if form.is_valid():\n user = form.save()\n\n Customer.objects.create(user=user)\n\n user = form.cleaned_data.get('username')\n messages.success(request, f'Аккаун для {user} створений. 
Увійдіть в аккаунт.')\n return redirect('login')\n\n\n context = {'form': form,\n 'menu': menu,\n 'cartItems': 0,\n }\n return render(request, 'seed/register.html', context)\n\n\ndef login_user(request):\n if request.method == 'POST':\n try:\n email = request.POST.get('email')\n password = request.POST.get('password')\n username = User.objects.get(email=email.lower()).username\n\n user = authenticate(request, username=username, password=password)\n # user_p = authenticate(request, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.info(request, \"Некоректний емейл або пароль.\")\n return redirect('login')\n except:\n messages.info(request, \"Некоректний емейл або пароль.\")\n return redirect('login')\n\n\n context = {'menu': menu,\n 'cartItems': 0,}\n return render(request, 'seed/login.html', context)\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\n\n\n\n","repo_name":"Astrogor4ik/Agroviktoria","sub_path":"agroviktoria/seed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"25152583384","text":"\"\"\"\n.\n\"\"\"\nimport numpy as np\nimport numpy.typing as npt\nimport scipy\nfrom lqr import SmoothTrack\n\n\nclass SolverMatrices: # pylint: disable=too-few-public-methods\n \"\"\"\n This class contains the helper matrices that\n will be used to calulate the solver input.\n\n Attributes\n ----------\n matP : npt.NDArray[np.float64]\n The P matrix, matP = [matPXX,matPXY,matPYY]\n matPrime : npt.NDArray[np.float64]\n The prime matrix, matPrime = [xPrime,yPrime]\n matT : npt.NDArray[np.float64]\n The T matrix, matT = [matTC,matTNX,matTNY]\n matQ : npt.NDArray[np.float64]\n The Q matrix, matQ = [matQX,matQY]\n curvPart : npt.NDArray[np.float64]\n The curvature part of the matrix\n \"\"\"\n\n def __init__(self, track: SmoothTrack) -> None:\n self.matP: npt.NDArray[np.float64] = np.array([None, None, None])\n self.matPrime: npt.NDArray[np.float64] = np.array([None, None])\n self.matT: npt.NDArray[np.float64] = np.array([None, None, None])\n self.matQ: npt.NDArray[np.float64] = np.array([None, None])\n self.curvPart: npt.NDArray[np.float64] = np.array(None)\n self.setupMatrices(track)\n\n def setupMatrices(self, track: SmoothTrack) -> None:\n \"\"\"\n This function sets up the matrices for the optimization problem.\n\n Parameters\n ----------\n track : Track\n The track data.\n self : SolverMatrices\n The helper matrices for the optimization problem.\n \"\"\"\n # create extraction matrix for b_i coefficients used in gradient\n extMatB = np.zeros((track.noPoints, track.noSplines * 4), dtype=int)\n for i in range(track.noSplines):\n extMatB[i, i * 4 + 1] = 1 # 1 * b_ix = E_x * x\n # create extraction matrix -> only c_i coefficients of the\n # solved linear equation system are needed for curvature information\n extMatC = np.zeros((track.noPoints, track.noSplines * 4), dtype=int)\n\n for i in range(track.noSplines):\n extMatC[i, i * 4 + 2] = 2 # 2 * c_ix = D_x * x\n # ax=b --> (track.trackCoeffs.alpha)*(T_C) = (extMatC)\n tempTC = scipy.sparse.linalg.spsolve(track.trackCoeffs.alpha.T, extMatC.T)\n self.matT[0] = tempTC.T\n # set up matMX and matMY matrices\n matMX = np.zeros((track.noSplines * 4, track.noPoints))\n matMY = np.zeros((track.noSplines * 4, track.noPoints))\n\n for i in range(track.noSplines):\n j = i * 4\n\n if i < track.noPoints - 1:\n matMX[j, i] = 
track.trackCoeffs.normVectors[i, 0]\n matMX[j + 1, i + 1] = track.trackCoeffs.normVectors[i + 1, 0]\n\n matMY[j, i] = track.trackCoeffs.normVectors[i, 1]\n matMY[j + 1, i + 1] = track.trackCoeffs.normVectors[i + 1, 1]\n else:\n matMX[j, i] = track.trackCoeffs.normVectors[i, 0]\n matMX[j + 1, 0] = track.trackCoeffs.normVectors[0, 0] # close spline\n\n matMY[j, i] = track.trackCoeffs.normVectors[i, 1]\n matMY[j + 1, 0] = track.trackCoeffs.normVectors[0, 1]\n\n # set up self.matQ[0] and self.matQ[1] matrices including the point coordinate information\n self.matQ[0] = np.zeros((track.noSplines * 4, 1))\n self.matQ[1] = np.zeros((track.noSplines * 4, 1))\n\n for i in range(track.noSplines):\n j = i * 4\n\n if i < track.noPoints - 1:\n self.matQ[0][j, 0] = track.path[i, 0]\n self.matQ[0][j + 1, 0] = track.path[i + 1, 0]\n\n self.matQ[1][j, 0] = track.path[i, 1]\n self.matQ[1][j + 1, 0] = track.path[i + 1, 1]\n else:\n self.matQ[0][j, 0] = track.path[i, 0]\n self.matQ[0][j + 1, 0] = track.path[0, 0]\n\n self.matQ[1][j, 0] = track.path[i, 1]\n self.matQ[1][j + 1, 0] = track.path[0, 1]\n\n # set up self.matP[0], self.matP[1], self.matP[2] matrices\n tempTB = scipy.sparse.linalg.spsolve(track.trackCoeffs.alpha.T, extMatB.T)\n matTB = tempTB.T\n self.matPrime = np.array([None, None, None, None, None])\n self.matPrime[0] = np.eye(track.noPoints, track.noPoints) * np.matmul(matTB, self.matQ[0])\n self.matPrime[1] = np.eye(track.noPoints, track.noPoints) * np.matmul(matTB, self.matQ[1])\n\n self.matPrime[2] = np.power(self.matPrime[0], 2)\n self.matPrime[3] = np.power(self.matPrime[1], 2)\n self.matPrime[4] = -2 * np.matmul(self.matPrime[0], self.matPrime[1])\n curvDen = np.power(\n self.matPrime[2] + self.matPrime[3], 1.5\n ) # calculate curvature denominator\n self.curvPart = np.divide(\n 1, curvDen, out=np.zeros_like(curvDen), where=curvDen != 0\n ) # divide where not zero (diag elements)\n curvPartSq = np.power(self.curvPart, 2)\n self.matP[0] = np.matmul(curvPartSq, self.matPrime[3])\n self.matP[2] = np.matmul(curvPartSq, self.matPrime[2])\n self.matP[1] = np.matmul(curvPartSq, self.matPrime[4])\n\n # SET UP FINAL MATRICES FOR SOLVER\n self.matT[1] = np.matmul(self.matT[0], matMX)\n self.matT[2] = np.matmul(self.matT[0], matMY)\n","repo_name":"ahmedsalahacc/fs-system","sub_path":"navigation/lqr/src/lqr/solverMatrices.py","file_name":"solverMatrices.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"23434778975","text":"\"\"\"\nCode here inspects every sweep of every channel of every ABF\nin the project and stores sweep information as a string in\na dictionary in a python file in the tests folder.\n\"\"\"\n\ntry:\n import glob\n import os\n import sys\n import matplotlib.pyplot as plt\n import numpy as np\n PATH_HERE = os.path.abspath(os.path.dirname(__file__))\n PATH_DATA = os.path.abspath(PATH_HERE+\"../../../data/abfs/\")\n PATH_SRC = os.path.abspath(PATH_HERE+\"../../../src/\")\n DATA_FOLDER = os.path.join(PATH_SRC, \"../data/abfs/\")\n sys.path.insert(0, PATH_SRC)\n import pyabf\nexcept:\n raise EnvironmentError()\n\n\ndef sweepKeyAndInfo(abf, sweepIndex, channelIndex):\n assert isinstance(abf, pyabf.ABF)\n abf.setSweep(sweepIndex, channelIndex)\n key = str(f\"{abf.abfID}.abf \" +\n f\"SW{sweepIndex} \" +\n f\"CH{channelIndex}\")\n info = str(f\"{len(abf.sweepY)}, \" +\n f\"{abf.sweepY[0]:.08f}, \" +\n f\"{abf.sweepY[-1]:.08f}, \" +\n f\"{np.std(abf.sweepY):.08f}\")\n return [key, 
info]\n\n\nif __name__ == \"__main__\":\n txt=\"\"\n #txt = \"\\\"\\\"\\\"ABF sweep hashes generated automatically by 2020-06-19 script in dev folder.\\\"\\\"\\\"\\n\\n\"\n #txt += \"# key = ABFID, sweep, and channel\\n\"\n #txt += \"# value = sweep point count, first value, last value, and stdev\\n\"\n #txt += \"knownAbfSweepValues = {}\\n\"\n for abfPath in glob.glob(DATA_FOLDER + \"/*.abf\"):\n if not \"190619B_0003\" in abfPath:\n continue\n abf = pyabf.ABF(abfPath)\n print(f\"generating sweep hashes for {abf.abfID}.abf...\")\n txt += \"\\n\"\n for sweepIndex in range(abf.sweepCount):\n for channelIndex in range(abf.channelCount):\n key, info = sweepKeyAndInfo(abf, sweepIndex, channelIndex)\n txt += f'knownAbfSweepValues[\"{key}\"] = \"{info}\"\\n'\n\n #with open(PATH_SRC+\"/../tests/test_sweepHashes.py\", 'w') as f:\n #f.write(txt)\n print(txt)","repo_name":"swharden/pyABF","sub_path":"dev/python/2020-06-19 sweep hash.py","file_name":"2020-06-19 sweep hash.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"73"} +{"seq_id":"15720959109","text":"# -*- coding: utf-8 -*-\n\nimport os.path\nfrom ast import literal_eval\nfrom functools import partial\n\nfrom qgis.PyQt import uic\nfrom qgis.PyQt.QtWidgets import (\n QAbstractItemView,\n QAction,\n QFrame,\n QHeaderView,\n QListWidgetItem,\n QMessageBox,\n QTableWidgetItem,\n)\nfrom qgis.PyQt.QtGui import QColor, QIcon\nfrom qgis.PyQt.QtCore import QSize, Qt\nfrom qgis.core import QgsProject\nfrom qgis.gui import QgsMessageBar\nfrom qgis.utils import Qgis, iface\n\nfrom buildings.gui import bulk_load_changes\nfrom buildings.gui.error_dialog import ErrorDialog\nfrom buildings.gui.edit_dialog import EditDialog\nfrom buildings.gui.deletion_reason_dialog import DeletionReason\nfrom buildings.utilities import database as db\nfrom buildings.sql import buildings_bulk_load_select_statements as bulk_load_select\nfrom buildings.sql import buildings_select_statements as buildings_select\nfrom buildings.sql import general_select_statements as general_select\nfrom buildings.utilities import circle_tool\nfrom buildings.utilities.layers import LayerRegistry\nfrom buildings.utilities.multi_layer_selection import MultiLayerSelection\nfrom buildings.utilities.point_tool import PointTool\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nFORM_CLASS, _ = uic.loadUiType(\n os.path.join(os.path.dirname(__file__), \"alter_building_relationship.ui\")\n)\n\n\nclass AlterRelationships(QFrame, FORM_CLASS):\n def __init__(self, dockwidget, current_dataset, parent=None):\n \"\"\"Constructor.\"\"\"\n\n # Attributes set in subsequent methods\n self.message_bar_edit = None\n self.message_bar_qa = None\n self.add_action = None\n self.edit_geom_action = None\n self.edit_attrs_action = None\n self.lyr_related_existing = None\n self.lyr_related_bulk_load = None\n self.lyr_matched_existing = None\n self.lyr_matched_bulk_load = None\n self.lyr_removed_existing = None\n self.lyr_added_bulk_load = None\n self.lyr_related_bulk_load_in_edit = None\n self.lyr_related_existing_in_edit = None\n self.lyr_matched_bulk_load_in_edit = None\n self.lyr_matched_existing_in_edit = None\n self.lyr_removed_existing_in_edit = None\n self.lyr_added_bulk_load_in_edit = None\n self.lyr_existing = None\n self.lyr_bulk_load = None\n self.lyr_facilities = None\n self.msgbox = None\n self.tool = None\n self.reason_text = None\n self.circle_tool = None\n self.polyline = None\n 
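        # Pre-declaring each remaining attribute as None keeps the frame's
        # full state visible in one place; the setup methods below fill them in.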
self.circle_action = None\n\n super(AlterRelationships, self).__init__(parent)\n self.setupUi(self)\n\n self.db = db\n self.db.connect()\n\n self.valid_building_uses = {\n None: \"None\",\n **{\n use_id: use\n for use_id, use in self.db.execute_return(\n \"SELECT * FROM buildings.use;\"\n ).fetchall()\n },\n }\n\n self.dockwidget = dockwidget\n self.layer_registry = LayerRegistry()\n self.current_dataset = current_dataset\n self.error_dialog = None\n self.autosave = False\n self.delete = False\n self.deletion_reason = None\n self.zoom = True\n self.attributes_changed = False\n\n self.frame_setup()\n self.layers_setup()\n self.edit_dialog = EditDialog(self)\n self.change_instance = None\n self.toolbar_setup()\n self.connect_signals()\n\n def frame_setup(self):\n\n self.message_bar_edit = QgsMessageBar()\n self.layout_msg_bar_edit.addWidget(self.message_bar_edit)\n self.message_bar_qa = QgsMessageBar()\n self.layout_msg_bar_qa.addWidget(self.message_bar_qa)\n\n self.btn_qa_not_removed.setIcon(\n QIcon(os.path.join(__location__, \"..\", \"icons\", \"match.png\"))\n )\n self.btn_next.setIcon(\n QIcon(os.path.join(__location__, \"..\", \"icons\", \"next.png\"))\n )\n self.btn_maptool.setIcon(\n QIcon(\n os.path.join(\n __location__, \"..\", \"icons\", \"multi_layer_selection_tool.png\"\n )\n )\n )\n\n self.cbox_use.insertItems(0, self.valid_building_uses.values())\n\n self.maptool_clicked()\n self.reset_buttons()\n self.btn_next.setEnabled(False)\n self.qa_button_set_enable(False)\n self.btn_qa_not_removed.setEnabled(False)\n self.populate_cmb_relationship()\n self.setup_message_box()\n\n def layers_setup(self):\n self.add_building_lyrs()\n self.repaint_view()\n self.clear_layer_filter()\n iface.setActiveLayer(self.lyr_bulk_load)\n\n def toolbar_setup(self):\n\n if \"Add Outline\" not in (\n action.text() for action in iface.building_toolbar.actions()\n ):\n image_dir = os.path.join(__location__, \"..\", \"icons\")\n icon_path = os.path.join(image_dir, \"plus.png\")\n icon = QIcon()\n icon.addFile(icon_path, QSize(8, 8))\n self.add_action = QAction(icon, \"Add Outline\", iface.building_toolbar)\n iface.registerMainWindowAction(self.add_action, \"Ctrl+1\")\n self.add_action.triggered.connect(self.canvas_add_outline)\n iface.building_toolbar.addAction(self.add_action)\n\n if \"Edit Geometry\" not in (\n action.text() for action in iface.building_toolbar.actions()\n ):\n image_dir = os.path.join(__location__, \"..\", \"icons\")\n icon_path = os.path.join(image_dir, \"edit_geometry.png\")\n icon = QIcon()\n icon.addFile(icon_path, QSize(8, 8))\n self.edit_geom_action = QAction(\n icon, \"Edit Geometry\", iface.building_toolbar\n )\n iface.registerMainWindowAction(self.edit_geom_action, \"Ctrl+2\")\n self.edit_geom_action.triggered.connect(self.canvas_edit_geometry)\n iface.building_toolbar.addAction(self.edit_geom_action)\n\n if \"Edit Attributes\" not in (\n action.text() for action in iface.building_toolbar.actions()\n ):\n image_dir = os.path.join(__location__, \"..\", \"icons\")\n icon_path = os.path.join(image_dir, \"edit_attributes.png\")\n icon = QIcon()\n icon.addFile(icon_path, QSize(8, 8))\n self.edit_attrs_action = QAction(\n icon, \"Edit Attributes\", iface.building_toolbar\n )\n iface.registerMainWindowAction(self.edit_attrs_action, \"Ctrl+3\")\n self.edit_attrs_action.triggered.connect(self.canvas_edit_attribute)\n iface.building_toolbar.addAction(self.edit_attrs_action)\n\n iface.building_toolbar.show()\n\n def connect_signals(self):\n\n 
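        """Wire buttons, the relationship combo box and table, the layer
        checkboxes, autosave and project layer-removal signals to their
        handler methods."""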
self.dockwidget.closed.connect(self.on_dockwidget_closed)\n\n self.btn_qa_okay.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Okay\", commit_status=True)\n )\n self.btn_qa_pending.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Pending\", commit_status=True)\n )\n self.btn_qa_refer2supplier.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Refer to Supplier\", commit_status=True)\n )\n self.btn_qa_not_checked.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Not Checked\", commit_status=True)\n )\n self.btn_qa_not_removed.clicked.connect(\n partial(self.btn_qa_status_clicked, \"Not Removed\", commit_status=True)\n )\n self.btn_next.clicked.connect(self.zoom_to_next)\n self.btn_maptool.clicked.connect(self.maptool_clicked)\n self.btn_unlink.clicked.connect(\n partial(self.unlink_clicked, commit_status=True)\n )\n self.btn_matched.clicked.connect(\n partial(self.matched_clicked, commit_status=True)\n )\n self.btn_related.clicked.connect(\n partial(self.related_clicked, commit_status=True)\n )\n self.btn_delete.clicked.connect(\n partial(self.delete_clicked, commit_status=True)\n )\n self.btn_copy_from_existing.clicked.connect(\n self.on_click_btn_copy_from_existing\n )\n self.btn_set_attributes.clicked.connect(self.on_click_btn_set_attributes)\n self.btn_delete_attributes.clicked.connect(self.on_click_btn_delete_attributes)\n self.btn_save.clicked.connect(partial(self.save_clicked, commit_status=True))\n self.btn_cancel.clicked.connect(self.cancel_clicked)\n self.btn_exit.clicked.connect(self.exit_clicked)\n\n self.cmb_relationship.currentIndexChanged.connect(\n self.cmb_relationship_current_index_changed\n )\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n\n self.cb_lyr_bulk_load.stateChanged.connect(self.cb_lyr_bulk_load_state_changed)\n self.cb_lyr_existing.stateChanged.connect(self.cb_lyr_existing_state_changed)\n\n self.cb_autosave.stateChanged.connect(self.cb_autosave_state_changed)\n\n QgsProject.instance().layerWillBeRemoved.connect(self.layers_removed)\n\n def add_building_lyrs(self):\n \"\"\"Add building layers\"\"\"\n\n path = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"styles/\")\n\n self.lyr_facilities = self.layer_registry.add_postgres_layer(\n \"facilities\",\n \"facilities\",\n \"shape\",\n \"facilities\",\n \"\",\n \"\",\n )\n self.lyr_facilities.loadNamedStyle(path + \"facilities.qml\")\n\n self.lyr_related_existing = self.layer_registry.add_postgres_layer(\n \"related_existing_outlines\",\n \"related_existing_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_related_existing.loadNamedStyle(path + \"building_purple_existing.qml\")\n\n self.lyr_related_bulk_load = self.layer_registry.add_postgres_layer(\n \"related_bulk_load_outlines\",\n \"related_bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_related_bulk_load.loadNamedStyle(path + \"building_purple.qml\")\n\n self.lyr_matched_existing = self.layer_registry.add_postgres_layer(\n \"matched_existing_outlines\",\n \"matched_existing_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_matched_existing.loadNamedStyle(path + \"building_blue_existing.qml\")\n\n self.lyr_matched_bulk_load = self.layer_registry.add_postgres_layer(\n \"matched_bulk_load_outlines\",\n \"matched_bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n 
\"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_matched_bulk_load.loadNamedStyle(path + \"building_blue.qml\")\n\n self.lyr_removed_existing = self.layer_registry.add_postgres_layer(\n \"removed_outlines\",\n \"removed_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_removed_existing.loadNamedStyle(path + \"building_red_existing.qml\")\n\n self.lyr_added_bulk_load = self.layer_registry.add_postgres_layer(\n \"added_outlines\",\n \"added_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_added_bulk_load.loadNamedStyle(path + \"building_green.qml\")\n\n self.lyr_related_bulk_load_in_edit = self.layer_registry.add_postgres_layer(\n \"related_bulk_load_in_edit\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_related_bulk_load_in_edit.loadNamedStyle(path + \"building_purple.qml\")\n\n self.lyr_related_existing_in_edit = self.layer_registry.add_postgres_layer(\n \"related_existing_in_edit\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_related_existing_in_edit.loadNamedStyle(path + \"building_purple.qml\")\n\n self.lyr_matched_bulk_load_in_edit = self.layer_registry.add_postgres_layer(\n \"matched_bulk_load_in_edit\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_matched_bulk_load_in_edit.loadNamedStyle(path + \"building_blue.qml\")\n\n self.lyr_matched_existing_in_edit = self.layer_registry.add_postgres_layer(\n \"matched_existing_in_edit\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_matched_existing_in_edit.loadNamedStyle(path + \"building_blue.qml\")\n\n self.lyr_removed_existing_in_edit = self.layer_registry.add_postgres_layer(\n \"removed_existing_in_edit\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"\",\n )\n self.lyr_removed_existing_in_edit.loadNamedStyle(path + \"building_red.qml\")\n\n self.lyr_added_bulk_load_in_edit = self.layer_registry.add_postgres_layer(\n \"added_bulk_load_in_edit\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"\",\n )\n self.lyr_added_bulk_load_in_edit.loadNamedStyle(path + \"building_green.qml\")\n\n self.lyr_existing = self.layer_registry.add_postgres_layer(\n \"existing_subset_extracts\",\n \"existing_subset_extracts\",\n \"shape\",\n \"buildings_bulk_load\",\n \"building_outline_id\",\n \"supplied_dataset_id = {0}\".format(self.current_dataset),\n )\n self.lyr_existing.loadNamedStyle(path + \"building_transparent.qml\")\n\n self.lyr_bulk_load = self.layer_registry.add_postgres_layer(\n \"bulk_load_outlines\",\n \"bulk_load_outlines\",\n \"shape\",\n \"buildings_bulk_load\",\n \"bulk_load_outline_id\",\n \"supplied_dataset_id = {0}\".format(self.current_dataset),\n )\n self.lyr_bulk_load.loadNamedStyle(path + \"buildings_bulk_load_alter_rel.qml\")\n\n def repaint_view(self):\n \"\"\"Repaint views to update changes in result\"\"\"\n self.lyr_added_bulk_load.triggerRepaint()\n self.lyr_removed_existing.triggerRepaint()\n self.lyr_matched_bulk_load.triggerRepaint()\n self.lyr_matched_existing.triggerRepaint()\n self.lyr_related_bulk_load.triggerRepaint()\n self.lyr_related_existing.triggerRepaint()\n\n def clear_layer_filter(self):\n \"\"\"Returns 'null' 
filter for layers\"\"\"\n self.lyr_added_bulk_load_in_edit.setSubsetString(\"null\")\n self.lyr_removed_existing_in_edit.setSubsetString(\"null\")\n self.lyr_matched_existing_in_edit.setSubsetString(\"null\")\n self.lyr_matched_bulk_load_in_edit.setSubsetString(\"null\")\n self.lyr_related_existing_in_edit.setSubsetString(\"null\")\n self.lyr_related_bulk_load_in_edit.setSubsetString(\"null\")\n\n self.lyr_added_bulk_load.setSubsetString(\"\")\n self.lyr_removed_existing.setSubsetString(\"\")\n self.lyr_matched_existing.setSubsetString(\"\")\n self.lyr_matched_bulk_load.setSubsetString(\"\")\n self.lyr_related_existing.setSubsetString(\"\")\n self.lyr_related_bulk_load.setSubsetString(\"\")\n\n def setup_message_box(self):\n self.msgbox = QMessageBox(\n QMessageBox.Question,\n \"Auto-save\",\n \"Are you sure you want to turn on auto-save?\",\n buttons=QMessageBox.No | QMessageBox.Yes,\n )\n\n def on_dockwidget_closed(self):\n \"\"\"Remove highlight when the dockwideget closes\"\"\"\n pass\n\n def maptool_clicked(self):\n canvas = iface.mapCanvas()\n self.tool = MultiLayerSelection(canvas)\n canvas.setMapTool(self.tool)\n # set up signal and slot\n self.tool.multi_selection_changed.connect(self.multi_selection_changed)\n\n def multi_selection_changed(self):\n\n self.tbl_relationship.itemSelectionChanged.disconnect(\n self.tbl_relationship_item_selection_changed\n )\n self.tbl_relationship.clearSelection()\n\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.reset_buttons()\n\n selected_bulk = [feat.id() for feat in self.lyr_bulk_load.selectedFeatures()]\n selected_existing = [feat.id() for feat in self.lyr_existing.selectedFeatures()]\n\n has_multi_set = False\n has_added, has_removed, has_matched, has_related = False, False, False, False\n existing_to_lst, bulk_to_list = [], []\n bulk_attr_to_list = []\n\n for feat_id in selected_bulk:\n if feat_id in bulk_to_list:\n continue\n id_added = self.find_added_outlines(feat_id)\n id_matched = self.find_matched_existing_outlines(feat_id)\n ids_existing, ids_bulk = self.find_related_existing_outlines(feat_id)\n if id_added:\n if has_matched or has_related:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n bulk_to_list.append(feat_id)\n bulk_attr_to_list.append(id_added)\n has_added = True\n elif id_matched:\n if has_added or has_removed or has_related:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_matched:\n has_multi_set = True\n existing_to_lst = [id_matched[0]]\n bulk_to_list = [feat_id]\n has_matched = True\n elif ids_existing and ids_bulk:\n if has_added or has_removed or has_matched:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_related:\n has_multi_set = True\n existing_to_lst = ids_existing\n bulk_to_list = ids_bulk\n has_related = True\n\n for feat_id in selected_existing:\n if feat_id in existing_to_lst:\n continue\n id_removed = self.find_removed_outlines(feat_id)\n id_matched = self.find_matched_bulk_load_outlines(feat_id)\n ids_existing, ids_bulk = self.find_related_bulk_load_outlines(feat_id)\n if id_removed:\n if has_matched or has_related:\n self.multi_relationship_selected_error_msg()\n 
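                    # Re-attach the selection-changed handler (disconnected at
                    # the top of this method) before the early return, so the
                    # relationship table keeps responding to the user.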
self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n existing_to_lst.append(feat_id)\n has_removed = True\n elif id_matched:\n if has_added or has_removed or has_related:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_matched:\n has_multi_set = True\n existing_to_lst = [feat_id]\n bulk_to_list = [id_matched[1]]\n has_matched = True\n elif ids_existing and ids_bulk:\n if has_added or has_removed or has_matched:\n self.multi_relationship_selected_error_msg()\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n return\n if has_related:\n has_multi_set = True\n existing_to_lst = ids_existing\n bulk_to_list = ids_bulk\n has_related = True\n self.insert_into_list(self.lst_existing, existing_to_lst)\n self.insert_into_list(self.lst_bulk, bulk_to_list)\n self.disable_listwidget(self.lst_existing)\n self.disable_listwidget(self.lst_bulk)\n self.lyr_existing.selectByIds(existing_to_lst)\n self.lyr_bulk_load.selectByIds(bulk_to_list)\n\n # error msg when more than one set of matched or related set are selected\n if has_multi_set:\n self.message_bar_edit.pushMessage(\n \"Multiple matched or related sets selected, can only unlink one at a time.\"\n )\n # switch button\n if has_matched or has_related:\n self.btn_unlink.setEnabled(True)\n self.btn_copy_from_existing.setEnabled(True)\n self.btn_set_attributes.setEnabled(True)\n self.btn_delete_attributes.setEnabled(True)\n self.ledit_name.setEnabled(True)\n self.cbox_use.setEnabled(True)\n self.switch_btn_match_and_related()\n elif has_added and has_removed:\n self.switch_btn_match_and_related()\n elif has_added and not has_removed:\n self.btn_delete.setEnabled(True)\n self.btn_set_attributes.setEnabled(True)\n self.btn_delete_attributes.setEnabled(True)\n self.ledit_name.setEnabled(True)\n self.cbox_use.setEnabled(True)\n # select rows in tbl_relationship\n self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n if has_removed:\n for id_existing in existing_to_lst:\n self.select_row_in_tbl_removed(id_existing)\n elif has_added:\n for id_bulk in bulk_to_list:\n self.select_row_in_tbl_added(id_bulk)\n elif has_matched:\n self.select_row_in_tbl_matched(existing_to_lst[0], bulk_to_list[0])\n elif has_related:\n for id_existing in existing_to_lst:\n for id_bulk in bulk_to_list:\n self.select_row_in_tbl_related(id_existing, id_bulk)\n self.tbl_relationship.setSelectionMode(QAbstractItemView.SingleSelection)\n\n # Add attributes to list for displaying\n if has_removed:\n for id_ in existing_to_lst:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 2).text()\n existing_name = self.tbl_relationship.item(row, 3).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n # if removed and added selected, then alternative extraction of attributes required due to different tables\n if has_added:\n attr_dict = {}\n for item in bulk_attr_to_list:\n added_id = int(\n item[0].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")[0]\n )\n added_use = item[1]\n added_name = item[2]\n attr_dict[added_id] = [added_use, added_name]\n for id_ in bulk_to_list:\n bulk_use = attr_dict[id_][0]\n bulk_name = attr_dict[id_][1]\n self.insert_into_list(\n 
self.lst_bulk_attrs, [[id_, bulk_use, bulk_name]]\n )\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n elif has_added:\n for id_ in bulk_to_list:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n bulk_use = self.tbl_relationship.item(row, 1).text()\n bulk_name = self.tbl_relationship.item(row, 2).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_use, bulk_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n elif has_matched:\n for id_ in existing_to_lst:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 3).text()\n existing_name = self.tbl_relationship.item(row, 4).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n for id_ in bulk_to_list:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n bulk_load_use = self.tbl_relationship.item(row, 5).text()\n bulk_load_name = self.tbl_relationship.item(row, 6).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n elif has_related:\n for id_ in existing_to_lst:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n existing_use = self.tbl_relationship.item(row, 4).text()\n existing_name = self.tbl_relationship.item(row, 5).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n for id_ in bulk_to_list:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 2).text()):\n bulk_load_use = self.tbl_relationship.item(row, 6).text()\n bulk_load_name = self.tbl_relationship.item(row, 7).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n\n # Change item color in the list\n if has_removed or has_added:\n self.update_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n elif has_matched:\n self.update_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n elif has_related:\n self.update_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n\n def unfinished_error_msg(self):\n self.error_dialog = ErrorDialog()\n self.error_dialog.fill_report(\n \"\\n------------- UNFINISHED PROCESS -------------\"\n \"\\n\\nPlease click Save or Cancel to finish before continuing.\"\n )\n self.error_dialog.show()\n\n def unlink_clicked(self, commit_status=True):\n \"\"\"\n Unlink the buildings in the table\n Called when unlink_all botton is clicked\n \"\"\"\n self.btn_unlink.setEnabled(False)\n self.btn_maptool.setEnabled(False)\n self.qa_button_set_enable(False)\n\n ids_existing = self.get_ids_from_lst(self.lst_existing)\n ids_bulk = self.get_ids_from_lst(self.lst_bulk)\n self.insert_into_lyr_removed_in_edit(ids_existing)\n self.insert_into_lyr_added_in_edit(ids_bulk)\n\n self.connect_to_error_msg()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n 
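            # Autosave is off: leave the edit pending and let the user commit
            # it explicitly with the Save button.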
self.btn_save.setEnabled(True)\n\n def matched_clicked(self, commit_status=True):\n \"\"\"\n Match the buildings in the list\n Called when matched botton is clicked\n \"\"\"\n if self.lst_existing.count() == 1 and self.lst_bulk.count() == 1:\n self.btn_matched.setEnabled(False)\n self.btn_delete.setEnabled(False)\n self.btn_maptool.setEnabled(False)\n self.qa_button_set_enable(False)\n\n id_existing = int(self.lst_existing.item(0).text())\n id_bulk = int(self.lst_bulk.item(0).text())\n\n self.insert_into_lyr_matched_existing_in_edit(id_existing)\n self.insert_into_lyr_matched_bulk_load_in_edit(id_bulk)\n\n self.delete_original_relationship_in_existing(id_existing)\n self.delete_original_relationship_in_bulk_load(id_bulk)\n\n self.connect_to_error_msg()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def related_clicked(self, commit_status=True):\n \"\"\"\n Relate the buildings in the list\n Called when related botton is clicked\n \"\"\"\n if self.lst_existing.count() == 0 or self.lst_bulk.count() == 0:\n pass\n elif self.lst_existing.count() == 1 and self.lst_bulk.count() == 1:\n pass\n else:\n self.btn_related.setEnabled(False)\n self.btn_delete.setEnabled(False)\n self.btn_maptool.setEnabled(False)\n self.qa_button_set_enable(False)\n\n for row in range(self.lst_existing.count()):\n id_existing = int(self.lst_existing.item(row).text())\n\n self.insert_into_lyr_related_existing_in_edit(id_existing)\n self.delete_original_relationship_in_existing(id_existing)\n\n for row in range(self.lst_bulk.count()):\n id_bulk = int(self.lst_bulk.item(row).text())\n\n self.insert_into_lyr_related_bulk_load_in_edit(id_bulk)\n self.delete_original_relationship_in_bulk_load(id_bulk)\n\n self.connect_to_error_msg()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def delete_clicked(self, commit_status=True):\n self.deletion_reason = DeletionReason(self.lst_bulk.count())\n self.deletion_reason.show()\n self.deletion_reason.btn_ok.clicked.connect(\n partial(self.reason_given, commit_status)\n )\n self.deletion_reason.btn_cancel.clicked.connect(self.reason_cancel)\n\n def reason_given(self, commit_status):\n self.deletion_reason.close()\n if self.deletion_reason.le_reason.text() != \"\":\n self.btn_matched.setEnabled(False)\n self.btn_related.setEnabled(False)\n self.delete = True\n self.reason_text = self.deletion_reason.le_reason.text()\n self.connect_to_error_msg()\n self.btn_delete.setEnabled(False)\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n else:\n iface.messageBar().pushMessage(\n \"ERROR\",\n \"Please ensure that you enter a reason for deletion, you cannot delete a building otherwise.\",\n level=Qgis.Info,\n duration=5,\n )\n\n def reason_cancel(self):\n self.deletion_reason.close()\n\n def on_click_btn_copy_from_existing(self):\n selected_existing_outlines = self.get_lst_content(self.lst_existing_attrs)\n existing_uses = [row[1][1] for row in selected_existing_outlines]\n existing_names = [row[1][2] for row in selected_existing_outlines]\n non_null_pairs = [\n pair\n for pair in zip(existing_uses, existing_names)\n if pair != (\"None\", \"None\")\n ]\n if non_null_pairs:\n existing_use = non_null_pairs[0][0]\n use_id = self.valid_building_use_ids[existing_use]\n 
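            # Assumes valid_building_use_ids is the use -> use_id reverse of
            # valid_building_uses and that the ids are contiguous, since the
            # id doubles as the combo-box index ("None" sits at index 0).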
self.cbox_use.setCurrentIndex(use_id)\n existing_name = non_null_pairs[0][1]\n self.ledit_name.setText(existing_name)\n else:\n self.cbox_use.setCurrentIndex(0)\n self.ledit_name.setText(\"\")\n\n def on_click_btn_set_attributes(self, commit_status=True):\n use = self.cbox_use.currentText()\n name = self.ledit_name.text()\n name = \"None\" if name == \"\" else name\n if use == \"None\" and name != \"None\":\n self.error_dialog = ErrorDialog()\n self.error_dialog.fill_report(\n \"An outline cannot have a name without a use. Please select a value for use.\"\n )\n self.error_dialog.show()\n return\n for row_id, row_content in self.get_lst_content(self.lst_bulk_attrs):\n updated_row_content = [row_content[0], use, name]\n self.lst_bulk_attrs.item(row_id).setText(str(updated_row_content))\n self.connect_to_error_msg()\n self.attributes_changed = True\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def on_click_btn_delete_attributes(self, commit_status=True):\n for row_id, row_content in self.get_lst_content(self.lst_bulk_attrs):\n updated_row_content = [row_content[0], \"None\", \"None\"]\n self.lst_bulk_attrs.item(row_id).setText(str(updated_row_content))\n self.connect_to_error_msg()\n self.attributes_changed = True\n if self.autosave:\n self.save_clicked(commit_status)\n else:\n self.btn_save.setEnabled(True)\n\n def save_clicked(self, commit_status=True):\n \"\"\"\n Save result and change database\n Called when save button is clicked\n \"\"\"\n self.db.open_cursor()\n\n if self.delete:\n for row in range(self.lst_bulk.count()):\n feat_id = int(self.lst_bulk.item(row).text())\n # remove outline from added table\n sql = \"SELECT buildings_bulk_load.added_delete_bulk_load_outlines(%s);\"\n self.db.execute_no_commit(sql, (feat_id,))\n # change status id\n sql = \"SELECT buildings_bulk_load.bulk_load_outlines_update_bulk_load_status(%s, %s);\"\n self.db.execute_no_commit(sql, (feat_id, 3))\n # insert reason for deletion\n sql = \"SELECT buildings_bulk_load.deletion_description_insert(%s, %s);\"\n self.db.execute_no_commit(sql, (feat_id, self.reason_text))\n self.reason_text = \"\"\n self.delete = False\n elif self.attributes_changed:\n self.update_bulkload_attributes()\n self.attributes_changed = False\n else:\n self.delete_original_relationships()\n self.insert_new_added_outlines()\n self.insert_new_removed_outlines()\n self.insert_new_matched_outlines()\n self.insert_new_related_outlines()\n\n if commit_status:\n self.db.commit_open_cursor()\n\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.reset_buttons()\n self.qa_button_set_enable(True)\n\n self.disconnect_to_error_msg()\n\n self.repaint_view()\n self.clear_layer_filter()\n iface.mapCanvas().refreshAllLayers()\n\n self.refresh_tbl_relationship()\n\n def cancel_clicked(self):\n self.reset_buttons()\n self.qa_button_set_enable(True)\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n try:\n self.disconnect_to_error_msg()\n except TypeError:\n pass\n\n self.repaint_view()\n self.clear_layer_filter()\n iface.mapCanvas().refreshAllLayers()\n\n def exit_clicked(self):\n \"\"\"\n Called when alter building relationships exit button clicked.\n \"\"\"\n self.close_frame()\n\n def close_frame(self):\n \"\"\"\n Clean up and remove the alter building relationships frame.\n 
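        Closes any open edit dialog, removes every layer this frame added,
        clears the building toolbar and returns the dock widget to the
        bulk-load frame.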
\"\"\"\n self.reset_buttons()\n self.qa_button_set_enable(True)\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n if self.change_instance is not None:\n self.edit_dialog.close()\n\n QgsProject.instance().layerWillBeRemoved.disconnect(self.layers_removed)\n for val in [\n str(layer.id())\n for layer in QgsProject.instance().layerTreeRoot().layerOrder()\n ]:\n if \"existing_subset_extracts\" in val:\n self.lyr_existing.removeSelection()\n if \"bulk_load_outlines\" in val:\n self.lyr_bulk_load.removeSelection()\n try:\n self.disconnect_to_error_msg()\n except TypeError:\n pass\n\n self.layer_registry.remove_layer(self.lyr_existing)\n self.layer_registry.remove_layer(self.lyr_bulk_load)\n self.layer_registry.remove_layer(self.lyr_added_bulk_load)\n self.layer_registry.remove_layer(self.lyr_removed_existing)\n self.layer_registry.remove_layer(self.lyr_matched_existing)\n self.layer_registry.remove_layer(self.lyr_matched_bulk_load)\n self.layer_registry.remove_layer(self.lyr_related_bulk_load)\n self.layer_registry.remove_layer(self.lyr_related_existing)\n self.layer_registry.remove_layer(self.lyr_added_bulk_load_in_edit)\n self.layer_registry.remove_layer(self.lyr_removed_existing_in_edit)\n self.layer_registry.remove_layer(self.lyr_matched_existing_in_edit)\n self.layer_registry.remove_layer(self.lyr_matched_bulk_load_in_edit)\n self.layer_registry.remove_layer(self.lyr_related_existing_in_edit)\n self.layer_registry.remove_layer(self.lyr_related_bulk_load_in_edit)\n self.layer_registry.remove_layer(self.lyr_facilities)\n\n for action in iface.building_toolbar.actions():\n if action.text() not in [\"Pan Map\"]:\n iface.building_toolbar.removeAction(action)\n iface.building_toolbar.hide()\n\n from buildings.gui.bulk_load_frame import BulkLoadFrame\n\n dw = self.dockwidget\n dw.stk_options.removeWidget(dw.stk_options.currentWidget())\n dw.new_widget(BulkLoadFrame(dw))\n iface.actionPan().trigger()\n\n def cmb_relationship_current_index_changed(self):\n current_text = self.cmb_relationship.currentText()\n if current_text == \"Related Outlines\":\n self.init_tbl_relationship(\n [\n \"Group\",\n \"Exist ID\",\n \"Bulk ID\",\n \"QA Status\",\n \"Exist Use\",\n \"Exist Name\",\n \"Bulk Use\",\n \"Bulk Name\",\n ]\n )\n self.populate_tbl_related()\n self.btn_next.setEnabled(True)\n self.btn_qa_not_removed.setEnabled(False)\n if self.is_empty_tbl_relationship(\"Related Outlines\"):\n self.qa_button_set_enable(False)\n else:\n self.qa_button_set_enable(True)\n elif current_text == \"Matched Outlines\":\n self.init_tbl_relationship(\n [\n \"Exist ID\",\n \"Bulk ID\",\n \"QA Status\",\n \"Exist Use\",\n \"Exist Name\",\n \"Bulk Use\",\n \"Bulk Name\",\n ]\n )\n self.populate_tbl_matched()\n self.btn_next.setEnabled(True)\n self.btn_qa_not_removed.setEnabled(False)\n if self.is_empty_tbl_relationship(\"Matched Outlines\"):\n self.qa_button_set_enable(False)\n else:\n self.qa_button_set_enable(True)\n elif current_text == \"Removed Outlines\":\n self.init_tbl_relationship(\n [\"Exist ID\", \"QA Status\", \"Exist Use\", \"Exist Name\"]\n )\n self.populate_tbl_removed()\n self.btn_next.setEnabled(True)\n self.btn_qa_not_removed.setEnabled(True)\n if self.is_empty_tbl_relationship(\"Removed Outlines\"):\n self.qa_button_set_enable(False)\n self.btn_qa_not_removed.setEnabled(False)\n else:\n self.qa_button_set_enable(True)\n elif current_text == \"Added Outlines\":\n self.init_tbl_relationship([\"Bulk ID\", \"Bulk Use\", \"Bulk Name\"])\n 
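# added outlines have no QA status column, so the QA buttons below stay disabled\n            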
self.populate_tbl_added()\n self.btn_qa_not_removed.setEnabled(False)\n self.qa_button_set_enable(False)\n self.btn_next.setEnabled(False)\n\n elif current_text == \"\":\n self.tbl_relationship.setRowCount(0)\n self.tbl_relationship.setColumnCount(0)\n self.qa_button_set_enable(False)\n self.btn_qa_not_removed.setEnabled(False)\n self.btn_next.setEnabled(False)\n\n self.disable_tbl_editing(self.tbl_relationship)\n\n def tbl_relationship_item_selection_changed(self):\n\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n if self.has_no_selection_in_table(self.tbl_relationship):\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n return\n\n row = self.tbl_relationship.selectionModel().selectedRows()[0].row()\n current_text = self.cmb_relationship.currentText()\n\n if current_text == \"Related Outlines\":\n id_existing = int(self.tbl_relationship.item(row, 1).text())\n id_bulk = int(self.tbl_relationship.item(row, 2).text())\n ids_existing, ids_bulk = self.find_related_existing_outlines(id_bulk)\n self.insert_into_list(self.lst_existing, ids_existing)\n self.insert_into_list(self.lst_bulk, ids_bulk)\n self.update_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n self.lyr_existing.selectByIds(ids_existing)\n self.lyr_bulk_load.selectByIds(ids_bulk)\n self.btn_unlink.setEnabled(True)\n\n # Add related attributes to list for displaying\n for id_ in ids_existing:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n existing_use = self.tbl_relationship.item(row, 4).text()\n existing_name = self.tbl_relationship.item(row, 5).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n for id_ in ids_bulk:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 2).text()):\n bulk_load_use = self.tbl_relationship.item(row, 6).text()\n bulk_load_name = self.tbl_relationship.item(row, 7).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#e601ff\"), QColor(\"#e601ff\"))\n\n elif current_text == \"Matched Outlines\":\n row = self.tbl_relationship.selectionModel().selectedRows()[0].row()\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n id_bulk = int(self.tbl_relationship.item(row, 1).text())\n\n ids_existing = [id_existing]\n ids_bulk = [id_bulk]\n\n self.insert_into_list(self.lst_existing, ids_existing)\n self.insert_into_list(self.lst_bulk, ids_bulk)\n self.update_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n self.lyr_existing.selectByIds(ids_existing)\n self.lyr_bulk_load.selectByIds(ids_bulk)\n self.btn_unlink.setEnabled(True)\n\n # Add matched attributes to list for displaying\n for id_ in ids_existing:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 3).text()\n existing_name = self.tbl_relationship.item(row, 4).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n break\n for id_ in ids_bulk:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 1).text()):\n bulk_load_use = self.tbl_relationship.item(row, 5).text()\n bulk_load_name = self.tbl_relationship.item(row, 6).text()\n 
self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_load_use, bulk_load_name]]\n )\n break\n self.update_attr_list_item_color(QColor(\"#00b4d4\"), QColor(\"#00b4d4\"))\n\n elif current_text == \"Removed Outlines\":\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n self.insert_into_list(self.lst_existing, [id_existing])\n self.update_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n self.lyr_existing.selectByIds([id_existing])\n self.lyr_bulk_load.selectByIds([])\n\n # Add removed attributes to list for displaying\n for id_ in [id_existing]:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n existing_use = self.tbl_relationship.item(row, 2).text()\n existing_name = self.tbl_relationship.item(row, 3).text()\n self.insert_into_list(\n self.lst_existing_attrs,\n [[id_, existing_use, existing_name]],\n )\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n\n elif current_text == \"Added Outlines\":\n id_bulk = int(self.tbl_relationship.item(row, 0).text())\n self.insert_into_list(self.lst_bulk, [id_bulk])\n self.update_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n self.lyr_bulk_load.selectByIds([id_bulk])\n self.btn_delete.setEnabled(True)\n\n # Add added attributes to list for displaying\n for id_ in [id_bulk]:\n for row in range(self.tbl_relationship.rowCount()):\n if id_ == int(self.tbl_relationship.item(row, 0).text()):\n bulk_use = self.tbl_relationship.item(row, 1).text()\n bulk_name = self.tbl_relationship.item(row, 2).text()\n self.insert_into_list(\n self.lst_bulk_attrs, [[id_, bulk_use, bulk_name]]\n )\n self.update_attr_list_item_color(QColor(\"#ff2b01\"), QColor(\"#3f9800\"))\n\n if self.zoom:\n self.zoom_to_feature()\n\n def btn_qa_status_clicked(self, qa_status, commit_status=True):\n\n selected_rows = [\n index.row()\n for index in self.tbl_relationship.selectionModel().selectedRows()\n ]\n if not selected_rows:\n return\n self.tbl_relationship.itemSelectionChanged.disconnect(\n self.tbl_relationship_item_selection_changed\n )\n self.db.open_cursor()\n\n qa_status_id = self.get_qa_status_id(qa_status)\n current_text = self.cmb_relationship.currentText()\n\n ids_existing, ids_bulk = [], []\n existing_use, existing_name = [], []\n\n if current_text == \"Related Outlines\":\n if qa_status_id == 5:\n return\n qa_column = 3\n for row in selected_rows:\n id_existing = int(self.tbl_relationship.item(row, 1).text())\n id_bulk = int(self.tbl_relationship.item(row, 2).text())\n self.update_qa_status_in_related(id_existing, id_bulk, qa_status_id)\n ids_existing, ids_bulk = self.find_related_existing_outlines(id_bulk)\n elif current_text == \"Matched Outlines\":\n if qa_status_id == 5:\n return\n qa_column = 2\n for row in selected_rows:\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n id_bulk = int(self.tbl_relationship.item(row, 1).text())\n self.update_qa_status_in_matched(id_existing, id_bulk, qa_status_id)\n ids_existing.append(id_existing)\n ids_bulk.append(id_bulk)\n elif current_text == \"Removed Outlines\":\n qa_column = 1\n selected_ids = []\n for row in selected_rows:\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n selected_ids.append(id_existing)\n self.update_qa_status_in_removed(id_existing, qa_status_id)\n ids_existing.append(id_existing)\n if qa_status_id == 5:\n self.copy_and_match_removed_building()\n self.cmb_relationship.setCurrentIndex(\n self.cmb_relationship.findText(\"Matched Outlines\")\n )\n\n if 
commit_status:\n self.db.commit_open_cursor()\n\n self.refresh_tbl_relationship()\n self.reset_buttons()\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n\n # Move to the next 'not checked'\n if qa_status_id != 5:\n for row in range(max(selected_rows) + 1, self.tbl_relationship.rowCount()):\n if self.scroll_to_next(row, qa_column, selected_rows):\n break\n if not self.tbl_relationship.selectionModel().selectedRows():\n self.tbl_relationship.selectRow(max(selected_rows))\n item = self.tbl_relationship.item(max(selected_rows), qa_column)\n self.tbl_relationship.scrollToItem(item)\n elif qa_status_id == 5:\n for row in range(self.tbl_relationship.rowCount()):\n id_existing = int(self.tbl_relationship.item(row, 0).text())\n if id_existing in selected_ids:\n self.zoom = False\n self.tbl_relationship.selectRow(row)\n self.tbl_relationship.scrollToItem(\n self.tbl_relationship.item(row, qa_column)\n )\n self.zoom = True\n break\n if len(selected_ids) > 1:\n self.message_bar_edit.pushMessage(\n \"You cannot have multiple selected matched relationships. \"\n \"The first (ordered numerically) has been selected\"\n )\n\n def zoom_to_next(self):\n found = False\n selected_rows = [\n index.row()\n for index in self.tbl_relationship.selectionModel().selectedRows()\n ]\n if not selected_rows:\n selected_rows = [-1]\n current_text = self.cmb_relationship.currentText()\n if current_text == \"Related Outlines\":\n qa_column = 3\n elif current_text == \"Matched Outlines\":\n qa_column = 2\n elif current_text == \"Removed Outlines\":\n qa_column = 1\n for row in range(max(selected_rows) + 1, self.tbl_relationship.rowCount()):\n if self.scroll_to_next(row, qa_column, selected_rows):\n found = True\n break\n if not found:\n selected_rows = [0]\n for row in range(self.tbl_relationship.rowCount()):\n if self.scroll_to_next(row, qa_column, selected_rows):\n break\n\n def cb_lyr_bulk_load_state_changed(self):\n legend = QgsProject.instance().layerTreeRoot()\n if self.cb_lyr_bulk_load.isChecked():\n legend.findLayer(\n self.lyr_added_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_matched_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_related_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(self.lyr_added_bulk_load.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_matched_bulk_load.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_related_bulk_load.id()).setItemVisibilityChecked(\n True\n )\n else:\n legend.findLayer(\n self.lyr_added_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_matched_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_related_bulk_load_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(self.lyr_added_bulk_load.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_matched_bulk_load.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_related_bulk_load.id()).setItemVisibilityChecked(\n False\n )\n\n def cb_lyr_existing_state_changed(self):\n legend = QgsProject.instance().layerTreeRoot()\n if self.cb_lyr_existing.isChecked():\n legend.findLayer(\n 
self.lyr_removed_existing_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_matched_existing_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(\n self.lyr_related_existing_in_edit.id()\n ).setItemVisibilityChecked(True)\n legend.findLayer(self.lyr_removed_existing.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_matched_existing.id()).setItemVisibilityChecked(\n True\n )\n legend.findLayer(self.lyr_related_existing.id()).setItemVisibilityChecked(\n True\n )\n else:\n legend.findLayer(\n self.lyr_removed_existing_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_matched_existing_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(\n self.lyr_related_existing_in_edit.id()\n ).setItemVisibilityChecked(False)\n legend.findLayer(self.lyr_removed_existing.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_matched_existing.id()).setItemVisibilityChecked(\n False\n )\n legend.findLayer(self.lyr_related_existing.id()).setItemVisibilityChecked(\n False\n )\n\n def cb_autosave_state_changed(self):\n if self.btn_save.isEnabled():\n self.unfinished_error_msg()\n self.cb_autosave.setCheckState(0)\n self.autosave = False\n self.btn_save.setVisible(True)\n return\n if self.cb_autosave.isChecked():\n if self.confirm_to_autosave():\n self.autosave = True\n self.btn_save.setVisible(False)\n else:\n self.cb_autosave.setCheckState(0)\n self.autosave = False\n self.btn_save.setVisible(True)\n else:\n self.autosave = False\n self.btn_save.setVisible(True)\n\n def layers_removed(self, layerids):\n self.layer_registry.update_layers()\n layers = [\n \"added_bulk_load_in_edit\",\n \"removed_existing_in_edit\",\n \"matched_existing_in_edit\",\n \"matched_bulk_load_in_edit\",\n \"related_existing_in_edit\",\n \"related_bulk_load_in_edit\",\n \"added_outlines\",\n \"removed_outlines\",\n \"matched_existing_outlines\",\n \"matched_bulk_load_outlines\",\n \"related_existing_outlines\",\n \"related_bulk_load_outlines\",\n \"bulk_load_outlines\",\n \"existing_subset_extracts\",\n ]\n for layer in layers:\n if layer in layerids:\n self.cmb_relationship.setDisabled(1)\n self.btn_qa_not_checked.setDisabled(1)\n self.btn_qa_refer2supplier.setDisabled(1)\n self.btn_qa_pending.setDisabled(1)\n self.btn_qa_okay.setDisabled(1)\n self.btn_qa_not_removed.setDisabled(1)\n self.btn_maptool.setDisabled(1)\n self.btn_unlink.setDisabled(1)\n self.btn_matched.setDisabled(1)\n self.btn_related.setDisabled(1)\n self.btn_delete.setDisabled(1)\n self.btn_cancel.setDisabled(1)\n self.btn_save.setDisabled(1)\n self.cb_autosave.setDisabled(1)\n self.cb_lyr_bulk_load.setDisabled(1)\n self.cb_lyr_existing.setDisabled(1)\n iface.messageBar().pushMessage(\n \"ERROR\",\n \"Required layer Removed! 
Please reload the buildings plugin or the current frame before continuing\",\n level=Qgis.Critical,\n duration=5,\n )\n return\n\n def copy_and_match_removed_building(self):\n # iterate through all the selected removed buildings\n for feature in self.lyr_existing.selectedFeatures():\n # get geometry\n geometry = self.db.execute_no_commit(\n general_select.convert_geometry, (feature.geometry().asWkt(),)\n )\n geometry = geometry.fetchall()[0][0]\n sql = (\n buildings_select.building_outlines_capture_method_id_by_building_outline_id\n )\n building_outline_id = feature.attributes()[0]\n # get capture method of existing outline\n capture_method = self.db.execute_no_commit(sql, (building_outline_id,))\n capture_method = capture_method.fetchall()[0][0]\n sql = (\n bulk_load_select.bulk_load_outlines_capture_source_by_supplied_dataset_id\n )\n # get capture source of current dataset\n capture_source = self.db.execute_no_commit(sql, (self.current_dataset,))\n capture_source = capture_source.fetchall()[0][0]\n # get suburb, town_city and territorial authority of existing outline\n sql = (\n buildings_select.building_outlines_suburb_locality_id_by_building_outline_id\n )\n suburb = self.db.execute_no_commit(sql, (building_outline_id,))\n suburb = suburb.fetchall()[0][0]\n sql = buildings_select.building_outlines_town_city_id_by_building_outline_id\n town_city = self.db.execute_no_commit(sql, (building_outline_id,))\n town_city = town_city.fetchall()[0][0]\n sql = (\n buildings_select.building_outlines_territorial_authority_id_by_building_outline\n )\n territorial_auth = self.db.execute_no_commit(sql, (building_outline_id,))\n territorial_auth = territorial_auth.fetchall()[0][0]\n # insert outline into building_bulk_load.bulk_load_outlines\n sql = \"SELECT buildings_bulk_load.bulk_load_outlines_insert(%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n bulk_load_id = self.db.execute_no_commit(\n sql,\n (\n self.current_dataset,\n None,\n 2,\n capture_method,\n capture_source,\n suburb,\n town_city,\n territorial_auth,\n geometry,\n ),\n )\n bulk_load_id = bulk_load_id.fetchall()[0][0]\n # remove existing building from removed table\n sql = \"SELECT buildings_bulk_load.removed_delete_existing_outline(%s);\"\n self.db.execute_no_commit(sql, (building_outline_id,))\n # add existing and new building to matched table\n sql = \"SELECT buildings_bulk_load.matched_insert_building_outlines(%s, %s);\"\n self.db.execute_no_commit(sql, (bulk_load_id, building_outline_id))\n # change to not checked\n sql = \"SELECT buildings_bulk_load.matched_update_qa_status_id(%s, %s, %s);\"\n self.db.execute_no_commit(sql, (1, building_outline_id, bulk_load_id))\n # refresh to get new outlines\n iface.mapCanvas().refreshAllLayers()\n\n def confirm_to_autosave(self):\n reply = self.msgbox.exec_()\n if reply == QMessageBox.Yes:\n return True\n return False\n\n def switch_btn_match_and_related(self):\n if self.lst_bulk.count() == 0 or self.lst_existing.count() == 0:\n pass\n elif self.lst_bulk.count() == 1 and self.lst_existing.count() == 1:\n self.btn_matched.setEnabled(True)\n self.btn_related.setEnabled(False)\n self.btn_delete.setEnabled(True)\n else:\n self.btn_related.setEnabled(True)\n self.btn_matched.setEnabled(False)\n self.btn_delete.setEnabled(True)\n\n def multi_relationship_selected_error_msg(self):\n self.error_dialog = ErrorDialog()\n self.error_dialog.fill_report(\n \"\\n------------- MULTIPLE RELATIONSHIP SELECTED -------------\"\n \"\\n\\nThere are multiple relationships selected. 
Please unlink \"\n \"matched or related outlines before altering relationships.\"\n )\n self.error_dialog.show()\n\n def find_added_outlines(self, id_bulk):\n result = self.db.execute_return(\n bulk_load_select.added_by_bulk_load_outline_id_dataset_id,\n (id_bulk, self.current_dataset),\n )\n return result.fetchone()\n\n def find_removed_outlines(self, id_existing):\n result = self.db.execute_return(\n bulk_load_select.removed_by_existing_outline_id_dataset_id,\n (id_existing, self.current_dataset),\n )\n return result.fetchone()\n\n def find_matched_existing_outlines(self, id_bulk):\n result = self.db.execute_return(\n bulk_load_select.matched_by_bulk_load_outline_id_dataset_id,\n (id_bulk, self.current_dataset),\n )\n return result.fetchone()\n\n def find_matched_bulk_load_outlines(self, id_existing):\n ids_existing, ids_bulk = [], []\n existing_use, existing_name = [], []\n\n result = self.db.execute_return(\n bulk_load_select.matched_by_existing_outline_id_dataset_id,\n (id_existing, self.current_dataset),\n )\n return result.fetchone()\n\n def find_related_existing_outlines(self, id_bulk):\n ids_existing, ids_bulk = [], []\n existing_use, existing_name = [], []\n bulk_load_use, bulk_load_name = [], []\n\n result = self.db.execute_return(\n bulk_load_select.related_by_bulk_load_outline_id_dataset_id,\n (id_bulk, self.current_dataset),\n )\n for (\n id_existing,\n id_bulk,\n existing_use,\n existing_name,\n bulk_load_use,\n bulk_load_name,\n ) in result.fetchall():\n ids_existing.append(id_existing)\n ids_bulk.append(id_bulk)\n return list(set(ids_existing)), list(set(ids_bulk))\n\n def find_related_bulk_load_outlines(self, id_existing):\n ids_existing, ids_bulk = [], []\n existing_use, existing_name = [], []\n\n result = self.db.execute_return(\n bulk_load_select.related_by_existing_outline_id_dataset_id,\n (id_existing, self.current_dataset),\n )\n for (id_existing, id_bulk) in result.fetchall():\n ids_existing.append(id_existing)\n ids_bulk.append(id_bulk)\n return list(set(ids_existing)), list(set(ids_bulk))\n\n def insert_into_table(self, tbl, ids):\n rows = []\n for (id_existing, id_bulk) in ids:\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n if id_existing:\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_existing))\n if id_bulk:\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % id_bulk))\n rows.append(row_tbl)\n return rows\n\n def connect_to_error_msg(self):\n self.tool.multi_selection_changed.disconnect(self.multi_selection_changed)\n self.tool.multi_selection_changed.connect(self.unfinished_error_msg)\n self.tbl_relationship.itemSelectionChanged.disconnect(\n self.tbl_relationship_item_selection_changed\n )\n self.tbl_relationship.itemSelectionChanged.connect(self.unfinished_error_msg)\n self.reset_buttons()\n self.btn_maptool.setEnabled(False)\n\n def disconnect_to_error_msg(self):\n self.tool.multi_selection_changed.disconnect(self.unfinished_error_msg)\n self.tool.multi_selection_changed.connect(self.multi_selection_changed)\n self.tbl_relationship.itemSelectionChanged.disconnect(self.unfinished_error_msg)\n self.tbl_relationship.itemSelectionChanged.connect(\n self.tbl_relationship_item_selection_changed\n )\n self.reset_buttons()\n self.btn_maptool.setEnabled(True)\n\n def has_no_selection_in_table(self, tbl):\n if not tbl.selectionModel().selectedRows():\n return True\n return False\n\n def insert_into_lyr_removed_in_edit(self, ids_existing):\n for id_existing in ids_existing:\n filter_ = self.lyr_removed_existing_in_edit.subsetString()\n 
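# widen the in-edit layer filter so this outline is drawn as a removed outline\n            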
self.lyr_removed_existing_in_edit.setSubsetString(\n filter_ + \" or building_outline_id = %s\" % id_existing\n )\n\n def insert_into_lyr_added_in_edit(self, ids_bulk):\n for id_bulk in ids_bulk:\n filter_ = self.lyr_added_bulk_load_in_edit.subsetString()\n self.lyr_added_bulk_load_in_edit.setSubsetString(\n filter_ + \" or bulk_load_outline_id = %s\" % id_bulk\n )\n\n def delete_original_relationship_in_existing(self, id_existing):\n \"\"\"\n Remove features in the view layer\n \"\"\"\n if not self.lyr_removed_existing.subsetString():\n self.lyr_removed_existing.setSubsetString(\n '\"building_outline_id\" != %s' % id_existing\n )\n else:\n self.lyr_removed_existing.setSubsetString(\n self.lyr_removed_existing.subsetString()\n + ' and \"building_outline_id\" != %s' % id_existing\n )\n\n if not self.lyr_matched_existing.subsetString():\n self.lyr_matched_existing.setSubsetString(\n '\"building_outline_id\" != %s' % id_existing\n )\n else:\n self.lyr_matched_existing.setSubsetString(\n self.lyr_matched_existing.subsetString()\n + ' and \"building_outline_id\" != %s' % id_existing\n )\n\n if not self.lyr_related_existing.subsetString():\n self.lyr_related_existing.setSubsetString(\n '\"building_outline_id\" != %s' % id_existing\n )\n else:\n self.lyr_related_existing.setSubsetString(\n self.lyr_related_existing.subsetString()\n + ' and \"building_outline_id\" != %s' % id_existing\n )\n\n def delete_original_relationship_in_bulk_load(self, id_bulk):\n \"\"\"\n Remove features in the view layer\n \"\"\"\n if not self.lyr_added_bulk_load.subsetString():\n self.lyr_added_bulk_load.setSubsetString(\n '\"bulk_load_outline_id\" != %s' % id_bulk\n )\n else:\n self.lyr_added_bulk_load.setSubsetString(\n self.lyr_added_bulk_load.subsetString()\n + ' and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n if not self.lyr_matched_bulk_load.subsetString():\n self.lyr_matched_bulk_load.setSubsetString(\n '\"bulk_load_outline_id\" != %s' % id_bulk\n )\n else:\n self.lyr_matched_bulk_load.setSubsetString(\n self.lyr_matched_bulk_load.subsetString()\n + ' and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n if not self.lyr_related_bulk_load.subsetString():\n self.lyr_related_bulk_load.setSubsetString(\n '\"bulk_load_outline_id\" != %s' % id_bulk\n )\n else:\n self.lyr_related_bulk_load.setSubsetString(\n self.lyr_related_bulk_load.subsetString()\n + ' and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n def reset_buttons(self):\n self.btn_unlink.setEnabled(False)\n self.btn_matched.setEnabled(False)\n self.btn_related.setEnabled(False)\n self.btn_delete.setEnabled(False)\n self.btn_save.setEnabled(False)\n self.btn_maptool.setEnabled(True)\n self.btn_copy_from_existing.setEnabled(False)\n self.btn_set_attributes.setEnabled(False)\n self.btn_delete_attributes.setEnabled(False)\n self.cbox_use.setEnabled(False)\n self.ledit_name.setEnabled(False)\n\n def qa_button_set_enable(self, boolean):\n self.btn_qa_okay.setEnabled(boolean)\n self.btn_qa_pending.setEnabled(boolean)\n self.btn_qa_refer2supplier.setEnabled(boolean)\n self.btn_qa_not_checked.setEnabled(boolean)\n\n def insert_into_list(self, lst, ids):\n for fid in ids:\n lst.addItem(QListWidgetItem(\"%s\" % fid))\n\n def get_ids_from_lst(self, lst):\n feat_ids = []\n for row in range(lst.count()):\n feat_ids.append(int(lst.item(row).text()))\n return feat_ids\n\n @staticmethod\n def get_lst_content(lst):\n \"\"\"\n Returns a list of tuples of the row_id and the row content evaluated using `literal_eval`.\n \"\"\"\n return [(n, 
literal_eval(lst.item(n).text())) for n in range(lst.count())]\n\n def disable_listwidget(self, lst):\n for row in range(lst.count()):\n item = lst.item(row)\n item.setFlags(Qt.ItemIsEnabled)\n\n def update_list_item_color(self, existing_color, bulk_color):\n for i in range(self.lst_existing.count()):\n self.lst_existing.item(i).setForeground(QColor(existing_color))\n for i in range(self.lst_bulk.count()):\n self.lst_bulk.item(i).setForeground(QColor(bulk_color))\n\n def update_attr_list_item_color(self, existing_color, bulk_color):\n for i in range(self.lst_existing_attrs.count()):\n self.lst_existing_attrs.item(i).setForeground(QColor(existing_color))\n for i in range(self.lst_bulk_attrs.count()):\n self.lst_bulk_attrs.item(i).setForeground(QColor(bulk_color))\n\n def delete_from_lyr_removed_in_edit(self, id_existing):\n filter_ = self.lyr_removed_existing_in_edit.subsetString()\n self.lyr_removed_existing_in_edit.setSubsetString(\n \"(\" + filter_ + ') and \"building_outline_id\" != %s' % id_existing\n )\n\n def delete_from_lyr_added_in_edit(self, id_bulk):\n filter_ = self.lyr_added_bulk_load_in_edit.subsetString()\n self.lyr_added_bulk_load_in_edit.setSubsetString(\n \"(\" + filter_ + ') and \"bulk_load_outline_id\" != %s' % id_bulk\n )\n\n def insert_into_lyr_matched_existing_in_edit(self, id_existing):\n self.lyr_matched_existing_in_edit.setSubsetString(\n '\"building_outline_id\" = %s' % id_existing\n )\n\n def insert_into_lyr_matched_bulk_load_in_edit(self, id_bulk):\n self.lyr_matched_bulk_load_in_edit.setSubsetString(\n '\"bulk_load_outline_id\" = %s' % id_bulk\n )\n\n def insert_into_lyr_related_existing_in_edit(self, id_existing):\n filter_ = self.lyr_related_existing_in_edit.subsetString()\n self.lyr_related_existing_in_edit.setSubsetString(\n filter_ + ' or \"building_outline_id\" = %s' % id_existing\n )\n\n def insert_into_lyr_related_bulk_load_in_edit(self, id_bulk):\n filter_ = self.lyr_related_bulk_load_in_edit.subsetString()\n self.lyr_related_bulk_load_in_edit.setSubsetString(\n filter_ + ' or \"bulk_load_outline_id\" = %s' % id_bulk\n )\n\n def delete_original_relationships(self):\n sql_delete_related_existing = (\n \"SELECT buildings_bulk_load.related_delete_existing_outlines(%s);\"\n )\n sql_delete_matched_existing = (\n \"SELECT buildings_bulk_load.matched_delete_existing_outlines(%s);\"\n )\n sql_delete_removed = (\n \"SELECT buildings_bulk_load.removed_delete_existing_outline(%s);\"\n )\n sql_delete_added = (\n \"SELECT buildings_bulk_load.added_delete_bulk_load_outlines(%s);\"\n )\n\n for row in range(self.lst_existing.count()):\n item = self.lst_existing.item(row)\n id_existing = int(item.text())\n self.db.execute_no_commit(sql_delete_removed, (id_existing,))\n self.db.execute_no_commit(sql_delete_matched_existing, (id_existing,))\n self.db.execute_no_commit(sql_delete_related_existing, (id_existing,))\n\n for row in range(self.lst_bulk.count()):\n item = self.lst_bulk.item(row)\n id_bulk = int(item.text())\n\n self.db.execute_no_commit(sql_delete_added, (id_bulk,))\n\n def insert_new_added_outlines(self):\n # added\n sql_insert_added = (\n \"SELECT buildings_bulk_load.added_insert_bulk_load_outlines(%s, %s);\"\n )\n for feat in self.lyr_added_bulk_load_in_edit.getFeatures():\n id_bulk = feat[\"bulk_load_outline_id\"]\n self.db.execute_no_commit(sql_insert_added, (id_bulk, 2))\n\n def insert_new_removed_outlines(self):\n # removed\n sql_insert_removed = (\n \"SELECT buildings_bulk_load.removed_insert_building_outlines(%s);\"\n )\n for feat in 
self.lyr_removed_existing_in_edit.getFeatures():\n id_existing = feat[\"building_outline_id\"]\n self.db.execute_no_commit(sql_insert_removed, (id_existing,))\n\n def insert_new_matched_outlines(self):\n # matched\n sql_insert_matched = (\n \"SELECT buildings_bulk_load.matched_insert_building_outlines(%s, %s);\"\n )\n for feat1 in self.lyr_matched_bulk_load_in_edit.getFeatures():\n id_bulk = feat1[\"bulk_load_outline_id\"]\n for feat2 in self.lyr_matched_existing_in_edit.getFeatures():\n id_existing = feat2[\"building_outline_id\"]\n self.db.execute_no_commit(sql_insert_matched, (id_bulk, id_existing))\n\n def insert_new_related_outlines(self):\n # related\n related_outlines = [\n feat for feat in self.lyr_related_bulk_load_in_edit.getFeatures()\n ]\n if related_outlines:\n sql_insert_related_group = (\n \"SELECT buildings_bulk_load.related_group_insert();\"\n )\n result = self.db.execute_no_commit(sql_insert_related_group)\n new_group_id = result.fetchone()[0]\n sql_insert_related = (\n \"SELECT buildings_bulk_load.related_insert_building_outlines(%s, %s, %s);\"\n )\n for feat1 in self.lyr_related_bulk_load_in_edit.getFeatures():\n id_bulk = feat1[\"bulk_load_outline_id\"]\n for feat2 in self.lyr_related_existing_in_edit.getFeatures():\n id_existing = feat2[\"building_outline_id\"]\n self.db.execute_no_commit(\n sql_insert_related, (new_group_id, id_bulk, id_existing)\n )\n\n def update_bulkload_attributes(self):\n sql_update_attrs = \"\"\"\n UPDATE buildings_bulk_load.bulk_load_outlines\n SET bulk_load_use_id = %s, bulk_load_name = %s\n WHERE bulk_load_outline_id = %s;\n \"\"\"\n for row_id, (id_, use, name) in self.get_lst_content(self.lst_bulk_attrs):\n use_id = self.valid_building_use_ids[use]\n if name in {\"\", \"None\"}:\n name = None\n self.db.execute_no_commit(sql_update_attrs, (use_id, name, id_))\n\n def disable_tbl_editing(self, tbl):\n \"\"\"Disable editing so item cannot be changed in the table\"\"\"\n for row in range(tbl.rowCount()):\n tbl.showRow(row)\n for col in range(tbl.columnCount()):\n if tbl.item(row, col):\n tbl.item(row, col).setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)\n\n def refresh_tbl_relationship(self):\n \"\"\"Refresh tbl_relationship by switching cmb_relationship\"\"\"\n index = self.cmb_relationship.currentIndex()\n self.cmb_relationship.setCurrentIndex(0)\n self.cmb_relationship.setCurrentIndex(index)\n\n def populate_cmb_relationship(self):\n \"\"\"Populates cmb_relationship\"\"\"\n self.cmb_relationship.clear()\n item_list = [\n \"Removed Outlines\",\n \"Matched Outlines\",\n \"Related Outlines\",\n \"Added Outlines\",\n ]\n self.cmb_relationship.addItems([\"\"] + item_list)\n\n def init_tbl_relationship(self, header_items):\n \"\"\"Initiates tbl_relationship\"\"\"\n tbl = self.tbl_relationship\n tbl.setRowCount(0)\n tbl.setColumnCount(len(header_items))\n\n for i, header_item in enumerate(header_items):\n tbl.setHorizontalHeaderItem(i, QTableWidgetItem(header_item))\n\n tbl.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n tbl.verticalHeader().setVisible(False)\n\n tbl.setSelectionBehavior(QAbstractItemView.SelectRows)\n tbl.setSelectionMode(QAbstractItemView.SingleSelection)\n\n tbl.setShowGrid(True)\n\n def populate_tbl_related(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to related\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.related_by_dataset_id, (self.current_dataset,)\n )\n for (\n id_group,\n id_existing,\n id_bulk,\n qa_status,\n exist_use,\n 
exist_name,\n bulk_use,\n bulk_name,\n ) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_group))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % id_existing))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % id_bulk))\n tbl.setItem(row_tbl, 3, QTableWidgetItem(\"%s\" % qa_status))\n tbl.setItem(row_tbl, 4, QTableWidgetItem(\"%s\" % exist_use))\n tbl.setItem(row_tbl, 5, QTableWidgetItem(\"%s\" % exist_name))\n tbl.setItem(row_tbl, 6, QTableWidgetItem(\"%s\" % bulk_use))\n tbl.setItem(row_tbl, 7, QTableWidgetItem(\"%s\" % bulk_name))\n\n def populate_tbl_matched(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to matched\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.matched_by_dataset_id, (self.current_dataset,)\n )\n for (\n id_existing,\n id_bulk,\n qa_status,\n exist_use,\n exist_name,\n bulk_use,\n bulk_name,\n ) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_existing))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % id_bulk))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % qa_status))\n tbl.setItem(row_tbl, 3, QTableWidgetItem(\"%s\" % exist_use))\n tbl.setItem(row_tbl, 4, QTableWidgetItem(\"%s\" % exist_name))\n tbl.setItem(row_tbl, 5, QTableWidgetItem(\"%s\" % bulk_use))\n tbl.setItem(row_tbl, 6, QTableWidgetItem(\"%s\" % bulk_name))\n\n def populate_tbl_removed(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to removed\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.removed_by_dataset_id, (self.current_dataset,)\n )\n for (id_existing, qa_status, exist_use, exist_name) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_existing))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % qa_status))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % exist_use))\n tbl.setItem(row_tbl, 3, QTableWidgetItem(\"%s\" % exist_name))\n\n def populate_tbl_added(self):\n \"\"\"Populates tbl_relationship when cmb_relationship switches to added\"\"\"\n tbl = self.tbl_relationship\n result = self.db.execute_return(\n bulk_load_select.added_by_dataset_id, (self.current_dataset,)\n )\n for (id_bulk_load, bulk_use, bulk_name) in result.fetchall():\n row_tbl = tbl.rowCount()\n tbl.setRowCount(row_tbl + 1)\n tbl.setItem(row_tbl, 0, QTableWidgetItem(\"%s\" % id_bulk_load))\n tbl.setItem(row_tbl, 1, QTableWidgetItem(\"%s\" % bulk_use))\n tbl.setItem(row_tbl, 2, QTableWidgetItem(\"%s\" % bulk_name))\n\n def is_empty_tbl_relationship(self, relationship):\n if self.tbl_relationship.rowCount() == 0:\n self.message_bar_qa.pushMessage(\n \"%s are not available in the current dataset.\" % relationship\n )\n return True\n return False\n\n def get_qa_status_id(self, qa_status):\n \"\"\"Returns qa_status_id according to the sender button\"\"\"\n if qa_status == \"Okay\":\n qa_status_id = 2\n elif qa_status == \"Pending\":\n qa_status_id = 3\n elif qa_status == \"Refer to Supplier\":\n qa_status_id = 4\n elif qa_status == \"Not Checked\":\n qa_status_id = 1\n elif qa_status == \"Not Removed\":\n qa_status_id = 5\n else:\n qa_status_id = None\n return qa_status_id\n\n def zoom_to_feature(self):\n\n extent = None\n for lyr in [self.lyr_existing, self.lyr_bulk_load]:\n selected_feat = [feat for feat in 
lyr.selectedFeatures()]\n if selected_feat:\n if not extent:\n extent = lyr.boundingBoxOfSelected()\n else:\n extent.combineExtentWith(lyr.boundingBoxOfSelected())\n if extent:\n iface.mapCanvas().setExtent(extent)\n iface.mapCanvas().zoomScale(300.0)\n\n def scroll_to_next(self, row, qa_column, selected_rows):\n item = self.tbl_relationship.item(row, qa_column)\n if item.text() == \"Not Checked\":\n self.tbl_relationship.selectRow(row)\n self.tbl_relationship.scrollToItem(item)\n return True\n return False\n\n def update_qa_status_in_related(self, id_existing, id_bulk, qa_status_id):\n \"\"\"Updates qa_status_id in related table\"\"\"\n sql_update_related = (\n \"SELECT buildings_bulk_load.related_update_qa_status_id(%s, %s, %s);\"\n )\n self.db.execute_no_commit(\n sql_update_related, (qa_status_id, id_existing, id_bulk)\n )\n\n def update_qa_status_in_matched(self, id_existing, id_bulk, qa_status_id):\n \"\"\"Updates qa_status_id in matched table\"\"\"\n sql_update_matched = (\n \"SELECT buildings_bulk_load.matched_update_qa_status_id(%s, %s, %s);\"\n )\n self.db.execute_no_commit(\n sql_update_matched, (qa_status_id, id_existing, id_bulk)\n )\n\n def update_qa_status_in_removed(self, id_existing, qa_status_id):\n \"\"\"Updates qa_status_id in removed table\"\"\"\n sql_update_removed = (\n \"SELECT buildings_bulk_load.removed_update_qa_status_id(%s, %s);\"\n )\n self.db.execute_no_commit(sql_update_removed, (qa_status_id, id_existing))\n\n def select_row_in_tbl_matched(self, id_existing, id_bulk):\n tbl = self.tbl_relationship\n index = self.cmb_relationship.findText(\"Matched Outlines\")\n if self.cmb_relationship.currentIndex() != index:\n self.cmb_relationship.setCurrentIndex(index)\n for row in range(self.tbl_relationship.rowCount()):\n if (\n int(tbl.item(row, 0).text()) == id_existing\n and int(tbl.item(row, 1).text()) == id_bulk\n ):\n tbl.selectRow(row)\n tbl.scrollToItem(tbl.item(row, 0))\n\n def select_row_in_tbl_related(self, id_existing, id_bulk):\n tbl = self.tbl_relationship\n index = self.cmb_relationship.findText(\"Related Outlines\")\n if self.cmb_relationship.currentIndex() != index:\n self.cmb_relationship.setCurrentIndex(index)\n self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n for row in range(self.tbl_relationship.rowCount()):\n if (\n int(tbl.item(row, 1).text()) == id_existing\n and int(tbl.item(row, 2).text()) == id_bulk\n ):\n tbl.selectRow(row)\n tbl.scrollToItem(tbl.item(row, 0))\n\n def select_row_in_tbl_removed(self, id_existing):\n tbl = self.tbl_relationship\n index = self.cmb_relationship.findText(\"Removed Outlines\")\n if self.cmb_relationship.currentIndex() != index:\n self.cmb_relationship.setCurrentIndex(index)\n self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n for row in range(self.tbl_relationship.rowCount()):\n if int(tbl.item(row, 0).text()) == id_existing:\n tbl.selectRow(row)\n tbl.scrollToItem(tbl.item(row, 0))\n\n def select_row_in_tbl_added(self, id_bulk):\n tbl = self.tbl_relationship\n index = self.cmb_relationship.findText(\"Added Outlines\")\n if self.cmb_relationship.currentIndex() != index:\n self.cmb_relationship.setCurrentIndex(index)\n self.tbl_relationship.setSelectionMode(QAbstractItemView.MultiSelection)\n for row in range(self.tbl_relationship.rowCount()):\n if int(tbl.item(row, 0).text()) == id_bulk:\n tbl.selectRow(row)\n tbl.scrollToItem(tbl.item(row, 0))\n\n def canvas_add_outline(self):\n self.lyr_existing.removeSelection()\n self.lyr_bulk_load.removeSelection()\n\n 
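# reset selections and list contents before the new outline is drawn\n        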
self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.lst_existing_attrs.clear()\n self.lst_bulk_attrs.clear()\n\n self.tbl_relationship.clearSelection()\n\n self.edit_dialog.add_outline()\n self.edit_dialog.show()\n self.change_instance = self.edit_dialog.get_change_instance()\n\n self.circle_tool = None\n self.polyline = None\n\n # setup circle button\n image_dir = os.path.join(__location__, \"..\", \"icons\")\n icon_path = os.path.join(image_dir, \"circle.png\")\n icon = QIcon()\n icon.addFile(icon_path, QSize(8, 8))\n self.circle_action = QAction(icon, \"Draw Circle\", iface.building_toolbar)\n iface.registerMainWindowAction(self.circle_action, \"Ctrl+0\")\n self.circle_action.triggered.connect(self.circle_tool_clicked)\n self.circle_action.setCheckable(True)\n iface.building_toolbar.addAction(self.circle_action)\n\n def canvas_edit_geometry(self):\n \"\"\"\n When edit geometry radio button toggled\n \"\"\"\n self.lyr_existing.removeSelection()\n\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.edit_dialog.edit_geometry()\n self.edit_dialog.show()\n self.change_instance = self.edit_dialog.get_change_instance()\n\n def canvas_edit_attribute(self):\n \"\"\"\n When edit outline radio button toggled\n \"\"\"\n self.lyr_existing.removeSelection()\n\n self.lst_existing.clear()\n self.lst_bulk.clear()\n\n self.edit_dialog.show()\n self.edit_dialog.edit_attribute()\n self.change_instance = self.edit_dialog.get_change_instance()\n\n def circle_tool_clicked(self):\n circle_tool.setup_circle(self)\n\n def edit_cancel_clicked(self):\n if len(QgsProject.instance().mapLayersByName(\"bulk_load_outlines\")) > 0:\n if isinstance(self.change_instance, bulk_load_changes.EditAttribute):\n try:\n self.lyr_bulk_load.selectionChanged.disconnect(\n self.change_instance.selection_changed\n )\n except TypeError:\n pass\n elif isinstance(self.change_instance, bulk_load_changes.EditGeometry):\n try:\n self.lyr_bulk_load.geometryChanged.disconnect(\n self.change_instance.geometry_changed\n )\n except TypeError:\n pass\n elif isinstance(self.change_instance, bulk_load_changes.AddBulkLoad):\n try:\n self.lyr_bulk_load.featureAdded.disconnect()\n except TypeError:\n pass\n try:\n self.lyr_bulk_load.featureDeleted.disconnect()\n except TypeError:\n pass\n try:\n self.lyr_bulk_load.geometryChanged.disconnect()\n except TypeError:\n pass\n if self.polyline:\n self.polyline.reset()\n if isinstance(self.circle_tool, PointTool):\n self.circle_tool.canvas_clicked.disconnect()\n self.circle_tool.mouse_moved.disconnect()\n self.circle_tool.deactivate()\n iface.actionPan().trigger()\n\n iface.actionCancelEdits().trigger()\n\n QgsProject.instance().layerWillBeRemoved.disconnect(self.layers_removed)\n\n QgsProject.instance().layerWillBeRemoved.connect(self.layers_removed)\n\n self.toolbar_setup()\n\n for val in [\n str(layer.id())\n for layer in QgsProject.instance().layerTreeRoot().layerOrder()\n ]:\n if \"existing_subset_extracts\" in val:\n self.lyr_existing.removeSelection()\n if \"bulk_load_outlines\" in val:\n self.lyr_bulk_load.removeSelection()\n\n self.tbl_relationship.clearSelection()\n\n self.btn_maptool.click()\n\n self.change_instance = None\n\n def reload_bulk_load_layer(self):\n \"\"\"To ensure QGIS has most up to date ID for the newly split feature see #349\"\"\"\n layer_tree_layer = QgsProject.instance().layerTreeRoot().findLayer(self.lyr_bulk_load.id())\n layer_tree_model = iface.layerTreeView().layerTreeModel()\n legend_nodes = layer_tree_model.layerLegendNodes(layer_tree_layer)\n 
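# toggle the legend entries off and on to force QGIS to refresh the layer\n        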
legend_node_null = [ln for ln in legend_nodes if not ln.data(Qt.DisplayRole)]\n legend_node_null[0].setData(Qt.Unchecked, Qt.CheckStateRole)\n legend_node_null[0].setData(Qt.Checked, Qt.CheckStateRole)\n legend_node_added = [\n ln for ln in legend_nodes if ln.data(Qt.DisplayRole) == \"Added In Edit\"\n ]\n legend_node_added[0].setData(Qt.Unchecked, Qt.CheckStateRole)\n legend_node_added[0].setData(Qt.Checked, Qt.CheckStateRole)\n\n @property\n def valid_building_use_ids(self):\n \"\"\"self.valid_building_uses flipped to map use strings to use_id ints\"\"\"\n return {use: use_id for use_id, use in self.valid_building_uses.items()}\n","repo_name":"linz/nz-buildings","sub_path":"buildings/gui/alter_building_relationships.py","file_name":"alter_building_relationships.py","file_ext":"py","file_size_in_byte":94702,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"43353435270","text":"from libqtile.config import Key, Screen, Group, Drag, Click, Match\nfrom libqtile.command import lazy\nfrom libqtile import layout, bar, widget\nfrom libqtile.dgroups import simple_key_binder\n\nmod = \"mod4\"\n\nkeys = [\n # Switch between windows in current stack pane\n Key(\n [mod], \"j\",\n lazy.layout.down()\n ),\n Key(\n [mod], \"k\",\n lazy.layout.up()\n ),\n\n # Move windows up or down in current stack\n Key(\n [mod, \"control\"], \"j\",\n lazy.layout.shuffle_down()\n ),\n Key(\n [mod, \"control\"], \"k\",\n lazy.layout.shuffle_up()\n ),\n\n # Switch window focus to other pane(s) of stack\n Key(\n [mod], \"space\",\n lazy.layout.next()\n ),\n\n Key(\n [mod], \"Left\",\n lazy.screen.prevgroup()\n ),\n\n Key(\n [mod], \"Right\",\n lazy.screen.nextgroup()\n ),\n\n # Swap panes of split stack\n Key(\n [mod, \"shift\"], \"space\",\n lazy.layout.rotate()\n ),\n\n # Toggle between split and unsplit sides of stack.\n # Split = all windows displayed\n # Unsplit = 1 window displayed, like Max layout, but still with\n # multiple stack panes\n Key(\n [mod, \"shift\"], \"Return\",\n lazy.layout.toggle_split()\n ),\n Key([mod], \"Return\", lazy.spawn(\"urxvt\")),\n\n # Toggle between different layouts as defined below\n Key([mod], \"Tab\", lazy.nextlayout()),\n Key([mod], \"w\", lazy.window.kill()),\n\n Key([mod, \"control\"], \"r\", lazy.restart()),\n Key([mod, \"control\"], \"q\", lazy.shutdown()),\n Key([mod], \"r\", lazy.spawncmd()),\n]\n\ngroups = [\n Group(\" urxvt \"),\n Group(\" web \", matches=[Match(wm_class=[\"Firefox\"])]),\n Group(\" blender \"),\n Group(\" inkscape \"),\n Group(\" gimp \"),\n Group(\" doc \"),\n]\n\ndgroups_key_binder = simple_key_binder(\"mod4\")\n\n\nlayouts = [\n layout.TreeTab(\n font='Cartograph CF Light Italic',\n name=\"tree tab\",\n bg_color=\"#222222\",\n inactive_bg=\"#AB5DEE\",\n panel_width=150,\n margin_left=0,\n margin_y=0,\n sections=['TreeTab'],\n section_left=0,\n padding_x=4,\n active_bg=\"#FFB86C\",\n rounded=False,\n ),\n layout.MonadTall(\n name=\"xmonad tall\",\n ratio=0.5,\n border_width=8,\n border_focus=\"#335260\",\n border_normal=\"#69B2B8\",\n ),\n layout.Stack(\n num_stacks=2,\n border_width=8,\n border_focus=\"#335260\",\n border_normal=\"#69B2B8\",\n ),\n layout.Floating(\n name=\"floating\",\n border_width=8,\n border_focus=\"#335260\",\n border_normal=\"#69B2B8\",\n )\n]\n\nfloating_layout = layout.Floating(\n name=\"floating\",\n border_width=8,\n border_focus=\"#69B2B8\",\n border_normal=\"#335260\",\n )\n\nwidget_defaults = dict(\n font='Cartograph CF Light Italic',\n fontsize=12,\n 
background=\"#222222\",\n    markup=True,\n)\n\nscreens = [\n    Screen(\n        bottom=bar.Bar(\n            [\n                widget.GroupBox(\n                    borderwidth=0,\n                    margin=0,\n                    padding=6,\n                    active=\"FFFFFF\",\n                    inactive=\"FFB86C\",\n                    highlight_method=\"block\",\n                    this_current_screen_border=\"#AB5DEE\",\n                    invert_mouse_wheel=True,\n                    rounded=False,\n                ),\n                widget.Prompt(),\n                widget.CurrentLayout(\n                    background=\"#E11B22\",\n                ),\n                widget.Spacer(),\n                #widget.WindowName(),\n                widget.TextBox(\"testing\", name=\"default\"),\n                widget.Systray(),\n                widget.Clock(format=' %I:%M %p '),\n            ],\n            24,\n            background=\"#335260\",\n        ),\n    ),\n]\n\n# Drag floating layouts.\nmouse = [\n    Drag([mod], \"Button1\", lazy.window.set_position_floating(),\n         start=lazy.window.get_position()),\n    Drag([mod], \"Button3\", lazy.window.set_size_floating(),\n         start=lazy.window.get_size()),\n    Click([mod], \"Button2\", lazy.window.bring_to_front())\n]\n\ndgroups_app_rules = []\nmain = None\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nauto_fullscreen = True\n\n# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this\n# string besides java UI toolkits; you can see several discussions on the\n# mailing lists, github issues, and other WM documentation that suggest setting\n# this string if your java app doesn't work correctly. We may as well just lie\n# and say that we're a working one by default.\n#\n# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in\n# java that happens to be on java's whitelist.\nwmname = \"LG3D\"\n","repo_name":"shadowrylander/shadowrylander","sub_path":".config/qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"23557943240","text":"def Boden(modell, name, bodentiefe, voidhoehe, bodenbereich, gittergroessen, gitter_boden_vertikal,\n          schichten, schichtmaterial, restmaterial, extrasets=False, netz=True, euler=True,\n          xypartition=[True, True], partition_durchziehen=False, viertel=4, rotationsprofilpunkte=None):\n    \"\"\"Create a subsoil body name in the given modell. The following sketch gives an overview of\n    the geometric quantities used and of the internally used designations (e.g. for sets); in the\n    drawing, all bodenbereich entries are assumed to be round. Returns [part, inst].\n    \n                      bodenbereich[0]\n                        (\"Aussen\")\n         _|________________|_\n        |                    |\n        |       bodenbereich[1:-1]\n        |      (\"Uebergaenge\")|\n        |    _|__________|_   |\n        |   |             |   |\n        .   .             .   .\n        .   .             .   .\n        |       bodenbereich[-1]\n        |   |    (\"Innen\")|   |\n        |   |  _|____|_   |   |\n        |   | |        |  |   |\n        |  |\n         ..------..\n      .. ..----.. ..\n     / /  .--.  \\\ \\\\n    |  |  (    )  |  | ___|_\n    |\\ |\\ |'--'| /| /|    |\n    | '+ '+----+' +' |    |\n    |  |''+----+''|  |    |\n    |  |  |    |  |  |    | voidhoehe (only if euler=True)\n    |  |..+----+..|  |    |\n    | .+ .+----+. +. |    |\n    |/ |/ |.--.| \\| \\|    |\n    |  |  (    )  |  | ___|__________________|_\n    |\\ |\\ |'--'| /| /|    |                  |\n    | '+ '+----+' +' |    |                  |\n    |  |''+----+''|  |    |                  |\n    |  |  |    |  |  |    | schichten[-1]    |\n    |  |..+----+..|  |    | (\"Schichten\")    |\n    | .+ .+----+. +. |    |                  |\n    |/ |/ |.--.| \\| \\|    |                  |\n    |  |  (    )  |  | ___|_                 | bodentiefe\n    |\\ |\\ |'--'| /| /|    |                  |\n    | '+ '+----+' +' |    |                  |\n    |  |''+----+''|  |    |                  |\n    |  |  |    |  |  |    | (\"Unten\")        |\n    |  |..+----+..|  |    |                  |\n    | .+ .+----+. +. |    |                  |\n    |/ |/ |.--.| \\| \\|    |                  |\n    |  |  (    )  |  | ___|__________________|_\n     \\ \\  '--'  / /\n      '' ''----'' ''\n         ''------''\n\n    The vector bodenbereich consists of entries with either one element (radius for a round\n    geometry/partition) or two elements (half edge length for a rectangular geometry or\n    partition). For meshing, one entry in gittergroessen is required per entry in bodenbereich.\n    Each entry may hold either one value (constant mesh size) or two values (linearly varying\n    mesh size). gitter_boden_vertikal sets the vertical mesh resolution for the layered region and\n    the void. For every entry in schichten, a material has to be defined in schichtmaterial. In\n    addition, a restmaterial has to be given, e.g. for the region below the layers. Optionally,\n    extrasets can be created. Optionally, the soil can be created as an Eulerian body or as a\n    Lagrangian body (without a void region). Partitions are created with a default partitioning in\n    the x/y direction; this behaviour can be disabled explicitly with xypartition. Optionally,\n    only a quarter or half model can be created with the parameter viertel (only the values 1, 2\n    and 4 are supported). If only a half/quarter model is created, partitions in the section plane\n    are not created (regardless of the values in xypartition).\n    With partition_durchziehen=True, rectangular partitions run through the complete model; by\n    default they only extend to the next larger partition (with connections where necessary).\n    \n    Additionally, passing rotationsprofilpunkte (x-y point coordinates) allows creating a 3D\n    profile from a sketch built from those points, rotated about the vertical axis of the sketch\n    (experimental).\n    \"\"\"\n    
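# Minimal usage sketch (hypothetical values, not from the original source): an outer circular\n    # region of radius 50 m with a 10 m x 10 m rectangular core, two layers and a 2 m void:\n    #   Boden(modell=mdb.models['Model-1'], name='Untergrund', bodentiefe=20.0, voidhoehe=2.0,\n    #         bodenbereich=[[50.0], [10.0, 10.0]], gittergroessen=[[4.0, 1.0], [0.5]],\n    #         gitter_boden_vertikal=1.0, schichten=[5.0, 20.0], schichtmaterial=['Sand', 'Clay'],\n    #         restmaterial='Rock')\n    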
import assembly\n    from abaqusConstants import DEFORMABLE_BODY, EULERIAN, ON\n    from grundkoerper import Zylinder_erstellen, Zylinder_flaechensets\n    from grundkoerper import Quader_erstellen, Quader_flaechensets\n    from grundkoerper import Grundkoerper_sets\n    from grundkoerper import Rotationsprofil_erstellen\n    from hilfen import Log\n    #\n    if (not (len(bodenbereich) == len(gittergroessen))):\n        Log('# Error: bodenbereich and gittergroessen must have the same number of entries');\n        return;\n    #\n    # Create and partition the soil body\n    if (euler):\n        bodenmaterial = EULERIAN;\n    else:\n        bodenmaterial = DEFORMABLE_BODY;\n        voidhoehe = 0.0;\n    #\n    # If a cylinder is created, build the inner region as sweep instead of structured\n    kernSweep = False;\n    if (rotationsprofilpunkte is None):\n        if (len(bodenbereich[0]) == 1):\n            kernSweep = True;\n            Zylinder_erstellen(modell=modell, name=name, radius=bodenbereich[0][0],\n                hoehe=bodentiefe+voidhoehe, materialtyp=bodenmaterial, viertel=viertel);\n        elif (len(bodenbereich[0]) == 2):\n            laenge = 2.0*bodenbereich[0][0];\n            breite = 2.0*bodenbereich[0][1];\n            if ((viertel == 1) or (viertel == 2)):\n                if (viertel == 1):\n                    breite = [0.0, bodenbereich[0][1]];\n                #\n                laenge = [0.0, bodenbereich[0][0]];\n            #\n            Quader_erstellen(modell=modell, name=name, laenge=laenge, breite=breite,\n                hoehe=bodentiefe+voidhoehe, materialtyp=bodenmaterial);\n        else:\n            Log('# Error: Invalid number of values in bodenbereich[0]');\n            return;\n        #\n    else:\n        Rotationsprofil_erstellen(modell=modell, name=name, punkte=rotationsprofilpunkte,\n            materialtyp=bodenmaterial, viertel=viertel);\n    #\n    
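# The call below subdivides the solid into depth layers and nested in-plane partitions\n    # (see _Boden_partitionieren); set creation and meshing build on these partitions.\n    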
_Boden_partitionieren(modell=modell, name=name, bodentiefe=bodentiefe,\n        voidhoehe=voidhoehe, bodenbereich=bodenbereich, schichten=schichten,\n        xypartition=xypartition, partition_durchziehen=partition_durchziehen, viertel=viertel);\n    #\n    Grundkoerper_sets(modell=modell, name=name);\n    _Boden_setserstellen(modell=modell, name=name, bodentiefe=bodentiefe,\n        voidhoehe=voidhoehe, bodenbereich=bodenbereich, schichten=schichten,\n        schichtmaterial=schichtmaterial, restmaterial=restmaterial);\n    if (len(bodenbereich[0]) == 1):\n        Zylinder_flaechensets(modell=modell, name=name, radius=bodenbereich[0][0],\n            hoehe=bodentiefe+voidhoehe, viertel=viertel);\n    else:\n        Quader_flaechensets(modell=modell, name=name, laenge=laenge, breite=breite,\n            hoehe=bodentiefe+voidhoehe);\n    #\n    _Boden_vernetzen(modell=modell, name=name, bodentiefe=bodentiefe, voidhoehe=voidhoehe,\n        bodenbereich=bodenbereich, gittergroessen=gittergroessen,\n        gitter_boden_vertikal=gitter_boden_vertikal, schichten=schichten,\n        extrasets=extrasets, netz=netz, euler=euler, kernSweep=kernSweep);\n    instname = 'inst' + name;\n    instBoden = modell.rootAssembly.Instance(dependent=ON, name=instname, part=modell.parts[name]);\n    return [modell.parts[name], instBoden];\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef _Boden_partitionieren(modell, name, bodentiefe, voidhoehe, bodenbereich, schichten,\n        xypartition=[True, True], partition_durchziehen=False, viertel=4):\n    \"\"\"Partition the part (Part) name from the active modell based on the passed parameters. The\n    total part height is bodentiefe plus voidhoehe, where bodentiefe is additionally subdivided\n    into individual schichten. In each plane, individual partitions are defined via bodenbereich.\n    The creation of the base partitions can be disabled explicitly with xypartition. If only a\n    half/quarter model is created, partitions in the section plane are not created (regardless of\n    the values in xypartition).\n    With partition_durchziehen = True, rectangular partitions run through the complete model; by\n    default they only extend to the next larger partition (with connections where necessary).\n    \"\"\"\n    from grundkoerper import Grundkoerper_standardpartitionen\n    #\n    xPartition, yPartition = xypartition;\n    if (viertel == 1):\n        xPartition = False;\n        yPartition = False;\n    elif (viertel == 2):\n        xPartition = False;\n    #\n    partBoden = modell.parts[name];\n    Grundkoerper_standardpartitionen(modell=modell, name=name,\n        xPartition=xPartition, yPartition=yPartition, zPartition=False);\n    #\n    if (len(bodenbereich) > 1):\n        _Boden_ebenenpartition(modell=modell, name=name, bodenbereich=bodenbereich,\n            partition_durchziehen=partition_durchziehen, viertel=viertel);\n    #\n    _Boden_tiefenpartition(modell=modell, name=name, bodentiefe=bodentiefe,\n        schichten=schichten, voidhoehe=voidhoehe);\n#\n\n\n# -------------------------------------------------------------------------------------------------\n
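# Note on the depth partitioning below (hypothetical numbers): with bodentiefe=20.0 and\n# schichten=[5.0, 20.0], a datum plane is created at z = 20.0 - 5.0 = 15.0; the entry 20.0\n# equals bodentiefe and is skipped, so the bottom block remains unpartitioned.\n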
Dazu wird jeder\n in schichten definierte Bereich ueber die bodentiefe partitioniert.\n \"\"\"\n import part\n #\n partBoden = modell.parts[name];\n zachsenid = partBoden.features['zAchse'].id;\n #\n # Voidbereich (falls existent) und unterer Bodenbereich unterhalb der Schichten abgrenzen\n if (voidhoehe > 0.0):\n partBoden.DatumPointByCoordinate(coords=(0.0, 0.0, bodentiefe));\n partBoden.features.changeKey(fromName='Datum pt-1', toName='datum_GOK');\n partBoden.PartitionCellByPlanePointNormal(cells=partBoden.cells,\n normal=partBoden.datums[zachsenid],\n point=partBoden.datums[partBoden.features['datum_GOK'].id]);\n #\n # Alle Schichten erstellen und definieren\n for tiefe in schichten:\n if (tiefe < bodentiefe):\n datumname = 'datum_' + str(tiefe).replace(\".\",\"-\") + 'm';\n partBoden.DatumPointByCoordinate(coords=(0.0, 0.0, bodentiefe-tiefe));\n partBoden.features.changeKey(fromName='Datum pt-1', toName=datumname);\n datumid = partBoden.features[datumname].id;\n partBoden.PartitionCellByPlanePointNormal(cells=partBoden.cells,\n normal=partBoden.datums[zachsenid], point=partBoden.datums[datumid]);\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef _Boden_ebenenpartition(modell, name, bodenbereich, partition_durchziehen=False, viertel=4):\n \"\"\"Erzeuge Ebenenpartitionen am Bauteil (Part) name aus dem aktiven modell. Dazu werden je nach\n Elementen in bodenbereich runde (ein Wert) oder rechteckige (zwei Werte) Partitionen durch das\n gesamte Bauteil erstellt. Wenn nur ein Viertel- oder Halbmodell erzeugt worden ist, muss der\n Parameter viertel entsprechend angepasst uebergeben werden.\n \"\"\"\n from hilfen import Log\n #\n partBoden = modell.parts[name];\n vorangegangenerbereich = bodenbereich[0];\n lenvoran = len(vorangegangenerbereich);\n # Arbeite partitionsweise von aussen nach innen\n for idx, aktuellerbereich in enumerate(bodenbereich[1:]):\n lenaktuell = len(aktuellerbereich);\n aeste = False;\n \n # Da die aktuelle und die vorangegangene Partition entweder rechteckig\n # oder kreisfoermig sind, gibt es vier Faelle zu unterscheiden\n if ((not lenaktuell == 1) and (not lenaktuell == 2)):\n Log('# Fehler: Ungueltiger Wert in bodenbereich[' + str(idx) + ']');\n return;\n #\n # Falls partition_durchziehen = True und eine rechteckige Partition erstellt werden soll,\n # wird eine simplere Methode zur Erzeugung der noetigen Partitionen verwendet\n if ((partition_durchziehen) and (lenaktuell == 2)):\n _Ebenenpartition_durchziehen(modell=modell, name=name, partitionsname='Bereich' + str(idx),\n bereich=aktuellerbereich, viertel=viertel);\n continue;\n #\n if ((lenaktuell == 2) and (lenvoran == 2)):\n # Rechteck -> kleineres Rechteck. 
Aeste hinzufuegen, wenn beide neuen Seiten echt kleiner\n if ((aktuellerbereich[0] > vorangegangenerbereich[0]) or (aktuellerbereich[1] > vorangegangenerbereich[1]) or\n ((aktuellerbereich[0] == vorangegangenerbereich[0]) and (aktuellerbereich[1] == vorangegangenerbereich[1]))):\n #\n Log('# Fehler: Rechteck-Partition in bodenbereich[' + str(idx) + '] muss kleiner sein als umgebendes Rechteck');\n return;\n #\n if ((aktuellerbereich[0] < vorangegangenerbereich[0]) and (aktuellerbereich[1] < vorangegangenerbereich[1])):\n aeste = True;\n elif ((lenaktuell == 1) and (lenvoran == 1)):\n # Kreis -> kleinerer Kreis, keine Aeste noetig\n if (aktuellerbereich[0] >= vorangegangenerbereich[0]):\n Log('# Fehler: Kreis-Partition in bodenbereich[' + str(idx) + '] muss kleiner sein als umgebender Kreis');\n return;\n elif ((lenaktuell == 2) and (lenvoran == 1)):\n # Kreis -> kleineres Rechteck, Aeste hinzufuegen, wenn Rechteck echt innerhalb des Kreises\n if ((aktuellerbereich[0]**2 + aktuellerbereich[1]**2) > vorangegangenerbereich[0]**2):\n Log('# Fehler: Rechteck-Partition in bodenbereich[' + str(idx) + '] muss kleiner sein als umgebender Kreis');\n return;\n if ((aktuellerbereich[0]**2 + aktuellerbereich[1]**2) < vorangegangenerbereich[0]**2):\n aeste = True;\n else:\n # Rechteck -> kleineren Kreis, immer Aeste hinzufuegen\n if ((aktuellerbereich[0] > vorangegangenerbereich[0]) or (aktuellerbereich[0] > vorangegangenerbereich[1])):\n Log('# Fehler: Kreis-Partition in bodenbereich[' + str(idx) + '] muss kleiner sein als umgebendes Rechteck');\n return;\n aeste = True;\n #\n _Ebenenpartition_erstellen(modell=modell, name=name, partitionsname='Bereich' + str(idx),\n bodenbereich_aussen=vorangegangenerbereich, bodenbereich_innen=aktuellerbereich,\n aeste=aeste, viertel=viertel);\n # \n vorangegangenerbereich = aktuellerbereich;\n lenvoran = lenaktuell;\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef _Ebenenpartition_erstellen(modell, name, partitionsname, bodenbereich_aussen,\n bodenbereich_innen, aeste=False, viertel=4):\n \"\"\"Definiere eine Ebenenpartitionen am Bauteil (Part) name aus dem aktiven modell. Die Partition\n wird mit partitionsname bezeichnet. Abhaengig von bodenbereich_innen und bodenbereich_aussen, die\n beide entweder kreisfoermig oder rechteckig sein koennen, wird die Partitionierung vorgenommen.\n Optional koennen aeste zur Verbindung der beiden Partitionen erzeugt werden. 
Wenn nur ein\n Viertel- oder Halbmodell erzeugt worden ist, muss der Parameter viertel entsprechend angepasst\n uebergeben werden.\n \"\"\"\n import part\n from abaqusConstants import COPLANAR_EDGES, SIDE1, RIGHT, CLOCKWISE, FORWARD\n from math import sqrt\n from auswahl import BedingteAuswahl\n from hilfen import abapys_tol\n from zeichnung import Linie, Rechteck, Kreis, KreisbogenWinkel\n #\n partBoden = modell.parts[name];\n xachsenid = partBoden.features['xAchse'].id;\n yachsenid = partBoden.features['yAchse'].id;\n zachsenid = partBoden.features['zAchse'].id;\n flaecheUnterseite = BedingteAuswahl(elemente=partBoden.faces,\n bedingung='elem.pointOn[0][2] < var[0]', var=[abapys_tol]);\n #\n modell.ConstrainedSketch(gridSpacing=5, name='profil_' + partitionsname, sheetSize=40,\n transform=partBoden.MakeSketchTransform(sketchPlane=flaecheUnterseite[0],\n sketchPlaneSide=SIDE1, sketchUpEdge=partBoden.datums[xachsenid], sketchOrientation=RIGHT,\n origin=(0.0, 0.0, 0.0)));\n zeichnung = modell.sketches['profil_' + partitionsname];\n partBoden.projectReferencesOntoSketch(filter=COPLANAR_EDGES, sketch=zeichnung);\n if (len(bodenbereich_innen) == 1):\n if (viertel == 1):\n Linie(zeichnung=zeichnung, punkt1=(bodenbereich_innen[0], 0.0), punkt2=(0.0, 0.0));\n Linie(zeichnung=zeichnung, punkt1=(0.0, 0.0), punkt2=(0.0, bodenbereich_innen[0]));\n KreisbogenWinkel(zeichnung=zeichnung, mittelpunkt=(0.0, 0.0), radius=bodenbereich_innen[0],\n startwinkel=0.0, endwinkel=90.0, richtung=CLOCKWISE);\n elif (viertel == 2):\n Linie(zeichnung=zeichnung, punkt1=(0.0, -bodenbereich_innen[0]),\n punkt2=(0.0, bodenbereich_innen[0]));\n KreisbogenWinkel(zeichnung=zeichnung, mittelpunkt=(0.0, 0.0), radius=bodenbereich_innen[0],\n startwinkel=0.0, endwinkel=180.0, richtung=CLOCKWISE);\n else:\n Kreis(zeichnung=zeichnung, mittelpunkt=(0.0, 0.0), radius=bodenbereich_innen[0]);\n else:\n # Moeglicherweise sind zwei der vier inneren Kanten identisch mit den aeusseren. 
Abaqus\n # handhabt die Skizze in dem Fall als zwei nicht zusammenhaengende Seiten, die auch in zwei\n # unterschiedlichen Schritten partitioniert werden muessen.\n identischeSeiten = False;\n if (len(bodenbereich_aussen) == 2):\n if ((bodenbereich_aussen[0] == bodenbereich_innen[0]) or\n (bodenbereich_aussen[1] == bodenbereich_innen[1])):\n #\n identischeSeiten = True;\n #\n if (not identischeSeiten):\n # Das neue Rechteck ist echt kleiner als das alte\n xstart = -bodenbereich_innen[0];\n ystart = -bodenbereich_innen[1];\n if ((viertel == 1) or (viertel == 2)):\n if (viertel == 1):\n ystart = 0.0;\n #\n xstart = 0.0;\n #\n Rechteck(zeichnung=zeichnung, punkt1=(-bodenbereich_innen[0], -bodenbereich_innen[1]),\n punkt2=(bodenbereich_innen[0], bodenbereich_innen[1]));\n else:\n # Zwei Seiten sind nicht kleiner und die anderen beiden muessen einzeln gezeichnet werden,\n # sofern viertel == 4\n #\n # Fall 1 # Fall 2 ^ y\n # # |\n # ^ y # +---+---+\n # | # | | |\n # +---+---+---+---+ # (a) +...+...+\n # | : | : | # | | |\n # --+---+---+---+---+--> # --+---+---+-->\n # | : | : | x # | | | x\n # +---+---+---+---+ # (b) +...+...+\n # | # | | |\n # (b) (a) # +---+---+\n # # |\n #\n # Erste Seite\n if (bodenbereich_aussen[0] > bodenbereich_innen[0]):\n # Fall 1(a)\n startx = bodenbereich_innen[0];\n stopx = bodenbereich_innen[0];\n starty = -bodenbereich_innen[1];\n stopy = bodenbereich_innen[1];\n if (viertel == 1):\n starty = 0.0;\n else:\n # Fall 2(a)\n startx = -bodenbereich_innen[0];\n stopx = bodenbereich_innen[0];\n starty = bodenbereich_innen[1];\n stopy = bodenbereich_innen[1];\n if ((viertel == 1) or (viertel == 2)):\n startx = 0.0;\n #\n Linie(zeichnung=zeichnung, punkt1=(startx, starty), punkt2=(stopx, stopy));\n #\n # Falls nur ein Viertel betrachtet wird oder eine Haelfte und einer der Partitionsseiten\n # parallel zur Schnittflaeche ist (und ausserhalb, d.h. 
im negativen),\n # gibt es fuer die zweite Seite nichts zu tun\n if ((viertel == 1) or ((viertel == 2) and (bodenbereich_aussen[0] > bodenbereich_innen[0]))):\n pass;\n else:\n # Vorgriff auf den Rest dieser Funktion, um effektiv die Funktion nach dieser else-Abfrage\n # erneut nutzen zu koennen\n temppartitionsname = partitionsname + '-a';\n neupartition = partBoden.PartitionFaceBySketch(faces=flaecheUnterseite,\n sketch=zeichnung, sketchUpEdge=partBoden.datums[xachsenid]);\n del zeichnung;\n partBoden.features.changeKey(fromName=neupartition.name, toName=temppartitionsname);\n neueKanten = BedingteAuswahl(elemente=partBoden.edges,\n bedingung='elem.featureName == \\'' + temppartitionsname + '\\'');\n partBoden.PartitionCellByExtrudeEdge(cells=partBoden.cells,\n edges=(neueKanten), line=partBoden.datums[zachsenid], sense=FORWARD);\n #\n # Erneutes Auswaehlen der Oberfleche\n flaecheUnterseite = BedingteAuswahl(elemente=partBoden.faces,\n bedingung='elem.pointOn[0][2] < var[0]', var=[abapys_tol]);\n partitionsname = partitionsname + '-b';\n modell.ConstrainedSketch(gridSpacing=5, name='profil_' + partitionsname, sheetSize=40,\n transform=partBoden.MakeSketchTransform(sketchPlane=flaecheUnterseite[0],\n sketchPlaneSide=SIDE1, sketchUpEdge=partBoden.datums[xachsenid],\n sketchOrientation=RIGHT, origin=(0.0, 0.0, 0.0)));\n zeichnung = modell.sketches['profil_' + partitionsname];\n partBoden.projectReferencesOntoSketch(filter=COPLANAR_EDGES, sketch=zeichnung);\n #\n # Zweite Seite\n if (bodenbereich_aussen[0] > bodenbereich_innen[0]):\n # Fall 1(b)\n startx = -bodenbereich_innen[0];\n stopx = -bodenbereich_innen[0];\n starty = -bodenbereich_innen[1];\n stopy = bodenbereich_innen[1];\n else:\n # Fall 2(b)\n startx = -bodenbereich_innen[0];\n stopx = bodenbereich_innen[0];\n starty = -bodenbereich_innen[1];\n stopy = -bodenbereich_innen[1];\n if (viertel == 2):\n startx = 0.0;\n #\n Linie(zeichnung=zeichnung, punkt1=(startx, starty), punkt2=(stopx, stopy));\n #\n neupartition = partBoden.PartitionFaceBySketch(faces=flaecheUnterseite,\n sketch=zeichnung, sketchUpEdge=partBoden.datums[xachsenid]);\n del zeichnung;\n partBoden.features.changeKey(fromName=neupartition.name, toName=partitionsname);\n # Alle neu erstellten Kanten des Feingitters finden und zum Extrudieren verwenden\n neueKanten = BedingteAuswahl(elemente=partBoden.edges,\n bedingung='elem.featureName == \\'' + partitionsname + '\\'');\n partBoden.PartitionCellByExtrudeEdge(cells=partBoden.cells,\n edges=(neueKanten), line=partBoden.datums[zachsenid], sense=FORWARD);\n #\n if (aeste):\n for idx in range(0, 4):\n # Vier Aeste zwischen den Uebergangsbereichen hinzufuegen\n # Die Vorzeichenzuweisung funktioniert aufgrund der Integer-Division\n xvz = (-1)**((idx+1)/2);\n yvz = (-1)**(idx/2);\n #\n # Falls nur ein Viertel- oder Halbbereich vorhanden ist, sollen auch\n # nur die gueltigen Partitionen erzeugt werden\n if ((viertel == 1) or (viertel == 2)):\n if (viertel == 1):\n if (yvz < 0.0):\n continue;\n #\n if (xvz < 0.0):\n continue;\n #\n flaecheUnterseite = BedingteAuswahl(elemente=partBoden.faces,\n bedingung='elem.pointOn[0][2] < var[0]', var=[abapys_tol]);\n if (len(bodenbereich_aussen) == 1):\n p1 = [xvz*bodenbereich_aussen[0]/sqrt(2), yvz*bodenbereich_aussen[0]/sqrt(2)];\n else:\n p1 = [xvz*bodenbereich_aussen[0], yvz*bodenbereich_aussen[1]];\n #\n if (len(bodenbereich_innen) == 1):\n p2 = [xvz*bodenbereich_innen[0]/sqrt(2), yvz*bodenbereich_innen[0]/sqrt(2)];\n else:\n p2 = [xvz*bodenbereich_innen[0], 
yvz*bodenbereich_innen[1]];\n #\n name = partitionsname + '_Ast' + str(idx);\n modell.ConstrainedSketch(gridSpacing=5, name='profile_' + name, sheetSize=40,\n transform=partBoden.MakeSketchTransform(sketchPlane=flaecheUnterseite[0],\n sketchPlaneSide=SIDE1, sketchUpEdge=partBoden.datums[xachsenid], sketchOrientation=RIGHT,\n origin=(0.0, 0.0, 0.0)));\n zeichnung = modell.sketches['profile_' + name];\n partBoden.projectReferencesOntoSketch(\n filter=COPLANAR_EDGES, sketch=zeichnung);\n #\n Linie(zeichnung=zeichnung, punkt1=(p1[0], p1[1]), punkt2=(p2[0], p2[1]));\n astpartition = partBoden.PartitionFaceBySketch(faces=flaecheUnterseite,\n sketch=zeichnung, sketchUpEdge=partBoden.datums[xachsenid]);\n del zeichnung;\n partBoden.features.changeKey(fromName=astpartition.name, toName='Ast-Partition');\n #\n neueKanten = BedingteAuswahl(elemente=partBoden.edges,\n bedingung='elem.featureName == \\'Ast-Partition\\'');\n partBoden.PartitionCellByExtrudeEdge(cells=partBoden.cells,\n edges=(neueKanten),line=partBoden.datums[zachsenid], sense=FORWARD);\n partBoden.features.changeKey(fromName='Ast-Partition', toName=name);\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef _Ebenenpartition_durchziehen(modell, name, partitionsname, bereich, viertel=4):\n \"\"\"Erzeuge Ebenenpartition(en) an einem Bauteil, d.h. ueber Partitionen senkrecht zur x- und y-\n Richtung, die sich voll durch das Bauteil (Part) name aus dem aktiven modell ziehen. Die\n Partitionen enthalten das Namenspraefix partitionsname und sind definiert an den in bereich\n uebergebenen x- und y-Koordinaten. Wenn nur ein Viertel- oder Halbmodell erzeugt worden ist, muss\n der Parameter viertel entsprechend angepasst uebergeben werden, damit nur die realisierbaren\n Partitionen erzeugt werden.\n \"\"\"\n import part\n #\n partBoden = modell.parts[name];\n xachsenid = partBoden.features['xAchse'].id;\n yachsenid = partBoden.features['yAchse'].id;\n xkoord, ykoord = bereich;\n #\n xpartition = partBoden.PartitionCellByPlanePointNormal(cells=partBoden.cells,\n normal=partBoden.datums[xachsenid], point=(xkoord, ykoord, 0.0));\n partBoden.features.changeKey(fromName=xpartition.name, toName=partitionsname + '_x1');\n #\n ypartition = partBoden.PartitionCellByPlanePointNormal(cells=partBoden.cells,\n normal=partBoden.datums[yachsenid], point=(xkoord, ykoord, 0.0));\n partBoden.features.changeKey(fromName=ypartition.name, toName=partitionsname + '_y1');\n #\n if ((not (viertel == 1)) and (not (viertel == 2))):\n xpartition = partBoden.PartitionCellByPlanePointNormal(cells=partBoden.cells,\n normal=partBoden.datums[xachsenid], point=(-xkoord, -ykoord, 0.0));\n partBoden.features.changeKey(fromName=xpartition.name, toName=partitionsname + '_x2');\n #\n if (not (viertel == 1)):\n ypartition = partBoden.PartitionCellByPlanePointNormal(cells=partBoden.cells,\n normal=partBoden.datums[yachsenid], point=(-xkoord, -ykoord, 0.0));\n partBoden.features.changeKey(fromName=ypartition.name, toName=partitionsname + '_y2');\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef _Boden_setserstellen(modell, name, bodentiefe, voidhoehe, bodenbereich, schichten,\n schichtmaterial, restmaterial):\n \"\"\"Erzeuge alle relevanten Sets am Bauteil (Part) name aus dem aktiven modell. 
Dazu werden die\n folgenden Parameter fuer eine richtige Zuweisung der Sets erwartet: Die Bauteilhoehe, die sich\n aus bodentiefe und voidhoehe zusammensetzt, wobei bodentiefe zusaetzlich in einzelne schichten\n unterteilt ist. Um Sets aus gleichen Materialien zu erstellen, werden zusaetzlich die\n Bezeichnungen in schichtmaterial und restmaterial benoetigt. In jeder Ebene wird die Unterteilung\n mit bodenbereich definiert.\n \"\"\"\n import part\n from auswahl import BedingteAuswahl\n from hilfen import abapys_tol, Log\n #\n partBoden = modell.parts[name];\n # Sets\n partBoden.Set(name='setAll', cells=partBoden.cells);\n if (voidhoehe > 0.0):\n zellenVoid = BedingteAuswahl(elemente=partBoden.cells,\n bedingung='elem.pointOn[0][2] >= var[0]', var=[bodentiefe]);\n partBoden.Set(name='setVoid', cells=zellenVoid);\n zellenNotVoid = BedingteAuswahl(elemente=partBoden.cells,\n bedingung='elem.pointOn[0][2] < var[0]', var=[bodentiefe]);\n partBoden.Set(name='setNotVoid', cells=zellenNotVoid);\n #\n # FIXME: Aktuell werden noch Warnungen von Abaqus ausgegeben, auch wenn es scheint, dass alles\n # funktioniert wie erwartet. Nach dem aktuellen Stand koennen alle Warnungen mit der\n # folgenden Ausgabe aus Vergleichen zu elemente[0:0] ignoriert werden:\n # RuntimeWarning: tp_compare didn't return -1, 0 or 1\n xZellen = BedingteAuswahl(elemente=partBoden.cells, bedingung='elem.pointOn[0][0] < 0.0');\n if (not (xZellen == partBoden.cells[0:0])):\n partBoden.Set(name='setXBereich', cells=xZellen);\n #\n yZellen = BedingteAuswahl(elemente=partBoden.cells, bedingung='elem.pointOn[0][1] < 0.0');\n if (not (yZellen == partBoden.cells[0:0])):\n partBoden.Set(name='setYBereich', cells=yZellen);\n # \n # Sets aller Materialien (Bereiche) definieren\n # Verschiedene Materialien (ohne doppelte Eintraege)\n if (schichten[-1] == bodentiefe):\n tempSchichten = [0.0] + schichten;\n tempSchichtmaterial = schichtmaterial;\n else:\n tempSchichten = [0.0] + schichten + [bodentiefe];\n tempSchichtmaterial = schichtmaterial + [restmaterial];\n #\n materialien = list(set(tempSchichtmaterial));\n zellen_Schichtmaterial = [partBoden.cells[0:0] for x in materialien];\n for idxSchicht, schichttiefe in enumerate(tempSchichten):\n if (idxSchicht == 0):\n continue;\n # FIXME: pointOn der Zellen ist manchmal auf der Randflaeche - welche? Eindeutig bestimmbar?\n # Ueber Randflaechen gehen (min. 
vier muessen in einer Ebene sein)?\n if (idxSchicht == len(tempSchichten)-1):\n bedingung = '(elem.pointOn[0][2] >= var[0]) and (elem.pointOn[0][2] <= var[1])';\n else:\n bedingung = '(elem.pointOn[0][2] > var[0]) and (elem.pointOn[0][2] <= var[1])';\n #\n tempZellen = BedingteAuswahl(elemente=partBoden.cells, bedingung=bedingung,\n var=[bodentiefe-tempSchichten[idxSchicht], bodentiefe-tempSchichten[idxSchicht-1]]);\n if (not (tempZellen == partBoden.cells[0:0])):\n partBoden.Set(name='setSchicht' + str(idxSchicht).zfill(2), cells=tempZellen);\n #\n for idx, tempMaterial in enumerate(materialien):\n if (tempMaterial == tempSchichtmaterial[idxSchicht-1]):\n zellen_Schichtmaterial[idx] += tempZellen;\n break;\n #\n for idx, zellen in enumerate(zellen_Schichtmaterial):\n if ((not (zellen == partBoden.cells[0:0])) and (not materialien[idx] == '-')):\n partBoden.Set(name='set' + materialien[idx], cells=zellen);\n #\n flaechen_ZX = BedingteAuswahl(elemente=partBoden.faces,\n bedingung='(abs(elem.pointOn[0][0]) <= var[0])', var=[abapys_tol]);\n partBoden.Set(faces=flaechen_ZX, name='set_ZX');\n flaechen_ZY = BedingteAuswahl(elemente=partBoden.faces,\n bedingung='(abs(elem.pointOn[0][1]) <= var[0])', var=[abapys_tol]);\n partBoden.Set(faces=flaechen_ZY, name='set_ZY');\n #\n if (len(bodenbereich[-1]) == 1):\n innererBereich = BedingteAuswahl(elemente=partBoden.cells,\n bedingung='(sqrt(elem.pointOn[0][0]**2 + elem.pointOn[0][1]**2) < var[0]+var[1])',\n var=[bodenbereich[-1][0], abapys_tol]);\n else:\n innererBereich = BedingteAuswahl(elemente=partBoden.cells,\n bedingung='(abs(elem.pointOn[0][0]) < var[0]+var[2]) and (abs(elem.pointOn[0][1]) < var[1]+var[2])',\n var=[bodenbereich[-1][0], bodenbereich[-1][1], abapys_tol]);\n #\n partBoden.Set(name='setInnererBereich', cells=innererBereich);\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef _Boden_vernetzen(modell, name, bodentiefe, voidhoehe, bodenbereich, gittergroessen,\n gitter_boden_vertikal, schichten, extrasets=False, netz=True, euler=True, kernSweep=False):\n \"\"\"Erzeuge das Gitternetz am Bauteil (Part) name aus dem aktiven modell. Dazu werden neben der\n Bauteilhoehe, die sich aus bodentiefe und voidhoehe zusammensetzt, auch die Anordnung in der\n Ebene in bodenbereich benoetigt. Die dazugehoerigen gittergroessen beziehen sich auf die\n definierten Ebenen in bodenbereich, waehrend gitter_boden_vertikal die vertikale Netzfeinheit von\n voidhoehe bis zu der tiefsten Ebene in schichten bestimmt. Optional koennen extrasets der Kanten\n fuer die Netzbestimmung erstellt werden. 
Optional kann setInnererBereich mit kernSweep == True\n als Sweep statt als Structured Mesh erstellt werden.\n \"\"\"\n import part\n import mesh\n from abaqusConstants import C3D8, C3D6, C3D4, EC3D8R, UNKNOWN_TET, UNKNOWN_WEDGE\n from abaqusConstants import DEFAULT, STANDARD, EXPLICIT, OFF, FINER, ADVANCING_FRONT, SWEEP\n from abaqusConstants import SINGLE, STRAIN, AVERAGE_STRAIN\n from auswahl import BedingteAuswahl, ZweifachbedingteKantenAuswahl\n from hilfen import abapys_tol, Log \n #\n # Die Hoehe des nicht mehr fein vernetzten Bodenkoerpers\n tiefe_uebergang = bodentiefe - schichten[-1]\n \n # Elementtypen und HourglassControl anpassen\n partBoden = modell.parts[name];\n if (euler):\n partBoden.setElementType(elemTypes=(\n mesh.ElemType(elemCode=EC3D8R, elemLibrary=EXPLICIT, secondOrderAccuracy=OFF), # hourglassControl=STIFFNESS\n mesh.ElemType(elemCode=UNKNOWN_WEDGE, elemLibrary=EXPLICIT),\n mesh.ElemType(elemCode=UNKNOWN_TET, elemLibrary=EXPLICIT)),\n regions=partBoden.sets['setAll']);\n else:\n partBoden.setElementType(elemTypes=(\n mesh.ElemType(elemCode=C3D8, elemLibrary=STANDARD, secondOrderAccuracy=OFF, distortionControl=DEFAULT),\n mesh.ElemType(elemCode=C3D6, elemLibrary=STANDARD),\n mesh.ElemType(elemCode=C3D4, elemLibrary=STANDARD)),\n regions=partBoden.sets['setAll']);\n #\n if (kernSweep):\n # MEDIAL_AXIS, ADVANCING_FRONT\n partBoden.setMeshControls(regions=partBoden.sets['setInnererBereich'].cells,\n technique=SWEEP, algorithm=ADVANCING_FRONT);\n #\n # Mesh\n # --- Unterteilung in mehrere Teilbereiche\n # 1) kanten_Untenvertikal\n # Alle vertikalen Linien unterhalb von schichten[-1]\n # 2) kanten_Schichten\n # a) Flaechen/Aussenseite\n # Alle Flaechen deren Normale keinen Vertikalanteil hat, die eine bodenbereich-Partition umgrenzen\n # b) Verbindungen\n # Horizontale Linien zwischen zwei bodenbereich-Partitionen\n # 3) kanten_Schichtvertikal\n # Alle vertikalen Linien oberhalb von schichten[-1]\n #\n # Globale Seeds vorgeben (v.a. 
fuer alle nicht im Folgenden explizit definierte Bereiche)\n partBoden.seedPart(size=gittergroessen[0][0], deviationFactor=0.1, minSizeFactor=0.1);\n #\n # 1) kanten_Untenvertikal (Alle vertikalen Linien unterhalb von schichten[-1])\n if (not (bodentiefe == schichten[-1])):\n kanten_Untenvertikal = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='(edge.pointOn[0][2] >= var[0]) and (edge.pointOn[0][2] < var[1]-var[0])',\n bedingung2='not ((vert1.pointOn[0][2]) == (vert2.pointOn[0][2]))',\n bedingung3='(vert1.pointOn[0][2]) < (vert2.pointOn[0][2])',\n var=[abapys_tol, tiefe_uebergang]);\n if (len(gittergroessen[0]) == 1):\n partBoden.seedEdgeBySize(constraint=FINER, deviationFactor=0.1,\n edges=kanten_Untenvertikal[0]+kanten_Untenvertikal[1], minSizeFactor=0.1,\n size=gittergroessen[0][0]);\n else:\n partBoden.seedEdgeByBias(biasMethod=SINGLE, constraint=FINER,\n end1Edges=kanten_Untenvertikal[1], end2Edges=kanten_Untenvertikal[0],\n maxSize=gittergroessen[0][0], minSize=gitter_boden_vertikal);\n #\n if (extrasets):\n partBoden.Set(edges=kanten_Untenvertikal, name='setK_Untenvertikal');\n #\n # 2) kanten_Schichten\n for idx, aktuellerbereich in enumerate(bodenbereich):\n # a) Flaechen/Aussenseite (Alle Flaechen deren Normale keinen Vertikalanteil hat,\n # die eine bodenbereich-Partition umgrenzen)\n if (len(bodenbereich[idx]) == 1):\n # Kontur eines Kreises mit Liniendicke abapys_tol\n kanten_Schichtflaeche = BedingteAuswahl(elemente=partBoden.edges,\n bedingung='(abs(sqrt(elem.pointOn[0][0]**2 + elem.pointOn[0][1]**2) - var[0]) < var[2]) and (elem.pointOn[0][2] > var[1]-var[2])',\n var=[bodenbereich[idx][0], tiefe_uebergang, abapys_tol]);\n else:\n # Kontur eines Rechtecks mit Liniendicke abapys_tol\n kanten_Schichtflaeche = BedingteAuswahl(elemente=partBoden.edges,\n bedingung='(elem.pointOn[0][2] > var[2]-var[3]) and (abs(elem.pointOn[0][0]) < var[0]+var[3]) and (abs(elem.pointOn[0][1]) < var[1]+var[3]) and (not ((abs(elem.pointOn[0][0]) < var[0]-var[3]) and (abs(elem.pointOn[0][1]) < var[1]-var[3])))',\n var=[bodenbereich[idx][0], bodenbereich[idx][1], tiefe_uebergang, abapys_tol]);\n #\n partBoden.seedEdgeBySize(constraint=FINER, deviationFactor=0.1,\n edges=kanten_Schichtflaeche, minSizeFactor=0.1, size=gittergroessen[idx][0]);\n #\n if (extrasets):\n partBoden.Set(edges=kanten_Schichtflaeche, name='setK_Schichtflaeche' + str(idx));\n #\n # b) Verbindungen (Horizontale Linien zwischen zwei bodenbereich-Partitionen)\n if (idx < len(bodenbereich)-1):\n # Irgendein Uebergangsbereich\n naechsterbereich = bodenbereich[idx+1];\n if (len(naechsterbereich) == 1):\n if (len(bodenbereich[idx]) == 1):\n # Kreis zu kleinerem Kreis\n kanten_Schichtverbindung = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='(sqrt(edge.pointOn[0][0]**2 + edge.pointOn[0][1]**2) < var[0]-var[2])',\n bedingung2='(sqrt(edge.pointOn[0][0]**2 + edge.pointOn[0][1]**2) > var[1]+var[2])',\n bedingung3='(vert1.pointOn[0][0]**2 + vert1.pointOn[0][1]**2) > (vert2.pointOn[0][0]**2 + vert2.pointOn[0][1]**2)',\n var=[bodenbereich[idx][0], naechsterbereich[0], abapys_tol]);\n else:\n # Rechteck zu kleinerem Kreis\n kanten_Schichtverbindung = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='(abs(edge.pointOn[0][0]) < var[0]-var[3]) and (abs(edge.pointOn[0][1]) < var[1]-var[3])',\n bedingung2='(sqrt(edge.pointOn[0][0]**2 + edge.pointOn[0][1]**2) > var[2]+var[3])',\n bedingung3='(vert1.pointOn[0][0]**2 + vert1.pointOn[0][1]**2) > (vert2.pointOn[0][0]**2 + vert2.pointOn[0][1]**2)',\n 
var=[bodenbereich[idx][0], bodenbereich[idx][1], naechsterbereich[0], abapys_tol]);\n else:\n if (len(bodenbereich[idx]) == 1):\n # Kreis zu kleinerem Rechteck\n kanten_Schichtverbindung = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='(sqrt(edge.pointOn[0][0]**2 + edge.pointOn[0][1]**2) < var[0]-var[3])',\n bedingung2='(not ((abs(edge.pointOn[0][0]) < var[1]+var[3]) and (abs(edge.pointOn[0][1]) < var[2]+var[3])))',\n bedingung3='(vert1.pointOn[0][0]**2 + vert1.pointOn[0][1]**2) > (vert2.pointOn[0][0]**2 + vert2.pointOn[0][1]**2)',\n var=[bodenbereich[idx][0], naechsterbereich[0], naechsterbereich[1], abapys_tol]);\n else:\n # Rechteck zu kleinerem Rechteck\n tol_laenge = abapys_tol;\n tol_breite = abapys_tol;\n if (bodenbereich[idx][0] == naechsterbereich[0]):\n tol_laenge = -abapys_tol;\n if (bodenbereich[idx][1] == naechsterbereich[1]):\n tol_breite = -abapys_tol;\n kanten_Schichtverbindung = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='(abs(edge.pointOn[0][0]) < var[0]-var[4]) and (abs(edge.pointOn[0][1]) < var[1]-var[5])',\n bedingung2='(not ((abs(edge.pointOn[0][0]) < var[2]+var[4]) and (abs(edge.pointOn[0][1]) < var[3]+var[5])))',\n bedingung3='(vert1.pointOn[0][0]**2 + vert1.pointOn[0][1]**2) > (vert2.pointOn[0][0]**2 + vert2.pointOn[0][1]**2)',\n var=[bodenbereich[idx][0], bodenbereich[idx][1], naechsterbereich[0], naechsterbereich[1], tol_laenge, tol_breite]);\n #\n else:\n # Innerster Bereich\n if (len(bodenbereich[idx]) == 1):\n kanten_Schichtverbindung = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='(sqrt(edge.pointOn[0][0]**2 + edge.pointOn[0][1]**2) < var[0]-var[1])',\n bedingung2='(not (((abs(edge.pointOn[0][0]) < var[1]) and ((abs(edge.pointOn[0][1]) < var[1])))))',\n bedingung3='(vert1.pointOn[0][2]) < (vert2.pointOn[0][2])',\n var=[bodenbereich[idx][0], abapys_tol]);\n else:\n kanten_Schichtverbindung = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='((edge.pointOn[0][0]) > -var[0]+var[2]) and ((edge.pointOn[0][0]) < var[0]-var[2]) and ((edge.pointOn[0][1]) > -var[1]+var[2]) and ((edge.pointOn[0][1]) < var[1]-var[2])',\n bedingung2='(not ((abs(edge.pointOn[0][0]) < var[2]) and (abs(edge.pointOn[0][1]) < var[2])))',\n bedingung3='(vert1.pointOn[0][2]) < (vert2.pointOn[0][2])',\n var=[bodenbereich[idx][0], bodenbereich[idx][1], abapys_tol]);\n #\n kanten = kanten_Schichtverbindung[0] + kanten_Schichtverbindung[1];\n if (not (kanten == partBoden.edges[0:0])):\n if (len(gittergroessen[idx]) == 1):\n partBoden.seedEdgeBySize(constraint=FINER, deviationFactor=0.1,\n edges=kanten, minSizeFactor=0.1,\n size=gittergroessen[idx][0]);\n else:\n partBoden.seedEdgeByBias(biasMethod=SINGLE, constraint=FINER,\n end1Edges=kanten_Schichtverbindung[1], end2Edges=kanten_Schichtverbindung[0],\n maxSize=gittergroessen[idx][0], minSize=gittergroessen[idx][1]);\n #\n if (extrasets):\n partBoden.Set(edges=kanten, name='setK_Schichtverbindung' + str(idx));\n #\n # 3) kanten_Schichtvertikal (Alle vertikalen Linien oberhalb von schichten[-1])\n kanten_Schichtvertikal = ZweifachbedingteKantenAuswahl(elemente=partBoden,\n bedingung1='edge.pointOn[0][2] > var[0]-var[1]', bedingung2='not ((vert1.pointOn[0][2]) == (vert2.pointOn[0][2]))',\n var=[tiefe_uebergang, abapys_tol]);\n partBoden.seedEdgeBySize(constraint=FINER, deviationFactor=0.1,\n edges=kanten_Schichtvertikal[0], minSizeFactor=0.1,\n size=gitter_boden_vertikal);\n if (extrasets):\n partBoden.Set(edges=kanten_Schichtvertikal, name='setK_Schichtvertikal');\n #\n if 
(netz):\n partBoden.generateMesh();\n if (len(partBoden.elements) == 0 ):\n Log('# Warnung: Mesh-Erstellung zu ' + name + ' fehlgeschlagen');\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef BodenspannungErstellen(modell, bodenname, nullspannung, voidhoehe, schichten, bodentiefe,\n materialschichten, verwendeteBodenwerte, verwendeteMaterialien, verbose=False):\n \"\"\"Erzeuge im modell fuer den Bodenkoerper bodenname eine Bodenspannungsverteilung (in z-Richtung\n mit GOK bei z=0). Nutze dazu eine konstante Vorbelastung nullspannung fuer den Voidbereich der\n Hoehe voidhoehe. Fuer jede Schicht der uebergebenen schichten bis bodentiefe werden ueber die\n Bezeichnungen in materialschichten die verwendeteBodenwerte fuer verwendeteMaterialien\n zugewiesen. Optional kann eine Infoausgabe mit verbose=True erstellt werden.\n \"\"\"\n from hilfen import g, Log\n #\n if (voidhoehe > 0.0) and (not (nullspannung == 0.0)):\n if (modell.rootAssembly.instances['inst' + bodenname].sets.has_key('setVoid')):\n modell.GeostaticStress(lateralCoeff1=0.5, lateralCoeff2=None, name='SpannungVoid',\n region=modell.rootAssembly.instances['inst' + bodenname].sets['setVoid'], \n stressMag1=-nullspannung, vCoord1=voidhoehe, stressMag2=-nullspannung, vCoord2=0.0);\n #\n if (schichten[-1] == bodentiefe):\n tempSchichten = [0.0] + schichten;\n else:\n tempSchichten = [0.0] + schichten + [bodentiefe];\n #\n spannung_ende = nullspannung;\n idxMat = 0;\n if (verbose):\n Log('# Index Material Endtiefe Bodenspg K0 Dichte\\n' \\\n '# ------|-----------------|----------|----------|----------|----------');\n #\n for idxSchicht, schichttiefe in enumerate(tempSchichten):\n if (idxSchicht == 0):\n continue;\n # FIXME: Auch ungueltige Materialien sinnvoll handhaben\n for idxMaterial, tempMaterial in enumerate(verwendeteMaterialien):\n if (tempMaterial == materialschichten[idxSchicht-1]):\n idxMat = idxMaterial;\n break;\n #\n tempBodendichte = verwendeteBodenwerte[idxMat][0];\n tempBodenschichtK0 = verwendeteBodenwerte[idxMat][1];\n #\n numSchicht = str(idxSchicht).zfill(2);\n name = 'Bodenspannung' + numSchicht;\n spannung_start = spannung_ende;\n spannung_ende = spannung_start + tempBodendichte*g*(schichttiefe-tempSchichten[idxSchicht-1]);\n #\n if (verbose):\n Log('# {:3d} {:>15} {:7.3f} {:7.3f} {:7.3f} {:7.3f}'.format(idxSchicht,\n tempMaterial, schichttiefe, spannung_ende, tempBodenschichtK0, tempBodendichte));\n #\n modell.GeostaticStress(lateralCoeff1=tempBodenschichtK0, lateralCoeff2=None, name=name,\n region=modell.rootAssembly.instances['inst' + bodenname].sets['setSchicht' + numSchicht], \n stressMag1=-spannung_start, vCoord1=-tempSchichten[idxSchicht-1],\n stressMag2=-spannung_ende, vCoord2=-schichttiefe);\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef BodenspannungDirektZuweisen(modell, bodenname, nullspannung, voidhoehe, schichten, bodentiefe,\n bodendichten, k0Werte):\n \"\"\"Erzeuge im modell fuer den Bodenkoerper bodenname eine Bodenspannungsverteilung (in z-Richtung\n mit GOK bei z=0). Nutze dazu eine konstante Vorbelastung nullspannung fuer den Voidbereich der\n Hoehe voidhoehe. 
Fuer jede Schichttiefe in schichten bis bodentiefe werden die uebergebenen Werte\n aus bodendichten und k0Werte direkt zugewiesen.\n \"\"\"\n from hilfen import g\n #\n if (voidhoehe > 0.0) and (not (nullspannung == 0.0)):\n if (modell.rootAssembly.instances['inst' + bodenname].sets.has_key('setVoid')):\n modell.GeostaticStress(lateralCoeff1=0.5, lateralCoeff2=None, name='SpannungVoid',\n region=modell.rootAssembly.instances['inst' + bodenname].sets['setVoid'], \n stressMag1=-nullspannung, vCoord1=voidhoehe, stressMag2=-nullspannung, vCoord2=0.0);\n #\n if (schichten[-1] == bodentiefe):\n tempSchichten = [0.0] + schichten;\n else:\n tempSchichten = [0.0] + schichten + [bodentiefe];\n #\n spannung_ende = nullspannung;\n for idxSchicht, schichttiefe in enumerate(tempSchichten):\n if (idxSchicht == 0):\n continue;\n #\n tempBodendichte = bodendichten[idxSchicht-1];\n tempBodenschichtK0 = k0Werte[idxSchicht-1];\n #\n numSchicht = str(idxSchicht).zfill(2);\n name = 'Bodenspannung' + numSchicht;\n spannung_start = spannung_ende;\n spannung_ende = spannung_start + tempBodendichte*g*(schichttiefe-tempSchichten[idxSchicht-1]);\n modell.GeostaticStress(lateralCoeff1=tempBodenschichtK0, lateralCoeff2=None, name=name,\n region=modell.rootAssembly.instances['inst' + bodenname].sets['setSchicht' + numSchicht], \n stressMag1=-spannung_start, vCoord1=-tempSchichten[idxSchicht-1],\n stressMag2=-spannung_ende, vCoord2=-schichttiefe);\n#\n\n\n# -------------------------------------------------------------------------------------------------\ndef BodenmaterialUndSectionErstellen(modell, verwendeteMaterialien, verfuegbareMaterialien,\n userroutine='', numDepVar=0, euler=True):\n \"\"\"Erstelle im modell je nach euler eine EulerianSection oder mehrere HomogeneousSolidSections\n mit allen verwendeteMaterialien. Die Abfolge der Materialschichten wird mit der Reihenfolge in\n verfuegbareMaterialien festgelegt. 
Falls ein Stoffgesetz eine Userroutine benoetigt, muss die\n Bezeichnung userroutine und die Anzahl der Rueckgabevariablen numDepVar uebergeben werden.\n Gibt [benoetigtUserroutine, verwendeteBodenwerte] zurueck.\n \n Jeder Eintrag von verfuegbareMaterialien enthaelt:\n [abqbodenname, dbbodenname, saettigung, verdichtungsgrad, stoffgesetz]\n \"\"\"\n from math import sin\n import material\n import section\n from abaqusConstants import OFF\n from hilfen import Log\n from bodendatenbank import Bodenparameter\n #\n # Da die Liste verwendeteMaterialien nicht sortiert ist und einzelne Eintraege von\n # verwendeteBodenwerte direkt geschrieben werden sollen, wird verwendeteBodenwerte schon\n # vorinitialisiert\n for benoetigtesMaterial in verwendeteMaterialien:\n material_definiert = False;\n for material in verfuegbareMaterialien:\n if (benoetigtesMaterial == material[0]):\n material_definiert = True;\n break;\n #\n if (not material_definiert):\n Log('# Abbruch: Verwendetes Material ' + benoetigtesMaterial + ' nicht in verfuegbarenMaterialien definiert');\n return [None, None];\n #\n verwendeteBodenwerte = [[0.0, 0.5]]*len(verwendeteMaterialien);\n benoetigtUserroutine = False;\n dictMaterials = {};\n for parametersatz in verfuegbareMaterialien:\n neuesMaterial = False;\n dbbodenbez = '';\n if (len(parametersatz) == 5):\n abqbodenname, dbbodenname, saettigung, verdichtungsgrad, stoffgesetz = parametersatz;\n elif (len(parametersatz) == 6):\n abqbodenname, dbbodenname, dbbodenbez, saettigung, verdichtungsgrad, stoffgesetz = parametersatz;\n else:\n Log('# Abbruch: Jedes Material in verfuegbareMaterialien muss 5 oder 6 Eintraege haben');\n return [None, None];\n #\n idxBodenwert = 0;\n for idxMaterial, verwendetesMaterial in enumerate(verwendeteMaterialien):\n if (abqbodenname == verwendetesMaterial):\n neuesMaterial = True;\n idxBodenwert = idxMaterial;\n break;\n #\n if (not neuesMaterial):\n continue;\n #\n dichte_wasser = 1.0; # t/m^3\n tempBodenparameter = Bodenparameter(name=dbbodenname, stoffgesetz=stoffgesetz,\n bezeichnung=dbbodenbez);\n if (tempBodenparameter is None):\n Log('# Abbruch: Fehler beim Laden der Materialdaten');\n return [None, None];\n #\n korndichte, mindichte, maxdichte, kritReibwinkel = tempBodenparameter[0:4];\n #\n trockendichte = mindichte + verdichtungsgrad*(maxdichte-mindichte);\n aktuelleporenzahl = korndichte/trockendichte - 1.0;\n tempBodenschichtdichte = trockendichte \\\n + saettigung * aktuelleporenzahl/(1+aktuelleporenzahl) * dichte_wasser;\n #\n tempBodenschichtK0 = 1.0 - sin(kritReibwinkel);\n verwendeteBodenwerte[idxBodenwert] = [tempBodenschichtdichte, tempBodenschichtK0];\n #\n modell.Material(name=abqbodenname);\n tempBoden = modell.materials[abqbodenname];\n tempBoden.Density(table=((tempBodenschichtdichte, ), ));\n #\n if (stoffgesetz[-6:] == '-StdIG'):\n stoffgesetz = stoffgesetz[:-6];\n if (stoffgesetz[-7:] == '-OhneIG'):\n stoffgesetz = stoffgesetz[:-7];\n #\n if (stoffgesetz == 'Elastisch'):\n tempBoden.Elastic(table=((tempBodenparameter[4], tempBodenparameter[5]), ));\n elif (stoffgesetz == 'Mohr-Coulomb'):\n tempBoden.Elastic(table=((tempBodenparameter[4], tempBodenparameter[5]), ));\n tempBoden.MohrCoulombPlasticity(table=((tempBodenparameter[6], tempBodenparameter[7]), ));\n tempBoden.mohrCoulombPlasticity.MohrCoulombHardening(\n table=((tempBodenparameter[8], tempBodenparameter[9]), ));\n tempBoden.mohrCoulombPlasticity.TensionCutOff(\n dependencies=0, table=((0.0, 0.0), ), temperatureDependency=OFF);\n elif (stoffgesetz == 
'Hypoplastisch'):\n benoetigtUserroutine = True;\n # Fuer die visco_hypo-Routinen ist die Anfangsporenzahl an 15. Stelle (und 16. egal),\n # bei den anderen an der 16. Stelle (und 15. egal)\n if (userroutine[0:5] == 'visco'):\n tempBoden_hp = tuple(tempBodenparameter[3:] + [aktuelleporenzahl, 0.0]);\n else:\n tempBoden_hp = tuple(tempBodenparameter[3:] + [0.0, aktuelleporenzahl]);\n #\n tempBoden.Depvar(n=numDepVar);\n tempBoden.UserMaterial(mechanicalConstants=tempBoden_hp);\n elif (stoffgesetz == 'Viskohypoplastisch'):\n benoetigtUserroutine = True;\n tempBoden_hp = tuple(tempBodenparameter[4:11] + tempBodenparameter[3:4] + tempBodenparameter[11:] + [0.0]);\n tempBoden.Depvar(n=numDepVar);\n tempBoden.UserMaterial(mechanicalConstants=tempBoden_hp);\n #\n if (euler):\n dictMaterials['imat' + abqbodenname] = abqbodenname;\n else:\n modell.HomogeneousSolidSection(material=abqbodenname, name='sec' + abqbodenname,\n thickness=None);\n #\n if (euler):\n modell.EulerianSection(data=dictMaterials, name='secEuler');\n #\n return [benoetigtUserroutine, verwendeteBodenwerte];\n#\n","repo_name":"d-zo/abapys","sub_path":"abapys/boden.py","file_name":"boden.py","file_ext":"py","file_size_in_byte":53984,"program_lang":"python","lang":"de","doc_type":"code","stars":12,"dataset":"github-code","pt":"73"} +{"seq_id":"27163277800","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom plasta.gui import BaseGUI\nfrom model.turno import Turno\nfrom model.turno.add import AddTurno\n\n\nclass TurnoGUI( BaseGUI ):\n\n def __init__(self, manager, managers = [], parent = None):\n BaseGUI.__init__(self, manager, managers, parent)\n self.loadUI()\n self.DialogAddClass = AddTurno\n\n self.addFilter(u'Fecha', Turno.fecha)\n self.addFilter(u'Empleado', Turno.empleado)\n self.addFilter(u'Horallegada', Turno.horaLlegada)\n self.addFilter(u'Horasalida', Turno.horaSalida)\n self.addFilter(u'Horas', Turno.horas)\n self.addFilter(u'Producto', Turno.producto)\n self.addFilter(u'Cantidad', Turno.cantidad)\n\n self.addTableColumn(u'Fecha', Turno.fecha)\n self.addTableColumn(u'Empleado', Turno.empleado)\n self.addTableColumn(u'Horallegada', Turno.horaLlegada)\n self.addTableColumn(u'Horasalida', Turno.horaSalida)\n self.addTableColumn(u'Horas', Turno.horas)\n self.addTableColumn(u'Producto', Turno.producto)\n self.addTableColumn(u'Cantidad', Turno.cantidad)\n\n self._start_operations()\n","repo_name":"marcosstevens2012/GPP","sub_path":"MyApp/model/turno/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"17805728294","text":"# 排行榜\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\nfrom Work.Common.base_page import basePage\n\n\nclass rankingPage(basePage):\n rankingMethod1 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[1]/a')\n rankingMethod2 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[2]/a')\n rankingMethod3 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[3]/a')\n rankingMethod4 = (By.XPATH, '/html/body/div[2]/div/div[2]/div/div[2]/ul/li[4]/a')\n\n def getBookList(self):\n bookList = []\n for i in range(30):\n text = self.getText((By.XPATH, f'/html/body/div[2]/div/div[1]/div/div/div[2]/table/tbody/tr[{i + 1}]'))\n bookList.append(text)\n return bookList\n\n def rankingMethodSwitch(self):\n list1 = self.getBookList()\n self.click(self.rankingMethod2)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n\n list1 
= self.getBookList()\n self.click(self.rankingMethod3)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n\n list1 = self.getBookList()\n self.click(self.rankingMethod4)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n\n list1 = self.getBookList()\n self.click(self.rankingMethod1)\n sleep(2)\n list2 = self.getBookList()\n assert list1 != list2, '排行榜按钮切换异常'\n","repo_name":"winterfellll/graduationDesign","sub_path":"Work/PageObject/ranking_page.py","file_name":"ranking_page.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7496997310","text":"num = int(input(\"Digite a sua idade: \"))\r\n\r\nif num < 14:\r\n tipo=\"Criança\"\r\nelif num >= 14 and num <26:\r\n tipo=\"Jovem\"\r\nelif num >= 26 and num <65:\r\n tipo=\"Adulto\"\r\nelse:\r\n tipo=\"3a Idade\"\r\n\r\nprint('O número {} é {}'.format(num, tipo))\r\n","repo_name":"AlefGaigher/Curso_Python","sub_path":"definiridade.py","file_name":"definiridade.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"29401672532","text":"#Steuerung SBR-Anlage Kläranlage Altenahr MB2022-06\n#control program for the emergency & temporary wastewater treatment plant in Altenahr, \n#which was installed in April 2022 after the catastrophic flood from Juli 2021.\n#This temporary control program ran successfully by a Raspberry Pi Zero WH \n#connected to a LOW-Triggered relais board from May until July 2022.\n#With many thanks to Ludwig, Calvin, Kurt and Christoph for their hardware support, \n#the friendly commitment and that you tried it with me and this program!\n\nimport time\nimport tkinter\nimport threading\nfrom datetime import datetime, timedelta\nimport RPi.GPIO as GPIO\n\nmain = tkinter.Tk()\nmain.geometry('1000x700')\nmain.title('Steuerung SBR-Anlage Altenahr')\n\ndef ende():\n '''Close and End'''\n global hauptlauf, sbr1_lauf\n hauptlauf = \"AUS\"\n sbr1_lauf = \"AUS\"\n allout()\n GPIO.cleanup()\n main.destroy()\n\ndef tgesberechnen():\n '''calculate total time'''\n global t_d1, t_n1, t_d2, t_n2, t_sed, t_abzug, t_still, t_ges\n t_ges = t_d1 + t_n1 + t_d2 + t_n2 + t_sed + t_abzug + t_still\n ausgabetges[\"text\"] = str(t_ges)\n\ndef uebernehmen(phasenzeit, eingabevariable, ausgabevariable,\n fehlermeldungsvariable):\n '''check input'''\n try:\n ganzzahl = int(eingabevariable.get())\n if 0 <= ganzzahl < 1000:\n phasenzeit = ganzzahl\n ausgabevariable[\"text\"] = str(phasenzeit)\n else:\n t_11 = threading.Thread(target = fehlermeldung,\n args = (fehlermeldungsvariable,))\n t_11.start()\n except:\n t_11 = threading.Thread(target = fehlermeldung,\n args = (fehlermeldungsvariable,))\n t_11.start()\n return(phasenzeit)\n\ndef fehlermeldung(fehlermeldungsv):\n '''error message'''\n fehlermeldungsv[\"text\"] = \"Bitte eine ganze Zahl zwischen 0 und 999 eingeben\"\n time.sleep(3)\n fehlermeldungsv[\"text\"] = \"\"\n\ndef deni1get():\n '''get input for Deni1'''\n global t_d1\n t_d1 = uebernehmen(t_d1, eingabe1, ausgabedeni1, fehlerdeni1)\n tgesberechnen()\n eingabe1.delete(0, 'end')\n\ndef nitri1get():\n '''get input for Nitri1'''\n global t_n1\n t_n1 = uebernehmen(t_n1, eingabe2, ausgabenitri1, fehlernitri1)\n tgesberechnen()\n eingabe2.delete(0, 'end')\n\ndef deni2get():\n '''get input for Deni2'''\n global t_d2\n t_d2 = uebernehmen(t_d2, eingabe3, ausgabedeni2, fehlerdeni2)\n tgesberechnen()\n 
eingabe3.delete(0, 'end')\n\ndef nitri2get():\n '''get input for Nitri2'''\n global t_n2\n t_n2 = uebernehmen(t_n2, eingabe4, ausgabenitri2, fehlernitri2)\n tgesberechnen()\n eingabe4.delete(0, 'end')\n\ndef sedget():\n '''get input for Sedimentation'''\n global t_sed\n t_sed = uebernehmen(t_sed, eingabe5, ausgabesed, fehlersed)\n tgesberechnen()\n eingabe5.delete(0, 'end')\n\ndef klabzugget():\n '''get input for clarification'''\n global t_abzug\n t_abzug = uebernehmen(t_abzug, eingabe6, ausgabeklabzug, fehlerklabzug)\n tgesberechnen()\n eingabe6.delete(0, 'end')\n\ndef stillget():\n '''get input for waiting time'''\n global t_still\n t_still = uebernehmen(t_still, eingabe7, ausgabestillstand,\n fehlerstillstand)\n tgesberechnen()\n eingabe7.delete(0, 'end')\n\ndef GPIO_initialisieren():\n '''initialise GPIOs'''\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(23, GPIO.OUT) #Zulaufpumpe\n GPIO.setup(22, GPIO.OUT) #Ruehrwerk\n GPIO.setup(25, GPIO.OUT) #Beluefter1\n GPIO.setup(24, GPIO.OUT) #Beluefter2\n\ndef allout():\n '''set out all GPIOs'''\n GPIO.output(23, GPIO.HIGH) #Ausschalten Zulaufpumpe\n GPIO.output(22, GPIO.HIGH) #Ausschalten Ruehrwerk\n GPIO.output(25, GPIO.HIGH) #Ausschalten Belüfter1\n GPIO.output(24, GPIO.HIGH) #Ausschalten Belüfter2\n\ndef countdownSBR1():\n '''calculate and show time until next phase starts'''\n global sbr1_auto, sbr1_phaseendezeit\n while sbr1_auto == \"AN\":\n while datetime.now() < sbr1_phaseendezeit:\n restzeit = str((sbr1_phaseendezeit - datetime.now()))\n SBR1Restzeitlabel['text'] = restzeit[0:restzeit.find('.')]\n time.sleep(1)\n SBR1Restzeitlabel['text'] = ''\n\ndef SBR1an():\n '''run SBR1'''\n global t_d1, t_n1, t_d2, t_n2, t_sed, t_abzug, t_still, sbr1_lauf, sbr1_phaseendezeit, sbr1_auto, sbr1_count\n sbr1_auto = \"AN\"\n t_5 = threading.Thread(target = countdownSBR1)\n t_5.start()\n while sbr1_lauf == \"AN\":\n #1. Deniphase 1: Ruehrwerk an\n GPIO.output(22, GPIO.LOW) #Einschalten Ruehrwerk\n sbr1phase[\"text\"] = \"Denitrifikation 1\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_d1)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #2. Nitriphase 1: Beluefter an\n GPIO.output(25, GPIO.LOW) #Einschalten Belüfter1\n GPIO.output(24, GPIO.LOW) #Einschalten Belüfter2\n sbr1phase[\"text\"] = \"Nitrifikation 1\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_n1)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #3. Deniphase 2: Beluefter aus\n GPIO.output(25, GPIO.HIGH) #Ausschalten Belüfter1\n GPIO.output(24, GPIO.HIGH) #Ausschalten Belüfter2\n sbr1phase[\"text\"] = \"Denitrifikation 2\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_d2)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #4. 
Nitriphase 2: Beluefter an\n GPIO.output(25, GPIO.LOW) #Einschalten Belüfter1\n GPIO.output(24, GPIO.LOW) #Einschalten Belüfter2\n sbr1phase[\"text\"] = \"Nitrifikation 2\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_n2)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #5. Sedimentations-/Absetzphase: Ruehrwerk und Belüfter aus\n GPIO.output(22, GPIO.HIGH) #Ausschalten Ruehrwerk\n GPIO.output(25, GPIO.HIGH) #Ausschalten Belüfter1\n GPIO.output(24, GPIO.HIGH) #Ausschalten Belüfter2\n sbr1phase[\"text\"] = \"Sedimentation\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_sed)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n #7. Klarwasserabzugs- und Zulaufphase\n GPIO.output(23, GPIO.LOW) #Einschalten Zulaufpumpe\n sbr1phase[\"text\"] = \"Klarwasserabzug, Zulauf\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_abzug)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n GPIO.output(23, GPIO.HIGH) #Ausschalten Zulaufpumpe\n\n #9. Stillstandszeit\n sbr1phase[\"text\"] = \"Stillstandszeit\"\n sbr1phasestart[\"text\"] = time.strftime(\"%H:%M:%S\",time.localtime())\n sbr1_phaseendezeit = datetime.now()+timedelta(minutes = t_still)\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n while datetime.now() < sbr1_phaseendezeit and sbr1_lauf == \"AN\":\n time.sleep(1)\n\n if sbr1_lauf == \"AN\":\n sbr1_count += 1\n SBR1Durchganglabel[\"text\"] = str(sbr1_count)\n\n sbr1phase[\"text\"] = \"Pause\"\n sbr1_phaseendezeit = datetime.now()\n sbr1phaseende[\"text\"] = sbr1_phaseendezeit.strftime('%H:%M:%S')\n sbr1phasestart[\"text\"] = \"\"\n sbr1_auto = \"AUS\"\n\ndef schalten1():\n '''put SBR1 on or off'''\n global sbr1_lauf\n if sbr1_lauf == \"AUS\" and sbr1phase[\"text\"] == \"Pause\":\n sbr1_lauf = \"AN\"\n schalter1['bg'] = 'lime'\n schalter1['text'] = 'An'\n t_1 = threading.Thread(target = SBR1an)\n t_1.start()\n else:\n sbr1_lauf = \"AUS\"\n schalter1['bg'] = 'red'\n schalter1['text'] = 'Aus'\n\ndef zeitstempelaktualisieren():\n '''update time stamp'''\n global hauptlauf\n while hauptlauf == \"AN\":\n zeitjetzt['text'] = time.strftime(\"%H:%M:%S\",time.localtime())\n time.sleep(1)\n\ndef cputempaktualisieren():\n '''update temperatur for CPU'''\n global hauptlauf\n while hauptlauf == \"AN\":\n tempData = \"/sys/class/thermal/thermal_zone0/temp\"\n dateilesen = open(tempData, \"r\")\n temperatur = dateilesen.readline(5)\n dateilesen.close()\n temperatur = round(float(temperatur)/1000,1)\n cputemp['text'] = temperatur\n time.sleep(10)\n\n#Programm Start\nGPIO_initialisieren()\nallout()\nhauptlauf = \"AN\"\nsbr1_lauf = \"AN\"\nsbr1_auto = \"AUS\"\nt_d1 = 0#30 #Min\nt_n1 = 240#210 #Min\nt_d2 = 0 #Min\nt_n2 = 0 #Min\nt_sed = 30#60 #Min\nt_abzug = 5#60 #Min\nt_still = 15#30 Min\nt_ges = t_d1 + t_n1 + t_d2 + t_n2 + t_sed + t_abzug + t_still\nsbr1_count = 0\nsbr1_phaseendezeit = datetime.now()\n\n#Überschrift\ntkinter.Label(main, text = 'Kläranlage Altenahr ', font = ('arial', 11,'bold')\n ).place(x = 10, y = 
10)\ntkinter.Label(main, text = 'SBR ', font = ('arial', 20, 'bold')\n ).place(x = 10, y = 50, anchor = 'w')\ntkinter.Label(main, text = '1', font = ('arial', 20, 'bold')\n ).place(x = 250, y = 50, anchor = 'center')\n\n#Schalter\nschalter1 = tkinter.Button(main, width = 8, text = 'Schalten',\n command = schalten1, cursor = 'tcross', bg = 'lime',\n font = ('arial', 10, 'bold'))\nschalter1.place(x = 250, y = 85, anchor = 'center')\n\n#Phase\ntkinter.Label(main, text = 'Aktuelle Phase:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 120, anchor = 'w')\nsbr1phase = tkinter.Label(main, text = 'Pause', font = ('arial', 11, 'bold'))\nsbr1phase.place(x = 250, y = 120, anchor = 'center')\n\ntkinter.Label(main, text = 'Start Phase:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 140, anchor = 'w')\nsbr1phasestart = tkinter.Label(main, text = '-', font = ('arial', 11, 'bold'))\nsbr1phasestart.place(x = 250, y = 140, anchor = 'center')\n\ntkinter.Label(main, text = 'Ende Phase:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 160, anchor = 'w')\nsbr1phaseende = tkinter.Label(main, text = '-', font = ('arial', 11, 'bold'))\nsbr1phaseende.place(x = 250, y = 160, anchor = 'center')\n\ntkinter.Label(main, text = 'Restzeit:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 180, anchor = 'w')\nSBR1Restzeitlabel = tkinter.Label(main, text = '-',\n font = ('arial', 11, 'bold'))\nSBR1Restzeitlabel.place(x = 250, y = 180, anchor = 'center')\n\ntkinter.Label(main, text = 'Durchgang:', font = ('arial', 11, 'bold')\n ).place(x = 10, y = 200, anchor = 'w')\nSBR1Durchganglabel = tkinter.Label(main, text = str(sbr1_count),\n font = ('arial', 11, 'bold'))\nSBR1Durchganglabel.place(x = 250, y = 200, anchor = 'center')\n\n#Deni1\ntkinter.Label(main, text = 'Denitrifikation 1: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 250, anchor = 'e')\nausgabedeni1 = tkinter.Label(main, text = str(t_d1),\n font = ('arial', 11, 'bold'))\nausgabedeni1.place(x = 245, y = 240, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 240, anchor = 'w')\neingabe1 = tkinter.Entry(main, width = 5)\neingabe1.place(x = 245, y = 260, anchor = 'e')\neingabe1but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = deni1get)\neingabe1but.place(x = 250, y = 260, anchor = 'w')\nfehlerdeni1 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerdeni1.place(x = 160, y = 260, anchor = 'w')\n\n#Nitri1\ntkinter.Label(main, text = 'Nitrifikation 1: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 310, anchor = 'e')\nausgabenitri1 = tkinter.Label(main, text = str(t_n1)\n , font = ('arial', 11, 'bold'))\nausgabenitri1.place(x = 245, y = 300, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 300, anchor = 'w')\neingabe2 = tkinter.Entry(main, width = 5)\neingabe2.place(x = 245, y = 320, anchor = 'e')\neingabe2but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = nitri1get)\neingabe2but.place(x = 250, y = 320, anchor = 'w')\nfehlernitri1 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlernitri1.place(x = 160, y = 320, anchor = 'w')\n\n#Deni2\ntkinter.Label(main, text = 'Denitrifikation 2: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 370, anchor = 'e')\nausgabedeni2 = tkinter.Label(main, text = str(t_d2),\n font = ('arial', 11, 'bold'))\nausgabedeni2.place(x = 245, y = 360, anchor = 'e')\ntkinter.Label(main, 
text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 360, anchor = 'w')\neingabe3 = tkinter.Entry(main, width = 5)\neingabe3.place(x = 245, y = 380, anchor = 'e')\neingabe3but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = deni2get)\neingabe3but.place(x = 250, y = 380, anchor = 'w')\nfehlerdeni2 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerdeni2.place(x = 160, y = 380, anchor = 'w')\n\n#Nitri2\ntkinter.Label(main, text = 'Nitrifikation 2: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 430, anchor = 'e')\nausgabenitri2 = tkinter.Label(main, text = str(t_n2),\n font = ('arial', 11, 'bold'))\nausgabenitri2.place(x = 245, y = 420, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 420, anchor = 'w')\neingabe4 = tkinter.Entry(main, width = 5)\neingabe4.place(x = 245, y = 440, anchor = 'e')\neingabe4but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = nitri2get)\neingabe4but.place(x = 250, y = 440, anchor = 'w')\nfehlernitri2 = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlernitri2.place(x = 160, y = 440, anchor = 'w')\n\n#Absetz-/Sedimentation\ntkinter.Label(main, text = 'Sedimentation: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 500, anchor = 'e')\nausgabesed = tkinter.Label(main, text = str(t_sed), font = ('arial', 11,\n 'bold'))\nausgabesed.place(x = 245, y = 490, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 490, anchor = 'w')\neingabe5 = tkinter.Entry(main, width = 5)\neingabe5.place(x = 245, y = 510, anchor = 'e')\neingabe5but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = sedget)\neingabe5but.place(x = 250, y = 510, anchor = 'w')\nfehlersed = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlersed.place(x = 160, y = 510, anchor = 'w')\n\n#Klarwasserabzug, Zulauf\ntkinter.Label(main, text = 'Klarwasserabzug: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 560, anchor = 'e')\nausgabeklabzug = tkinter.Label(main, text = str(t_abzug),\n font = ('arial', 11, 'bold'))\nausgabeklabzug.place(x = 245, y = 550, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 550, anchor = 'w')\neingabe6 = tkinter.Entry(main, width = 5)\neingabe6.place(x = 245, y = 570, anchor = 'e')\neingabe6but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = klabzugget)\neingabe6but.place(x = 250, y = 570, anchor = 'w')\nfehlerklabzug = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerklabzug.place(x = 160, y = 570, anchor = 'w')\n\n#Stillstandszeit\ntkinter.Label(main, text = 'Stillstandszeit: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 620, anchor = 'e')\nausgabestillstand = tkinter.Label(main, text = str(t_still),\n font = ('arial', 11, 'bold'))\nausgabestillstand.place(x = 245, y = 610, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 610, anchor = 'w')\neingabe7 = tkinter.Entry(main, width = 5)\neingabe7.place(x = 245, y = 630, anchor = 'e')\neingabe7but = tkinter.Button(main, text = \"OK\", font = ('arial', 8, 'bold'),\n width = 1, command = stillget)\neingabe7but.place(x = 250, y = 630, anchor = 'w')\nfehlerstillstand = tkinter.Label(main, font = ('arial', 11, 'bold'), fg = 'red')\nfehlerstillstand.place(x = 160, y 
= 630, anchor = 'w')\n\n#Zyklus/ Gesamtzeit\ntkinter.Label(main, text = 'Zykluszeit: ', font = ('arial', 11, 'bold')\n ).place(x = 150, y = 670, anchor = 'e')\nausgabetges = tkinter.Label(main, text = str(t_ges), font = ('arial', 11, 'bold'))\nausgabetges.place(x = 245, y = 670, anchor = 'e')\ntkinter.Label(main, text = 'Min.', font = ('arial', 11, 'bold')\n ).place(x = 250, y = 670, anchor = 'w')\n\n#Zeitstempel anzeigen und aktualisieren\nzeitjetzt = tkinter.Label(main, text = '-', font = ('arial', 11, 'bold'))\nzeitjetzt.place(x = 625, y = 50, anchor = 'w')\nthreading.Thread(target = zeitstempelaktualisieren).start()\n\n#bei Programmstart SBR-Steuerung anschalten\nthreading.Thread(target = SBR1an).start()\n\n#CPU-Temperatur anzeigen und aktualisieren\ntkinter.Label(main, text = 'CPU-Temp.: ', font = ('arial', 11, 'bold')\n ).place (x = 625, y = 70, anchor = 'w')\ntkinter.Label(main, text = '°C', font = ('arial', 11, 'bold')\n ).place (x = 750, y = 70, anchor = 'w')\ncputemp = tkinter.Label(main, text = '-', font = ('arial', 11, 'bold'))\ncputemp.place(x = 750, y = 70, anchor = 'e')\nthreading.Thread(target = cputempaktualisieren).start()\n\n#Beenden-Schalter anordnen\nendeschalt = tkinter.Button(main, text = 'Beenden', command = ende,\n cursor = 'tcross', bg= 'white',\n font = ('arial', 11, 'bold'))\nendeschalt.place(x = 625, y = 20, anchor = 'w')\n\n# Programm auch beenden wenn das Fenster geschlossen wird\nmain.protocol(\"WM_DELETE_WINDOW\", ende)\n\n#loop\nmain.mainloop()\n","repo_name":"markusbombeck/wastewatertreatment","sub_path":"sbr_altenahr.py","file_name":"sbr_altenahr.py","file_ext":"py","file_size_in_byte":19136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"41708852934","text":"import streamlit as st\nimport plotly_express as px\n\nfrom api_calls import get_combined_manager_history\n \nmanagers_dict = {\n \"Chistian\": 1302722,\n \"Hans-Martin\": 2584139,\n \"Andreas\": 4306388,\n}\n\ndf = get_combined_manager_history(managers_dict=managers_dict)\n\ndf = df.drop(columns=[\"rank_sort\"])\ndf[\"value\"] = df[\"value\"]/10\ndf[\"bank\"] = df[\"bank\"]/10\n\ndf = df.rename(columns={\n \"event\":\"gameweek\",\n \"rank\":\"gw_rank\",\n \"event_transfers\":\"transfers\",\n \"event_transfers_cost\":\"transfer_cost\",\n \"value\":\"team_value\",\n \"bank\":\"money_in_bank\",\n \"points_on_bench\":\"points_benched\"\n })\n\n\ndf[\"total_points_benched\"] = df.groupby([\"manager\"])[\"points_benched\"].transform(\"cumsum\")\ndf[\"total_transfers\"] = df.groupby([\"manager\"])[\"transfers\"].transform(\"cumsum\")\ndf[\"total_transfer_cost\"] = df.groupby([\"manager\"])[\"transfer_cost\"].transform(\"cumsum\")\n\n\nst.header(\"Interaktiv graf for FPL 2022/23\")\nst.text(\"Fra drop down menuen kan du vælge mellem en række variable, der beskriver \\nudviklingen henover sæsonen.\")\n\ny_vals = [col for col in df.columns if col != \"gameweek\"]\ny_axis_val = st.selectbox(\"Vælg variabel til y-aksen:\", options=y_vals)\n\n\nplot = px.line(\n df, \n x=\"gameweek\", \n y=y_axis_val,\n color=\"manager\",\n markers=True\n )\n\nreverse_vals = [\"gw_rank\",\"rank_sort\",\"overall_rank\"]\nif y_axis_val in reverse_vals:\n plot.update_yaxes(\n # autorange=\"reversed\",\n range=[10880000,0])\n\nplot.update_layout(\n xaxis = dict(\n tickmode = 'linear',\n tick0 = 1,\n dtick = 1\n 
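        # With tickmode='linear', tick0=1 and dtick=1 force one x-axis tick per gameweek.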
)\n)\n\nst.plotly_chart(plot)","repo_name":"Chr2507/fpl","sub_path":"fpl/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29711412578","text":"from enum import Enum\n\nclass RequestResponse:\n def __init__(self, page_info, results):\n self.page_info = page_info\n self.results = results\n\nclass PageInfo:\n def __init__(self, page, pages, results, page_size):\n self.page = page\n self.pages = pages\n self.results = results\n self.page_size = page_size\n\nclass UserResponse:\n def __init__(self, id, email, display_name):\n self.id = id\n self.email = email\n self.display_name = display_name\n\nclass MediaResponse:\n def __init__(self, id, external_service_id, external_service_id_4k, rating_key, status, media_type, created_at, updated_at):\n self.id = id\n self.external_service_id = external_service_id\n self.external_service_id_4k = external_service_id_4k\n self.rating_key = rating_key\n self.status = status\n self.media_type = media_type\n self.created_at = created_at\n self.updated_at = updated_at\n\nclass MediaStatus(Enum):\n UNKNOWN = 1\n PENDING = 2\n PROCESSING = 3\n PARTIALLY_AVAILABLE = 4\n AVAILABLE = 5\n\nclass MediaRequestResponse:\n def __init__(self, id, media, created_at, updated_at, requested_by):\n self.id = id\n self.media = media\n self.created_at = created_at\n self.updated_at = updated_at\n self.requested_by = requested_by\n","repo_name":"justbest23/media-cleaner-python-broken","sub_path":"src/overseer/overseerr_responses.py","file_name":"overseerr_responses.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"23436237144","text":"from PyQt5.QtCore import Qt, QSize, QTimer\nfrom PyQt5.QtWidgets import QApplication, QWidget, QTabWidget, QSlider, QLabel, QPushButton, QHBoxLayout, QFormLayout, QGridLayout\nfrom PyQt5.QtGui import QImage, QPixmap\nimport cv2\nimport sys\n\nclass MainApp(QTabWidget):\n\n def __init__(self):\n QTabWidget.__init__(self)\n self.acquisition_tab = QWidget()\n self.tab2 = QWidget()\n self.tab3 = QWidget()\n self.video_size = QSize(1280, 720)\n self.addTab(self.acquisition_tab,\"Acquisition\")\n self.addTab(self.tab2,\"Tab 2\")\n self.addTab(self.tab3,\"Tab 3\")\n self.acquisition_tab_UI()\n self.setWindowTitle(\"tab demo\")\n\n def acquisition_tab_UI(self):\n \"\"\"Initialize widgets.\n \"\"\"\n self.image_label = QLabel()\n self.image_label.setFixedSize(self.video_size)\n\n start_preview_button = QPushButton(\"Start preview\")\n start_preview_button.clicked.connect(self.setup_camera)\n\n stop_preview_button = QPushButton(\"Stop preview\")\n stop_preview_button.clicked.connect(self.stop_preview)\n\n quit_button = QPushButton(\"Quit\")\n quit_button.clicked.connect(self.close)\n\n save_button = QPushButton(\"Save\")\n save_button.clicked.connect(self.saveVideo)\n\n brightness_label = QLabel(\"Brightness\")\n brightness_slider = QSlider(Qt.Horizontal, self)\n brightness_slider.setFocusPolicy(Qt.NoFocus)\n brightness_slider.valueChanged[int].connect(self.changedBrightnessValue)\n brightness_slider.setMaximum(255)\n\n contrast_label = QLabel(\"Contrast\")\n contrast_slider = QSlider(Qt.Horizontal, self)\n contrast_slider.setFocusPolicy(Qt.NoFocus)\n contrast_slider.valueChanged[int].connect(self.changedContrastValue)\n contrast_slider.setMaximum(255)\n\n saturation_label = QLabel(\"Saturation\")\n saturation_slider = 
QSlider(Qt.Horizontal, self)\n saturation_slider.setFocusPolicy(Qt.NoFocus)\n saturation_slider.valueChanged[int].connect(self.changedSaturationValue)\n saturation_slider.setMaximum(255)\n\n l2 = QFormLayout();\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addWidget(QLabel());\n l2.addRow(brightness_label, brightness_slider)\n l2.addRow(contrast_label, contrast_slider)\n l2.addRow(saturation_label, saturation_slider)\n\n l3 = QHBoxLayout();\n l3.addWidget(start_preview_button)\n l3.addWidget(stop_preview_button)\n l3.addWidget(save_button)\n l3.addWidget(quit_button)\n\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.image_label, 0, 0)\n grid.addLayout(l3,1,0)\n\n layout = QHBoxLayout();\n layout.addLayout(grid);\n layout.addLayout(l2);\n self.acquisition_tab.setLayout(layout)\n\n def changedBrightnessValue(self,value):\n brightness = (value - 0)/(255 - 0)\n self.capture.set(10,brightness)\n\n def changedContrastValue(self,value):\n contrast = (value - 0)/(255 - 0)\n self.capture.set(11,contrast)\n\n def changedSaturationValue(self,value):\n saturation = (value - 0)/(255 - 0)\n self.capture.set(12,saturation)\n\n def setup_camera(self):\n \"\"\"Initialize camera.\n \"\"\"\n self.capture = cv2.VideoCapture(0)\n self.capture.set(3, self.video_size.width())\n self.capture.set(4, self.video_size.height())\n\n self.timer = QTimer()\n self.timer.timeout.connect(self.display_video_stream)\n self.timer.start(30)\n\n def display_video_stream(self):\n \"\"\"Read frame from camera and repaint QLabel widget.\n \"\"\"\n _, frame = self.capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.flip(frame, 1)\n image = QImage(frame, frame.shape[1], frame.shape[0],\n frame.strides[0], QImage.Format_RGB888)\n self.image_label.setPixmap(QPixmap.fromImage(image))\n\n def saveVideo(self):\n print(\"algo\")\n\n def stop_preview(self):\n self.timer.stop();\n self.capture.release();\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = MainApp()\n win.show()\n sys.exit(app.exec_())\n","repo_name":"malandaj/opencv-video-test","sub_path":"pyqt-test.py","file_name":"pyqt-test.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"16271675003","text":"\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import audioFeatureExtraction\nfrom pydub import AudioSegment\nimport matplotlib.pyplot as plt\nimport scipy\nimport numpy as np\nimport pandas as pd\nfrom convert_time_overlapping_df_into_time_non_overapping_df import make_non_time_overlapping_df_from_time_overlapping_df\nimport os\nfrom make_df_et_matrix import df_et_matrix_one_file\n\n\ndef divide_intvl_into_subtnvl_of_given_size(ini_pt, fin_pt, len_subintvl):\n l=[]\n while ini_pt <= fin_pt:\n l.append(ini_pt)\n ini_pt=ini_pt+len_subintvl\n l=np.array(l)\n return l\n\n\n'''\ndef chunk_sound(folder,mp3_file, chunk_dur,\\\n dir_name=\"/home/susovan/Documents/music_detection/muspeak_mirex2015/music_chunks\"):\n \n\n :param folder: str\n :param mp3_file: .mp3 file\n :param chunk_dur: scalar, the chunk duration to segment the audio\n :return: ???\n \n full_name_mp3file=os.path.join(folder,mp3_file)\n sound = AudioSegment.from_file(full_name_mp3file, format=\"mp3\")\n #sound=sound[:20*1000]\n \"If the wavfile is too large, there can be a memory issue, raising the error\" \\\n \" pydub.exceptions.CouldntDecodeError: Couldn't find data 
header in wav data\"\n #dir_name=\"/home/susovan/Documents/music_detection/muspeak_mirex2015/music_chunks\"\n for i, chunk in enumerate(sound[::chunk_dur*1000]):\n with open(os.path.join(dir_name,\"sound-%s.mp3\" % i) , \"wb\") as f:\n chunk.export(f, format=\"mp3\")\n return chunk\n \n '''\n\n\ndef make_time_chunks_from_df_of_start_end_times(df_nonoverlapping, chunk_dur):\n '''\n Below:\n df=df_et_matrix_one_file(folder,file1)[0]\n df_nonoverlapping=make_non_time_overlapping_df_from_time_overlapping_df(df)\n\n :param df: pd dataframe containing the array of start and end times [S,E] of music and speech with annotations (m, s)\n chunk_dur is in seconds,\n :return: list of all chunks of of a certain chunk duration chunk_dur for each S and E\n lst is the list of all chunks of duration chunk_dur if the audio seglent is at least chun_dur sec. long\n lst2 is the list of all corresponding annotations (music or speech)\n '''\n\n x = df_nonoverlapping.values[:, 0:2]# x is the matrix of start and endtimes so that music and speech don't overlap\n x = x.astype(float)\n y = df_nonoverlapping.values[:, 2] # y is the corresponding array of the corresponding annotations (m, s)\n #y=np.array([y]).T #to make y vertical\n lst=[] #list of np arrays of starttimes and endtimes of length chunk_dur, note\\\n # that x.shape[0] is the number of annotated audio segments in the initial audio file\n lst2= [] #stores the annotations 'm or s) for the above\n count_chunk_list=[]\n for i in range(x.shape[0]):#for i-th row of x\n if x[i,1]-x[i,0] >= chunk_dur:\n #ct_chunks=int((x[i,1]-x[i,0])/chunk_dur) #counts number of chunks\n #count_chunk_list.append(ct_chunks)\n #ct_chunks = round((x[i, 1] - x[i, 0]) / chunk_dur) # counts number of chunks\n #lst.append( np.linspace( x[i,0], x[i,1], ct_chunks ) )\n lst.append(divide_intvl_into_subtnvl_of_given_size(ini_pt=x[i,0], fin_pt=x[i,1], len_subintvl=chunk_dur))\n tmp = y[i]\n #lst2[i].append( tmp for k in range(ct_chunks))\n ct_chunks= len( divide_intvl_into_subtnvl_of_given_size(ini_pt=x[i,0], fin_pt=x[i,1], len_subintvl=chunk_dur) )\n lst2.append( [tmp for k in range(ct_chunks)] )\n\n list_start_endtime_for_chunks= []\n list_annotation_for_chunks=[]\n #df_start_endtime_chunks=pd.DataFrame({''})\n for i in range(len(lst)):\n if lst[i]!= []:\n list_start_endtime_for_chunks.append(lst[i])\n list_annotation_for_chunks.append(lst2[i])\n df_start_endtime_annotation_chunks = pd.DataFrame({'list_start_endtime_for_chunks': list_start_endtime_for_chunks, 'list_annotation_for_chunks': list_annotation_for_chunks})\n return df_start_endtime_annotation_chunks\n\ndef make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio, \\\n dir_name = \"/home/susovan/Documents/music_detection/muspeak_mirex2015/music_chunks\"):\n '''\n\n\n :param df_start_endtime_annotation_chunks: defined above, it's a df that contains 1) list a of np arrays that stores\n the vectors of chunk_dur long sound chunk from an audio file, and 2) list of corr. 
annotations, m or s.\n audio: initial audio file, ideally would have 1 channel\n :param dir_name: str, name of dir where audio files will be saved\n :return: corresponding df of audio segments (using pydub) and corresponding annotations\n '''\n\n audio = AudioSegment.from_file(audio, format=\"mp3\").set_channels(1)\n df_col_names=df_start_endtime_annotation_chunks.columns.values #gives the names of the columns\n list_start_endtime_for_chunks=df_start_endtime_annotation_chunks[df_col_names[0]].tolist()\n list_annotation_for_chunks=df_start_endtime_annotation_chunks[df_col_names[1]].tolist()\n audio_seg_lst=[] #contains the audio seg objects, without being exported\n audio_annotation_lst=[]\n chunk_list = [] #contains the corrsponding audio segments in audio seg_list after being exported as .mp3/wav\n ctr=-1\n for i in range(len(list_start_endtime_for_chunks)): #for i th array annotated time intervals of the original file\n for j in range(len(list_start_endtime_for_chunks[i])-1): #for j th time chunk of the the i -th annotated time interval\n start_tm_chunk_in_ms = list_start_endtime_for_chunks[i][j]*1000\n end_tm_chunk_in_ms = list_start_endtime_for_chunks[i][j+1]*1000 #times in millisec.\n audio_seg = audio[start_tm_chunk_in_ms:end_tm_chunk_in_ms]\n audio_seg_lst.append(audio_seg)\n audio_annotation_lst.append(list_annotation_for_chunks[i][j])\n #audio_seg_handle = audio_seg.export(os.path.join(dir_name,\"sound-%s.mp3\" % j), format=\"mp3\")\n ctr = ctr + 1\n audio_seg_handle = audio_seg.export(os.path.join(dir_name, \"sound-%s.wav\" % ctr), format=\"wav\", parameters=[\"-ac\", \"1\"])\n chunk_list.append(os.path.join(dir_name,\"sound-%s.wav\" % ctr))\n\n\n return audio_seg_lst, audio_annotation_lst, chunk_list\n\n\n\n\n\n\n'''\n#TEST the above fn:\nfolder=\"/home/susovan/Documents/music_detection/muspeak_mirex2015/muspeak-mirex2015-detection-examples\"\n#wav_file=\"ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3.wav\"\n#mp3_file=\"ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3\"\nmp3_file=\"ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3\"\nchunk_dur=10\nchunk=chunk_sound(folder,mp3_file,chunk_dur,dir_name=folder)\nprint( \"\\n chunks are \\n\")\n\n'''\n\n'''\n\n#TEST the fn make_time_chunks_from_df_of_start_end_times:\n\nfolder='/home/susovan/Documents/music_detection/muspeak_mirex2015/muspeak-mirex2015-detection-examples'\nfile1='/ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.csv'\ndf=df_et_matrix_one_file(folder,file1)[0]\ndf_nonoverlapping=make_non_time_overlapping_df_from_time_overlapping_df(df)\ntmp2=make_time_chunks_from_df_of_start_end_times(df_nonoverlapping, chunk_dur=10)\nprint( '\\n The df containing segments of chunk_dur and annotations are below \\n' + str(tmp2) )\n\n'''\n\n\n'''\n#TEST make_sound_chunks_from_df_start_endtime_annotation_chunks\n\nfolder='/home/susovan/Documents/music_detection/muspeak_mirex2015/muspeak-mirex2015-detection-examples'\n#file1='ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.csv'\n#file1_mp3= 
'/ConscinciasParalelasN3-OsSentidosOSentirEAsNormasParte318-10-1994.mp3'\n\n\nfile2='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.csv'\nfile2_mp3='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.mp3'\n\n\n#file3='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.csv'\n#file3_mp3='ConscinciasParalelasN7-OsSentidosOSentirEAsNormasParte715-1-1994.mp3'\n\n\ndf=df_et_matrix_one_file(folder,file2)[0]\ndf_nonoverlapping=make_non_time_overlapping_df_from_time_overlapping_df(df)\nprint(\"df_nonoverlapping is \\n\" + str(df_nonoverlapping))\ndf_start_endtime_annotation_chunks= make_time_chunks_from_df_of_start_end_times(df_nonoverlapping, chunk_dur=10)\n\naudio= os.path.join(folder, file2_mp3)\n#audio= folder + file2_mp3\n#audio = AudioSegment.from_file(audio, format=\"mp3\", chennels=1)\n#audio = AudioSegment.from_file(audio, format=\"wav\", chennels=1)\naudio_seg_list=make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio)[0]\naudio_annotation_list=make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio)[1]\nchunk_list=make_sound_chunks_from_df_start_endtime_annotation_chunks(df_start_endtime_annotation_chunks, audio)[2]\nprint( \"\\n df_start_endtime_annotation_chunks is \\n\" + str(df_start_endtime_annotation_chunks) )\nprint( \"\\n audio_seg_list \\n\" + str(audio_seg_list) )\nprint( \"\\n audio_annotation_list \\n\" + str(audio_annotation_list) )\nprint(\"\\n chunk_list is \\n\" + str(chunk_list) )\n\n'''","repo_name":"susovan-batvoice/music_detection","sub_path":"slice_sound.py","file_name":"slice_sound.py","file_ext":"py","file_size_in_byte":9133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40572424771","text":"import json\nimport urllib.request\n\nfrom soynlp.utils import DoublespaceLineCorpus\nfrom soynlp.noun import LRNounExtractor_v2\nfrom soynlp.word import WordExtractor\nfrom soynlp.tokenizer import LTokenizer\nfrom krwordrank.word import KRWordRank\n\n\ndef search_in_naver(search_words, field='blog'):\n valid_fields = [\n 'news',\n 'blog',\n 'shop',\n 'movie',\n 'image',\n 'doc',\n 'book',\n 'cafearticle'\n ]\n\n # If requested field not in valid fields\n if field not in valid_fields:\n return 400, 'Not valid field'\n\n search_words = urllib.parse.quote(search_words)\n request_url = 'https://openapi.naver.com/v1/search/{}?query={}&display=100'.format(field, search_words)\n\n # Read id and secret\n with open('text_analysis/config.json', 'r', encoding='UTF-8') as config_file:\n config = json.loads(config_file.readline())\n id = config['id']\n secret = config['secret']\n\n # Request\n request = urllib.request.Request(request_url)\n request.add_header('X-Naver-Client-Id', id)\n request.add_header('X-Naver-Client-Secret', secret)\n response = urllib.request.urlopen(request)\n\n # Response\n rescode = response.getcode()\n if (rescode == 200):\n response_body = json.loads(response.read().decode('utf-8'))\n return 200, response_body\n else:\n return rescode, 'Error from naver'\n\n\ndef get_tokenizer(sentences):\n word_extractor = WordExtractor()\n\n word_extractor.train(sentences)\n words = word_extractor.extract()\n cohesion_score = {word:score.cohesion_forward for word, score in words.items()}\n\n noun_extractor = LRNounExtractor_v2()\n nouns = noun_extractor.train_extract(sentences) # list of str like\n\n noun_scores = {noun:score.score for noun, score in nouns.items()}\n 
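    # Blend the noun score with soynlp's cohesion score so the LTokenizer
    # prefers splits whose left side is both noun-like and cohesive.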
combined_scores = {noun:score + cohesion_score.get(noun, 0)\n for noun, score in noun_scores.items()}\n\n tokenizer = LTokenizer(scores=combined_scores)\n\n return noun_extractor, nouns, tokenizer\n\n\ndef get_tokenized_nouns(sentences, noun_extractor, nouns, tokenizer):\n tokenized_sentences = []\n for sent in sentences:\n tokenized_sentences.append(tokenizer.tokenize(sent))\n compound_nouns = {}\n for compound_noun, sub_nouns in noun_extractor._compounds_components.items():\n compound_nouns[compound_noun] = list(sub_nouns)\n\n tokenized_nouns = []\n for sent in tokenized_sentences:\n temp_sents = []\n for word in sent:\n if (word in nouns) and (word not in compound_nouns) and (len(word) > 1):\n temp_sents.append(word)\n elif word in compound_nouns:\n temp_sents.extend(compound_nouns[word])\n tokenized_nouns.append(temp_sents)\n\n tokenized_nouns = [nouns for nouns in tokenized_nouns if len(nouns) > 3]\n\n return tokenized_nouns\n\n\ndef get_2gram(sentences):\n ngram_sentences = []\n\n for sentence in sentences:\n ngrams = []\n for idx in range(len(sentence) - 1):\n ngrams.append('{}_{}'.format(sentence[idx], sentence[idx + 1]))\n ngram_sentences.append(ngrams)\n\n return ngram_sentences\n\n\ndef wordrank(words):\n min_count = 3 # 단어의 최소 출현 빈도수 (그래프 생성 시)\n max_length = 12 # 단어의 최대 길이\n wordrank_extractor = KRWordRank(min_count=min_count, max_length=max_length)\n\n beta = 0.8 # PageRank의 decaying factor beta\n max_iter = 50\n keywords, rank, graph = wordrank_extractor.extract(words, beta, max_iter)\n return keywords\n\n\ndef filter_two_gram(keywords, n):\n ret = []\n for keyword, score in keywords.items():\n if keyword[-1] != '_' and keyword.find('_') != -1:\n ret.append((keyword.replace('_', ' '), score))\n return ret[:n]\n\n\ndef filter_one_gram(keywords, n):\n ret = [(keyword, score) for keyword, score in keywords.items()]\n return ret[:n]\n\n\ndef get_keywords(title, sentences, n):\n # n means how many keywords do you want\n # Get nouns and tokenizer\n noun_extractor, nouns, tokenizer = get_tokenizer(sentences)\n\n # Get nouns from title\n keywords_in_title = []\n for noun in nouns.keys():\n if noun in title and len(noun) != 1:\n keywords_in_title.append(noun)\n\n # Get nouns from content\n tokenized_nouns = get_tokenized_nouns(sentences, noun_extractor, nouns, tokenizer)\n one_gram_nouns = [' '.join(nouns) for nouns in tokenized_nouns]\n two_gram_nouns = get_2gram(tokenized_nouns)\n two_gram_nouns = [' '.join(nouns) for nouns in two_gram_nouns]\n\n # Get keywords\n one_gram_keywords = filter_one_gram(wordrank(one_gram_nouns), int(n * 0.3))\n two_gram_keywords = filter_two_gram(wordrank(two_gram_nouns), n - int(n * 0.3))\n keywords_in_content = [keywordInfo[0] for keywordInfo in one_gram_keywords + two_gram_keywords]\n\n return keywords_in_content + keywords_in_title\n","repo_name":"c0510gy/InfoSearcher","sub_path":"backend/infoSearcherServer/text_analysis/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"24912998728","text":"import socket\r\nimport keyboard as kb\r\nimport time\r\n\r\nspecial_keys = {\r\n 'A': 'a', 'B': 'b', 'C': 'c', 'D': 'd', 'E': 'e', 'F': 'f', 'G': 'g', 'H': 'h', 'I': 'i', 'J': 'j',\r\n 'K': 'k', 'L': 'l', 'M': 'm', 'N': 'n', 'O': 'o', 'P': 'p', 'Q': 'q', 'R': 'r', 'S': 's', 'T': 't',\r\n 'U': 'u', 'V': 'v', 'W': 'w', 'X': 'x', 'Y': 'y', 'Z': 'z', '!': '1', '@': '2', '#': '3', '$': '4',\r\n '%': '5', '^': '6', '&': '7', '*': '8', 
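    # Shifted symbols map to their unshifted key names so keyboard.press()/
    # keyboard.release() receive names the keyboard library recognizes.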
'(': '9', ')': '0', '~': '`', '_': '-', '+': '=', '{': '[',\r\n '}': ']', '|': '\\\\', ':': ';', '\"': \"'\", '<': ',', '>': '.', '?': '/'\r\n}\r\n\r\ndef main():\r\n host = \"192.168.23.1\" # Server IP address\r\n port = 12346 # Server port\r\n\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client_socket.connect((host, port))\r\n\r\n print(\"Connected to the server\")\r\n\r\n try:\r\n while True:\r\n data = client_socket.recv(1024)\r\n if not data:\r\n break\r\n else:\r\n is_pressed, key = data.decode().split(\",\")\r\n if is_pressed == \"True\":\r\n if key in special_keys:\r\n kb.press(special_keys[key])\r\n else:\r\n kb.press(key)\r\n\r\n else:\r\n if key in special_keys:\r\n kb.release(special_keys[key])\r\n else:\r\n kb.release(key)\r\n time.sleep(0.01)\r\n\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n client_socket.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"noamkv/pc-remote-controller","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19376959825","text":"# -*- coding: utf-8 -*-\n\nclass Solution:\n \"\"\"Solver for https://oj.leetcode.com/problems/sort-colors/\"\"\"\n # @param A a list of integers\n # @return nothing, sort in place\n def sortColors(self, A):\n \"\"\"The main solver function.\"\"\"\n count = [0 for i in range(3)]\n for value in A:\n count[value] += 1\n A[:] = sum(([key] * value for key, value in enumerate(count)), [])\n return\n","repo_name":"starrify/leetcode","sub_path":"src/sort-colors.py","file_name":"sort-colors.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22671462164","text":"from django.shortcuts import render \r\nfrom dateutil.relativedelta import relativedelta\r\nfrom datetime import date\r\n\r\nfrom setup.models import Charge \r\nfrom mla.models import TransactionAssessment \r\nfrom random import randint\r\nfrom django.contrib import messages\r\nfrom django.http import HttpResponseRedirect\r\n\r\ntoday = date.today()\r\nexpirationdate_1year = today + relativedelta(years=1)\r\n\r\nrange_start = 10**(8-1)\r\nrange_end = (10**8)-1\r\ngenerated_rand_num = randint(range_start, range_end)\r\ntransaction_code = \"ML\"+str(generated_rand_num)\r\n\r\ndef renew_ps(assessment_type, tin_obj, chassis_number, staff_obj):\r\n\t \r\n\t\t# GENERATE BILL\r\n\ttry:\r\n\t\tprint(assessment_type)\r\n\t\t\t\t\t \r\n\r\n\t\t#Get particulars [Vehicle License [1.6 - 2.0]]\r\n\t\tif assessment_type == \"Vehicle License [1.6 - 2.0]\":\r\n\r\n\t\t\tcharge_public_vehicle = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars = 'Vehicle License [1.6 - 2.0]')\r\n\t\t\tparticulars = \"Vehicle License [1.6 - 2.0]\";\r\n\t\t\tamount = charge_public_vehicle.amount\r\n\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t \r\n\t\t#Get particulars [Vehicle License [2.1 - 3.0]]\r\n\t\telif assessment_type == \"Vehicle License [2.1 - 3.0]\":\r\n\r\n\t\t\t\t\t#Check if Particulars have expired:\r\n\t\t\tcharge_vehicle_license_btn2_3 = Charge.objects.get(options = 'Renew', 
vehicle_type='Private Vehicle Saloon', particulars='Vehicle License [2.1 - 3.0]')\r\n\t\t\t\r\n\t\t\tparticulars = \"Vehicle License [2.1 - 3.0]\";\r\n\t\t\tamount = charge_vehicle_license_btn2_3.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\r\n\t\t#Get particulars [Vehicle License [3.1 - Above]]\t\t\t\t \r\n\t\telif assessment_type == \"Vehicle License [3.1 - Above]\":\r\n\t\t\t\r\n\t\t\t\t\t#Check if Particulars have expired:\r\n\t\t\r\n\t\t\tcharge_vehicle_license_above3 = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Vehicle License [3.1 - Above]')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Vehicle License [3.1 - Above]\";\r\n\t\t\tamount = charge_vehicle_license_above3.amount;\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t#Get particulars Certificate of road worthiness\r\n\r\n\t\telif assessment_type == \"Certificate of road worthiness\":\r\n\r\n\t\t\tcharge_certificate_of_road_worthiness = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Certificate of road worthiness')\r\n\r\n\t\t\tparticulars = \"Certificate of road worthiness\";\r\n\t\t\tamount = charge_certificate_of_road_worthiness.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t\r\n\t\t\t\r\n\t\t\t \r\n\t\t#Get particulars Proof of ownership\t\t\t \r\n\t\telif assessment_type == \"Proof of ownership\":\r\n\t\t\t\t \r\n\t\t\t\t \r\n\t\t\tcharge_proof_of_ownership = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Proof of ownership')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Proof of ownership\";\r\n\t\t\tamount = charge_proof_of_ownership.amount \r\n\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, expiration_date = expirationdate_1year, transaction_type = 'Renewal of Particulars', staff =staff_obj) \t\t \r\n\t\t\t\r\n\t\t\t\r\n\r\n\t\t#Get particulars New Plate Number\t\t\t\t \r\n\t\telif assessment_type == \"New Plate Number\":\r\n\t\t\tcharge_new_plate_number = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='New Plate Number')\r\n\t\t\t\r\n\t\t\tparticulars = \"New Plate Number\";\r\n\t\t\tamount = charge_new_plate_number.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\t\t\t\r\n\t\t\t\r\n\t\t\r\n\t\t#Get particulars Registration Book\t\t\t\t \r\n\t\telif assessment_type == \"Registration 
Book\":\r\n\t\t\tcharge_registration_book = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Registration Book')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Registration Book\";\r\n\t\t\tamount = charge_registration_book.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t#Get particulars SMS Alert\r\n\t\telif assessment_type == \"SMS Alert\":\r\n\t\t\tcharge_sms_alert = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='SMS Alert')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"SMS Alert\";\r\n\t\t\tamount = charge_sms_alert.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj)\r\n\t\t\t\r\n\t\t\t \r\n\r\n\t\t#Get particulars Stamp Duty\r\n\t\telif assessment_type == \"Stamp Duty\":\r\n\t\t\tcharge_stamp_duty = Charge.objects.get(options = 'Renew', vehicle_type='Private Vehicle Saloon', particulars='Stamp Duty')\r\n\t\t\t\t\r\n\t\t\tparticulars = \"Stamp Duty\";\r\n\t\t\tamount = charge_stamp_duty.amount\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tTransactionAssessment.objects.create( transaction_code = transaction_code, tin = tin_obj, chassis_number = chassis_number, particulars = particulars, amount = amount, transaction_type = 'Renewal of Particulars', staff =staff_obj) \r\n\r\n\t\t\t\r\n\r\n\t\treturn transaction_code\r\n\r\n\texcept Charge.DoesNotExist as e:\r\n\t\t \r\n\t\treturn \"charge_unavailable\"\r\n\r\n\t\t\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\treturn \"uncaught_exception\"","repo_name":"mawejjehakim/ci","sub_path":"mla/bills/renew_bills/renew_ps.py","file_name":"renew_ps.py","file_ext":"py","file_size_in_byte":6509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73482598992","text":"import functools\r\n\"\"\"def dec(func):\r\n @functools.wraps(func)\r\n def do_twice(name, *args, **kwargs):\r\n # do someting before\r\n func(name)\r\n func(name)\r\n #do somethng after\r\n # return value\r\n return do_twice\r\n\r\n@dec\r\ndef say_hi(name):\r\n print(\"hi {0}\".format(name))\r\n # return value\"\"\"\r\n\r\nimport time\r\n\r\ndef timer(func):\r\n \"\"\"Print the runtime of the decorated function\"\"\"\r\n @functools.wraps(func)\r\n def wrapper_timer(*args, **kwargs):\r\n start_time = time.perf_counter() # 1\r\n value = func(*args, **kwargs)\r\n end_time = time.perf_counter() # 2\r\n run_time = end_time - start_time # 3\r\n print(f\"Finished {func.__name__!r} in {run_time:.4f} secs\")\r\n return value\r\n return wrapper_timer\r\n\r\n@timer\r\ndef waste_some_time(num_times):\r\n for _ in range(num_times):\r\n x = sum([i**2 for i in range(10001)])\r\n return x\r\n\r\nif __name__ == \"__main__\":\r\n #print(say_hi.__name__)\r\n t = waste_some_time(10)\r\n print(t)\r\n print(sum([i**2 for i in range(10001)]))\r\n","repo_name":"zieglershai/edit_songs","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27546449546","text":"from rest_framework.generics import 
CreateAPIView,ListCreateAPIView\nfrom rest_framework.request import Request\nfrom .serializers import RegistroUsuarioSerializer, RegistroSerializer, MostrarFigurasSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .enviar_correos import enviar_correo_validacion\nfrom rest_framework.permissions import AllowAny,IsAuthenticated,IsAdminUser\nfrom .permisos import PermisoPersonalizado, EsAdministrador\nfrom .models import Registro\n\nclass RegistroUsuarioView(CreateAPIView):\n serializer_class = RegistroUsuarioSerializer\n permission_classes = [EsAdministrador]\n\n def post(self, request: Request):\n data = self.serializer_class(data=request.data)\n data.is_valid(raise_exception=True)\n data.save()\n\n print(enviar_correo_validacion(data.data.get('email')))\n\n return Response(data={\n 'message': 'Usuario registrado correctamente',\n 'content': ''\n }, status=status.HTTP_201_CREATED)\n\nclass RegistroFiguritasView(ListCreateAPIView):\n #comentarios\n permission_classes = [PermisoPersonalizado]\n queryset = Registro.objects.all()\n serializer_class = RegistroSerializer\n\n def get(self, request):\n id_usuario = request.user.id\n\n registros = self.get_queryset().filter(usuario = id_usuario).all()\n print(registros)\n\n # utilizar el serializador para convertir los registros a informacion leible\n return Response(data={\n 'message': 'Tu coleccion es: ',\n 'content': MostrarFigurasSerializer(instance=registros, many=True).data\n })\n\n def post(self, request):\n id_usuario = request.user.id\n # agregamos el bodyactual a la llave del usuario\n # usamos el ** para hacer la destructuracion o sacar el contenido del\n # diccionario y agregamos uno nuevo\n data = {**request.data, **{'usuario': id_usuario}}\n print(data)\n registroSerializado = self.serializer_class(data=data)\n\n registroSerializado.is_valid(raise_exception=True)\n nuevoRegistro = registroSerializado.save()\n\n return Response(data={\n 'message': 'Registro creado correctamente',\n 'content': self.serializer_class(instance=nuevoRegistro).data\n })","repo_name":"SalvadorCT/Album_Mundial","sub_path":"gestion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19568824069","text":"#first class\nclass Employee:\n __num = 4\n no_of_leaves = 10\n def __init__(self, name, profession, salary) -> None:\n self.name = name\n self.profession = profession\n self.salary = salary\n\n def printDetails(self):\n print (f\"The name of the person is {self.name} and his profession is {self.profession} and salary is {self.salary}\")\n\n @classmethod\n def change_leaves(cls, new_leaves):\n cls.no_of_leaves = new_leaves\n\n @staticmethod\n def greet(name):\n print(\"Hello {0}\".format(name))\n\nrohan = Employee('Rohan','Athletics', '400')\nprint(rohan._Employee__num) #Name mangling\n\n#Second class\nclass player:\n def __init__(self, sport) -> None:\n self.sport = sport\n \n def printDetails(self):\n return f\"The person plays {self.sport}\"\n \n\n#Concept of inheritance\n#Single inheritance\n\nclass Programmer(Employee):\n def __init__(self, name, company, working_hours) -> None:\n self.name = name\n self.company = company\n self.working_hours = working_hours\n print(f\"The name of the programmer is {self.name}, the name of his company is {self.company} and his working hours are {self.working_hours}\")\n\n @staticmethod\n def working_project(name_of_project):\n return \"The name of 
the current working project is {0} \".format(name_of_project)\n \n\nharjot = Programmer(\"Harjot\", \"Google\", 8)\nharjot.working_project('Chrome')\n\n\n#Multiple inheritance\nclass gamer(player, Programmer, Employee):\n pass\n\njunaid = gamer(\"football\")\nprint(junaid.printDetails())\n\nprint(rohan.working_project(\"Azure\"))\nprint(rohan.no_of_leaves)\n\n\n#Multilevel inheritance\n#import exercise\n\n#or \n\nclass ElectronicDevice:\n use = \"used in offices\"\n processing = 1\n battery_capacity = '10,000' + ' mah'\n def __init__(self, name, no_of_cores, no_of_threads, company) -> None:\n self.name = name\n self.no_of_cores = no_of_cores\n self.no_of_threads =no_of_threads\n self.company = company\n\n def printDetails(self):\n print(f\"The name of the electronic device is {self.name} and the number of cores and threads are {self.no_of_cores} and {self.no_of_threads} respectively and the name of the company is {self.company}\")\n\nDesktop = ElectronicDevice(\"DesktopPc\", 8, 16, \"Asus\")\nDesktop.printDetails()\n \nclass PocketGadget(ElectronicDevice):\n use = \"used at home\"\n battery_capacity = '8000' + ' mah'\n def __init__(self, name, company) -> None:\n self.name = name \n self.company = company\n\n def printDetails(self):\n print(f\"The name of the device is {self.name} and the name of the company is {self.company}\")\n \n\nipad = PocketGadget(\"Ipad\", \"Apple\")\nipad.printDetails()\nprint(ipad.processing)\nprint(ipad.battery_capacity)\n\n\nclass phone(PocketGadget):\n battery_capacity = '5000' + ' mah'\n \nPhone = phone(\"Mob_phone\", \"Mi\")\nPhone.printDetails()\nprint(Phone.use)\nprint(phone.processing)\n\n \n\n \n","repo_name":"Harjot01/Python","sub_path":"OOPS/Types of inheritance.py","file_name":"Types of inheritance.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38782544501","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport sys\nfrom datetime import datetime\nimport json\n\nfrom .response import Response\n\n\n# Level 1\nclass Project(Response):\n def __init__(self, token=None, id=None, title=None, **kwargs):\n Response.__init__(self, token, **kwargs)\n self.title = title\n self.id = id\n # self.description = description\n # self.milestones = milestones\n self.endpoint = '/api/v1/projects.json'\n self.specific_endpoint = '/api/v1/projects/{id}.json'\n\n def register(self):\n response = self._add(endpoint=self.endpoint, item=self.to_dict())\n return self.__class__(token=self.token, **response)\n\n def get(self):\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='GET')\n return self.__class__(token=self.token, **response)\n\n def list(self, name=None, page_num=None):\n response = self.find(endpoint=self.endpoint, name=name, page_num=page_num)\n if isinstance(response, list):\n return [self.__class__(token=self.token, **item) for item in response]\n else:\n return []\n\n def update(self):\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='PUT', item=self.to_dict())\n return self.__class__(token=self.token, **response)\n\n\n# Level 2\nclass Folder(Project):\n def __init__(self, token=None, id=None, title=None, project_id=None, **kwargs):\n Project.__init__(self, token, id, title, **kwargs)\n self.project_id = project_id\n self.endpoint = '/api/v1/milestones.json'\n self.specific_endpoint = '/api/v1/milestones/{id}.json'\n\n def __get_folders(self, period=None):\n 
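        # Query the milestones endpoint for this project, optionally filtered
        # by a period keyword (current/future/last milestones).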
response = self.find(endpoint=self.endpoint, project_id=self.id, period=period)\n if isinstance(response, list) and len(response) > 0:\n return [Folder(project_id=self.id, token=self.token, **item) for item in response]\n else:\n return []\n\n def get_current_folders(self):\n return self.__get_folders(period='current_milestones')\n\n def get_future_folders(self):\n return self.__get_folders(period='future_milestones')\n\n def get_past_folders(self):\n return self.__get_folders(period='last_milestones')\n\n\n# Level 2.1\nclass Experiment(Project):\n def __init__(self, token=None, project_id=None, milestone_id=None, id=None, title=None, **kwargs):\n Project.__init__(self, token, id, title, **kwargs)\n self.project_id = project_id\n self.milestone_id = milestone_id\n self.endpoint = '/api/v1/experiments.json'\n self.specific_endpoint = '/api/v1/experiments/{id}.json'\n\n\n# Level 2.2\nclass Procedure(Project):\n def __init__(self, token=None, container_id=None, id=None, name=None, section_type=None, container_type=None,\n **kwargs):\n Project.__init__(self, token, id, **kwargs)\n self.container_id = container_id\n self.name = name\n self.section_type = section_type\n self.container_type = container_type\n self.endpoint = '/api/v1/sections.json'\n self.specific_endpoint = '/api/v1/sections/{id}.json'\n\n\n# Level 2.3\nclass Element(Project):\n def __init__(self, token=None, container_id=None, id=None, data=None, element_type=None, container_type=None,\n experiment_id=None, **kwargs):\n Project.__init__(self, token, id, **kwargs)\n self.experiment_id = experiment_id\n self.container_id = container_id\n self.data = data\n self.element_type = element_type\n self.container_type = container_type\n self.endpoint = '/api/v1/elements.json'\n self.specific_endpoint = '/api/v1/elements/{id}.json'\n self.specific_endpoint_type = '/api/v1/experiments/{id}/elements.json'\n self.update_stock_amount_endpoint = '/api/v1/stocks/{id}/update_stock_amount'\n self.add_attachment_endpoint = '/api/v1/attachments/{id}'\n\n def update_element(self):\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='PUT', element=self.to_dict())\n return self.__class__(token=self.token, **response)\n\n def list_by_type(self):\n response = self._get_or_update(endpoint=self.specific_endpoint_type, id=self.experiment_id,\n element_type=self.element_type)\n if isinstance(response, list):\n return [self.__class__(token=self.token, **item) for item in response]\n else:\n return []\n\n def get_data(self):\n if self.element_type == 'form':\n return json.loads(self.description).get('form_json')\n\n elif self.element_type == 'samples':\n return json.loads(self.data).get('samples')\n\n elif self.element_type == 'plate':\n return json.loads(self.data).get('wells')\n\n else:\n return self.data\n\n\n def update_stock_amount(self, sample_id, stock_id, amount_used, unit_type, unit_type_name):\n if self.element_type == 'samples':\n response = self._get_or_update(endpoint=self.update_stock_amount_endpoint,\n id=stock_id,\n amount_used=amount_used,\n unit_type=unit_type,\n unit_type_name=unit_type_name,\n element_id=self.id,\n sample_id=sample_id,\n subtract='true',\n method='POST')\n return self.__class__(token=self.token, **response)\n else:\n return []\n\n def add_step(self, txt='', hours='00', minutes='00', seconds='00', completed_by=''):\n if self.element_type == 'steps':\n step = {\n \"title\": '
    ' + txt + '
    ',\n \"timer\": {\n \"hours\": hours,\n \"minutes\": minutes,\n \"seconds\": seconds\n },\n \"completed\": True,\n \"completed_by\": completed_by,\n \"completed_at\": datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n }\n\n steps = json.loads(self.data)\n steps.append(step)\n data = json.dumps(steps)\n response = self._get_or_update(endpoint=self.specific_endpoint, id=self.id, method='PUT', data=data)\n return self.__class__(token=self.token, **response)\n else:\n return []\n\n def add_attachment(self, attachment_id):\n if self.element_type == 'attachments':\n response = self._get_or_update(endpoint=self.add_attachment_endpoint,\n id=attachment_id,\n item={'element_id': self.id},\n method='PUT')\n return self.__class__(token=self.token, **response)\n else:\n return []\n","repo_name":"BioData/LabguruPython","sub_path":"labguru/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"83"} +{"seq_id":"11260560085","text":"import math\n\n\ndef divisor_num(n):\n # n==1이면 약수가 1개이므로 1을 return 후 종료\n if n == 1:\n return 1\n\n # 아니라면 1과 자기 자신을 먼저 약수 개수로 추가\n res = 2\n for i in range(2, int(math.sqrt(n) + 1)):\n # 약수 일 때\n if n % i == 0:\n # 제곱수인 경우 약수 1개만 추가\n if (n // i) == i:\n res += 1\n # 나머지는 2개 추가\n else:\n res += 2\n return res\n\n\ndef solution(left, right):\n answer = 0\n\n for i in range(left, right + 1):\n # 약수의 개수가 짝수일 때는 더하기\n if divisor_num(i) % 2 == 0:\n answer += i\n # 약수의 개수가 홀수일 때는 빼기\n else:\n answer -= i\n\n return answer\n\n\n\"\"\"\n정답 확인용 테스트케이스\nprint((solution(13, 17))) #43\nprint((solution(24, 27))) #52\nprint((solution(1, 1))) #-1\n\"\"\"","repo_name":"sbtiffanykim/problem-solving","sub_path":"programmers/level1/77884_약수의개수와덧셈.py","file_name":"77884_약수의개수와덧셈.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42433165025","text":"from util import *\n\n\n@apply\ndef apply(eq_max, eq_min, Q, K, V): \n ((((i, l), d), S[i - l + 1]), i_limit), β = eq_max.of(Equal[Lamda[Max[Mod[Expr + 1 - Expr]]]])\n S[i], S[0], n = i_limit\n\n (((S[i], u), S[n]), S[i_limit]), ζ = eq_min.of(Equal[Lamda[Min[Add]]])\n \n S[n], d_z = Q.shape\n\n indices = slice(β[i], ζ[i], d)\n\n return Equal(softmax(Q @ K.T / sqrt(d_z) + (BandPart[l - 1, u - 1, d](OneMatrix(n, n)) - 1) * oo) @ V, Lamda[i:n](softmax(Q[i] @ (K[indices]).T / sqrt(d_z)) @ (V[indices])))\n\n\n@prove\ndef prove(Eq):\n from axiom import keras\n\n n, l, u, d_z, d = Symbol(integer=True, positive=True)\n i = Symbol(integer=True)\n Q = Symbol(real=True, shape=(n, d_z))\n K = Symbol(real=True, shape=(n, d_z))\n V = Symbol(real=True, shape=(n, d_z))\n β, ζ = Symbol(shape=(n,), integer=True)\n (Eq.beta, Eq.zeta), Eq.objective = apply(Equal(β, Lamda[i:n](Max(i - l + 1, (i - l + 1) % d))), Equal(ζ, Lamda[i:n](Min(i + u, n))), Q, K, V)\n\n A = Symbol(Eq.objective.find(Mul[MatMul]))\n Eq << keras.eq_max.eq_min.imply.eq.matmul.softmax.band_part_mask.dilated.apply(Eq.beta, Eq.zeta, A, V)\n\n Eq << Eq[-1].subs(A.this.definition)\n\n \n \n\n\nif __name__ == '__main__':\n run()\n# created on 2022-01-01\n# updated on 2022-03-30\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/keras/eq_max/eq_min/imply/eq/matmul/softmax/band_part_mask/dilated/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} 
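The divisor-count record above (seq_id 11260560085) works because divisors pair up as (i, n // i), and the pair collapses only when n is a perfect square, so perfect squares are the only integers with an odd number of divisors. A minimal sketch of the same solution exploiting that property directly; this is an illustrative simplification, not part of the dataset, and math.isqrt assumes Python 3.8+:

import math

def solution(left, right):
    # Only perfect squares have an odd divisor count, so subtract them
    # and add every other number in the range.
    total = 0
    for n in range(left, right + 1):
        if math.isqrt(n) ** 2 == n:
            total -= n  # odd number of divisors
        else:
            total += n  # even number of divisors
    return total

# The record's own test cases:
assert solution(13, 17) == 43
assert solution(24, 27) == 52
assert solution(1, 1) == -1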
+{"seq_id":"2823431501","text":"def longestPalindrome0(s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n l = 0\n r = len(s) - 1\n res_left = \"\"\n res_right = \"\"\n res_temp = \"\"\n res = \"\"\n max_len = 0\n while l < r:\n while s[l] != s[r]:\n r -= 1\n while s[l] == s[r]:\n res_left = s[l] + res_left\n res_right = res_right + s[r]\n res_temp = res_left + res_right\n if len(res_temp) > max_len:\n max_len = len(res_temp)\n res = res_temp\n l += 1\n r -= 1\n l += 1\n r = len(s) - 1\n return res\n\ndef longestPalindrome(s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n res = \"\"\n max_len = 0\n for i in range(len(s)):\n # odd length palindromes\n l, r = i, i\n while l >= 0 and r < len(s) and s[l] == s[r]:\n if (r - l + 1) > max_len:\n max_len = r - l + 1\n res = s[l:r+1]\n l -= 1\n r += 1\n # even length palindromes\n l, r = i, i + 1\n while l >= 0 and r < len(s) and s[l] == s[r]:\n if (r - l + 1) > max_len:\n max_len = r - l + 1\n res = s[l:r+1]\n l -= 1\n r += 1\n return res\n\ndef main():\n s = \"babad\"\n print(longestPalindrome(s))\n\nif __name__ == '__main__':\n main()","repo_name":"vincentlinzhu/My-LeetCode-Solutions","sub_path":"Medium/P5_longest_palindromic_substring.py","file_name":"P5_longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29696752160","text":"import math\r\nimport pygame\r\n\r\n# Initialize Pygame\r\npygame.init()\r\n\r\n# Set the screen dimensions\r\nscreen_width = 800\r\nscreen_height = 600\r\n\r\n# Create the screen\r\nscreen = pygame.display.set_mode((screen_width, screen_height))\r\n\r\n# Set the title of the screen\r\npygame.display.set_caption(\"Two-Link Robot Simulation\")\r\n\r\n# Define the colors\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\ngreen = (0, 255, 0)\r\nblue = (0, 0, 255)\r\n\r\n# Define the link lengths\r\nl1 = 190.0\r\nl2 = 130.0\r\n\r\n# Define the initial angles of the two links\r\nhomepos1=-45.0\r\nhomepos2=-160.0\r\n\r\ntheta1 = homepos1*math.pi/180\r\ntheta2 = homepos2*math.pi/180\r\n\r\nitheta1limitl = -1.0\r\nitheta1limith = 160.0\r\nitheta2limitl = -1.0\r\nitheta2limith = 160.0\r\n\r\ntheta1limitl = math.pi*itheta1limitl/180\r\ntheta1limith = math.pi*itheta1limith/180\r\ntheta2limitl = math.pi*itheta2limitl/180\r\ntheta2limith = math.pi*itheta2limith/180\r\n\r\n\r\n# Define the position of the base of the robot\r\nx0 = screen_width / 2\r\ny0 = screen_height / 2\r\n\r\n# Define the position of the end-effector of the robot\r\nx_end = x0 + l1 * math.cos(theta1) + l2 * math.cos(theta1 + theta2)\r\ny_end = y0 + l1 * math.sin(theta1) + l2 * math.sin(theta1 + theta2)\r\n\r\n# Define the speed of the robot\r\nspeed = 4.9234123490871293874\r\n\r\n# Define the font for displaying the angles\r\nfont = pygame.font.SysFont(None, 25)\r\n\r\n# Define the clock\r\nclock = pygame.time.Clock()\r\n\r\n# Define the main loop of the program\r\n# Define the main loop of the program\r\nwhile True:\r\n # Clear the screen\r\n screen.fill(white)\r\n\r\n # Draw the base of the robot\r\n pygame.draw.circle(screen, black, (int(x0), int(y0)), 10)\r\n\r\n # Draw the first link of the robot\r\n pygame.draw.line(screen, black, (int(x0), int(y0)), (int(x0 + l1 * math.cos(theta1)), int(y0 + l1 * math.sin(theta1))), 5)\r\n\r\n # Draw the second link of the robot\r\n pygame.draw.line(screen, black, (int(x0 + l1 * math.cos(theta1)), int(y0 + l1 * math.sin(theta1))), (int(x_end), int(y_end)), 5)\r\n\r\n # 
Draw the end-effector of the robot\r\n pygame.draw.circle(screen, blue, (int(x_end), int(y_end)), 10)\r\n\r\n # Display the angles of the two links\r\n theta1_text = font.render(\"Theta1: {:.2f}\".format(math.degrees(theta1)), True, black)\r\n theta2_text = font.render(\"Theta2: {:.2f}\".format(math.degrees(theta2)), True, black)\r\n screen.blit(theta1_text, (10, 10))\r\n screen.blit(theta2_text, (10, 40))\r\n\r\n # Handle events\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n quit()\r\n\r\n # Update the angles of the two links based on the pressed keys\r\n keys = pygame.key.get_pressed()\r\n\r\n if keys[pygame.K_LEFT] and keys[pygame.K_UP]:\r\n theta1 -= math.radians(speed)\r\n theta2 -= math.radians(speed)\r\n if theta1 < -theta1limith:\r\n theta1 = -theta1limith\r\n if theta2 < -theta2limith:\r\n theta2 = -theta2limith\r\n elif keys[pygame.K_LEFT] and keys[pygame.K_DOWN]:\r\n theta1 -= math.radians(speed)\r\n theta2 += math.radians(speed)\r\n if theta1 < -theta1limith:\r\n theta1 = -theta1limith\r\n if theta2 > theta2limitl:\r\n theta2 = theta2limitl\r\n elif keys[pygame.K_RIGHT] and keys[pygame.K_UP]:\r\n theta1 += math.radians(speed)\r\n theta2 -= math.radians(speed)\r\n if theta1 > theta1limitl:\r\n theta1 = theta1limitl\r\n if theta2 < -theta2limith:\r\n theta2 = -theta2limith\r\n elif keys[pygame.K_RIGHT] and keys[pygame.K_DOWN]:\r\n theta1 += math.radians(speed)\r\n theta2 += math.radians(speed)\r\n if theta1 > theta1limitl:\r\n theta1 = theta1limitl\r\n if theta2 > theta2limitl:\r\n theta2 = theta2limitl\r\n elif keys[pygame.K_LEFT] or keys[pygame.K_RIGHT]:\r\n if keys[pygame.K_LEFT]:\r\n theta1 -= math.radians(speed)\r\n else:\r\n theta1 += math.radians(speed)\r\n if theta1 < -theta1limith:\r\n theta1 = -theta1limith\r\n elif theta1 > theta1limitl:\r\n theta1 = theta1limitl\r\n elif keys[pygame.K_UP] or keys[pygame.K_DOWN]:\r\n if keys[pygame.K_UP]:\r\n theta2 -= math.radians(speed)\r\n else:\r\n theta2 += math.radians(speed)\r\n if theta2 < -theta2limith:\r\n theta2 = -theta2limith\r\n elif theta2 > theta2limitl:\r\n theta2 = theta2limitl\r\n\r\n # Update the position of the end\r\n x_end = x0 + l1 * math.cos(theta1) + l2 * math.cos(theta1 + theta2)\r\n y_end = y0 + l1 * math.sin(theta1) + l2 * math.sin(theta1 + theta2)\r\n # Update the screen\r\n pygame.display.update()\r\n\r\n # Set the FPS\r\n clock.tick(60)\r\n\r\npygame.quit()\r\n\r\n","repo_name":"tykrus01/TARS","sub_path":"SCARA_anglecontrol_arm.py","file_name":"SCARA_anglecontrol_arm.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41061978031","text":"#!/usr/bin/env python\n\n__author__ = \"WhyKiki\"\n__version__ = \"1.0.0\"\n\n\n###### Assign field indices\nx = 0\nnotX = 1\ny = 2\nnotY = 3\nx_y = 4\nx_notY = 5\nnotX_y = 6\nnotX_notY = 7\ny_x = 8\ny_notX = 9\nnotY_x = 10\nnotY_notX = 11\n\n\ndef totalProbability(items, ind1, ind2, ind3, ind4, ind5):\n \"\"\"Calculate P(A) = P(A|B)*P(B) + P(A|notB)*P(notB)\"\"\"\n if str(items[ind1].value) == \"\" and \\\n str(items[ind2].value) != \"\" and \\\n str(items[ind3].value) != \"\" and \\\n str(items[ind4].value) != \"\" and \\\n str(items[ind5].value) != \"\":\n result = float(items[ind2].value) * float(items[ind3].value) \\\n + float(items[ind4].value) * float(items[ind5].value)\n items[ind1].value = str(round(result, 
3))\n\n\ndef singleValsProba(items, ind1, ind2):\n \"\"\"Calculate P(A) = 1 - P(notA).\n If both, P(A) and P(notA) are not given, call function singleVals.\"\"\"\n if str(items[ind1].value) != \"\" and str(items[ind2].value) == \"\":\n items[ind2].value = str(round(1 - float(items[ind1].value), 3))\n elif str(items[ind1].value) == \"\" and str(items[ind2].value) != \"\":\n items[ind1].value = str(round(1 - float(items[ind2].value), 3))\n else:\n if ind1 == 0:\n totalProbability(items, ind1, 4, 2, 5, 3)\n totalProbability(items, ind2, 6, 2, 7, 3)\n elif ind1 == 2:\n totalProbability(items, ind1, 8, 0, 9, 1)\n totalProbability(items, ind2, 10, 0, 11, 1)\n\n\ndef b_of_a_first(items, ind1, ind2, ind3, ind4):\n \"\"\"Calculate P(B|A) = (P(A|B) * P(B)) / P(A) \"\"\"\n if str(items[ind1].value) == \"\" and \\\n str(items[ind2].value) != \"\" and \\\n str(items[ind3].value) != \"\" and \\\n str(items[ind4].value) != \"\":\n result = float(items[ind2].value) * float(items[ind3].value) / float(items[ind4].value)\n items[ind1].value = str(round(result, 3))\n\n\ndef b_of_a_second(items, ind1, ind2, ind3, ind4, ind5):\n \"\"\"Calculate P(B|A) = ( P(B) - P(B|notA) * P(notA) ) / P(A) \"\"\"\n if str(items[ind1].value) == \"\" and \\\n str(items[ind2].value) != \"\" and \\\n str(items[ind3].value) != \"\" and \\\n str(items[ind4].value) != \"\" and \\\n str(items[ind5].value) != \"\":\n result = (float(items[ind2].value) - float(items[ind3].value) \\\n * float(items[ind4].value)) / float(items[ind5].value)\n items[ind1].value = str(round(result, 3))\n\n\ndef condProbs(items):\n \"\"\"Calculate P(A|B) and P(B|A).\"\"\"\n\n ### P(X|Y)\n if str(items[y_x].value) != \"\":\n b_of_a_first(items, x_y, y_x, x, y)\n elif str(items[x_notY].value) != \"\":\n b_of_a_second(items, x_y, x, x_notY, notY, y)\n\n ### P(X|notY)\n if str(items[notY_x].value) != \"\":\n b_of_a_first(items, x_notY, notY_x, x, notY)\n elif str(items[x_y].value) != \"\":\n b_of_a_second(items, x_notY, x, x_y, y, notY)\n\n ### P(notX|Y)\n if str(items[y_notX].value) != \"\":\n b_of_a_first(items, notX_y, y_notX, notX, y)\n elif str(items[notX_notY].value) != \"\":\n b_of_a_second(items, notX_y, notX, notX_notY, notY, y)\n\n ### P(notX|notY)\n if str(items[notY_notX].value) != \"\":\n b_of_a_first(items, notX_notY, notY_notX, notX, notY)\n elif str(items[notX_y].value) != \"\":\n b_of_a_second(items, notX_notY, notX, notX_y, y, notY)\n\n ### P(Y|X)\n if str(items[x_y].value) != \"\":\n b_of_a_first(items, y_x, x_y, y, x)\n elif str(items[y_notX].value) != \"\":\n b_of_a_second(items, y_x, y, y_notX, notX, x)\n\n ### P(Y|notX)\n if str(items[notX_y].value) != \"\":\n b_of_a_first(items, y_notX, notX_y, y, notX)\n elif str(items[y_x].value) != \"\":\n b_of_a_second(items, y_notX, y, y_x, x, notX)\n\n ### P(notY|X)\n if str(items[x_notY].value) != \"\":\n b_of_a_first(items, notY_x, x_notY, notY, x)\n elif str(items[notY_notX].value) != \"\":\n b_of_a_second(items, notY_x, notY, notY_notX, notX, x)\n\n ### P(notY|notX)\n if str(items[notX_notY].value) != \"\":\n b_of_a_first(items, notY_notX, notX_notY, notY, notX)\n elif str(items[notY_x].value) != \"\":\n b_of_a_second(items, notY_notX, notY, notY_x, x, notX)\n\n\ndef fillFields(items):\n singleValsProba(items, x, notX)\n singleValsProba(items, y, notY)\n condProbs(items)\n\n\ndef calc_all(items):\n for i in range(7):\n 
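        # A fixed number of passes lets probabilities computed late in one
        # iteration feed the fields that depend on them on the next.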
fillFields(items)","repo_name":"Regenplatz/DataScience","sub_path":"Calculators/ConditionalProbability/ConditionalProbability.py","file_name":"ConditionalProbability.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22930619289","text":"__author__ = \"Matt Davis\"\n__email__ = \"matthew1.davis@intel.com\"\n__description__ = \"This script loads from the CWB Odata feed to the GSMDW database on sql1717-fm1-in.amr.corp.intel.com,3181\"\n__schedule__ = \"Daily (excluding Sunday because the Weekly Loader runs then) at 3:30 AM PST\"\n\nimport os\nimport sys; sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) # add current file's parent directory to path\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom time import time\nfrom Helper_Functions import queryAPIPortal, uploadDFtoSQL, executeStoredProcedure, getLastRefresh\nfrom Logging import log\nfrom CQN_ILM_API_Weekly import prepMQI\n\n# remove the current file's parent directory from sys.path since it was only needed for imports above\ntry:\n sys.path.remove(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\nexcept ValueError: # Already removed\n pass\n\n\nif __name__ == \"__main__\":\n start_time = time()\n\n ### BEGIN ILM MQI section ###\n # initialize variables\n project_name = 'ILM MQI API Daily Script'\n data_area = 'ILM MQI'\n\n last_load = getLastRefresh(project_name=project_name, data_area=data_area)\n if last_load is None:\n temp = datetime.now() - timedelta(hours=8)\n else:\n temp = datetime.strftime(last_load, '%Y-%m-%dT%H:%M:%S')\n last_load = pd.Timestamp(temp).replace(minute=00, second=00)\n\n row_count = queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-issues?$select=EventId&$filter=\\\"ModifiedDate\\\">='{}'&$format=JSON\".format(last_load)).shape[0]\n\n # Get data from API\n ### IMPORTANT - the same API call will not return the rows in the same order by default, ORDERBY must be used to appropriately get all rows\n df_mat_issue = queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-issues?$filter=\\\"ModifiedDate\\\">='{}'&$orderby=EventId&$format=JSON\".format(last_load))\n print('Loaded {} records from the API into DataFrame'.format(df_mat_issue.shape[0]))\n\n # Transform data\n df = prepMQI(df_mat_issue)\n print('Data prep completed!')\n\n # Load data into SQL Server database\n insert_succeeded, error_msg = uploadDFtoSQL(table=\"stage.stg_API_ILM_MQI\", data=df, chunk_size=500, truncate=True)\n log(insert_succeeded, project_name=project_name, data_area=data_area, row_count=df.shape[0], error_msg=error_msg)\n\n # Execute stored procedure for mqi.speedStg table\n sp_succeeded, error_msg = executeStoredProcedure('mqi.sp_API_ILM_MQI_Merge')\n log(sp_succeeded, project_name=project_name, package_name=\"SQL: mqi.sp_API_ILM_MQI_Merge\", data_area=data_area, error_msg=error_msg)\n ### END ILM MQI section ###\n\n ### BEGIN Root cause table\n project_name = 'ILM MQI API Daily -Root Cause'\n data_area = 'RootCauseDetails' \n row_count = queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-root-causes?$select=EventId&$format=JSON\").shape[0]\n df_root = pd.DataFrame()\n\n # Get data from API\n for i in range(0, row_count, 2000):\n # print(i)\n temp = 
queryAPIPortal(url=\"https://apis-internal.intel.com/ilm/mqi/v1/material-root-causes?$start_index={}&$count=1000&$orderby=EventId&$format=JSON\".format(i))\n if i == 0:\n df_root = temp\n else:\n df_root = pd.concat([df_root, temp], ignore_index=True)\n print('Loaded {} records from the API into DataFrame'.format(df_root.shape[0]))\n\n # Transform data in DataFrame\n df_root = df_root[['EventId', 'Category', 'Details', 'KeyFailure', 'Systemic', 'PrimaryIndicator', 'ModifiedBy', 'ModifiedDate']]\n df_root['ModifiedDate'] = df_root['ModifiedDate'].apply(lambda x: x if isinstance(x, datetime) else datetime.strptime(x.split(\".\")[0], '%Y-%m-%dT%H:%M:%S') if isinstance(x, str) else None)\n\n # Load data into SQL Server database\n insert_succeeded, error_msg = uploadDFtoSQL(table=\"stage.stg_API_ILM_MQI_RootCauseDetails\", data=df_root, truncate=True)\n log(insert_succeeded, project_name=project_name, data_area=data_area, row_count=df_root.shape[0], error_msg=error_msg) # row_count is automatically set to 0 if error\n\n # Execute Stored Procedure\n sp_succeeded, error_msg = executeStoredProcedure('mqi.sp_API_ILM_MQI_RootCauseDetails')\n log(sp_succeeded, project_name=project_name, package_name=\"SQL: mqi.sp_API_ILM_MQI_RootCauseDetails\", data_area=data_area, error_msg=error_msg)\n\n print(\"--- %s seconds ---\" % (time() - start_time))\n","repo_name":"abhishekagnihotri-dataanalytics/python-intel-work","sub_path":"CQN_ILM_API_Daily.py","file_name":"CQN_ILM_API_Daily.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27294081742","text":"import sys\nimport csv\n\nclass CSVHandler:\n\n def __read_current_csv__(self):\n with open(r'C:\\Users\\karol\\Desktop\\Python_zajecia\\csv_to_read.csv', 'r', newline='') as edited_csv:\n\n csv_file = csv.reader(edited_csv, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n rows_num = 0\n for row in csv_file:\n print(\"---------------------------------------------------------------------------------------\")\n if row:\n print(f'Record ID: {row[0]}\\nName: {row[1]}\\nSurname: {row[2]}\\nTask name: {row[3]}\\nDeadline: {row[4]}\\n')\n rows_num = rows_num + 1\n return csv_file, rows_num\n\n def __get_user_input__(self, len_data):\n new_student_data = []\n new_student_data.append(len_data+1)\n print('---------------------------------------------------------')\n student_parameters = [\"Enter a name of a student: \", \"Enter a surname of student: \", \"Enter student task name: \", \"Enter student deadline (in format DD.MM.RRRR): \"]\n for param in student_parameters:\n in_user = input(param)\n new_student_data.append(in_user)\n if param == \"Enter student deadline (in format DD.MM.RRRR): \" and in_user.isalpha():\n raise Exception(\"Entered incorrect data (in deadline)!\")\n print('--------------------------------------------------------')\n print(\"New student data\")\n print(f'Name: {new_student_data[1]}\\nSurname: {new_student_data[2]}\\nTask name: {new_student_data[3]}\\nDeadline: {new_student_data[4]}\\n')\n new_student_data[0] = str(new_student_data[0])\n # print(new_student_data)\n return new_student_data\n\n def __new_record_to_csv__(self, data):\n with open(r'C:\\Users\\karol\\Desktop\\Python_zajecia\\csv_to_read.csv', 'a', newline='') as edited_csv:\n csv_file = csv.writer(edited_csv, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_file.writerow(data)\n\n def __remove_student__(self):\n csv_file, len_data = 
self.__read_current_csv__()\n remove_id = input(\"Chose student by ID to remove: \")\n if not remove_id.isdigit() or int(remove_id) > len_data:\n raise Exception(\"Incorrect ID!\")\n i = 0\n new_csv_data = []\n with open(r'C:\\Users\\karol\\Desktop\\Python_zajecia\\csv_to_read.csv', 'r', newline='') as edited_csv:\n csv_file = csv.reader(edited_csv, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for row in csv_file:\n i = i + 1\n if i == int(remove_id):\n # print(row)\n print(f\"Data about student with name: {row[1]} will be removed\")\n del(row)\n continue\n elif i < int(remove_id):\n # print(row)\n new_csv_data.append(row)\n elif i > int(remove_id):\n if row:\n row[0] = str(int(row[0]) - 1)\n new_csv_data.append(row)\n #print(new_csv_data)\n with open(r'C:\\Users\\karol\\Desktop\\Python_zajecia\\csv_to_read.csv', 'w', newline='') as edited_csv:\n csv_file_write = csv.writer(edited_csv, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_file_write.writerows(new_csv_data)\n\n\nif __name__ == \"__main__\":\n csv_file_handler = CSVHandler()\n while True:\n print(\"Choose what you want to do: \")\n print(\"1 - show records\\n 2 - add new record\\n 3 - remove record\\n 4 - end program\")\n option = input(\"Chosen option: \")\n\n if option == '1':\n csv_edited_file, len_data = csv_file_handler.__read_current_csv__()\n elif option == '2':\n csv_edited_file, len_data = csv_file_handler.__read_current_csv__()\n new_student_data = csv_file_handler.__get_user_input__(len_data)\n csv_file_handler.__new_record_to_csv__(new_student_data)\n elif option == '3':\n csv_file_handler.__remove_student__()\n elif option == '4':\n print(\"Ending program...\")\n break\n else:\n print('Incorrect option was chosen! Try again')\n","repo_name":"diamondproblem/Python","sub_path":"working_with_data/task_18_csv.py","file_name":"task_18_csv.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11335256006","text":"import csv\nimport psycopg2\n\nconnection = psycopg2.connect(\"host=localhost user=postgres dbname=homes\")\ncur = connection.cursor()\n\nrecord = {}\n\n# with open(\"homes.csv\", mode=\"r\") as csv_file:\n# csv_reader = csv.DictReader(csv_file)\n# for row in csv_reader:\n# if record.get(row[' \"List\"'] or row[' \"Living\"'].replace(\" \", \"\")) == None:\n# record[row[' \"List\"'] or row[' \"Living\"'].replace(\" \", \"\")] = 0\n# record[row[' \"List\"'] or row[' \"Living\"'].replace(\" \",\"\")] +=1\n\n# with open(\"homes2.csv\", mode=\"w\") as csv_file:\n# fieldnames = [\"Sell\",\"List\",\"Living\",\"Rooms\",\"Beds\",\"Baths\",\"Age\",\"Acres\",\"Taxes\"]\n# csv_writer = csv.DictWriter(csv_file, fieldnames)\n\n# csv_writer.writeheader()\n# for Sell,List,Living,Rooms,Beds,Baths,Age,Acres,Taxes in record.items():\n# csv_writer.writerow({\"Sell\": Sell,\"List\": List,\"Living\":Living,\"Rooms\": Rooms,\"Beds\": Beds,\"Baths\": Baths,\"Age\": Age,\"Acres\": Acres,\"Taxes\": Taxes})\n\nwith open(\"homes.csv\", mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n recordCount = 0\n # next(csv_reader)\n # cur.copy_from(csv_file,'home',sep=',')\n for row in csv_reader:\n sqlInsert = \\\n \"\"\"INSERT INTO home (\"Sell\", \"List\", \"Living\", \"Rooms\", \"Beds\", \"Baths\", \"Age\", \"Acres\", \"Taxes\") VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s);\"\"\"\n try:\n cur.execute(sqlInsert,(row['Sell'], \n row[' \"List\"'], \n row[' \"Living\"'],\n row[' \"Rooms\"'], \n row[' \"Beds\"'], \n 
row[' \"Baths\"'], \n row[' \"Age\"'], \n row[' \"Acres\"'], \n row[' \"Taxes\"']))\n connection.commit()\n recordCount +=1\n \n except psycopg2.DatabaseError as error:\n print(error)\n quit()\n connection.close()","repo_name":"JinxCY93/API-Script-Python","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"41623528901","text":"from shape3d import Shape3D\n\n\nclass Icosahedron(Shape3D):\n\n def __init__(self, radius=150):\n Shape3D.__init__(self)\n self.topPent = [PVector() for _ in range(5)]\n self.bottomPent = [PVector() for _ in range(5)]\n c = dist(cos(0) * radius,\n sin(0) * radius,\n cos(radians(72)) * radius,\n sin(radians(72)) * radius)\n b = radius\n a = sqrt((c**2) - (b**2))\n self.triHeight = sqrt((c**2) - (c / 2)**2)\n angle = 0\n for i in range(5):\n self.topPent[i] = PVector(cos(angle) * radius,\n sin(angle) * radius,\n self.triHeight / 2.0)\n angle += radians(72)\n self.topPoint = PVector(0, 0, self.triHeight / 2.0 + a)\n angle = 72.0 / 2.0\n for i in range(5):\n self.bottomPent[i] = PVector(cos(angle) * radius,\n sin(angle) * radius,\n -self.triHeight / 2.0)\n angle += radians(72)\n self.bottomPoint = PVector(0, 0, -(self.triHeight / 2.0 + a))\n\n # Draw icosahedron.\n def create(self):\n for i in range(5):\n if i < 4:\n # Icosahedron top.\n self.makeTriangle(self.topPent[i],\n self.topPoint,\n self.topPent[i + 1])\n # Icosahedron bottom.\n self.makeTriangle(self.bottomPent[i],\n self.bottomPoint,\n self.bottomPent[i + 1])\n else:\n self.makeTriangle(self.topPent[i],\n self.topPoint,\n self.topPent[0])\n self.makeTriangle(self.bottomPent[i],\n self.bottomPoint,\n self.bottomPent[0])\n\n # Icosahedron body.\n for i in range(5):\n if i < 3:\n self.makeTriangle(self.topPent[i],\n self.bottomPent[i + 1],\n self.bottomPent[i + 2])\n self.makeTriangle(self.bottomPent[i + 2],\n self.topPent[i],\n self.topPent[i + 1])\n elif i == 3:\n self.makeTriangle(self.topPent[i],\n self.bottomPent[i + 1],\n self.bottomPent[0])\n self.makeTriangle(self.bottomPent[0],\n self.topPent[i],\n self.topPent[i + 1])\n elif i == 4:\n self.makeTriangle(self.topPent[i],\n self.bottomPent[0],\n self.bottomPent[1])\n self.makeTriangle(self.bottomPent[1],\n self.topPent[i],\n self.topPent[0])\n\n def makeTriangle(self, a, b, c):\n with beginShape():\n vertex(a.x, a.y, a.z)\n vertex(b.x, b.y, b.z)\n vertex(c.x, c.y, c.z)\n","repo_name":"jdf/processing.py","sub_path":"mode/examples/Topics/Geometry/Icosahedra/icosahedron.py","file_name":"icosahedron.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":1589,"dataset":"github-code","pt":"83"} +{"seq_id":"35840843891","text":"def solution(answers):\n answer = []\n corr = {1:0, 2:0, 3:0}\n \n for i in range(len(answers)):\n num = i + 1\n \n #1번학생\n if num % 5 == 0 and answers[i] == 5:\n corr[1] = corr[1] + 1\n elif num % 5 == answers[i]:\n corr[1] = corr[1] + 1\n \n #2번학생\n if num % 2 == 1 and answers[i] == 2:\n corr[2] = corr[2] + 1\n elif num % 8 == 2 and answers[i] == 1:\n corr[2] = corr[2] + 1\n elif num % 8 == 4 and answers[i] == 3:\n corr[2] = corr[2] + 1\n elif num % 8 == 6 and answers[i] == 4:\n corr[2] = corr[2] + 1\n elif num % 8 == 0 and answers[i] == 5:\n corr[2] = corr[2] + 1\n \n #3번학생\n if (num % 10 == 1 or num % 10 == 2) and answers[i] == 3:\n corr[3] = corr[3] + 1\n elif (num % 10 == 3 or num % 10 == 4) and answers[i] == 1:\n corr[3] = corr[3] + 1\n 
elif (num % 10 == 5 or num % 10 == 6) and answers[i] == 2:\n            corr[3] = corr[3] + 1\n        elif (num % 10 == 7 or num % 10 == 8) and answers[i] == 4:\n            corr[3] = corr[3] + 1\n        elif (num % 10 == 9 or num % 10 == 0) and answers[i] == 5:\n            corr[3] = corr[3] + 1\n    \n    for i in corr:\n        if corr[i] == max(corr.values()):\n            answer.append(i)\n    \n    return answer\n\n# Student 1 repeats [1,2,3,4,5] (cycle of 5)\n# Student 2 repeats [[2,1],[2,3],[2,4],[2,5]] (cycle of 8)\n# Student 3 repeats [[3,3],[1,1],[2,2],[4,4],[5,5]] (cycle of 10)\n#\n# Student 1 - correct when n%5==1 and the answer is 1, and when n%5==0 and the answer is 5..\n# Student 2 - correct when n%2==1 and the answer is 2, and when n%8==2 and the answer is 1..\n# Student 3 - correct when n%10 is 1 or 2 and the answer is 3, and when it is 3 or 4 and the answer is 1..\n#\n# Count how many questions each student got right in the corr dictionary","repo_name":"codinglcy/Algorithm","sub_path":"프로그래머스/lv1/42840. 모의고사/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"19953459181","text":"number = int(input(\"Enter a number\"))\ndef findfactors(principle):\n    factors = []\n    count = 1\n    while(count<=(principle*0.5)):\n        if(principle%count==0):\n            factors.append(count)\n        count = count + 1\n    return factors\nprint(findfactors(number))\n    \n    ","repo_name":"TarunNarahari/PythonPrograms","sub_path":"FindDivisors.py","file_name":"FindDivisors.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31773641981","text":"###############################################################################\n##### Data Preprocessing\n###############################################################################\nfrom typing import Tuple\nimport pandas as pd\nimport constants as cst\nfrom datetime import timedelta\nfrom functools import partial\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n##### Get logger\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.DEBUG)\n\n##### Preprocessor\nclass Preprocessor:\n    def __init__(self, val_split_length: int = 90) -> None:\n        self.split_day = cst.MAX_DATE - timedelta(val_split_length)\n\n    def preprocess(\n        self,\n        data: pd.DataFrame = None,\n        train_val_split: bool = True,\n    ) -> pd.DataFrame:\n        if train_val_split:\n            data = self.enforce_data_quality(data)\n            self.get_validation_set(data)\n\n        # load train_data\n        data = pd.read_csv(cst.TRAIN_TRANSACTION_PATH, index_col=0)\n        data[\"date_order\"] = pd.to_datetime(data[\"date_order\"])\n        data = data.sort_values(\"date_order\")\n\n        data = self.feature_engineering(data)\n        data.to_csv(cst.FEATURES_PATH)\n\n        labels = self.get_labels(data)\n        train_set, test_set = self.get_train_test_sets(\n            pd.read_csv(cst.FEATURES_PATH, index_col=0), labels\n        )\n\n        return train_set, test_set\n\n    def enforce_data_quality(self, data: pd.DataFrame) -> pd.DataFrame:\n        # remove zero/negative quantities\n        data = data[data[\"quantity\"] > 0]\n        # remove zero/negative sales\n        data = data[data[\"sales_net\"] > 0]\n        # enforce date types\n        data[\"date_order\"] = pd.to_datetime(data[\"date_order\"])\n        data[\"date_invoice\"] = pd.to_datetime(data[\"date_invoice\"])\n\n        data = data[\n            (data[\"date_order\"].dt.year >= 2015)\n            & (data[\"date_invoice\"].dt.year >= 2015)\n        ]\n\n        logger.info(\"Done performing data quality checks\")\n\n        return data\n\n    def get_validation_set(self, data: pd.DataFrame) -> None:\n        train_set = data.loc[data[\"date_order\"] < self.split_day]\n        val_set = 
data.loc[data[\"date_order\"] >= self.split_day]\n\n logger.info(\"Done splitting validation and train transactions\")\n\n train_set.to_csv(cst.TRAIN_TRANSACTION_PATH)\n val_set.to_csv(cst.VALIDATION_DATA_PATH)\n\n logger.info(\"Done saving validation and train transactions\")\n\n def feature_engineering(self, data: pd.DataFrame) -> pd.DataFrame:\n logger.info(\"Started feature engineering\")\n\n features = data.groupby(by=\"client_id\", as_index=False).agg(\n online_percent=(\"order_channel\", partial(item_freq, item=\"online\")),\n phone_percent=(\"order_channel\", partial(item_freq, item=\"by phone\")),\n store_percent=(\"order_channel\", partial(item_freq, item=\"at the store\")),\n visit_percent=(\n \"order_channel\",\n partial(item_freq, item=\"during the visit of a sales rep\"),\n ),\n other_percent=(\"order_channel\", partial(item_freq, item=\"other\")),\n mean_qty=(\"quantity\", \"mean\"),\n max_qty=(\"quantity\", \"max\"),\n min_qty=(\"quantity\", \"min\"),\n std_qty=(\"quantity\", \"std\"),\n last_qty_1=(\"quantity\", partial(last, n=1)),\n last_qty_2=(\"quantity\", partial(last, n=2)),\n last_qty_3=(\"quantity\", partial(last, n=3)),\n last_qty_4=(\"quantity\", partial(last, n=4)),\n mean_sales=(\"sales_net\", \"mean\"),\n max_sales=(\"sales_net\", \"max\"),\n min_sales=(\"sales_net\", \"min\"),\n std_sales=(\"sales_net\", \"std\"),\n last_sales_1=(\"sales_net\", partial(last, n=1)),\n last_sales_2=(\"sales_net\", partial(last, n=2)),\n last_sales_3=(\"sales_net\", partial(last, n=3)),\n last_sales_4=(\"sales_net\", partial(last, n=4)),\n n_branch=(\"branch_id\", \"nunique\"),\n n_product=(\"product_id\", \"nunique\"),\n purchase_freq=(\"date_order\", purchase_frequency),\n delay_purchase_n1=(\"date_order\", partial(time_delay, n=1)),\n delay_purchase_n2=(\"date_order\", partial(time_delay, n=2)),\n delay_purchase_n3=(\"date_order\", partial(time_delay, n=3)),\n delay_purchase_n4=(\"date_order\", partial(time_delay, n=4)),\n client_age=(\"date_order\", partial(time_from_today, n=0)),\n time_from_last_purchase=(\"date_order\", partial(time_from_today, n=-1)),\n client_lifetime=(\"date_order\", lifetime),\n )\n\n logger.info(\"Done feature engineering\")\n\n return features\n\n def get_labels(self, data: pd.DataFrame) -> pd.DataFrame:\n # get purchasing frequencies\n freqs = data.groupby(by=\"client_id\", as_index=False).agg(\n last_purchase=(\"date_order\", \"max\"),\n frequency=(\"date_order\", purchase_frequency),\n )\n\n # setting churn deffinitions\n freq_churn = self.split_day - timedelta(30)\n medium_churn = self.split_day - timedelta(90)\n\n # assiging client categories\n freqs.loc[\n (freqs[\"frequency\"] > 0) & (freqs[\"frequency\"] < 10), \"client_category\"\n ] = \"freq_buyer\"\n freqs.loc[\n (freqs[\"frequency\"] >= 10) & (freqs[\"frequency\"] < 32), \"client_category\"\n ] = \"med_buyer\"\n freqs.loc[\n (freqs[\"frequency\"] >= 32) | (freqs[\"frequency\"] == 0), \"client_category\"\n ] = \"infreq_buyer\"\n\n # assigning labels\n freqs.loc[\n (freqs[\"client_category\"] == \"freq_buyer\")\n & (freqs[\"last_purchase\"] <= freq_churn),\n \"is_churn\",\n ] = 1\n freqs.loc[\n (freqs[\"client_category\"] == \"freq_buyer\")\n & (freqs[\"last_purchase\"] > freq_churn),\n \"is_churn\",\n ] = 0\n freqs.loc[\n (freqs[\"client_category\"] == \"med_buyer\")\n & (freqs[\"last_purchase\"] <= medium_churn),\n \"is_churn\",\n ] = 1\n freqs.loc[\n (freqs[\"client_category\"] == \"med_buyer\")\n & (freqs[\"last_purchase\"] > medium_churn),\n \"is_churn\",\n ] = 0\n\n 
logger.info(\"Done getting labels\")\n\n freqs.to_csv(cst.TRAIN_TARGET_PATH)\n\n return freqs\n\n def get_train_test_sets(\n self, training_data: pd.DataFrame, training_target: pd.DataFrame\n ) -> Tuple[pd.DataFrame, pd.DataFrame]:\n full_training_data = pd.merge(\n training_data, training_target, on=\"client_id\", how=\"left\"\n )\n full_training_data.dropna(inplace=True)\n train_set, test_set = train_test_split(\n full_training_data, test_size=0.3, random_state=42\n )\n\n logger.info(\"Done splitting train and test set\")\n\n train_set.to_css(cst.TRAIN_SET_PATH)\n test_set.to_css(cst.TEST_SET_PATH)\n\n logger.info(\"Done saving train and test set\")\n\n return train_set, test_set\n\n\ndef purchase_frequency(dates):\n return pd.Timedelta(np.diff(dates.unique()).mean()).total_seconds() / (60 * 60 * 24)\n\n\ndef time_delay(dates, n=1):\n try:\n return pd.Timedelta(dates.iloc[-n] - dates.iloc[-n - 1]).total_seconds() / (\n 60 * 60 * 24\n )\n except IndexError:\n return np.nan\n\n\ndef lifetime(dates):\n return pd.Timedelta(dates.iloc[-1] - dates.iloc[0]).total_seconds() / (60 * 60 * 24)\n\n\ndef time_from_today(dates, n=0):\n return pd.Timedelta(cst.MAX_DATE - dates.iloc[n]).total_seconds() / (60 * 60 * 24)\n\n\ndef last(sequence, n=1):\n try:\n return sequence.iloc[-n]\n except IndexError:\n return np.nan\n\n\ndef item_freq(series, item):\n if not np.isin(item, series):\n return 0\n else:\n return series.value_counts(normalize=True)[item]\n","repo_name":"henrique-britoleao/churn_prediction","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":7883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18252833191","text":"from tree import *\n\ndef test(assertion, error_str = \"\"):\n if not assertion:\n raise Exception(error_str)\n\ndef test_node_basics():\n\n node = Node(1)\n\n test(node.getValue() == 1)\n\n\n test(node.getLeft() == None)\n\n test(node.getRight() == None)\n\n node.setLeft(node)\n\n test(node.getLeft() == node)\n\n node.setRight(node)\n\n test(node.getRight() == node)\n\n node.setValue(100)\n\n test(node.getValue() == 100)\n\n\ndef test_node_str():\n node = Node(1)\n test(str(node) == \"Node ( 1 )\")\n\ndef test_node_bool():\n node = Node()\n test(not node)\n node = Node(0)\n test(node)\n\ndef test_tree_basics():\n root = Node(0)\n tree = Tree(root)\n test(tree.getRoot() == root)\n\ndef create_full_tree():\n node31 = Node(31)\n node32 = Node(32)\n node33 = Node(33)\n node34 = Node(34)\n node35 = Node(35)\n node36 = Node(36)\n node37 = Node(37)\n node38 = Node(38)\n\n node21 = Node(21, node31, node32)\n node22 = Node(22, node33, node34)\n node23 = Node(23, node35, node36)\n node24 = Node(24, node37, node38)\n\n node11 = Node(11, node21, node22)\n node12 = Node(12, node23, node24)\n\n root = Node(0, node11, node12)\n return Tree(root)\n\ndef test_full_tree_iter():\n tree = create_full_tree()\n it = iter(tree)\n node_vals_set = {\n 0,\n 11, 12,\n 21, 22, 23, 24,\n 31, 32, 33, 34, 35, 36, 37, 38\n }\n for node in it:\n node_vals_set.remove(node.getValue())\n test(len(node_vals_set) == 0)\n\ndef test_full_tree_str():\n tree = create_full_tree()\n\n ref_str_tree = \"\"\"Node ( 0 )\n Node ( 11 )\n Node ( 21 )\n Node ( 31 )\n Node ( 32 )\n Node ( 22 )\n Node ( 33 )\n Node ( 34 )\n Node ( 12 )\n Node ( 23 )\n Node ( 35 )\n Node ( 36 )\n Node ( 24 )\n Node ( 37 )\n Node ( 38 )\n\"\"\"\n #use this for debugging\n print(repr(str(tree)))\n print(repr(ref_str_tree))\n test(str(tree) == 
ref_str_tree)\n\n\ndef main():\n    test_node_basics()\n    test_node_str()\n    test_node_bool()\n    test_tree_basics()\n    test_full_tree_iter()\n    test_full_tree_str()\n    print(\"Success!\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Rezenter/AU-Python","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"16857476233","text":"#pair of genuine 7 is 1 or more\r\n'''7 7 1 t\r\n7 1 7 f\r\n7 7 7 7 1 2 t\r\n7 7 7 f\r\n7 1 7 f\r\n7 7 1 7 1 7 f\r\n1 7 7 1 1 t\r\n71777 f\r\n'''\r\nimport array\r\na=int(input('Size:'))\r\nb=array.array('i',[])\r\nfor i in range(a):\r\n    x=int(input())\r\n    b.append(x)\r\nprint(b)\r\nx=0\r\nb.append(0)\r\ncnt=0\r\n\r\nif(b.count(7)%2==0):\r\n    #count adjacent pairs of 7s in the array\r\n    while(x<len(b)-1):\r\n        if(b[x]==7 and b[x+1]==7):\r\n            cnt+=1\r\n        x+=1\r\n    if(cnt>=1):\r\n        #if(cnt>b.count(7)%2):\r\n        print('True')\r\nelif(b.count(7)%2!=0):\r\n    print('False')\r\nelse:\r\n    print('False')\r\n","repo_name":"HarshavardhanNetha/py-projects","sub_path":"PythonPUC/Class/arrays/new_aray.py","file_name":"new_aray.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"39923045483","text":"# ~~~ cnn.py ~~~\n# This file creates and trains the convolutional neural network \n\nimport dataset\nimport config\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\n\n# loading with pickle (doesn't really work)\n# X = pickle.load(open(\"X_features.pickle\", \"rb\"))\n# y = pickle.load(open(\"y_labels.pickle\", \"rb\"))\n\ndef train_model(num_epochs, v_ratio):\n    # loading the data in\n    # X is the features, y is the labels\n    # NOTE: we don't have test data yet\n    # (X_train, y_train, X_test, y_test) = dataset.load_dataset()\n    try:\n        (X_train, y_train) = dataset.load_dataset()\n    except FileNotFoundError:\n        print(f\"error: file(s) {config.TR_FEAT_FILENAME}, {config.TR_LABL_FILENAME} don't exist\")\n        print(\"note: try generating them using option (1)\")\n        return\n\n    # This model has 3 layers with an input shape of 65x65x1 (1 since the images\n    # are grayscale) and can have 28 possible categories\n    model = Sequential()\n    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(config.IMG_SIZE, config.IMG_SIZE, 1))) # 1 = grayscale\n    model.add(MaxPooling2D((2, 2)))\n    model.add(Conv2D(64, (3, 3), activation='relu'))\n    model.add(MaxPooling2D((2, 2)))\n    model.add(Flatten())\n    model.add(Dense(28, activation='relu'))\n    model.add(Dense(len(config.CATEGORIES), activation='softmax')) # Softmax, since multiclass\n    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n    # Actually train and save the result of the model\n    # Batch size seems to affect accuracy (>=256 results in lower accuracy)\n    model.fit(X_train, y_train, epochs=num_epochs, batch_size=64, validation_split=v_ratio)\n    model.save(config.MODEL_FILENAME)\n\ndef model_summary():\n    try:\n        model = load_model(config.MODEL_FILENAME)\n        model.summary()\n    except OSError:\n        print(\"error: file \" + config.MODEL_FILENAME + \" not found\")\n        print(\"note: try creating/training it using option (2)\")\n\n# Evaluate the model on the test set\n# accuracy = model.evaluate(X_test, y_test, verbose=0)[1]\n# print(f'Test Accuracy: {accuracy * 
100:.2f}%')\n\n","repo_name":"sama305/SignLanguageML","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"20422665115","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Hive Colony Framework\r\n# Copyright (c) 2008-2012 Hive Solutions Lda.\r\n#\r\n# This file is part of Hive Colony Framework.\r\n#\r\n# Hive Colony Framework is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# Hive Colony Framework is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with Hive Colony Framework. If not, see .\r\n\r\n__author__ = \"João Magalhães \"\r\n\"\"\" The author(s) of the module \"\"\"\r\n\r\n__version__ = \"1.0.0\"\r\n\"\"\" The version of the module \"\"\"\r\n\r\n__revision__ = \"$LastChangedRevision$\"\r\n\"\"\" The revision number of the module \"\"\"\r\n\r\n__date__ = \"$LastChangedDate$\"\r\n\"\"\" The last change date of the module \"\"\"\r\n\r\n__copyright__ = \"Copyright (c) 2008-2012 Hive Solutions Lda.\"\r\n\"\"\" The copyright for the module \"\"\"\r\n\r\n__license__ = \"GNU General Public License (GPL), Version 3\"\r\n\"\"\" The license for the module \"\"\"\r\n\r\nimport colony.base.system\r\nimport colony.base.decorators\r\n\r\nclass BusinessHelperPlugin(colony.base.system.Plugin):\r\n \"\"\"\r\n The main class for the Business Helper plugin.\r\n \"\"\"\r\n\r\n id = \"pt.hive.colony.plugins.business.helper\"\r\n name = \"Business Helper\"\r\n description = \"Business Helper Plugin\"\r\n version = \"1.0.0\"\r\n author = \"Hive Solutions Lda. 
\"\r\n platforms = [\r\n colony.base.system.CPYTHON_ENVIRONMENT\r\n ]\r\n capabilities = [\r\n \"business_helper\"\r\n ]\r\n capabilities_allowed = [\r\n \"entity\",\r\n \"entity_bundle\",\r\n \"business_logic\",\r\n \"business_logic_bundle\"\r\n ]\r\n dependencies = [\r\n colony.base.system.PluginDependency(\"pt.hive.colony.plugins.data.entity.manager\", \"1.x.x\")\r\n ]\r\n main_modules = [\r\n \"business_helper.system\"\r\n ]\r\n\r\n business_helper = None\r\n \"\"\" The business helper \"\"\"\r\n\r\n entity_manager_plugin = None\r\n \"\"\" The entity manager plugin \"\"\"\r\n\r\n entity_plugins = []\r\n \"\"\" The entity plugins \"\"\"\r\n\r\n entity_bundle_plugins = []\r\n \"\"\" The entity bundle plugins \"\"\"\r\n\r\n business_logic_plugins = []\r\n \"\"\" The business logic plugins \"\"\"\r\n\r\n business_logic_bundle_plugins = []\r\n \"\"\" The business logic bundle plugins \"\"\"\r\n\r\n def load_plugin(self):\r\n colony.base.system.Plugin.load_plugin(self)\r\n import business_helper.system\r\n self.business_helper = business_helper.system.BusinessHelper(self)\r\n\r\n @colony.base.decorators.load_allowed\r\n def load_allowed(self, plugin, capability):\r\n colony.base.system.Plugin.load_allowed(self, plugin, capability)\r\n\r\n @colony.base.decorators.unload_allowed\r\n def unload_allowed(self, plugin, capability):\r\n colony.base.system.Plugin.unload_allowed(self, plugin, capability)\r\n\r\n @colony.base.decorators.inject_dependencies\r\n def dependency_injected(self, plugin):\r\n colony.base.system.Plugin.dependency_injected(self, plugin)\r\n\r\n def import_class_module(self, class_module_name, globals, locals, global_values, base_directory_path):\r\n return self.business_helper.import_class_module(\r\n class_module_name,\r\n globals,\r\n locals,\r\n global_values,\r\n base_directory_path\r\n )\r\n\r\n def import_class_module_target(self, class_module_name, globals, locals, global_values, base_directory_path, target_module_name):\r\n return self.business_helper.import_class_module(\r\n class_module_name,\r\n globals,\r\n locals,\r\n global_values,\r\n base_directory_path,\r\n target_module_name\r\n )\r\n\r\n def import_class_module_extra(self, class_module_name, globals, locals, global_values, base_directory_path, target_module_name, extra_symbols_map, extra_globals_map):\r\n return self.business_helper.import_class_module(\r\n class_module_name,\r\n globals,\r\n locals,\r\n global_values,\r\n base_directory_path,\r\n target_module_name,\r\n extra_symbols_map,\r\n extra_globals_map\r\n )\r\n\r\n def generate_bundle_map(self, bundle_classes):\r\n return self.business_helper.generate_bundle_map(bundle_classes)\r\n\r\n def generate_module_bundle(self, bundle_module_name, bundle_map):\r\n return self.business_helper.generate_module_bundle(bundle_module_name, bundle_map)\r\n\r\n def get_entity_class(self):\r\n return self.business_helper.get_entity_class()\r\n\r\n def get_entity_classes_namespaces(self, namespaces):\r\n return self.business_helper.get_entity_classes_namespaces(namespaces)\r\n\r\n def get_business_logic_classes_namespaces(self, namespaces):\r\n return self.business_helper.get_business_logic_classes_namespaces(namespaces)\r\n\r\n @colony.base.decorators.plugin_inject(\"pt.hive.colony.plugins.data.entity.manager\")\r\n def set_entity_manager_plugin(self, entity_manager_plugin):\r\n self.entity_manager_plugin = entity_manager_plugin\r\n\r\n @colony.base.decorators.load_allowed_capability(\"entity\")\r\n def entity_load_allowed(self, plugin, capability):\r\n 
self.entity_plugins.append(plugin)\r\n self.business_helper.entity_load(plugin)\r\n\r\n @colony.base.decorators.load_allowed_capability(\"entity_bundle\")\r\n def entity_bundle_load_allowed(self, plugin, capability):\r\n self.entity_bundle_plugins.append(plugin)\r\n self.business_helper.entity_bundle_load(plugin)\r\n\r\n @colony.base.decorators.load_allowed_capability(\"business_logic\")\r\n def business_logic_load_allowed(self, plugin, capability):\r\n self.business_logic_plugins.append(plugin)\r\n self.business_helper.business_logic_load(plugin)\r\n\r\n @colony.base.decorators.load_allowed_capability(\"business_logic_bundle\")\r\n def business_logic_bundle_load_allowed(self, plugin, capability):\r\n self.business_logic_bundle_plugins.append(plugin)\r\n self.business_helper.business_logic_bundle_load(plugin)\r\n\r\n @colony.base.decorators.unload_allowed_capability(\"entity\")\r\n def entity_unload_allowed(self, plugin, capability):\r\n self.entity_plugins.remove(plugin)\r\n self.business_helper.entity_unload(plugin)\r\n\r\n @colony.base.decorators.unload_allowed_capability(\"entity_bundle\")\r\n def entity_bundle_unload_allowed(self, plugin, capability):\r\n self.entity_bundle_plugins.remove(plugin)\r\n self.business_helper.entity_bundle_unload(plugin)\r\n\r\n @colony.base.decorators.unload_allowed_capability(\"business_logic\")\r\n def business_logic_unload_allowed(self, plugin, capability):\r\n self.business_logic_plugins.remove(plugin)\r\n self.business_helper.business_logic_unload(plugin)\r\n\r\n @colony.base.decorators.unload_allowed_capability(\"business_logic_bundle\")\r\n def business_logic_bundle_unload_allowed(self, plugin, capability):\r\n self.business_logic_bundle_plugins.remove(plugin)\r\n self.business_helper.business_logic_bundle_unload(plugin)\r\n","repo_name":"by0ne/colony_plugins","sub_path":"business/src/business_helper_plugin.py","file_name":"business_helper_plugin.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"74684691470","text":"from Crypto.Util.number import long_to_bytes, inverse\nfrom gmpy2 import isqrt, square, is_square\n\nn = REDACTED\ne = REDACTED\nc = REDACTED\n\ndef fermat_factors(n):\n assert n % 2 != 0\n a = isqrt(n)\n b2 = square(a) - n\n while not is_square(b2):\n a += 1\n b2 = square(a) - n\n factor1 = a + isqrt(b2)\n factor2 = a - isqrt(b2)\n return int(factor1), int(factor2)\n\np, q = fermat_factors(n)\nd = inverse(e, (p - 1) * (q - 1))\nm = pow(c, d, n)\n\nprint(long_to_bytes(m))\n","repo_name":"ByamB4/Common-CTF-Challenges","sub_path":"cryptography/asymmetric-cipher/src/fermats_factor_attack.py","file_name":"fermats_factor_attack.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"83"} +{"seq_id":"3699438151","text":"#This file is to simplify rating.csv file\n\n#for movielens is an explicit database while we\n#only work with implicit data, we have to \"downgrade\"\n#it, also simplify it\n#Each line in rating_simplified.csv contains 2 int\n#One for the user_id and one for the movie_id\n\nfout = open(\"ratings_simplified.csv\", \"w\")\nfout2 = open(\"ml-20m.test.rating\",'w')\nwith open('ratings.csv','r') as fin:\n\tfin.readline()\n\tlastitem = ['0','0','0','0']\n\tfor line in fin:\n\t\tformat='{0} {1}\\n'\n\t\twords = line.split(',')\n\t\tif(words[0]!=lastitem[0]):\n\t\t\tout_line = 
format.format(lastitem[0],lastitem[1])\n\t\t\tfout2.write(out_line)\n\t\t\tlastitem=words\n\t\telse:\n\t\t\tif(int(lastitem[3]) List[List[str]]:\n x, y = click\n if board[x][y] == 'M':\n board[x][y] = 'X'\n return board\n else:\n self._reveal_blank(board, x, y) \n return board\n \n def _count_mine(self, board, x, y):\n cnt = 0\n for dx in range(x-1, x+2):\n for dy in range(y-1, y+2):\n if dx == x and dy == y:\n continue\n if 0 <= dx < len(board) and 0 <= dy < len(board[0]) and board[dx][dy] == 'M':\n cnt += 1\n return cnt\n \n def _reveal_blank(self, board, x, y):\n q = deque([(x, y)])\n while q:\n x, y = q.popleft()\n if board[x][y] != 'E':\n continue\n cnt = self._count_mine(board, x, y)\n if cnt == 0:\n board[x][y] = 'B'\n for dx in range(x-1, x+2):\n for dy in range(y-1, y+2):\n if dx == x and dy == y:\n continue\n if 0 <= dx < len(board) and 0 <= dy < len(board[0]) and board[dx][dy] == 'E':\n q.append((dx, dy))\n else:\n board[x][y] = str(cnt)\n","repo_name":"YiqunPeng/leetcode_pro","sub_path":"solutions/529_minesweeper.py","file_name":"529_minesweeper.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"256956298","text":"from collections import OrderedDict\nimport json\nimport inspect\n\nfrom . import base, fields\nfrom .generics import generic, method, call_next_method\n\ndef to_json(schema_family):\n return json.dumps(to_data(schema_family), indent=4, separators=(',', ': '))\n\n@generic\ndef to_data(obj, *args, **kwargs):\n pass\n\n@method(to_data)\ndef schema_family_to_data(family: base.SchemaFamily):\n data = OrderedDict()\n data['title'] = family.name\n data['$schema'] = \"http://json-schema.org/draft-04/schema#\"\n for schema in family.schemas.values():\n data[schema.__name__] = to_data(schema)\n return data\n\n\n@method(to_data)\ndef schema_to_data(schema: base.SchemaMeta):\n data = OrderedDict()\n data['title'] = schema.__name__\n data['description'] = inspect.getdoc(schema)\n for record in schema._meta.records.values():\n data[record.__name__] = to_data(record)\n return data\n\n@method(to_data)\ndef record_to_data(record: base.RecordMeta):\n data = OrderedDict()\n data['title'] = record.__name__\n data['type'] = \"object\"\n data['properties'] = OrderedDict( (field.__name__, to_data(field))\n for field in record._meta.fields.values() )\n\n links = [ to_link_description(link)\n for link in record._meta.fields.values()\n if isinstance(link, fields.Link) ]\n\n if len(links) > 0:\n data['links'] = links\n\n for child in record._meta.children.values():\n prop_name = getattr(child, 'collective_name', child.__name__)\n data['properties'][prop_name] = OrderedDict((\n ('type', 'array'), ('items', to_data(child))))\n\n return data\n\n@method(to_data)\ndef tree_record_to_data(tree: base.TreeMeta):\n data = call_next_method(tree)\n tree_structure = OrderedDict(((rank, {'type': 'string'})\n for rank in tree._ranks))\n data['properties']['tree_structure'] = OrderedDict((\n ('type', 'object'), ('properties', tree_structure)))\n\n return data\n\n@method(to_data)\ndef field_to_data(field: base.Field):\n return OrderedDict()\n\n@method(to_data)\ndef text_field_to_data(field: fields.Text):\n data = call_next_method(field)\n data[\"type\"] = \"string\"\n return data\n\n@method(to_data)\ndef date_field_to_data(field: fields.Date):\n data = call_next_method(field)\n data[\"type\"] = \"string\"\n data['format'] = \"date-time\"\n return data\n\n@method(to_data)\ndef integer_field_to_data(field: 
fields.Integer):\n data = call_next_method(field)\n data[\"type\"] = \"integer\"\n return data\n\n@method(to_data)\ndef boolean_field_to_data(field: fields.Boolean):\n data = call_next_method(field)\n data[\"type\"] = \"boolean\"\n return data\n\n@method(to_data)\ndef link_to_data(link: fields.Link):\n data = call_next_method(link)\n data[\"type\"] = \"string\"\n data[\"format\"] = \"uuid\"\n return data\n\ndef to_link_description(link):\n target_schema = \"#/%s/%s\" % (link.target._meta.schema.__name__,\n link.target.__name__)\n\n href = \"/%s/%s/{%s}/\" % (link.target._meta.schema.__name__,\n link.target.__name__,\n link.__name__)\n return OrderedDict((\n ('rel', link.__name__),\n ('href', href),\n ('targetSchema', target_schema)))\n","repo_name":"benanhalt/SchemaTools","sub_path":"egFish/specify/schema/to_json.py","file_name":"to_json.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"21949433721","text":"# Neural network implementation\n\nfrom source.routines import *\n\n# LSTM architecture\nclass CharRNN(nn.Module):\n\n def __init__(self, tokens, n_hidden=256, n_layers=2,\n drop_prob=0.5, lr=0.001):\n super().__init__()\n self.drop_prob = drop_prob\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.lr = lr\n\n # Creating character dictionaries\n self.chars = tokens\n self.int2char = dict(enumerate(self.chars))\n self.char2int = {ch: ii for ii, ch in self.int2char.items()}\n\n # Define the LSTM\n self.lstm = nn.LSTM(len(self.chars), n_hidden, n_layers,\n dropout=drop_prob, batch_first=True)\n\n # Define a dropout layer\n self.dropout = nn.Dropout(drop_prob)\n\n # Define the final, fully-connected output layer\n self.fc = nn.Linear(n_hidden, len(self.chars))\n\n # Forward method\n def forward(self, x, hidden):\n\n # Get the outputs and the new hidden state from the lstm\n r_output, hidden = self.lstm(x, hidden)\n\n # Pass through a dropout layer\n out = self.dropout(r_output)\n\n # Stack up LSTM outputs using view (use contiguous to reshape the output)\n out = out.contiguous().view(-1, self.n_hidden)\n\n # Put x through the fully-connected layer\n out = self.fc(out)\n\n # Return the final output and the hidden state\n return out, hidden\n\n # Initializes hidden state\n def init_hidden(self, batch_size):\n \n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n\n if (train_on_gpu):\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())\n else:\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_())\n\n return hidden\n","repo_name":"PabloVD/PoemGenerator","sub_path":"source/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"31440116944","text":"f\"\"\"\nWalnut Models\n\"\"\"\n\n###\n# Libraries\n###\nimport uuid\n\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\n\n\n###\n# Models\n###\n\nclass VideoManager(models.Manager):\n\n def create(self, **data):\n if data.get('aws_credentials'):\n del data['aws_credentials']\n\n return super().create(**data)\n\nclass Video(models.Model):\n\n class 
StatusChoices(models.TextChoices):\n SUCCESS = 'Success', _('Success')\n FAILED = 'Failed', _('Failed')\n RUNNING = 'Running', _('Running')\n\n uuid = models.UUIDField(\n verbose_name=_('UUID'),\n default=uuid.uuid4,\n editable=False\n\n )\n\n title = models.CharField(\n verbose_name=_(\"Title\"),\n max_length=300,\n blank=True,\n null=True\n )\n\n description = models.TextField(\n verbose_name=_(\"Description\"),\n blank=True,\n null=True\n )\n\n video_source = models.URLField(\n verbose_name=_(\"Video \"),\n max_length=200\n )\n\n status = models.CharField(\n verbose_name=_(\"Status\"),\n max_length=10,\n choices=StatusChoices.choices,\n default=StatusChoices.RUNNING\n )\n\n user = models.ForeignKey(\n 'accounts.User',\n on_delete=models.CASCADE,\n blank=False,\n null=False\n )\n\n webhook_url = models.URLField(\n verbose_name=_(\"Webhook URL\"),\n max_length=200, \n null=False,\n blank=False\n )\n\n use_dash = models.BooleanField(\n verbose_name=_(\"Use Dash\"),\n default=False,\n null=False\n )\n\n dash_file = models.URLField(\n verbose_name=_(\"Dash file\"),\n max_length=200,\n null=True,\n blank=True,\n help_text=\"This field will be auto generated\"\n )\n\n use_hls = models.BooleanField(\n verbose_name=_(\"Use HLS\"),\n default=False,\n null=False\n )\n\n hls_file = models.URLField(\n verbose_name=_(\"HLS file\"),\n max_length=200,\n null=True,\n blank=True,\n help_text=\"This field will be auto generated\"\n )\n\n \n duration = models.FloatField(\n verbose_name=_(\"Duration\"),\n default=0,\n help_text=\"This field will be auto generated\"\n )\n\n objects = VideoManager()\n \n ","repo_name":"MarlonCorreia/walnut","sub_path":"walnut/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72186020077","text":"import os\n\ndir = '/home/mksnkv/Documents/classification/class/data/train/dogs/'\nerror_counter = 0\nimages = os.listdir(dir)\n\ni = 1\nfor filename in images:\n older = dir + filename\n new_name = \"dogs\" + str(i) + \".jpg\"\n newer = dir + new_name\n os.rename(older, newer)\n i += 1\n","repo_name":"makosenkov/classification","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31862844722","text":"import os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime as dt\nfrom tqdm import tqdm\nimport sys\n##needed for debugging\nsys.path.append('.')\nfrom Database.db_api import db_api\n\n\ndef renaming_fun(x):\n #this fuction renames columns in df to better match db for upload later\n name_dict = {\n 'p_name':'name',\n 'M':'meters_made',\n 'C':'carries',\n 'P':'passes_made',\n 'T':'tackles_made',\n 'MT':'missed_tackles',\n 'TW':'turnovers_won',\n 'TC':'turnovers_conceded',\n 'DB':'defenders_beaten',\n 'TA':'try_assists',\n 'O':'offloads',\n 'CB':'clean_breaks',\n 'LW':'lineouts_won',\n 'LS':'lineouts_stolen',\n }\n try:\n return name_dict[x]\n except KeyError:\n return x\n\ndef check_for_duplicates(master_dict):\n#duplicates from scraping can only appear sequentially\n for i in range(1,158):\n if isinstance(master_dict[i], str):\n print(i, master_dict[i], sep=': ')\n continue\n if isinstance(master_dict[i-1], str):\n continue\n\n if master_dict[i]['FT_Score']==master_dict[i-1]['FT_Score']:\n print(i, 'Possible Dupe', sep=': ')\n\ndef extract_sub_data(Pos_Player_Array):\n 
#https://www.premiershiprugby.com/match-report/match-report-leicester-tigers-34-19-exeter-chiefs#report\n #weird inconsistancy in subbing on site double entries.\n #so we will always take last entry as those line up correctly\n #possible given issue where player will be subbed and not captured on site\n sub_flag = False\n p_name = []\n pos_num = []\n min_played = []\n is_sub = []\n for pos, player in Pos_Player_Array:\n #check if we are in replacements section\n if player == 'Replacements':\n sub_flag = True\n p_name.append(player)\n min_played.append(-1)\n is_sub.append(-1)\n pos_num.append(-1)\n continue\n\n subs = player.split(' ')\n\n #check if there are any subs\n if len(subs) == 1: #no subs\n p_name.append(subs[0])\n min_played.append(80)\n\n #issue where some subs do not have replacements data on site\n #Note on site they will populate sub data with players not subbed\n # when there is no sub. These players will be caught here and removed\n # by clean_df at end\n if sub_flag:\n #this -1 signifies not subbed in player to be removed\n is_sub.append(-1)\n pos_num.append(-1)\n continue\n\n is_sub.append(0)\n pos_num.append(int(pos))\n continue\n\n if sub_flag:\n p_name.append(subs[0])\n min_played.append(80 - int(subs[-1].strip(\"'\")))\n is_sub.append(1)\n #find position of sub\n try:\n pos_num.append(pos_num[p_name.index(subs[-2])])\n except: #in the case of a resub\n pos_num.append([i for i,item in enumerate(Pos_Player_Array) if subs[-2] in item[1]][0])\n continue\n\n p_name.append(subs[0])\n #second split here since website has some small inconcistancies in formatting\n min_played.append(int(subs[-1].split(' ')[-1].strip(\"'\")))\n is_sub.append(0)\n pos_num.append(int(pos))\n\n return(p_name, min_played, is_sub, pos_num)\n\ndef lookup_PlayGuid_list(p_names_df, list_ids):\n p_names, player_ids = zip(*list_ids)\n PlayGuids = []\n for p_name in p_names_df:\n try:\n PlayGuid_link = player_ids[p_names.index(p_name)]\n PlayGuid = PlayGuid_link.split('=')[1]\n except ValueError: #take player from relitave positon when lookup fails\n PlayGuid_link = player_ids[np.where(p_names_df==p_name)[0][0]]\n PlayGuid = PlayGuid_link.split('=')[1]\n\n PlayGuids.append(PlayGuid)\n return(PlayGuids)\n\ndef create_dict_special(special_list, at_home):\n headers = special_list[2]\n side_details = special_list[at_home]\n dict_lst = {h: d for h, d in zip(headers, side_details)}\n return dict_lst\n\ndef add_target_values(dict_key, dict_lst, df_col, df):\n #dictonary of known player miss spelling\n spelling = { 'Sam Lewis': 'Samuel Lewis',\n 'Melani Nanai Vai': 'Melani Nanai',\n 'Rus Tuima': 'Rusiate Tuima',\n 'Matty Proctor': 'Matt Proctor',\n 'Dan Thomas': 'Daniel Thomas',\n 'Dan du Preez': 'Daniel du Preez',\n 'Dom Morris': 'Dominic Morris',\n 'Jamie Shillcock': 'James Shillcock',\n 'Theo McFarland': 'Theodore McFarland',\n 'Matt Cornish': 'Matthew Cornish',\n 'Val Rapava Ruskin': 'Val Rapava-Ruskin',\n 'Seb Atkinson': 'Sebastien Atkinson',\n 'Elliott Obatoyinbo': 'Elliot Obatoyinbo',\n 'Semi Radradra Waqavatu': 'Semi Radradra',\n 'Ash Challenger': 'Ashley Challenger'\n }\n #penos\n try:\n players = [[p.split(',')[0], len(p.split(','))-1]for p in dict_lst[dict_key]]\n\n for p in players:\n #player name not recorded \n #TYPO HERE MAY NEED TO TEST\n if len(p[0])<=3:\n continue\n\n #player name not found\n if p[0] not in df['name'].values:\n try:\n df.loc[df['name'] == spelling[p[0]], df_col] = p[1]\n except:\n print('Cannot find player {0}'.format(p[0]))\n else:\n df.loc[df['name'] == p[0], df_col] = p[1]\n 
except:\n #if no penalty goals in game\n pass\n\ndef clean_df(df, list_ids, at_home, target_details):\n df['at_home'] = at_home\n\n #determine minutes played and subs and add to dataframe\n p_name, min_played, is_sub, pos_num = extract_sub_data(df[['Pos','Player']].values)\n df['name'] = p_name\n df['mins_played'] = min_played\n df['is_sub'] = is_sub\n df['position_num'] = pos_num \n\n #remove replacements column\n df.drop([p_name.index('Replacements')], inplace=True)\n\n #add in player ids\n df['playguid'] = lookup_PlayGuid_list(df['name'].values, list_ids)\n\n #drop reformatted columns\n df.drop(['Pos', 'Player'], axis=1, inplace=True)\n\n #fill in 0s\n df = df.replace('-',0)\n\n #rename columns for database\n df.columns = map(renaming_fun, df.columns)\n\n #add target detaild for penos, tries and convos\n dict_lst = create_dict_special(target_details, at_home)\n\n df['penalty_goals'] = 0\n add_target_values('Penalties',dict_lst, 'penalty_goals', df)\n\n df['tries'] = 0\n add_target_values('Tries',dict_lst, 'tries', df)\n #add_target_values('Penalty Tries',dict_lst, 'tries', df)\n\n df['conversions'] = 0\n add_target_values('Conversions',dict_lst, 'conversions', df)\n\n #remove players who did not play or we dont know for how long\n df = df[df['is_sub'] >= 0]\n\n #reset index\n df = df.reset_index(drop=True)\n return(df)\n\ndef load_to_db(year, loaded_dict):\n #connect to db\n db_tool = db_api()\n\n #legacy\n list_of_dfs = []\n\n #create compitition record\n table = 'Comps' \n insert_dict = {'name': 'Premiership',\n 'year': int(year)}\n comp = db_tool.insert(table, **insert_dict)\n\n for i in tqdm(range(0,len(loaded_dict))):\n #skip empty game data\n if isinstance(loaded_dict[i], str):\n print(i, loaded_dict[i], sep=': ')\n continue\n\n #Match Details\n match_dict = dict((k, loaded_dict[i][k]) for k in ('match_date', 'home_team', 'away_team', 'FT_Score', 'HT_Score'))\n table = 'Matches' \n p_table = 'Players'\n pm_table = 'Player_Matches'\n insert_dict = {'idComp': comp[0]['idComp'],\n 'date': dt.strptime(match_dict['match_date'], '%A %d %B %Y'),\n 'home': match_dict['home_team'],\n 'away': match_dict['away_team'],\n 'FT_Score': match_dict['FT_Score'],\n 'HT_Score': match_dict['HT_Score'],\n }\n match = db_tool.insert(table, **insert_dict)\n\n #home\n df1 = clean_df(loaded_dict[i]['home_df'], loaded_dict[i]['home_player_ids'],\n 1, loaded_dict[i]['target_details'])\n\n player_dict_list = df1[['playguid','name']].T.to_dict()\n player_match_dict_list = df1.drop(['name', 'playguid'], axis=1).T.to_dict()\n\n for j in range(0,len(player_dict_list)):\n player = db_tool.insert(p_table, **player_dict_list[j])\n #player match id\n id_dict = { 'idPlayer': player[0]['idPlayer'] , 'idMatch': match[0]['idMatch'] }\n insert_dict = {**id_dict, **player_match_dict_list[j]}\n db_tool.insert(pm_table, **insert_dict)\n \n #legacy\n list_of_dfs.append(df1)\n\n #away\n df2 = clean_df(loaded_dict[i]['away_df'], loaded_dict[i]['away_player_ids'], \n 0, loaded_dict[i]['target_details'])\n\n player_dict_list = df2[['playguid','name']].T.to_dict()\n player_match_dict_list = df2.drop(['name','playguid'], axis=1).T.to_dict()\n\n for j in range(0,len(player_dict_list)):\n player = db_tool.insert(p_table, **player_dict_list[j])\n #player match id\n id_dict = { 'idPlayer': player[0]['idPlayer'] , 'idMatch': match[0]['idMatch'] }\n insert_dict = {**id_dict, **player_match_dict_list[j]}\n db_tool.insert(pm_table, **insert_dict)\n\n #legacy\n list_of_dfs.append(df2)\n\n #legacy\n master_df = pd.concat(list_of_dfs)\n 
master_df.reset_index(inplace = True, drop=True)\n\n return master_df\n\n\npickle_path = os.getcwd() + '\\\\Scrapers\\\\Scraped Data\\\\premiership_matches.pkl'\nwith open(pickle_path, 'rb') as f:\n loaded_dict = pickle.load(f)\n\nmaster_df = load_to_db(2022, loaded_dict)\n\nsave_path = os.getcwd() + '\\\\Data Cleaners\\\\Cleaner_Data\\\\premiership_matches.csv'\n\nmaster_df.to_csv(save_path)\n\nloaded_dict[1]['home_df'].columns\n\n","repo_name":"Aidzillafont/Rugby-Recruitment","sub_path":"Data Cleaners/premiership_games_cleaner.py","file_name":"premiership_games_cleaner.py","file_ext":"py","file_size_in_byte":10143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"7424461184","text":"from io import StringIO\nfrom unittest import mock\n\nimport pytest\nfrom django.core.management import CommandError, call_command\n\nfrom ansible_base.models import Authenticator, AuthenticatorUser\n\n\n@pytest.mark.parametrize(\n \"command_args\",\n [None, \"--list\"],\n)\ndef test_authenticators_cli_list_with_tabulate(command_args, local_authenticator, ldap_authenticator):\n \"\"\"\n When we have tabulate, we have to parse a fancy table.\n\n Ensure that table contains the authenticators we expect.\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n if command_args is None:\n call_command('authenticators', stdout=out, stderr=err)\n else:\n call_command('authenticators', command_args, stdout=out, stderr=err)\n\n lines = out.getvalue().strip().splitlines()\n headers = (\"ID\", \"Enabled\", \"Name\", \"Order\")\n\n for header in headers:\n assert header in lines[0]\n\n for line, authenticator in ((2, local_authenticator), (3, ldap_authenticator)):\n auth_line = lines[line]\n auth_line = auth_line.strip('|')\n (auth_id, enabled, name, order) = auth_line.split(' | ')\n\n assert auth_id.strip() == str(authenticator.id)\n assert enabled.strip() == str(authenticator.enabled)\n assert name.strip() == str(authenticator.name)\n assert order.strip() == str(authenticator.order)\n\n\n@pytest.mark.parametrize(\n \"command_args\",\n [None, \"--list\"],\n)\n@mock.patch(\"ansible_base.management.commands.authenticators.HAS_TABULATE\", False)\ndef test_authenticators_cli_list_without_tabulate(command_args, local_authenticator, ldap_authenticator):\n \"\"\"\n When we don't have tabulate, we have to parse a simple table.\n\n Ensure that table contains the authenticators we expect.\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n if command_args is None:\n call_command('authenticators', stdout=out, stderr=err)\n else:\n call_command('authenticators', command_args, stdout=out, stderr=err)\n\n lines = out.getvalue().strip().splitlines()\n headers = (\"ID\", \"Enabled\", \"Name\", \"Order\")\n\n for header in headers:\n assert header in lines[0]\n\n for line, authenticator in ((1, local_authenticator), (2, ldap_authenticator)):\n auth_line = lines[line]\n (auth_id, enabled, name, order) = auth_line.split('\\t')\n\n assert auth_id.strip() == str(authenticator.id)\n assert enabled.strip() == str(authenticator.enabled)\n assert name.strip() == str(authenticator.name)\n assert order.strip() == str(authenticator.order)\n\n\ndef test_authenticators_cli_initialize(django_user_model):\n \"\"\"\n Calling with --initialize will create:\n - An authenticator if there is an admin user\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n # Sanity check:\n assert django_user_model.objects.count() == 0\n\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n 
call_command('authenticators', \"--initialize\", stdout=out, stderr=err)\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 255\n assert \"No admin user exists\" in err.getvalue()\n\n django_user_model.objects.create(username=\"admin\")\n call_command('authenticators', \"--initialize\", stdout=out, stderr=err)\n assert \"Created default local authenticator\" in out.getvalue()\n\n\ndef test_authenticators_cli_initialize_pre_existing(django_user_model, local_authenticator, admin_user):\n \"\"\"\n What if we already have an admin user?\n\n In this case, the command should do nothing on --initialize.\n \"\"\"\n out = StringIO()\n err = StringIO()\n\n # Sanity check:\n assert django_user_model.objects.count() == 1\n existing_user = django_user_model.objects.first()\n assert AuthenticatorUser.objects.count() == 0\n\n call_command('authenticators', \"--initialize\", stdout=out, stderr=err)\n\n # Make sure no new user got created.\n assert django_user_model.objects.count() == 1\n assert django_user_model.objects.filter(username=\"admin\").count() == 1\n new_user = django_user_model.objects.first()\n\n # Nothing should have changed\n assert existing_user == new_user\n assert existing_user.date_joined == new_user.date_joined\n assert out.getvalue() == \"\"\n assert err.getvalue() == \"\"\n\n # No AuthenticatorUser should get created in this case\n assert AuthenticatorUser.objects.count() == 0\n\n\n@pytest.mark.parametrize(\n \"start_state, flag, end_state, exp_out, exp_err\",\n [\n pytest.param(False, \"--enable\", True, \"\", \"\", id=\"disabled -> enabled\"),\n pytest.param(False, \"--disable\", False, \"\", \"\", id=\"disabled -> disabled\"),\n pytest.param(True, \"--enable\", True, \"\", \"\", id=\"enabled -> enabled\"),\n pytest.param(True, \"--disable\", False, \"\", \"\", id=\"enabled -> disabled\"),\n ],\n)\ndef test_authenticators_cli_enable_disable(local_authenticator, start_state, flag, end_state, exp_out, exp_err):\n \"\"\"\n Test enabling/disabling an authenticator.\n \"\"\"\n local_authenticator.enabled = start_state\n local_authenticator.save()\n\n out = StringIO()\n err = StringIO()\n\n assert Authenticator.objects.get(id=local_authenticator.id).enabled == start_state\n call_command('authenticators', flag, local_authenticator.id, stdout=out, stderr=err)\n assert Authenticator.objects.get(id=local_authenticator.id).enabled == end_state\n\n assert out.getvalue() == exp_out\n assert err.getvalue() == exp_err\n\n\n@pytest.mark.parametrize(\n \"flag\",\n [\"--enable\", \"--disable\"],\n)\n@pytest.mark.django_db\ndef test_authenticators_cli_enable_disable_nonexisting(flag):\n \"\"\"\n Test enabling/disabling a non-existing authenticator.\n \"\"\"\n\n out = StringIO()\n err = StringIO()\n\n with pytest.raises(CommandError) as e:\n call_command('authenticators', flag, 1337, stdout=out, stderr=err)\n\n assert \"Authenticator 1337 does not exist\" in str(e.value)\n","repo_name":"ansible/django-ansible-base","sub_path":"ansible_base/tests/management/test_authenticators.py","file_name":"test_authenticators.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"18212208455","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 14 16:02:52 2020\r\n\r\n@author: Harry\r\n\"\"\"\r\n# 1 remember me \r\nimport json \r\n#if a username was stored previously, load it\r\n#otherwise prompt the user for one and store it\r\nfilename = 'username.json'\r\ntry:\r\n with open(filename) as f_obj:\r\n username = 
json.load(f_obj)\r\nexcept FileNotFoundError:\r\n username = input(\"What is your name? \")\r\n with open(filename,'w') as f_obj:\r\n json.dump(username,f_obj)\r\n print(\"We will remember you when you come back, \" +username + \" !\")\r\nelse:\r\n print(\"Welcome back, \"+ username+ '!')\r\n\r\nprint()\r\n# 2 refactored version\r\nimport json\r\n\r\ndef get_stored_username():\r\n #if a username has been stored, fetch it\r\n filename = 'username1.json'\r\n try: \r\n with open(filename) as f_obj:\r\n username = json.load(f_obj)\r\n except FileNotFoundError:\r\n return None\r\n else:\r\n return username\r\n \r\n \r\ndef get_new_username():\r\n #prompt the user for a username\r\n username = input(\"What is your name? \")\r\n filename = 'username1.json'\r\n with open(filename,'w') as f_obj:\r\n json.dump(username,f_obj)\r\n return username\r\n\r\n\r\ndef greet_user():\r\n #greet the user by name\r\n username = get_stored_username()\r\n if username:\r\n user_state = input(\"Are you \"+ username+ \"?(Y/N)\")\r\n if user_state =='Y':\r\n print(\"welcome back \"+username+ \"!\")\r\n else:\r\n username = get_new_username()\r\n print(\"We will remember you when you come back, \"+ username+ '!')\r\n else:\r\n username = get_new_username()\r\n print(\"We will remember you when you come back, \"+ username+ '!')\r\ngreet_user()\r\n ","repo_name":"sunstriderLHT/python-start","sub_path":"chapter 10/remeber_me.py","file_name":"remeber_me.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18277626491","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys, inspect\n\nclass Log(object):\n\n\t@staticmethod\n\tdef debug(e):\n\t\tprint >> sys.stderr, str(e)\n\t\t\n\t@staticmethod\n\tdef error(e):\n\t\t# get code traces\n\t\tstack = inspect.stack()\n\n\t\t#reverse the stack list\n\t\tstack = stack[::-1]\n\t\t# remove web.py's logs\n\t\tfor i in range(0, 26):\n\t\t\tstack.pop(0)\n\t\t\n\t\tmsg = ''\n\t\tfor i in stack:\n\t\t\t# get code from the list and trim\n\t\t\tcode = str(i[4][i[5]]).strip()\n\n\t\t\tmsg2 =\t\"\\tFrame:\\t\\t\"+str(i[0])+\"\\n\"\n\t\t\tmsg2 += \"\\tFile:\\t\\t\"+str(i[1])+\"\\n\"\n\t\t\tmsg2 += \"\\tLine:\\t\\t\"+str(i[2])+\"\\n\"\n\t\t\tmsg2 += \"\\tFunc:\\t\\t\"+str(i[3])+\"\\n\"\n\t\t\tmsg2 += \"\\tCode:\\t\\t\"+code+\"\\n\\n\"\n\n\t\t\tmsg = msg + msg2\n\n\t\tmsg = \"zpy error:\\n\"+msg\n\n\t\tif e!='':\n\t\t\tmsg += \"\\tError:\\t\\t\"+str(e)+\"\\n\\n\"\n\t\t\tLog.debug(msg)\t\t\t","repo_name":"ZiTAL/zpy","sub_path":"private/lib/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1897038424","text":"import collections\nimport errno\nimport getpass\nimport glob\nimport os\nimport shutil\nimport socket\nimport stat\nimport sys\nimport tempfile\nimport traceback\nfrom contextlib import contextmanager\nfrom multiprocessing import Process, Queue\n\nimport pytest\n\nimport llnl.util.lock as lk\nimport llnl.util.multiproc as mp\nfrom llnl.util.filesystem import getuid, touch\n\nif sys.platform != \"win32\":\n import fcntl\n\npytestmark = pytest.mark.not_on_windows(\"does not run on windows\")\n\n\n#\n# This test can be run with MPI. 
MPI is \"enabled\" if we can import\n# mpi4py and the number of total MPI processes is greater than 1.\n# Otherwise it just runs as a node-local test.\n#\n# NOTE: MPI mode is different from node-local mode in that node-local\n# mode will spawn its own test processes, while MPI mode assumes you've\n# run this script as a SPMD application. In MPI mode, no additional\n# processes are spawned, and you need to ensure that you mpirun the\n# script with enough processes for all the multiproc_test cases below.\n#\n# If you don't run with enough processes, tests that require more\n# processes than you currently have will be skipped.\n#\nmpi = False\ncomm = None\ntry:\n from mpi4py import MPI\n\n comm = MPI.COMM_WORLD\n if comm.size > 1:\n mpi = True\nexcept ImportError:\n pass\n\n\n\"\"\"This is a list of filesystem locations to test locks in. Paths are\nexpanded so that %u is replaced with the current username. '~' is also\nlegal and will be expanded to the user's home directory.\n\nTests are skipped for directories that don't exist, so you'll need to\nupdate this with the locations of NFS, Lustre, and other mounts on your\nsystem.\n\"\"\"\nlocations = [\n tempfile.gettempdir(),\n os.path.join(\"/nfs/tmp2/\", getpass.getuser()),\n os.path.join(\"/p/lscratch*/\", getpass.getuser()),\n]\n\n\"\"\"This is the longest a failed multiproc test will take.\nBarriers will time out and raise an exception after this interval.\nIn MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.\n\"\"\"\nbarrier_timeout = 5\n\n\"\"\"This is the lock timeout for expected failures.\nThis may need to be higher for some filesystems.\"\"\"\nlock_fail_timeout = 0.1\n\n\ndef make_readable(*paths):\n # TODO: From os.chmod doc:\n # \"Note Although Windows supports chmod(), you can only\n # set the file's read-only flag with it (via the stat.S_IWRITE and\n # stat.S_IREAD constants or a corresponding integer value). All other\n # bits are ignored.\"\n for path in paths:\n if sys.platform != \"win32\":\n mode = 0o555 if os.path.isdir(path) else 0o444\n else:\n mode = stat.S_IREAD\n os.chmod(path, mode)\n\n\ndef make_writable(*paths):\n for path in paths:\n if sys.platform != \"win32\":\n mode = 0o755 if os.path.isdir(path) else 0o744\n else:\n mode = stat.S_IWRITE\n os.chmod(path, mode)\n\n\n@contextmanager\ndef read_only(*paths):\n modes = [os.stat(p).st_mode for p in paths]\n make_readable(*paths)\n\n yield\n\n for path, mode in zip(paths, modes):\n os.chmod(path, mode)\n\n\n@pytest.fixture(scope=\"session\", params=locations)\ndef lock_test_directory(request):\n \"\"\"This fixture causes tests to be executed for many different mounts.\n\n See the ``locations`` dict above for details.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef lock_dir(lock_test_directory):\n parent = next(\n (p for p in glob.glob(lock_test_directory) if os.path.exists(p) and os.access(p, os.W_OK)),\n None,\n )\n if not parent:\n # Skip filesystems that don't exist or aren't writable\n pytest.skip(\"requires filesystem: '%s'\" % lock_test_directory)\n elif mpi and parent == tempfile.gettempdir():\n # Skip local tmp test for MPI runs\n pytest.skip(\"skipping local tmp directory for MPI test.\")\n\n tempdir = None\n if not mpi or comm.rank == 0:\n tempdir = tempfile.mkdtemp(dir=parent)\n if mpi:\n tempdir = comm.bcast(tempdir)\n\n yield tempdir\n\n if mpi:\n # rank 0 may get here before others, in which case it'll try to\n # remove the directory while other processes try to re-create the\n # lock. 
This will give errno 39: directory not empty. Use a\n # barrier to ensure everyone is done first.\n comm.barrier()\n\n if not mpi or comm.rank == 0:\n make_writable(tempdir)\n shutil.rmtree(tempdir)\n\n\n@pytest.fixture\ndef private_lock_path(lock_dir):\n \"\"\"In MPI mode, this is a private lock for each rank in a multiproc test.\n\n For other modes, it is the same as a shared lock.\n \"\"\"\n lock_file = os.path.join(lock_dir, \"lockfile\")\n if mpi:\n lock_file += \".%s\" % comm.rank\n\n yield lock_file\n\n if os.path.exists(lock_file):\n make_writable(lock_dir, lock_file)\n os.unlink(lock_file)\n\n\n@pytest.fixture\ndef lock_path(lock_dir):\n \"\"\"This lock is shared among all processes in a multiproc test.\"\"\"\n lock_file = os.path.join(lock_dir, \"lockfile\")\n\n yield lock_file\n\n if os.path.exists(lock_file):\n make_writable(lock_dir, lock_file)\n os.unlink(lock_file)\n\n\ndef test_poll_interval_generator():\n interval_iter = iter(lk.Lock._poll_interval_generator(_wait_times=[1, 2, 3]))\n intervals = list(next(interval_iter) for i in range(100))\n assert intervals == [1] * 20 + [2] * 40 + [3] * 40\n\n\ndef local_multiproc_test(*functions, **kwargs):\n \"\"\"Order some processes using simple barrier synchronization.\"\"\"\n b = mp.Barrier(len(functions), timeout=barrier_timeout)\n\n args = (b,) + tuple(kwargs.get(\"extra_args\", ()))\n procs = [Process(target=f, args=args, name=f.__name__) for f in functions]\n\n for p in procs:\n p.start()\n\n for p in procs:\n p.join()\n\n assert all(p.exitcode == 0 for p in procs)\n\n\ndef mpi_multiproc_test(*functions):\n \"\"\"SPMD version of multiproc test.\n\n This needs to be run like so:\n\n srun spack test lock\n\n Each process executes its corresponding function. This is different\n from ``multiproc_test`` above, which spawns the processes. This will\n skip tests if there are too few processes to run them.\n \"\"\"\n procs = len(functions)\n if procs > comm.size:\n pytest.skip(\"requires at least %d MPI processes\" % procs)\n\n comm.Barrier() # barrier before each MPI test\n\n include = comm.rank < len(functions)\n subcomm = comm.Split(include)\n\n class subcomm_barrier:\n \"\"\"Stand-in for multiproc barrier for MPI-parallel jobs.\"\"\"\n\n def wait(self):\n subcomm.Barrier()\n\n if include:\n try:\n functions[subcomm.rank](subcomm_barrier())\n except BaseException:\n # aborting is the best we can do for MPI tests without\n # hanging, since we're using MPI barriers. 
This will fail\n # early and it loses the nice pytest output, but at least it\n # gets us a stacktrace on the processes that failed.\n traceback.print_exc()\n comm.Abort()\n subcomm.Free()\n\n comm.Barrier() # barrier after each MPI test.\n\n\n\"\"\"``multiproc_test()`` should be called by tests below.\n``multiproc_test()`` will work for either MPI runs or for local runs.\n\"\"\"\nmultiproc_test = mpi_multiproc_test if mpi else local_multiproc_test\n\n\n#\n# Process snippets below can be composed into tests.\n#\nclass AcquireWrite:\n def __init__(self, lock_path, start=0, length=0):\n self.lock_path = lock_path\n self.start = start\n self.length = length\n\n @property\n def __name__(self):\n return self.__class__.__name__\n\n def __call__(self, barrier):\n lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n lock.acquire_write() # grab exclusive lock\n barrier.wait()\n barrier.wait() # hold the lock until timeout in other procs.\n\n\nclass AcquireRead:\n def __init__(self, lock_path, start=0, length=0):\n self.lock_path = lock_path\n self.start = start\n self.length = length\n\n @property\n def __name__(self):\n return self.__class__.__name__\n\n def __call__(self, barrier):\n lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n lock.acquire_read() # grab shared lock\n barrier.wait()\n barrier.wait() # hold the lock until timeout in other procs.\n\n\nclass TimeoutWrite:\n def __init__(self, lock_path, start=0, length=0):\n self.lock_path = lock_path\n self.start = start\n self.length = length\n\n @property\n def __name__(self):\n return self.__class__.__name__\n\n def __call__(self, barrier):\n lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n barrier.wait() # wait for lock acquire in first process\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n barrier.wait()\n\n\nclass TimeoutRead:\n def __init__(self, lock_path, start=0, length=0):\n self.lock_path = lock_path\n self.start = start\n self.length = length\n\n @property\n def __name__(self):\n return self.__class__.__name__\n\n def __call__(self, barrier):\n lock = lk.Lock(self.lock_path, start=self.start, length=self.length)\n barrier.wait() # wait for lock acquire in first process\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait()\n\n\n#\n# Test that exclusive locks on other processes time out when an\n# exclusive lock is held.\n#\ndef test_write_lock_timeout_on_write(lock_path):\n multiproc_test(AcquireWrite(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_write_2(lock_path):\n multiproc_test(AcquireWrite(lock_path), TimeoutWrite(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_write_3(lock_path):\n multiproc_test(\n AcquireWrite(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n )\n\n\ndef test_write_lock_timeout_on_write_ranges(lock_path):\n multiproc_test(AcquireWrite(lock_path, 0, 1), TimeoutWrite(lock_path, 0, 1))\n\n\ndef test_write_lock_timeout_on_write_ranges_2(lock_path):\n multiproc_test(\n AcquireWrite(lock_path, 0, 64),\n AcquireWrite(lock_path, 65, 1),\n TimeoutWrite(lock_path, 0, 1),\n TimeoutWrite(lock_path, 63, 1),\n )\n\n\ndef test_write_lock_timeout_on_write_ranges_3(lock_path):\n multiproc_test(\n AcquireWrite(lock_path, 0, 1),\n AcquireWrite(lock_path, 1, 1),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n )\n\n\ndef 
test_write_lock_timeout_on_write_ranges_4(lock_path):\n multiproc_test(\n AcquireWrite(lock_path, 0, 1),\n AcquireWrite(lock_path, 1, 1),\n AcquireWrite(lock_path, 2, 456),\n AcquireWrite(lock_path, 500, 64),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n )\n\n\n#\n# Test that shared locks on other processes time out when an\n# exclusive lock is held.\n#\ndef test_read_lock_timeout_on_write(lock_path):\n multiproc_test(AcquireWrite(lock_path), TimeoutRead(lock_path))\n\n\ndef test_read_lock_timeout_on_write_2(lock_path):\n multiproc_test(AcquireWrite(lock_path), TimeoutRead(lock_path), TimeoutRead(lock_path))\n\n\ndef test_read_lock_timeout_on_write_3(lock_path):\n multiproc_test(\n AcquireWrite(lock_path),\n TimeoutRead(lock_path),\n TimeoutRead(lock_path),\n TimeoutRead(lock_path),\n )\n\n\ndef test_read_lock_timeout_on_write_ranges(lock_path):\n \"\"\"small write lock, read whole file.\"\"\"\n multiproc_test(AcquireWrite(lock_path, 0, 1), TimeoutRead(lock_path))\n\n\ndef test_read_lock_timeout_on_write_ranges_2(lock_path):\n \"\"\"small write lock, small read lock\"\"\"\n multiproc_test(AcquireWrite(lock_path, 0, 1), TimeoutRead(lock_path, 0, 1))\n\n\ndef test_read_lock_timeout_on_write_ranges_3(lock_path):\n \"\"\"two write locks, overlapping read locks\"\"\"\n multiproc_test(\n AcquireWrite(lock_path, 0, 1),\n AcquireWrite(lock_path, 64, 128),\n TimeoutRead(lock_path, 0, 1),\n TimeoutRead(lock_path, 128, 256),\n )\n\n\n#\n# Test that exclusive locks time out when shared locks are held.\n#\ndef test_write_lock_timeout_on_read(lock_path):\n multiproc_test(AcquireRead(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_read_2(lock_path):\n multiproc_test(AcquireRead(lock_path), TimeoutWrite(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_read_3(lock_path):\n multiproc_test(\n AcquireRead(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n )\n\n\ndef test_write_lock_timeout_on_read_ranges(lock_path):\n multiproc_test(AcquireRead(lock_path, 0, 1), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_on_read_ranges_2(lock_path):\n multiproc_test(AcquireRead(lock_path, 0, 1), TimeoutWrite(lock_path, 0, 1))\n\n\ndef test_write_lock_timeout_on_read_ranges_3(lock_path):\n multiproc_test(\n AcquireRead(lock_path, 0, 1),\n AcquireRead(lock_path, 10, 1),\n TimeoutWrite(lock_path, 0, 1),\n TimeoutWrite(lock_path, 10, 1),\n )\n\n\ndef test_write_lock_timeout_on_read_ranges_4(lock_path):\n multiproc_test(\n AcquireRead(lock_path, 0, 64),\n TimeoutWrite(lock_path, 10, 1),\n TimeoutWrite(lock_path, 32, 1),\n )\n\n\ndef test_write_lock_timeout_on_read_ranges_5(lock_path):\n multiproc_test(\n AcquireRead(lock_path, 64, 128),\n TimeoutWrite(lock_path, 65, 1),\n TimeoutWrite(lock_path, 127, 1),\n TimeoutWrite(lock_path, 90, 10),\n )\n\n\n#\n# Test that exclusive locks time out while lots of shared locks are held.\n#\ndef test_write_lock_timeout_with_multiple_readers_2_1(lock_path):\n multiproc_test(AcquireRead(lock_path), AcquireRead(lock_path), TimeoutWrite(lock_path))\n\n\ndef test_write_lock_timeout_with_multiple_readers_2_2(lock_path):\n multiproc_test(\n AcquireRead(lock_path),\n AcquireRead(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n )\n\n\ndef test_write_lock_timeout_with_multiple_readers_3_1(lock_path):\n multiproc_test(\n AcquireRead(lock_path),\n AcquireRead(lock_path),\n AcquireRead(lock_path),\n TimeoutWrite(lock_path),\n )\n\n\ndef 
test_write_lock_timeout_with_multiple_readers_3_2(lock_path):\n multiproc_test(\n AcquireRead(lock_path),\n AcquireRead(lock_path),\n AcquireRead(lock_path),\n TimeoutWrite(lock_path),\n TimeoutWrite(lock_path),\n )\n\n\ndef test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):\n multiproc_test(\n AcquireRead(lock_path, 0, 10), AcquireRead(lock_path, 2, 10), TimeoutWrite(lock_path, 5, 5)\n )\n\n\ndef test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):\n multiproc_test(\n AcquireRead(lock_path, 0, 10),\n AcquireRead(lock_path, 5, 15),\n TimeoutWrite(lock_path, 0, 1),\n TimeoutWrite(lock_path, 11, 3),\n TimeoutWrite(lock_path, 7, 1),\n )\n\n\ndef test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):\n multiproc_test(\n AcquireRead(lock_path, 0, 5),\n AcquireRead(lock_path, 5, 5),\n AcquireRead(lock_path, 10, 5),\n TimeoutWrite(lock_path, 0, 15),\n )\n\n\ndef test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):\n multiproc_test(\n AcquireRead(lock_path, 0, 5),\n AcquireRead(lock_path, 5, 5),\n AcquireRead(lock_path, 10, 5),\n TimeoutWrite(lock_path, 3, 10),\n TimeoutWrite(lock_path, 5, 1),\n )\n\n\n@pytest.mark.skipif(getuid() == 0, reason=\"user is root\")\ndef test_read_lock_on_read_only_lockfile(lock_dir, lock_path):\n \"\"\"read-only directory, read-only lockfile.\"\"\"\n touch(lock_path)\n with read_only(lock_path, lock_dir):\n lock = lk.Lock(lock_path)\n\n with lk.ReadTransaction(lock):\n pass\n\n with pytest.raises(lk.LockROFileError):\n with lk.WriteTransaction(lock):\n pass\n\n\ndef test_read_lock_read_only_dir_writable_lockfile(lock_dir, lock_path):\n \"\"\"read-only directory, writable lockfile.\"\"\"\n touch(lock_path)\n with read_only(lock_dir):\n lock = lk.Lock(lock_path)\n\n with lk.ReadTransaction(lock):\n pass\n\n with lk.WriteTransaction(lock):\n pass\n\n\n@pytest.mark.skipif(False if sys.platform == \"win32\" else getuid() == 0, reason=\"user is root\")\ndef test_read_lock_no_lockfile(lock_dir, lock_path):\n \"\"\"read-only directory, no lockfile (so can't create).\"\"\"\n with read_only(lock_dir):\n lock = lk.Lock(lock_path)\n\n with pytest.raises(lk.CantCreateLockError):\n with lk.ReadTransaction(lock):\n pass\n\n with pytest.raises(lk.CantCreateLockError):\n with lk.WriteTransaction(lock):\n pass\n\n\ndef test_upgrade_read_to_write(private_lock_path):\n \"\"\"Test that a read lock can be upgraded to a write lock.\n\n Note that to upgrade a read lock to a write lock, you have to be the\n only holder of a read lock. Client code needs to coordinate that for\n shared locks. 
For this test, we use a private lock just to test that an\n upgrade is possible.\n \"\"\"\n # ensure lock file exists the first time, so we open it read-only\n # to begin with.\n touch(private_lock_path)\n\n lock = lk.Lock(private_lock_path)\n assert lock._reads == 0\n assert lock._writes == 0\n\n lock.acquire_read()\n assert lock._reads == 1\n assert lock._writes == 0\n assert lock._file.mode == \"r+\"\n\n lock.acquire_write()\n assert lock._reads == 1\n assert lock._writes == 1\n assert lock._file.mode == \"r+\"\n\n lock.release_write()\n assert lock._reads == 1\n assert lock._writes == 0\n assert lock._file.mode == \"r+\"\n\n lock.release_read()\n assert lock._reads == 0\n assert lock._writes == 0\n assert lock._file is None\n\n\ndef test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):\n \"\"\"Test that read-only file can be read-locked but not write-locked.\"\"\"\n # ensure lock file exists the first time\n touch(private_lock_path)\n\n # open it read-only to begin with.\n with read_only(private_lock_path):\n lock = lk.Lock(private_lock_path)\n assert lock._reads == 0\n assert lock._writes == 0\n\n lock.acquire_read()\n assert lock._reads == 1\n assert lock._writes == 0\n assert lock._file.mode == \"r\"\n\n # upgrade to write here\n with pytest.raises(lk.LockROFileError):\n lock.acquire_write()\n\n # TODO: lk.FILE_TRACKER does not release private_lock_path\n lk.FILE_TRACKER.release_by_stat(os.stat(private_lock_path))\n\n\nclass ComplexAcquireAndRelease:\n def __init__(self, lock_path):\n self.lock_path = lock_path\n\n def p1(self, barrier):\n lock = lk.Lock(self.lock_path)\n\n lock.acquire_write()\n barrier.wait() # ---------------------------------------- 1\n # others test timeout\n barrier.wait() # ---------------------------------------- 2\n lock.release_write() # release and others acquire read\n barrier.wait() # ---------------------------------------- 3\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 4\n lock.release_read()\n barrier.wait() # ---------------------------------------- 5\n\n # p2 upgrades read to write\n barrier.wait() # ---------------------------------------- 6\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 7\n # p2 releases write and read\n barrier.wait() # ---------------------------------------- 8\n\n # p3 acquires read\n barrier.wait() # ---------------------------------------- 9\n # p3 upgrades read to write\n barrier.wait() # ---------------------------------------- 10\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 11\n # p3 releases locks\n barrier.wait() # ---------------------------------------- 12\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 13\n lock.release_read()\n\n def p2(self, barrier):\n lock = lk.Lock(self.lock_path)\n\n # p1 acquires write\n barrier.wait() # ---------------------------------------- 1\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 2\n 
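# p1 released its write lock at barrier 2, so a shared read now succeeds\n 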
lock.acquire_read()\n barrier.wait() # ---------------------------------------- 3\n # p1 tests shared read\n barrier.wait() # ---------------------------------------- 4\n # others release reads\n barrier.wait() # ---------------------------------------- 5\n\n lock.acquire_write() # upgrade read to write\n barrier.wait() # ---------------------------------------- 6\n # others test timeout\n barrier.wait() # ---------------------------------------- 7\n lock.release_write() # release read AND write (need both)\n lock.release_read()\n barrier.wait() # ---------------------------------------- 8\n\n # p3 acquires read\n barrier.wait() # ---------------------------------------- 9\n # p3 upgrades read to write\n barrier.wait() # ---------------------------------------- 10\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 11\n # p3 releases locks\n barrier.wait() # ---------------------------------------- 12\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 13\n lock.release_read()\n\n def p3(self, barrier):\n lock = lk.Lock(self.lock_path)\n\n # p1 acquires write\n barrier.wait() # ---------------------------------------- 1\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 2\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 3\n # p1 tests shared read\n barrier.wait() # ---------------------------------------- 4\n lock.release_read()\n barrier.wait() # ---------------------------------------- 5\n\n # p2 upgrades read to write\n barrier.wait() # ---------------------------------------- 6\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_write(lock_fail_timeout)\n with pytest.raises(lk.LockTimeoutError):\n lock.acquire_read(lock_fail_timeout)\n barrier.wait() # ---------------------------------------- 7\n # p2 releases write & read\n barrier.wait() # ---------------------------------------- 8\n\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 9\n lock.acquire_write()\n barrier.wait() # ---------------------------------------- 10\n # others test timeout\n barrier.wait() # ---------------------------------------- 11\n lock.release_read() # release read AND write in opposite\n lock.release_write() # order from before on p2\n barrier.wait() # ---------------------------------------- 12\n lock.acquire_read()\n barrier.wait() # ---------------------------------------- 13\n lock.release_read()\n\n\n#\n# Longer test case that ensures locks are reusable. 
Ordering is\n# enforced by barriers throughout -- steps are shown with numbers.\n#\ndef test_complex_acquire_and_release_chain(lock_path):\n test_chain = ComplexAcquireAndRelease(lock_path)\n multiproc_test(test_chain.p1, test_chain.p2, test_chain.p3)\n\n\nclass AssertLock(lk.Lock):\n \"\"\"Test lock class that marks acquire/release events.\"\"\"\n\n def __init__(self, lock_path, vals):\n super().__init__(lock_path)\n self.vals = vals\n\n # assert hooks for subclasses\n assert_acquire_read = lambda self: None\n assert_acquire_write = lambda self: None\n assert_release_read = lambda self: None\n assert_release_write = lambda self: None\n\n def acquire_read(self, timeout=None):\n self.assert_acquire_read()\n result = super().acquire_read(timeout)\n self.vals[\"acquired_read\"] = True\n return result\n\n def acquire_write(self, timeout=None):\n self.assert_acquire_write()\n result = super().acquire_write(timeout)\n self.vals[\"acquired_write\"] = True\n return result\n\n def release_read(self, release_fn=None):\n self.assert_release_read()\n result = super().release_read(release_fn)\n self.vals[\"released_read\"] = True\n return result\n\n def release_write(self, release_fn=None):\n self.assert_release_write()\n result = super().release_write(release_fn)\n self.vals[\"released_write\"] = True\n return result\n\n\n@pytest.mark.parametrize(\n \"transaction,type\", [(lk.ReadTransaction, \"read\"), (lk.WriteTransaction, \"write\")]\n)\ndef test_transaction(lock_path, transaction, type):\n class MockLock(AssertLock):\n def assert_acquire_read(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_read(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_acquire_write(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_write(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def enter_fn():\n # assert enter_fn is called while lock is held\n assert vals[\"acquired_%s\" % type]\n vals[\"entered_fn\"] = True\n\n def exit_fn(t, v, tb):\n # assert exit_fn is called while lock is held\n assert not vals[\"released_%s\" % type]\n vals[\"exited_fn\"] = True\n vals[\"exception\"] = t or v or tb\n\n vals = collections.defaultdict(lambda: False)\n lock = MockLock(lock_path, vals)\n\n with transaction(lock, acquire=enter_fn, release=exit_fn):\n assert vals[\"acquired_%s\" % type]\n assert not vals[\"released_%s\" % type]\n\n assert vals[\"entered_fn\"]\n assert vals[\"exited_fn\"]\n assert vals[\"acquired_%s\" % type]\n assert vals[\"released_%s\" % type]\n assert not vals[\"exception\"]\n\n\n@pytest.mark.parametrize(\n \"transaction,type\", [(lk.ReadTransaction, \"read\"), (lk.WriteTransaction, \"write\")]\n)\ndef test_transaction_with_exception(lock_path, transaction, type):\n class MockLock(AssertLock):\n def assert_acquire_read(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_read(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_acquire_write(self):\n assert not vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def assert_release_write(self):\n assert vals[\"entered_fn\"]\n assert not vals[\"exited_fn\"]\n\n def enter_fn():\n assert vals[\"acquired_%s\" % type]\n vals[\"entered_fn\"] = True\n\n def exit_fn(t, v, tb):\n assert not vals[\"released_%s\" % type]\n vals[\"exited_fn\"] = True\n vals[\"exception\"] = t or v or tb\n return exit_result\n\n exit_result = False\n vals = 
collections.defaultdict(lambda: False)\n lock = MockLock(lock_path, vals)\n\n with pytest.raises(Exception):\n with transaction(lock, acquire=enter_fn, release=exit_fn):\n raise Exception()\n\n assert vals[\"entered_fn\"]\n assert vals[\"exited_fn\"]\n assert vals[\"exception\"]\n\n # test suppression of exceptions from exit_fn\n exit_result = True\n vals.clear()\n\n # should not raise now.\n with transaction(lock, acquire=enter_fn, release=exit_fn):\n raise Exception()\n\n assert vals[\"entered_fn\"]\n assert vals[\"exited_fn\"]\n assert vals[\"exception\"]\n\n\n@pytest.mark.parametrize(\n \"transaction,type\", [(lk.ReadTransaction, \"read\"), (lk.WriteTransaction, \"write\")]\n)\ndef test_transaction_with_context_manager(lock_path, transaction, type):\n class MockLock(AssertLock):\n def assert_acquire_read(self):\n assert not vals[\"entered_ctx\"]\n assert not vals[\"exited_ctx\"]\n\n def assert_release_read(self):\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n\n def assert_acquire_write(self):\n assert not vals[\"entered_ctx\"]\n assert not vals[\"exited_ctx\"]\n\n def assert_release_write(self):\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n\n class TestContextManager:\n def __enter__(self):\n vals[\"entered_ctx\"] = True\n\n def __exit__(self, t, v, tb):\n assert not vals[\"released_%s\" % type]\n vals[\"exited_ctx\"] = True\n vals[\"exception_ctx\"] = t or v or tb\n return exit_ctx_result\n\n def exit_fn(t, v, tb):\n assert not vals[\"released_%s\" % type]\n vals[\"exited_fn\"] = True\n vals[\"exception_fn\"] = t or v or tb\n return exit_fn_result\n\n exit_fn_result, exit_ctx_result = False, False\n vals = collections.defaultdict(lambda: False)\n lock = MockLock(lock_path, vals)\n\n with transaction(lock, acquire=TestContextManager, release=exit_fn):\n pass\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert vals[\"exited_fn\"]\n assert not vals[\"exception_ctx\"]\n assert not vals[\"exception_fn\"]\n\n vals.clear()\n with transaction(lock, acquire=TestContextManager):\n pass\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert not vals[\"exited_fn\"]\n assert not vals[\"exception_ctx\"]\n assert not vals[\"exception_fn\"]\n\n # below are tests for exceptions with and without suppression\n def assert_ctx_and_fn_exception(raises=True):\n vals.clear()\n\n if raises:\n with pytest.raises(Exception):\n with transaction(lock, acquire=TestContextManager, release=exit_fn):\n raise Exception()\n else:\n with transaction(lock, acquire=TestContextManager, release=exit_fn):\n raise Exception()\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert vals[\"exited_fn\"]\n assert vals[\"exception_ctx\"]\n assert vals[\"exception_fn\"]\n\n def assert_only_ctx_exception(raises=True):\n vals.clear()\n\n if raises:\n with pytest.raises(Exception):\n with transaction(lock, acquire=TestContextManager):\n raise Exception()\n else:\n with transaction(lock, acquire=TestContextManager):\n raise Exception()\n\n assert vals[\"entered_ctx\"]\n assert vals[\"exited_ctx\"]\n assert not vals[\"exited_fn\"]\n assert vals[\"exception_ctx\"]\n assert not vals[\"exception_fn\"]\n\n # no suppression\n assert_ctx_and_fn_exception(raises=True)\n assert_only_ctx_exception(raises=True)\n\n # suppress exception only in function\n exit_fn_result, exit_ctx_result = True, False\n assert_ctx_and_fn_exception(raises=False)\n assert_only_ctx_exception(raises=True)\n\n # suppress exception only in context\n exit_fn_result, exit_ctx_result = 
False, True\n assert_ctx_and_fn_exception(raises=False)\n assert_only_ctx_exception(raises=False)\n\n # suppress exception in function and context\n exit_fn_result, exit_ctx_result = True, True\n assert_ctx_and_fn_exception(raises=False)\n assert_only_ctx_exception(raises=False)\n\n\ndef test_nested_write_transaction(lock_path):\n \"\"\"Ensure that the outermost write transaction writes.\"\"\"\n\n def write(t, v, tb):\n vals[\"wrote\"] = True\n\n vals = collections.defaultdict(lambda: False)\n lock = AssertLock(lock_path, vals)\n\n # write/write\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n # read/write\n vals.clear()\n with lk.ReadTransaction(lock):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n # write/read/write\n vals.clear()\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n with lk.ReadTransaction(lock):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n # read/write/read/write\n vals.clear()\n with lk.ReadTransaction(lock):\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n with lk.ReadTransaction(lock):\n assert not vals[\"wrote\"]\n with lk.WriteTransaction(lock, release=write):\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert not vals[\"wrote\"]\n assert vals[\"wrote\"]\n\n\ndef test_nested_reads(lock_path):\n \"\"\"Ensure that write transactions won't re-read data.\"\"\"\n\n def read():\n vals[\"read\"] += 1\n\n vals = collections.defaultdict(lambda: 0)\n lock = AssertLock(lock_path, vals)\n\n # read/read\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # write/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # read/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # write/read/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n # read/write/read/write\n vals.clear()\n assert vals[\"read\"] == 0\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.ReadTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n with lk.WriteTransaction(lock, acquire=read):\n assert vals[\"read\"] == 1\n\n\nclass LockDebugOutput:\n def __init__(self, lock_path):\n self.lock_path = lock_path\n self.host = socket.gethostname()\n\n def p1(self, barrier, q1, q2):\n # exchange pids\n p1_pid = os.getpid()\n q1.put(p1_pid)\n p2_pid = q2.get()\n\n # set up lock\n lock = lk.Lock(self.lock_path, debug=True)\n\n with 
lk.WriteTransaction(lock):\n # p1 takes write lock and writes pid/host to file\n barrier.wait() # ------------------------------------ 1\n\n assert lock.pid == p1_pid\n assert lock.host == self.host\n\n # wait for p2 to verify contents of file\n barrier.wait() # ---------------------------------------- 2\n\n # wait for p2 to take a write lock\n barrier.wait() # ---------------------------------------- 3\n\n # verify pid/host info again\n with lk.ReadTransaction(lock):\n assert lock.old_pid == p1_pid\n assert lock.old_host == self.host\n\n assert lock.pid == p2_pid\n assert lock.host == self.host\n\n barrier.wait() # ---------------------------------------- 4\n\n def p2(self, barrier, q1, q2):\n # exchange pids\n p2_pid = os.getpid()\n p1_pid = q1.get()\n q2.put(p2_pid)\n\n # set up lock\n lock = lk.Lock(self.lock_path, debug=True)\n\n # p1 takes write lock and writes pid/host to file\n barrier.wait() # ---------------------------------------- 1\n\n # verify that p1 wrote information to lock file\n with lk.ReadTransaction(lock):\n assert lock.pid == p1_pid\n assert lock.host == self.host\n\n barrier.wait() # ---------------------------------------- 2\n\n # take a write lock on the file and verify pid/host info\n with lk.WriteTransaction(lock):\n assert lock.old_pid == p1_pid\n assert lock.old_host == self.host\n\n assert lock.pid == p2_pid\n assert lock.host == self.host\n\n barrier.wait() # ------------------------------------ 3\n\n # wait for p1 to verify pid/host info\n barrier.wait() # ---------------------------------------- 4\n\n\ndef test_lock_debug_output(lock_path):\n test_debug = LockDebugOutput(lock_path)\n q1, q2 = Queue(), Queue()\n local_multiproc_test(test_debug.p2, test_debug.p1, extra_args=(q1, q2))\n\n\ndef test_lock_with_no_parent_directory(tmpdir):\n \"\"\"Make sure locks work even when their parent directory does not exist.\"\"\"\n with tmpdir.as_cwd():\n lock = lk.Lock(\"foo/bar/baz/lockfile\")\n with lk.WriteTransaction(lock):\n pass\n\n\ndef test_lock_in_current_directory(tmpdir):\n \"\"\"Make sure locks work even when their parent directory does not exist.\"\"\"\n with tmpdir.as_cwd():\n # test we can create a lock in the current directory\n lock = lk.Lock(\"lockfile\")\n for i in range(10):\n with lk.ReadTransaction(lock):\n pass\n with lk.WriteTransaction(lock):\n pass\n\n # and that we can do the same thing after it's already there\n lock = lk.Lock(\"lockfile\")\n for i in range(10):\n with lk.ReadTransaction(lock):\n pass\n with lk.WriteTransaction(lock):\n pass\n\n\ndef test_attempts_str():\n assert lk._attempts_str(0, 0) == \"\"\n assert lk._attempts_str(0.12, 1) == \"\"\n assert lk._attempts_str(12.345, 2) == \" after 12.345s and 2 attempts\"\n\n\ndef test_lock_str():\n lock = lk.Lock(\"lockfile\")\n lockstr = str(lock)\n assert \"lockfile[0:0]\" in lockstr\n assert \"timeout=None\" in lockstr\n assert \"#reads=0, #writes=0\" in lockstr\n\n\ndef test_downgrade_write_okay(tmpdir):\n \"\"\"Test the lock write-to-read downgrade operation.\"\"\"\n with tmpdir.as_cwd():\n lock = lk.Lock(\"lockfile\")\n lock.acquire_write()\n lock.downgrade_write_to_read()\n assert lock._reads == 1\n assert lock._writes == 0\n lock.release_read()\n\n\ndef test_downgrade_write_fails(tmpdir):\n \"\"\"Test failing the lock write-to-read downgrade operation.\"\"\"\n with tmpdir.as_cwd():\n lock = lk.Lock(\"lockfile\")\n lock.acquire_read()\n msg = \"Cannot downgrade lock from write to read on file: lockfile\"\n with pytest.raises(lk.LockDowngradeError, match=msg):\n 
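# only a write lock can be downgraded; this lock holds just a read lock\n 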
lock.downgrade_write_to_read()\n lock.release_read()\n\n\n@pytest.mark.parametrize(\n \"err_num,err_msg\",\n [\n (errno.EACCES, \"Fake EACCES error\"),\n (errno.EAGAIN, \"Fake EAGAIN error\"),\n (errno.ENOENT, \"Fake ENOENT error\"),\n ],\n)\ndef test_poll_lock_exception(tmpdir, monkeypatch, err_num, err_msg):\n \"\"\"Test poll lock exception handling.\"\"\"\n\n def _lockf(fd, cmd, len, start, whence):\n raise IOError(err_num, err_msg)\n\n with tmpdir.as_cwd():\n lockfile = \"lockfile\"\n lock = lk.Lock(lockfile)\n lock.acquire_read()\n\n monkeypatch.setattr(fcntl, \"lockf\", _lockf)\n\n if err_num in [errno.EAGAIN, errno.EACCES]:\n assert not lock._poll_lock(fcntl.LOCK_EX)\n else:\n with pytest.raises(IOError, match=err_msg):\n lock._poll_lock(fcntl.LOCK_EX)\n\n monkeypatch.undo()\n lock.release_read()\n\n\ndef test_upgrade_read_okay(tmpdir):\n \"\"\"Test the lock read-to-write upgrade operation.\"\"\"\n with tmpdir.as_cwd():\n lock = lk.Lock(\"lockfile\")\n lock.acquire_read()\n lock.upgrade_read_to_write()\n assert lock._reads == 0\n assert lock._writes == 1\n lock.release_write()\n\n\ndef test_upgrade_read_fails(tmpdir):\n \"\"\"Test failing the lock read-to-write upgrade operation.\"\"\"\n with tmpdir.as_cwd():\n lock = lk.Lock(\"lockfile\")\n lock.acquire_write()\n msg = \"Cannot upgrade lock from read to write on file: lockfile\"\n with pytest.raises(lk.LockUpgradeError, match=msg):\n lock.upgrade_read_to_write()\n lock.release_write()\n","repo_name":"spack/spack","sub_path":"lib/spack/spack/test/llnl/util/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":41908,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"35382922303","text":"import logging\nfrom typing import List\n\nimport hydra\nimport torch\nfrom omegaconf import DictConfig\nfrom torch import nn\nfrom torch.nn import MSELoss\n\nfrom ..utils import (add_four_fourvectors_squared, add_fourvectors,\n add_fourvectors_squared, init_transforms)\n\n\nclass WeightedMSELoss(nn.Module):\n def __init__(self, weights):\n super().__init__()\n self.weights = weights\n\n def forward(self, pred, targets):\n diff_sq = (pred - targets) ** 2 / targets.shape[0]\n return torch.sum(self.weights * diff_sq)\n\n\nclass HiggsLoss(nn.Module):\n def __init__(self, targets: List[str], alphas: List[float] = None, use_square=True, output_mean=None, output_std=None, target_mean=None, target_std=None):\n super().__init__()\n self.targets = targets\n self.use_square = use_square\n _, self.output_transform, self.target_transform = init_transforms(\n fit_transforms=False, output_mean=output_mean, output_std=output_std, target_mean=target_mean, target_std=target_std)\n if alphas is not None:\n self.loss = WeightedMSELoss(alphas)\n else:\n self.loss = MSELoss()\n\n def forward(self, outputs: torch.Tensor, targets: torch.Tensor, attributes: torch.Tensor):\n self.output_transform.to(outputs.device)\n self.target_transform.to(outputs.device)\n outputs = self.output_transform.inverse_transform(outputs)\n\n if self.use_square:\n Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_squared_pred, Wbx_pred, Wby_pred, Wbz_pred, Wbm_squared_pred, Hx_pred, Hy_pred, Hz_pred, Hm_squared_pred = calc_tree_squared(\n outputs, attributes)\n pred_map = {'Na_Genx': outputs[:, 0], 'Na_Geny': outputs[:, 1], 'Na_Genz': outputs[:, 2], 'Nb_Genx': Nbx_pred,\n 'Nb_Geny': Nby_pred, 'Nb_Genz': outputs[:, 3], 'Wa_Genx': Wax_pred, 'Wa_Geny': Way_pred, 'Wa_Genz': Waz_pred, 'Wa_Genm_squared': Wam_squared_pred, 
'Wb_Genx': Wbx_pred, 'Wb_Geny': Wby_pred, 'Wb_Genz': Wbz_pred, 'Wb_Genm_squared': Wbm_squared_pred, 'H_Genx': Hx_pred, 'H_Geny': Hy_pred, 'H_Genz': Hz_pred, 'H_Genm_squared': Hm_squared_pred}\n else:\n Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_pred, Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred, Hx_pred, Hy_pred, Hz_pred, Hm_pred = calc_tree(\n outputs, attributes)\n pred_map = {'Na_Genx': outputs[:, 0], 'Na_Geny': outputs[:, 1], 'Na_Genz': outputs[:, 2], 'Nb_Genx': Nbx_pred,\n 'Nb_Geny': Nby_pred, 'Nb_Genz': outputs[:, 3], 'Wa_Genx': Wax_pred, 'Wa_Geny': Way_pred, 'Wa_Genz': Waz_pred, 'Wa_Genm': Wam_pred, 'Wb_Genx': Wbx_pred, 'Wb_Geny': Wby_pred, 'Wb_Genz': Wbz_pred, 'Wb_Genm': Wbm_pred, 'H_Genx': Hx_pred, 'H_Geny': Hy_pred, 'H_Genz': Hz_pred, 'H_Genm': Hm_pred}\n\n pred = torch.stack([pred_map[t] for t in self.targets], dim=1)\n pred = self.target_transform(pred)\n return self.loss(pred, targets)\n\n\ndef calc_tree(outputs: torch.Tensor, attributes: torch.Tensor):\n # Depends on order of outputs and attributes in dataset config.\n METx = attributes[:, 0]\n METy = attributes[:, 1]\n Lax_vis = attributes[:, 2]\n Lay_vis = attributes[:, 3]\n Laz_vis = attributes[:, 4]\n Lam_vis = attributes[:, 5]\n Lbx_vis = attributes[:, 6]\n Lby_vis = attributes[:, 7]\n Lbz_vis = attributes[:, 8]\n Lbm_vis = attributes[:, 9]\n Nax_pred = outputs[:, 0]\n Nay_pred = outputs[:, 1]\n Naz_pred = outputs[:, 2]\n Nam_pred = torch.zeros_like(Nax_pred) # Approximate 0 neutrino mass.\n Nbx_pred = METx - Nax_pred\n Nby_pred = METy - Nay_pred\n Nbz_pred = outputs[:, 3]\n Nbm_pred = torch.zeros_like(Nbx_pred) # Approximate 0 neutrino mass.\n Wax_pred, Way_pred, Waz_pred, Wam_pred = add_fourvectors(\n Nax_pred, Nay_pred, Naz_pred, Nam_pred, Lax_vis, Lay_vis, Laz_vis, Lam_vis)\n Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred = add_fourvectors(\n Nbx_pred, Nby_pred, Nbz_pred, Nbm_pred, Lbx_vis, Lby_vis, Lbz_vis, Lbm_vis)\n Hx_pred, Hy_pred, Hz_pred, Hm_pred = add_fourvectors(Wax_pred, Way_pred, Waz_pred, Wam_pred,\n Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred)\n return Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_pred, Wbx_pred, Wby_pred, Wbz_pred, Wbm_pred, Hx_pred, Hy_pred, Hz_pred, Hm_pred\n\n\ndef calc_tree_squared(outputs: torch.Tensor, attributes: torch.Tensor):\n # Depends on order of outputs and attributes in dataset config.\n METx = attributes[:, 0]\n METy = attributes[:, 1]\n Lax_vis = attributes[:, 2]\n Lay_vis = attributes[:, 3]\n Laz_vis = attributes[:, 4]\n LaE_vis = attributes[:, 5]\n Lbx_vis = attributes[:, 6]\n Lby_vis = attributes[:, 7]\n Lbz_vis = attributes[:, 8]\n LbE_vis = attributes[:, 9]\n Nax_pred = outputs[:, 0]\n Nay_pred = outputs[:, 1]\n Naz_pred = outputs[:, 2]\n NaE_pred = (Nax_pred**2 + Nay_pred**2 + Naz_pred**2)**0.5\n Nbx_pred = METx - Nax_pred\n Nby_pred = METy - Nay_pred\n Nbz_pred = outputs[:, 3]\n NbE_pred = (Nbx_pred**2 + Nby_pred**2 + Nbz_pred**2)**0.5\n Wax_pred, Way_pred, Waz_pred, Wam_squared_pred = add_fourvectors_squared(\n Nax_pred, Nay_pred, Naz_pred, NaE_pred, Lax_vis, Lay_vis, Laz_vis, LaE_vis)\n Wbx_pred, Wby_pred, Wbz_pred, Wbm_squared_pred = add_fourvectors_squared(\n Nbx_pred, Nby_pred, Nbz_pred, NbE_pred, Lbx_vis, Lby_vis, Lbz_vis, LbE_vis)\n Hx_pred, Hy_pred, Hz_pred, Hm_squared_pred = add_four_fourvectors_squared(\n Nax_pred, Nay_pred, Naz_pred, NaE_pred, Lax_vis, Lay_vis, Laz_vis, LaE_vis, Nbx_pred, Nby_pred, Nbz_pred, NbE_pred, Lbx_vis, Lby_vis, Lbz_vis, LbE_vis)\n return Nbx_pred, Nby_pred, Wax_pred, Way_pred, Waz_pred, Wam_squared_pred, Wbx_pred, Wby_pred, 
Wbz_pred, Wbm_squared_pred, Hx_pred, Hy_pred, Hz_pred, Hm_squared_pred\n\n\n@hydra.main(config_path=\"../../configs\", config_name=\"config\")\ndef main(cfg: DictConfig):\n feature_transform, output_transform, target_transform = hydra.utils.instantiate(\n cfg.transforms)\n datamodule = hydra.utils.instantiate(\n cfg.dataset, targets=cfg.dataset_criterion.targets, feature_transform=feature_transform, output_transform=output_transform, target_transform=target_transform)\n # keyword arguments keep use_square at its default; positional args would shift the stats into the wrong parameters\n criterion = HiggsLoss(cfg.dataset_criterion.targets, None, output_mean=output_transform.mean,\n output_std=output_transform.std, target_mean=target_transform.mean, target_std=target_transform.std)\n dataloader = datamodule.train_dataloader()\n loss = 0.0\n for batch in dataloader:\n features, outputs, targets, attributes = batch\n loss += criterion(outputs, targets, attributes)\n\n logging.info(f'Expected loss: 0.0. Actual loss: {loss}')\n\n\nif __name__ == \"__main__\":\n main() # pylint: disable=no-value-for-parameter\n","repo_name":"AlexSchuy/mass_regression","sub_path":"mass_regression/criterion/higgs.py","file_name":"higgs.py","file_ext":"py","file_size_in_byte":6792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23858547974","text":"import redis\nimport json\n\n\n\nr = redis.StrictRedis(host=\"192.168.1.7\", port=6379, db=0)\ndef add_redis():\n r = redis.Redis(host=\"192.168.1.7\", port=6379, password=\"123\", db=0)\n with r.pipeline(transaction=False) as p:\n\n for value in range(1,13000):\n # print(value)\n mapping = {value:value}\n p.zadd(\"Pp-RelationStoreFans_961948\", mapping) # queue on the pipeline so p.execute() batches the writes\n p.execute()\n\n\n\ndef get_redis_token(userId):\n token_redis = r.get('pp-user-test:token:app_user:' + userId)\n if token_redis is not None:\n print(type(token_redis))\n print(token_redis)\n str_token = str(token_redis, encoding=\"utf8\")\n dict_token = json.loads(str_token)\n\n token = dict_token[\"token\"]\n print(token)\n return token\n else:\n\n print(\"No token data was returned\")\n exit()\n\n\n\n\nif __name__ == '__main__':\n # add_redis()\n userId = input(\"Enter userId:\")\n\n get_redis_token(userId)\n\n","repo_name":"So777888/openstore","sub_path":"get_redis_token.py","file_name":"get_redis_token.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8171210485","text":"# \ninput(\"Interleaving words\")\nstringword1= input(\"Enter the first word:\")\nstringword2= input (\"Enter the second word:\")\n\nif len(stringword1) != len(stringword2):\n print (\"the words do not have the same number of letters, try again\")\nelse:\n result= \"\"\n for i in range(len(stringword1)): \n result += stringword1[i] + stringword2[i] ## += appends the value of the right-hand operand to the left-hand one; this is what does the interleaving\n print(\"The interleaved words would look like this\",result)\n \n \n\n\n\n\n\n\n\n\n","repo_name":"gregbartels/python1","sub_path":"Tareas/Tarea 2/Ejercicio 3.py","file_name":"Ejercicio 3.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"429471116","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\ndef extract(city, page):\n '''Extracting information for jobs city-wise'''\n headers = {\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:96.0) Gecko/20100101 Firefox/96.0'\n }\n url = f'https://jobs.accaglobal.com/jobs/{city}/{page}'\n r = 
requests.get(url, headers=headers) # pass headers as a keyword so they are sent as HTTP headers, not query params\n soup = BeautifulSoup(r.content, 'html.parser')\n return soup\n\n\ndef transform(soup):\n '''Extracting relevant information from the soup object'''\n divs = soup.find_all('div', class_=\"lister__details cf js-clickable\")\n for block in divs:\n title = block.find_all('span')[0].text\n location = block.find_all(\n class_=\"lister__meta-item lister__meta-item--location\")[0].text\n salary = block.find_all(\n class_=\"lister__meta-item lister__meta-item--salary\")[0].text\n recuriter = block.find_all(\n class_=\"lister__meta-item lister__meta-item--recruiter\")[0].text\n a = [\n x['href'].strip()\n for x in block.find_all(class_='js-clickable-area-link', href=True)\n ]\n url = 'https://jobs.accaglobal.com' + str(a).replace(\"['\", '').replace(\n \"']\", '')\n job = {\n 'Title': title,\n 'Location': location,\n 'Salary': salary,\n 'Recuriter': recuriter,\n 'Link': url\n }\n joblist.append(job)\n\n\n# Create an empty job list\njoblist = []\n\n# Creating a writer object for writing data in multiple sheets\nwriter = pd.ExcelWriter('ACCA_jobs.xlsx', engine='xlsxwriter')\nfor city in ['karachi', 'lahore', 'islamabad']:\n for i in range(0, 10):\n print(f'Getting {city.capitalize()} jobs at page, {i}')\n c = extract(city, i)\n transform(c)\ndf = pd.DataFrame(joblist).drop_duplicates()\ndf.to_excel(writer, sheet_name='ACCA_Jobs', index=False)\nwriter.close()\n","repo_name":"kashifnaz/ACCA_Jobs_Web_Scraping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8839949787","text":"# USAGE\n# python pi_detect_drowsiness.py --cascade haarcascade_frontalface_default.xml --shape-predictor shape_predictor_68_face_landmarks.dat\n# python pi_detect_drowsiness.py --cascade haarcascade_frontalface_default.xml --shape-predictor shape_predictor_68_face_landmarks.dat --alarm 1\n\n# import the necessary packages\nimport sys\nsys.path.insert(0, \"/usr/local/lib/python3.5/dist-packages\")\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom imutils.video import VideoStream\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\nimport RPi.GPIO as GPIO\nimport threading\n\ndef pin_int():\n\tglobal speaker,led_system,led_status,chair_mode1,chair_mode2\n\tspeaker=11\n\tled_system=33\n\tled_status=13\n\tchair_mode1=29\n\tchair_mode2=31\n\tGPIO.setmode(GPIO.BOARD)\n\tGPIO.setup(speaker,GPIO.OUT, initial = 0)\n\tGPIO.setup(led_system,GPIO.OUT, initial = 0)\n\tGPIO.setup(led_status,GPIO.OUT, initial = 1)\n\tGPIO.setup(chair_mode1,GPIO.OUT, initial = 0)\n\tGPIO.setup(chair_mode2,GPIO.OUT, initial = 0)\n#==========Define actions function==========================\nglobal count\nglobal x,y,detect_mode,old_status\ncount=0\ndetect_mode=0\nold_status=0\ndef led_blinking(count,pulse):\n\tfor i in range(count):\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(led_system,GPIO.HIGH)\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(led_system,GPIO.LOW)\ndef status_blinking():\n\t#detect_mode=0\n\t#if (detect_mode==0):\n\ttime.sleep(0.5)\n\tGPIO.output(led_status,GPIO.LOW)\n\ttime.sleep(0.5)\n\tGPIO.output(led_status,GPIO.HIGH)\ndef speaker_alert(count,pulse):\n\tfor i in range(count):\n\t\ttime.sleep(pulse)\n\t\tGPIO.output(speaker,GPIO.HIGH)\n\t\ttime.sleep(pulse+0.3)\n\t\tGPIO.output(speaker,GPIO.LOW)\n
\n\n\ndef drownside_alert():\n\tglobal old_status\n\tprint (\"=========Starting Alert System=============\")\n\twhile True:\n\t\tif (old_status == 1):\n\t\t\tprint ( \"=Downside detected==\" )\n\t\t\tprint ( \"======>Triggered Speaker==\")\n\t\t\tGPIO.output(led_status,GPIO.LOW)\n\t\t\tGPIO.output(chair_mode2,GPIO.HIGH)\n\t\t\tGPIO.output(chair_mode1,GPIO.LOW)\n\t\t\tfor i in range(3):\n\t\t\t\ttime.sleep(0.2)\n\t\t\t\tGPIO.output(speaker,GPIO.HIGH)\n\t\t\t\tGPIO.output(led_system,GPIO.HIGH)\n\t\t\t\ttime.sleep(0.2+0.3)\n\t\t\t\tGPIO.output(led_system,GPIO.LOW)\n\t\t\t\tGPIO.output(speaker,GPIO.LOW)\n\t\t\tGPIO.output(chair_mode2,GPIO.LOW)\n\t\t#time.sleep(5)\n\t\tif (old_status == 2):\n\t\t\t# this block mixed spaces and tabs in the original, which is a SyntaxError; re-indented consistently with tabs\n\t\t\tprint ( \"==== No Downside detected ========\" )\n\t\t\tprint ( \"==== >Alert System Stopped\" )\n\t\t\tGPIO.output(speaker,GPIO.LOW)\n\t\t\tGPIO.output(led_system,GPIO.LOW)\n\t\t\tGPIO.output(led_status,GPIO.HIGH)\n\t\t\tGPIO.output(chair_mode1,GPIO.LOW)\n\t\t\tGPIO.output(chair_mode2,GPIO.LOW)\n\t\tif (old_status==0):\n\t\t\tstatus_blinking()\n\n#===========================================================\ndef euclidean_dist(ptA, ptB):\n\t# compute and return the euclidean distance between the two\n\t# points\n\treturn np.linalg.norm(ptA - ptB)\n\ndef eye_aspect_ratio(eye):\n\t# compute the euclidean distances between the two sets of\n\t# vertical eye landmarks (x, y)-coordinates\n\tA = euclidean_dist(eye[1], eye[5])\n\tB = euclidean_dist(eye[2], eye[4])\n\n\t# compute the euclidean distance between the horizontal\n\t# eye landmark (x, y)-coordinates\n\tC = euclidean_dist(eye[0], eye[3])\n\n\t# compute the eye aspect ratio\n\tear = (A + B) / (2.0 * C)\n\n\t# return the eye aspect ratio\n\treturn ear\n \n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--cascade\", required=True,\n\thelp = \"path to where the face cascade resides\")\nap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n\thelp=\"path to facial landmark predictor\")\nap.add_argument(\"-a\", \"--alarm\", type=int, default=0,\n\thelp=\"boolean used to indicate if TraffHat should be used\")\nargs = vars(ap.parse_args())\n\n# check to see if we are using GPIO/TrafficHat as an alarm\n#if args[\"alarm\"] > 0:\n#\tfrom gpiozero import TrafficHat\n#\tth = TrafficHat()\n#\tprint(\"[INFO] using TrafficHat alarm...\")\n \n# define two constants, one for the eye aspect ratio to indicate\n# blink and then a second constant for the number of consecutive\n# frames the eye must be below the threshold for to set off the\n# alarm\nEYE_AR_THRESH = 0.3 # eye sensitivity: raise it for small eyes, lower it for large eyes (useful range 0.2 to 0.33)\nEYE_AR_CONSEC_FRAMES = 4 # consecutive closed-eye frames: drop to 2 for a faster trigger, raise to 8-10 for a slower one\n# remember to save the file and then run it from the desktop\n\n# initialize the frame counter as well as a boolean used to\n# indicate if the alarm is going off\nglobal COUNTER,ALARM_ON,status_ret\nCOUNTER=0\nALARM_ON= False\nstatus_ret=0\n# load OpenCV's Haar cascade for face detection (which is faster than\n# dlib's built-in HOG detector, but less accurate), then create the\n# facial landmark predictor\nprint(\"[INFO] loading facial landmark predictor...\")\ndetector = cv2.CascadeClassifier(args[\"cascade\"])\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n
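\n# --- Illustrative worked example (added for clarity; not part of the original script).\n# eye_aspect_ratio() above computes EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|)\n# over six eye landmarks. For a toy \"open\" eye, EAR = (1.8 + 1.8) / (2 * 3) = 0.6;\n# squeezing the vertical gaps to 0.1 gives EAR = (0.2 + 0.2) / 6 ~= 0.067, which\n# is why a threshold such as EYE_AR_THRESH = 0.3 cleanly separates the two states.\n_open_eye = np.array([(0, 0), (1, 0.9), (2, 0.9), (3, 0), (2, -0.9), (1, -0.9)])\n_closed_eye = np.array([(0, 0), (1, 0.1), (2, 0.1), (3, 0), (2, -0.1), (1, -0.1)])\nassert eye_aspect_ratio(_open_eye) > EYE_AR_THRESH > eye_aspect_ratio(_closed_eye)\n\n# grab the indexes of the facial landmarks for the left and\n# right eye, 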
respectively\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n\n# initialize the camera and grab a reference to the raw camera capture\n#camera = PiCamera()\n#camera.resolution = (640, 480)\n#camera.framerate = 32\n#vs= PiRGBArray(camera, size=(640, 480))\n#=============intit GPIO PIN================\npin_int()\nprint(\"[INFO] GPIO Init...\")\nx = threading.Thread(target=drownside_alert)\nx.start()\n#===========================================\n# start the video stream thread\n#==============================================\nprint(\"[INFO] starting video stream thread...\")\n#vs = VideoStream(src=0).start()\nvs = VideoStream(usePiCamera=True,resolution=(640,480),framerate=32).start()\n#vs= PiRGBArray(camera, size=(640, 480))\ntime.sleep(1.0)\n\n# loop over frames from the video stream\nwhile True:\n\t# grab the frame from the threaded video file stream, resize\n\t# it, and convert it to grayscale\n\t# channels)\n\t\n\tframe = vs.read()\n\tframe = imutils.resize(frame, width=450)\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\t# detect faces in the grayscale frame\n\trects = detector.detectMultiScale(gray, scaleFactor=1.1, \n\t\tminNeighbors=5, minSize=(30, 30),\n\t\tflags=cv2.CASCADE_SCALE_IMAGE)\n\n\t# loop over the face detections\n\tfor (x, y, w, h) in rects:\n\t\t# construct a dlib rectangle object from the Haar cascade\n\t\t# bounding box\n\t\trect = dlib.rectangle(int(x), int(y), int(x + w),\n\t\t\tint(y + h))\n\n\t\t# determine the facial landmarks for the face region, then\n\t\t# convert the facial landmark (x, y)-coordinates to a NumPy\n\t\t# array\n\t\tshape = predictor(gray, rect)\n\t\tshape = face_utils.shape_to_np(shape)\n\n\t\t# extract the left and right eye coordinates, then use the\n\t\t# coordinates to compute the eye aspect ratio for both eyes\n\t\tleftEye = shape[lStart:lEnd]\n\t\trightEye = shape[rStart:rEnd]\n\t\tleftEAR = eye_aspect_ratio(leftEye)\n\t\trightEAR = eye_aspect_ratio(rightEye)\n\n\t\t# average the eye aspect ratio together for both eyes\n\t\tear = (leftEAR + rightEAR) / 2.0\n\n\t\t# compute the convex hull for the left and right eye, then\n\t\t# visualize each of the eyes\n\t\tleftEyeHull = cv2.convexHull(leftEye)\n\t\trightEyeHull = cv2.convexHull(rightEye)\n\t\tcv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n\t\tcv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n\n\t\t# check to see if the eye aspect ratio is below the blink\n\t\t# threshold, and if so, increment the blink frame counter\n\t\tif ear < EYE_AR_THRESH:\n\t\t\tCOUNTER += 1\n\n\t\t\t# if the eyes were closed for a sufficient number of\n\t\t\t# frames, then sound the alarm\n\t\t\tif COUNTER >= EYE_AR_CONSEC_FRAMES:\n\t\t\t\t# if the alarm is not on, turn it on\n#\t\t\t\tif not ALARM_ON:\n#\t\t\t\t\tALARM_ON = True\n#\n#\t\t\t\t\t# check to see if the TrafficHat buzzer should\n#\t\t\t\t\t# be sounded\n#\t\t\t\t\tif args[\"alarm\"] > 0:\n#\t\t\t\t\t\tth.buzzer.blink(0.1, 0.1, 10,\n#\t\t\t\t\t\t\tbackground=True)\n\t\t\t\t# draw an alarm on the frame\n\t\t\t\t#ALARM_ON = True\n\t\t\t\tdetect_mode=1\n\t\t\t\tstatus_ret=1\n\t\t\t\tcv2.putText(frame, \"DROWSINESS ALERT!\", (10, 30),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\t\t\t\tif (detect_mode==1):\n\t\t\t\t\tif (detect_mode != old_status):\n\t\t\t\t\t\told_status=detect_mode\n\t\t\t\t\t\t#x = threading.Thread(target=drownside_alert, args=[1,0,])\n\t\t\t\t\t\t#x.start()\n\t\t\t\t\t\t#COUNTER = 0\n\t\t# otherwise, the 
eye aspect ratio is not below the blink\n\t\t# threshold, so reset the counter and alarm\n\t\telse:\n\t\t\tCOUNTER = 0\n\t\t\tif (detect_mode==1):\n\t\t\t\t#ALARM_ON = False\n\t\t\t\tdetect_mode=3\n\t\t\t\told_status=0\n\t\t\t\t#x = threading.Thread(target=drownside_alert, args=[2,0,])\n\t\t\t\t#x.start()\n\t\t\t\tdetect_mode=0\n\t\t# draw the computed eye aspect ratio on the frame to help\n\t\t# with debugging and setting the correct eye aspect ratio\n\t\t# thresholds and frame counter\n\t\tcv2.putText(frame, \"EAR: {:.3f}\".format(ear), (300, 30),\n\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n \n\t# show the frame\n\tcv2.imshow(\"Frame\", frame)\t\n\t#if (detect_mode==0):\n\t\t#status_blinking()\n\t#GPIO.output(led_status,GPIO.LOW)\n\t#time.sleep(0.5)\n\tkey = cv2.waitKey(1) & 0xFF\n\t#GPIO.output(led_status,GPIO.HIGH)\n\t# if the `q` key was pressed, break from the loop\n\t#if key == ord(\"q\"):\n\t#\tbreak\n\n# do a bit of cleanup\n#cv2.destroyAllWindows()\nvs.stop()\n\n","repo_name":"LuizKun/Drownside_detection","sub_path":"FaBo9AXIS-MPU9250-Python/example/pi_detect_drowsiness.py","file_name":"pi_detect_drowsiness.py","file_ext":"py","file_size_in_byte":9337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13296973135","text":"import numpy as np\nimport sys\nimport pdb\n\ndef least_squares_GD(y, tx, initial_w, max_iters, gamma):\n \"\"\"\n Estimate parameters of linear regression using gradient descent\n INPUTS\n @y (Nx1): Output vector, y = 1 for signal and 0 for background\n @tx (NxD): Input matrix\n @initial_w (Dx1): Inintial values of the weights\n @max_iters: the number of epochs\n @gamma: learning rate of the gradient descent algorithm\n Where N and D are respectively the number of samples and dimension of input vectors\n OUTPUTS\n @w: Optimal weights, array (Dx1)\n @mse: MSE\n \"\"\"\n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n we = initial_w\n for n_iter in range(max_iters):\n # calculate gradient and losses\n grad = compute_gradient(y,tx,we)\n loss = compute_loss(y,tx,we)\n # update weights\n we = we - gamma*grad\n # store w and loss\n ws.append(np.copy(we))\n losses.append(loss)\n # return the last weight array and losses\n return ws[-1], losses[-1]\n\ndef least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n \"\"\"\n Estimate parameters of linear regression using stochastic gradient descent\n INPUTS\n @y (Nx1): Output vector, y = 1 for signal and 0 for background\n @tx (NxD): Input matrix\n @initial_w (Dx1): Inintial values of the weights\n @max_iters: the number of epochs (== the number of samples to see)\n @gamma: learning rate of the gradient descent algorithm\n Where N and D are respectively the number of samples and dimension of input vectors\n OUTPUTS\n @w: Optimal weights, array (Dx1)\n @mse: MSE\n \"\"\"\n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n we = initial_w\n n_iter = 0\n for mini_y, mini_x in batch_iter(y, tx, 1, num_batches=max_iters, shuffle=True):\n # calculate gradient and losses\n grad = compute_gradient(mini_y, mini_x,we)\n loss = compute_loss(mini_y, mini_x,we)\n # update weights\n we = we - gamma*grad\n # store w and loss\n ws.append(np.copy(we))\n losses.append(loss)\n n_iter += 1\n # return the last weight array and losses\n return ws[-1], losses[-1]\n\ndef least_squares(y,tx):\n \"\"\"\n Estimate parameters of linear system using normal equations for least squares regression\n INPUTS\n @y (Nx1): Output vector, y = 
1 for signal and 0 for background\n    @tx (NxD): Input matrix\n    Where N and D are respectively the number of samples and dimension of input vectors\n    OUTPUTS\n    @w: Optimal weights, array (Dx1)\n    @mserror: MSE\n\n    Ref: https://en.wikipedia.org/wiki/Linear_least_squares_(mathematics)#Derivation_of_the_normal_equations\n    \"\"\"\n\n    #(tx*x)^(-1)*tx\n    factor = np.dot(np.linalg.inv(np.dot(tx.transpose(),tx)),tx.transpose())\n    we = np.dot(factor,y)\n    predictions = np.dot(tx,we)\n    mserror = mse(y, predictions)\n    return we, mserror\n\ndef ridge_regression(y, tx, lambda_):\n    \"\"\"\n    Estimate parameters of linear system using normal equations for ridge regression\n    INPUTS\n    @y (Nx1): Output vector, y = 1 for signal and 0 for background\n    @tx (NxD): Input matrix\n    Where N and D are respectively the number of samples and dimension of input vectors\n    OUTPUTS\n    @w: Optimal weights, array (Mx1)\n    @mse: MSE\n    \"\"\"\n    N = y.shape[0]\n    phi_tilda = tx\n    M = phi_tilda.shape[1]\n    factor1 = np.linalg.inv(np.dot(phi_tilda.transpose(), phi_tilda) + 2*N*lambda_*np.identity(M))\n    factor2 = np.dot(factor1, phi_tilda.transpose())\n    we = np.dot(factor2,y)\n    predictions = np.dot(tx,we)\n    mserror = mse(y, predictions) # renamed: the original assigned to `mse`, shadowing the mse() helper and raising a TypeError\n    return we, mserror\n\ndef logistic_regression(y, tx, initial_w, max_iters, gamma):\n    \"\"\"\n    Estimate parameters of linear system using logistic regression\n    INPUTS\n    @tx (NxD): input matrix\n    @y (Nx1): Output vector\n    @gamma: learning rate\n    @max_iters: number of epochs\n    Where N and D are respectively the number of samples and dimension of input vectors\n    OUTPUTS\n    @w: Optimal weights, array (Dx1)\n    @mse: MSE\n    \"\"\"\n    ws = [initial_w]\n    losses = []\n    we = initial_w\n    for n_iter in range(max_iters):\n        # calculate gradient and losses\n        grad = compute_logistic_gradient(y,tx,we)\n        loss = compute_logistic_loss(y,tx,we)\n        # update weights\n        we = we - gamma*grad\n        # store w and loss\n        ws.append(np.copy(we))\n        losses.append(loss)\n        #print(\"Gradient Descent({bi}/{ti}): logistic loss={l}, w={w}\".format(\n        #    bi=n_iter, ti=max_iters - 1, l=loss, w=we))\n    return ws[-1], losses[-1]\n\ndef reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n    \"\"\"\n    Estimate parameters of linear system using regularized logistic regression\n    INPUTS\n    @tx (NxD): input matrix\n    @y (Nx1): Output vector\n    @lambda_: regularization parameter\n    @gamma: learning rate\n    @max_iters: number of epochs\n    Where N and D are respectively the number of samples and dimension of input vectors\n    OUTPUTS\n    @w: Optimal weights, array (Dx1)\n    @mse: MSE\n    \"\"\"\n    ws = [initial_w]\n    losses = []\n    we = initial_w\n    for n_iter in range(max_iters):\n        # calculate gradient and losses\n        grad = compute_logistic_gradient(y,tx,we)\n        loss = compute_logistic_loss(y,tx,we)\n        # L2 regularization: the penalty lambda*||w||^2 contributes 2*lambda*w to the\n        # gradient (the original subtracted the scalar penalty value itself, which\n        # shifts every weight by the same amount and is not a gradient step)\n        we = we - gamma * (grad + 2 * lambda_ * we)\n        # store w and loss\n        ws.append(np.copy(we))\n        losses.append(loss)\n\n    return ws[-1], losses[-1]\n\n\"\"\"---------------------------------HELPERS------------------------------------------------\"\"\"\ndef load_data_higgs(path_dataset):\n    \"\"\"Load data and convert it to the metrics system.\"\"\"\n    col_pred = 1\n    data = np.genfromtxt(path_dataset, delimiter=\",\", skip_header=1)\n\n    #id_ = data[:,0]\n\n    data = np.delete(data,col_pred,axis=1)\n    data = np.delete(data,0,axis=1)\n\n    #Read character of class 's' or 'b'\n    y = np.genfromtxt(\n        path_dataset, delimiter=\",\",dtype=\"str\", skip_header=1,usecols=col_pred)\n    y_out = np.zeros(y.shape)\n    s_ind = 
np.where(y == 's')\n    b_ind = np.where(y == 'b')\n    y_out[s_ind] = 1\n    #y_out[b_ind] = -1\n    y_out[b_ind] = 0\n\n    return data,y_out #,id_\n\ndef write_submission_higgs(y,id_,path):\n\n    y.shape = (y.shape[0],1)\n    id_.shape = (id_.shape[0],1)\n    header = \"Id,Prediction\"\n    np.savetxt(path,np.concatenate((id_,y),axis=1),fmt='%d',delimiter=',',header=header,comments='')\n\n    return True\n\ndef standardize(x):\n    \"\"\"\n    Standardize the original data set.\n    \"\"\"\n    mean_x = np.mean(x)\n    x = x - mean_x\n    std_x = np.std(x)\n    x = x / std_x\n    return x, mean_x, std_x\n\ndef batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):\n    \"\"\"\n    Generate a minibatch iterator for a dataset.\n    Takes as input two iterables (here the output desired values 'y' and the input data 'tx')\n    Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.\n    Data can be randomly shuffled to avoid ordering in the original data messing with the randomness of the minibatches.\n    Example of use :\n    for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):\n        \n    \"\"\"\n    data_size = len(y)\n    if shuffle:\n        shuffle_indices = np.random.permutation(np.arange(data_size))\n        shuffled_y = y[shuffle_indices]\n        shuffled_tx = tx[shuffle_indices]\n    else:\n        shuffled_y = y\n        shuffled_tx = tx\n    for batch_num in range(num_batches):\n        start_index = batch_num * batch_size\n        end_index = min((batch_num + 1) * batch_size, data_size)\n        if start_index != end_index:\n            yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]\n\ndef mse(y_true,y_estim):\n    \"\"\"\n    Computes the mean squared error between two outputs\n    INPUTS\n    @y_true (Nx1): Output vector (True values)\n    @y_estim (Nx1): Output vector (Estimated values)\n    Where N is the number of samples\n    OUTPUT\n    @ mse: MSE value\n    \"\"\"\n\n    N = y_true.shape[0] #Number of samples\n    y_true = y_true.reshape(N,1)\n    y_estim = y_estim.reshape(N,1)\n    e = y_true - y_estim\n    e_squared = e**2\n    mse = (1/float(2*N))*np.sum(e_squared)\n\n    return mse\n\ndef rmse(y_true, y_estim):\n    rmse = np.sqrt(2*mse(y_true, y_estim))\n    return rmse\n\ndef compute_loss(y, tx, we):\n    \"\"\"\n    this function computes the loss of the estimation as the mse\n    INPUTS\n    @y (Nx1): Output vector (True values)\n    @tx (NxD): Input vector\n    @w (Dx1): weights\n    Where N is the number of samples\n    OUTPUT\n    @loss: mse\n    \"\"\"\n    loss = mse(y,np.dot(tx,we))\n    return loss\n\ndef compute_gradient(y, tx, we):\n    \"\"\"\n    INPUTS\n    @y (Nx1): Output vector (True values)\n    @tx (NxD): Input vector\n    @w (Dx1): weights\n    Where N is the number of samples\n    OUTPUTS\n    @grad (Dx1): gradient for each dimension\n    \"\"\"\n    N = y.shape[0]\n    y = y.reshape(N,1)\n    e = y - np.dot(tx,we)\n    grad = -np.dot(tx.transpose(),e)/float(N)\n    return grad\n\ndef build_poly(x, degree):\n    \"\"\"\n    polynomial basis functions for input data x, for j=0 up to j=degree.\n    INPUTS\n    @x (NxD) : vector of input data\n    @degree : degree of the polynomial basis\n    OUTPUTS\n    @phi_tilda (N x D): polynomial matrix\n    \"\"\"\n    D = x.shape[1]\n    phi_tilda = np.zeros(x.shape)\n    for j in range(D):\n        feature = x[:,j]\n        phi_x = 0\n        for i in range(degree):\n            phi_x += np.power(feature, i + 1) # the original raised to `degree` on every pass\n        phi_tilda[:, j] = phi_x # the original never stored phi_x, so an all-zero matrix was returned\n\n    return phi_tilda\n\n\ndef sigmoid(t):\n    \"\"\"\n    apply sigmoid function on t.\n    \"\"\"\n    sigma_ = np.exp(t) / (1 + np.exp(t))\n    return sigma_\n
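\n# Illustrative aside (added by the editor; not part of the original project):\n# the ratio above overflows for large positive t, since np.exp(t) -> inf\n# yields nan. A numerically stable drop-in, should that ever matter:\ndef sigmoid_stable(t):\n    \"\"\"Overflow-safe sigmoid, equal in value to sigmoid() above.\"\"\"\n    t = np.asarray(t, dtype=float)\n    out = np.empty_like(t)\n    pos = t >= 0\n    out[pos] = 1.0 / (1.0 + np.exp(-t[pos]))  # safe: exp(-t) <= 1 here\n    exp_t = np.exp(t[~pos])                   # safe: t < 0, so exp(t) < 1\n    out[~pos] = exp_t / (1.0 + exp_t)\n    return out\n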
\ndef compute_logistic_loss(y, tx, ww):\n    \"\"\"\n    compute the cost by negative log likelihood. (lecture 5b)\n    @y (Nx1): Output vector, y = 1 for signal and 0 for background\n    @tx (NxD): Input matrix\n    @w (Dx1): weights vector\n    Where N and D are respectively the number of samples and dimension of input vectors\n    OUTPUTS\n    @loss: logistic loss\n    \"\"\"\n    N = y.shape[0]\n    loss = 0\n    for n in range(N):\n        # sample shape is (D,)\n        sample = tx[n,:]\n        addend = np.log(1+np.exp(np.dot(sample.transpose(), ww))) - y[n]*np.dot(sample.transpose(), ww)\n        loss += addend\n\n    return loss\n\ndef compute_logistic_gradient(y, tx, we):\n    \"\"\"\n    compute the gradient of logistic loss. (lecture 5b)\n    @y (Nx1): Output vector, y = 1 for signal and 0 for background\n    @tx (NxD): Input matrix\n    @w (Dx1): weights vector\n    Where N and D are respectively the number of samples and dimension of input vectors\n    OUTPUTS\n    @grad: logistic gradient\n    \"\"\"\n    y = y.reshape(y.shape[0],1)\n    prediction = sigmoid(np.dot(tx,we))\n    right_term = prediction - y\n    grad = np.dot(tx.transpose(), right_term)\n    return grad\n\ndef impute_lr(data):\n    #find columns that have no -999\n    clear_cols = [i for i in range(data.shape[1]) if -999 not in data[:,i]]\n    #find rows that have no -999\n    clear_rows = [i for i in range(data.shape[0]) if -999 not in data[i,:]]\n    #pdb.set_trace()\n    dirty_cols = [i for i in range(data.shape[1]) if i not in clear_cols]\n    dirty_rows = [i for i in range(data.shape[0]) if i not in clear_rows]\n\n    clear_samples = np.copy(data[clear_rows, :])\n    #clear_samples, mean_x, std_x = standardize(clear_samples)\n    w_lr = list()\n    mse= list()\n    #pdb.set_trace()\n    for feature in dirty_cols:\n        wf = least_squares(clear_samples[:, feature], clear_samples[:, clear_cols]) # the original called imp.least_squares, but no `imp` module is imported; least_squares is defined above\n        w_lr.append(wf[0])\n        #pdb.set_trace()\n        #mse.append(compute_loss(clear_samples[:, feature], clear_samples[:, clear_cols] ,wf[0]))\n        for sample in dirty_rows:\n            if data[sample,feature] == -999:\n                replacement = np.dot(data[sample, clear_cols].transpose(), wf[0])\n                data[sample, feature] = replacement\n\n    return data\n\ndef cross_validation(x,y,k, mode, gamma=None, lambda_=None, max_iters=None, initial_w=None):\n    \"\"\"\n    INPUT:\n    @x : input data, dimensions (NxD)\n    @y : target labels, (Nx1) array\n    @k : number of folds\n    @gamma: learning rate in case of gradient descent models\n    @lambda_: regularization strength in case of regularized versions of algorithms\n    @max_iters: number of epochs in case of iterative models\n    @initial_w (Dx1): initial weights for iterative models\n    OUTPUT:\n    @acc: (kx1) the accuracy of prediction on the validation dataset of every fold\n    @losses: mse\n    @weights: the calculated weights of every fold\n    \"\"\"\n    # data dimensions\n    D = x.shape[1]\n    # split the data into k groups\n    x_split = np.array_split(x, k, axis=0)\n    y_split = np.array_split(y, k, axis=0)\n    #initialize weights and metrics\n    weights = list()\n    acc = list()\n    losses = list()\n\n    #loop over folds\n    for fold in range(k):\n        # divide the data into the training set of (k-1) groups, and the validation set of 1 group\n        x_train = [x_split[i] for i in range(k) if i!=fold]\n        y_train = [y_split[i] for i in range(k) if i!=fold]\n        x_train = np.concatenate(x_train, axis=0)\n        y_train = np.concatenate(y_train, axis=0)\n        x_val = x_split[fold]\n        y_val = y_split[fold]\n        # choose the classification method\n        if mode == 'linear_regression_eq':\n            update, loss = least_squares(y_train, x_train)\n            predictions = np.dot(x_val, update)\n            pr_bool = predictions>=np.mean(predictions)\n        elif mode == 'ridge_regression_eq':\n            update, loss = ridge_regression(y_train, x_train, lambda_)\n            predictions = 
np.dot(x_val, update)\n            pr_bool = predictions>=np.mean(predictions)\n        elif mode == 'linear_regression_GD':\n            update, loss = least_squares_GD(y_train, x_train, initial_w, max_iters, gamma)\n            predictions = np.dot(x_val, update)\n            pr_bool = predictions>=np.mean(predictions)\n        elif mode == 'linear_regression_SGD':\n            update, loss = least_squares_SGD(y_train, x_train, initial_w, max_iters, gamma)\n            predictions = np.dot(x_val, update)\n            pr_bool = predictions>=np.mean(predictions)\n        elif mode == 'logistic_regression':\n            update, loss = logistic_regression(y_train, x_train, initial_w, max_iters, gamma)\n            predictions = np.dot(x_val, update)\n            predicted_prob = sigmoid(predictions)\n            pr_bool = predicted_prob>0.5\n        elif mode == 'reg_logistic_regression':\n            update, loss = reg_logistic_regression(y_train, x_train, lambda_, initial_w, max_iters, gamma) # the original called the unregularized logistic_regression in this branch\n            predictions = np.dot(x_val, update)\n            predicted_prob = sigmoid(predictions)\n            pr_bool = predicted_prob>0.5\n        else:\n            raise ValueError(mode + ' mode of classification is not defined')\n        weights.append(update)\n        losses.append(loss)\n        # transform the targets into boolean arrays\n        y_bool = y_val==1\n        y_bool = y_bool.reshape(y_bool.shape[0],1)\n        correct = pr_bool == y_bool\n        # calculate the accuracy as the ratio of correctly classified samples over the total number of samples\n        acc.append(sum(correct)/float(len(y_val)))\n    return acc, losses, weights\n
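\n\nif __name__ == '__main__':\n    # Illustrative smoke test (added by the editor; not part of the original project):\n    # on noiseless synthetic data the closed-form solver above should recover\n    # w = [2, -3] exactly, with an MSE of (numerically) zero.\n    rng = np.random.RandomState(0)\n    tx_demo = rng.randn(100, 2)\n    y_demo = np.dot(tx_demo, np.array([2.0, -3.0]))\n    w_demo, mse_demo = least_squares(y_demo, tx_demo)\n    print('recovered weights:', w_demo, 'mse:', mse_demo)\n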
OperationError(_(\"Keys not yet implemented: %s\") %\n\t\t\t\t\tgtk.accelerator_get_label(keys, orig_mods))\n\t\tkey_arg = 'key %s' % (gtk.gdk.keyval_name(keys), )\n\t\tmods_down = ['keydown ' + n for n in mod_names]\n\t\tmods_up = ['keyup ' + n for n in reversed(mod_names)]\n\n\t\txte_paste_argv = ['xte', 'usleep 300000'] + \\\n\t\t\t\tmods_down + [key_arg] + mods_up\n\t\tif not utils.spawn_async(xte_paste_argv):\n\t\t\traise CommandMissingError('xte')\n\tdef item_types(self):\n\t\tyield TextLeaf\n\tdef valid_for_item(self, leaf):\n\t\ttext = leaf.object\n\t\tkeys, mods = gtk.accelerator_parse(text)\n\t\treturn keys\n\tdef get_description(self):\n\t\treturn _(\"Send keys to foreground window\")\n\nclass TypeText (Action):\n\trank_adjust = -2 \n\tdef __init__(self):\n\t\tAction.__init__(self, _(\"Type Text\"))\n\tdef activate(self, leaf):\n\t\ttext = interface.get_text_representation(leaf)\n\t\txte_paste_argv = ['xte', 'usleep 300000']\n\t\t# replace all newlines with 'key Return'\n\t\tfor line in text.splitlines(True):\n\t\t\txte_paste_argv.append(\"str \" + line.rstrip(\"\\r\\n\"))\n\t\t\tif line.endswith(\"\\n\"):\n\t\t\t\txte_paste_argv.append(\"key Return\")\n\t\tif not utils.spawn_async(xte_paste_argv):\n\t\t\traise CommandMissingError('xte')\n\tdef item_types(self):\n\t\tyield Leaf\n\tdef valid_for_item(self, leaf):\n\t\ttry:\n\t\t\treturn bool(interface.get_text_representation(leaf))\n\t\texcept AttributeError:\n\t\t\tpass\n\tdef get_description(self):\n\t\treturn _(\"Type the text to foreground window\")\n\n","repo_name":"tuxcanfly/kupfer","sub_path":"kupfer/plugin/sendkeys.py","file_name":"sendkeys.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"73"} +{"seq_id":"11482726919","text":"def calculate_total_price(N, M, C, D, P):\r\n total_price = 0 # 合計金額を0で初期化\r\n\r\n # 高橋くんが食べた各皿について\r\n for i in range(N):\r\n # その皿の色が価格が定義されている色かどうかを確認\r\n if C[i] in D:\r\n # 定義されている色ならその価格を合計に加える\r\n color_index = D.index(C[i])\r\n total_price += P[color_index + 1] # P[0]は定義されていない色なので +1 する\r\n else:\r\n # 定義されていない色ならP[0]を合計に加える\r\n total_price += P[0]\r\n\r\n return total_price\r\n\r\n\r\n\"\"\"\r\nphp\r\n\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nf = 800 \r\nhm = 1.5 \r\nhb = [30, 120, 180] \r\nd = np.linspace(1, 10, 100)\r\n\r\ndef okumura_hata(d, f, hb, hm):\r\n ahr = (1.1 * np.log10(f) - 0.7) * hm - (1.56 * np.log10(f) - 0.8)\r\n return 69.55 + 26.16 * np.log10(f) - 13.82 * np.log10(hb) + (44.9 - 6.55 * np.log10(hb)) * np.log10(d) - ahr\r\n\r\n# Calculate loss for each base station antenna height\r\nl = [okumura_hata(d, f, h, hm) for h in hb]\r\n\r\n# Plot\r\nplt.figure(figsize=(10, 6))\r\nfor i, loss in enumerate(l):\r\n plt.plot(d, loss, label=f'hb = {hb[i]} m')\r\nplt.xscale('log')\r\nplt.xlabel('d (km)')\r\nplt.ylabel('Loss (dB)')\r\nplt.title('Loss : d Okumura-Hata')\r\nplt.legend()\r\nplt.grid(True)\r\nplt.show()\r\n","repo_name":"s1f102103189/Atcorder","sub_path":"temp/Default Price.py","file_name":"Default Price.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22658617051","text":"import os\nimport json\nimport parquet\nimport pandas as pd\n\nhome = os.path.expanduser(\"~\")\ndir = \"/media/sumeyer/SSD_2/ML_DATA/\"\nfilename = \"part-r-00000-67ebd6f0-bfb4-42e0-b516-d7aaa77cbcb8.snappy.parquet\"\ndatafile = dir + filename\n\nprint(\"open file : \", 
\r\n\r\n\"\"\"\r\nphp\r\n\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nf = 800  # carrier frequency in MHz\r\nhm = 1.5  # mobile antenna height in m\r\nhb = [30, 120, 180]  # base-station antenna heights in m\r\nd = np.linspace(1, 10, 100)  # distance in km\r\n\r\ndef okumura_hata(d, f, hb, hm):\r\n    ahr = (1.1 * np.log10(f) - 0.7) * hm - (1.56 * np.log10(f) - 0.8)\r\n    return 69.55 + 26.16 * np.log10(f) - 13.82 * np.log10(hb) + (44.9 - 6.55 * np.log10(hb)) * np.log10(d) - ahr\r\n\r\n# Calculate loss for each base station antenna height\r\nl = [okumura_hata(d, f, h, hm) for h in hb]\r\n\r\n# Plot\r\nplt.figure(figsize=(10, 6))\r\nfor i, loss in enumerate(l):\r\n    plt.plot(d, loss, label=f'hb = {hb[i]} m')\r\nplt.xscale('log')\r\nplt.xlabel('d (km)')\r\nplt.ylabel('Loss (dB)')\r\nplt.title('Loss : d Okumura-Hata')\r\nplt.legend()\r\nplt.grid(True)\r\nplt.show()\r\n","repo_name":"s1f102103189/Atcorder","sub_path":"temp/Default Price.py","file_name":"Default Price.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"22658617051","text":"import os\nimport json\nimport parquet\nimport pandas as pd\n\nhome = os.path.expanduser(\"~\")\ndir = \"/media/sumeyer/SSD_2/ML_DATA/\"\nfilename = \"part-r-00000-67ebd6f0-bfb4-42e0-b516-d7aaa77cbcb8.snappy.parquet\"\ndatafile = dir + filename\n\nprint(\"open file : \", datafile)\n\n\n## assuming parquet file with two rows and three columns:\n## foo bar baz\n## 1 2 3\n## 4 5 6\n\nwith open(datafile) as fo:\n    # prints:\n    # {\"foo\": 1, \"bar\": 2}\n    # {\"foo\": 4, \"bar\": 5}\n    for row in parquet.DictReader(fo):\n        print(json.dumps(row))\n\n\nwith open(datafile) as fo:\n    # prints:\n    # 1,2\n    # 4,5\n    for row in parquet.reader(fo):\n        print(\",\".join([str(r) for r in row]))\n\n# df was printed below but never defined in the original; build it with pandas\n# (pd.read_parquet needs a parquet engine such as pyarrow or fastparquet)\ndf = pd.read_parquet(datafile)\nprint(df.info())\nprint(df)","repo_name":"SvenMeyer/keras","sub_path":"import_parquet.py","file_name":"import_parquet.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
{"seq_id":"35118562557","text":"def input_file(filename):\n    coefficients = []\n    with open(filename, 'rt') as file:\n        for line in file: # loop over each line\n            coefficients.append(list(map(int, list(str(line).strip())))) # parse them in some way\n    return list(coefficients)\n\n\ndef check_octopi(my_list, size_counter):\n    print(my_list)\n\n    for idx in range(0, len(my_list)):\n        for jdx in range(0, len(my_list[idx])):\n            if my_list[idx][jdx] > 9:\n                flash((idx, jdx), my_list, size_counter)\n\n    # print(size_counter)\n\n\ndef part_one(my_list):\n    step = 100\n    size_counter = [0]\n    for idx in range(0, step):\n        my_list = [[octopus + 1 for octopus in row] for row in my_list]\n        check_octopi(my_list, size_counter)\n    print(size_counter)\n\n\ndef part_two(my_list):\n    step = 0\n    size_counter = [0]\n    flag_all_not_0 = True\n    while flag_all_not_0:\n        step += 1\n        my_list = [[octopus + 1 for octopus in row] for row in my_list]\n        check_octopi(my_list, size_counter)\n\n        if sum(sum(rows) for rows in my_list) == 0:\n            flag_all_not_0 = False\n\n    print(step)\n\n\ndef get_neighbors(idx, jdx, my_list):\n    positions = [[-1, (0, 0)], [-1, (0, 0)], [-1, (0, 0)], [-1, (0, 0)], [-1, (0, 0)], [-1, (0, 0)], [-1, (0, 0)],\n                 [-1, (0, 0)]]\n    if 0 < jdx: # left\n        positions[0][0] = (my_list[idx][jdx - 1])\n        positions[0][1] = (idx, jdx - 1)\n    if 0 < jdx and 0 < idx: # top left\n        positions[1][0] = (my_list[idx - 1][jdx - 1])\n        positions[1][1] = (idx - 1, jdx - 1)\n    if 0 < idx: # top\n        positions[2][0] = (my_list[idx - 1][jdx])\n        positions[2][1] = (idx - 1, jdx)\n    if jdx + 1 < len(my_list[idx]) and 0 < idx: # top right\n        positions[3][0] = (my_list[idx - 1][jdx + 1])\n        positions[3][1] = (idx - 1, jdx + 1)\n    if jdx + 1 < len(my_list[idx]): # right\n        positions[4][0] = (my_list[idx][jdx + 1])\n        positions[4][1] = (idx, jdx + 1)\n    if jdx + 1 < len(my_list[idx]) and idx + 1 < len(my_list): # down right\n        positions[5][0] = (my_list[idx + 1][jdx + 1])\n        positions[5][1] = (idx + 1, jdx + 1)\n    if idx + 1 < len(my_list): # down\n        positions[6][0] = (my_list[idx + 1][jdx])\n        positions[6][1] = (idx + 1, jdx)\n    if 0 < jdx and idx + 1 < len(my_list): # down left\n        positions[7][0] = (my_list[idx + 1][jdx - 1])\n        positions[7][1] = (idx + 1, jdx - 1)\n    return positions\n
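\n\n# Illustrative sanity check (added by the editor; not part of the original solution):\n# get_neighbors() returns eight [value, (row, col)] slots, with value -1 marking\n# neighbours that fall outside the grid. A corner cell of a 2x2 grid has exactly\n# three in-bounds neighbours (right, down and down-right).\nassert sum(1 for value, _ in get_neighbors(0, 0, [[1, 2], [3, 4]]) if value != -1) == 3\n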
\n\ndef flash(location, my_list, size_counter):\n    if my_list[location[0]][location[1]] > 9:\n        my_list[location[0]][location[1]] = 0\n        size_counter[0] = (size_counter[0] + 1)\n\n        # left right top down\n        neighbors = get_neighbors(location[0], location[1], my_list)\n        for neighbor in neighbors:\n            if neighbor[0] > 0:\n                my_list[neighbor[1][0]][neighbor[1][1]] += 1\n        for neighbor in neighbors:\n            if my_list[neighbor[1][0]][neighbor[1][1]] > 9:\n                flash(neighbor[1], my_list, size_counter)\n\n\ndef main():\n    my_list = input_file('input.txt')\n    print(my_list)\n    part_one(my_list)\n    part_two(my_list)\n\n\nmain()\n\n# list add 1 to all\n# for each octopus:\n#   if greater than 9:\n#       add 1 to all neighbors if not 0\n#       turn 0\n","repo_name":"JunDP9/aoc","sub_path":"day11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"73520246635","text":"from otlmow_model.BaseClasses.KeuzelijstField import KeuzelijstField\nfrom otlmow_model.BaseClasses.OTLObject import get_attribute_by_name, get_attribute_by_uri\n\nfrom otlmow_converter.FileFormats.JsonLdContext import JsonLdContext\nfrom otlmow_converter.FileFormats.JsonLdExporter import JsonLdExporter\n\n\n# dict encoder = asset object to dict\n# dict decoder = dict to asset object\n\nclass DictDecoder:\n    @staticmethod\n    def set_value_by_dictitem(instance_or_attribute, key, value, waarde_shortcut: bool = False, ld: bool = False, ld_context: dict={}):\n        if not ld:\n            attribute_to_set = get_attribute_by_name(instance_or_attribute, key)\n        else:\n            key = JsonLdContext.replace_context(key, context_dict=ld_context)\n            attribute_to_set = get_attribute_by_uri(instance_or_attribute, key)\n        if attribute_to_set.field.waardeObject is not None: # complex / union / KwantWrd / dte\n\n            if isinstance(value, list):\n                for index, list_item in enumerate(value):\n                    if attribute_to_set.waarde is None or len(attribute_to_set.waarde) <= index:\n                        attribute_to_set.add_empty_value()\n\n                    if attribute_to_set.field.waarde_shortcut_applicable and waarde_shortcut: # dte / kwantWrd\n                        attribute_to_set.waarde[index]._waarde.set_waarde(list_item)\n                    else: # complex / union\n                        for k, v in list_item.items():\n                            DictDecoder.set_value_by_dictitem(attribute_to_set.waarde[index], k, v, waarde_shortcut,\n                                                              ld=ld, ld_context=ld_context)\n\n            elif isinstance(value, dict): # only complex / union possible\n                if attribute_to_set.waarde is None:\n                    attribute_to_set.add_empty_value()\n\n                for k, v in value.items():\n                    DictDecoder.set_value_by_dictitem(attribute_to_set.waarde, k, v, waarde_shortcut,\n                                                      ld=ld, ld_context=ld_context)\n            else: # must be a dte / kwantWrd\n                if attribute_to_set.waarde is None:\n                    attribute_to_set.add_empty_value()\n\n                attribute_to_set.waarde._waarde.set_waarde(value)\n        else:\n            if issubclass(attribute_to_set.field, KeuzelijstField):\n                if attribute_to_set.kardinaliteit_max != '1':\n                    value = [JsonLdContext.replace_context(list_item, context_dict=ld_context) for list_item in value]\n                else:\n                    value = JsonLdContext.replace_context(value, context_dict=ld_context)\n            attribute_to_set.set_waarde(value)\n","repo_name":"davidvlaminck/OTLMOW-Converter","sub_path":"otlmow_converter/FileFormats/DictDecoder.py","file_name":"DictDecoder.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
{"seq_id":"13903667658","text":"from math import fabs\n\n\nclass MU(object):\n    \"\"\"\n    Methods for solving physics problems on uniform motion (MU)\n    Note: use named (keyword) parameters\n    \"\"\"\n\n    POSICAO = 1\n    POSICAO_DE_ENCONTRO = 2\n    TEMPO_DE_ENCONTRO = 3\n\n    ERROR = \"Invalid arguments; check the method's documentation.\"\n\n    def calcular(self, operacao, kwargs):\n        \"\"\"\n        Dispatch the requested operation.\n        \"\"\"\n\n        resultado = None\n\n        if operacao == 1:\n            resultado = self.posicao(kwargs)\n        elif operacao == 2:\n            resultado = self.instante_de_encontro(kwargs)\n        elif operacao == 3:\n            resultado = self.tempo_de_encontro(kwargs)\n        else:\n            print(\"Invalid operation!\")\n\n        return resultado\n
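\n    # Illustrative usage sketch (added by the editor; not part of the original class):\n    #   mu = MU()\n    #   mu.calcular(MU.POSICAO, {'So': 10, 'V': 2, 'T': 5})\n    # returns {'S': 20, 'So': 10, 'V': 2, 'T': 5}, since S = So + V*T = 10 + 2*5.\n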
\n    def posicao(self, kwargs):\n        \"\"\"\n        Compute the position equation of uniform motion (MU): S = So + V*T.\n\n        Parameters:\n\n        S = final position\n        So = initial position\n        V = velocity\n        T = time\n\n        Returns = {\n            'S': final position,\n            'So': initial position,\n            'V': velocity,\n            'T': time\n        }\n\n        \"\"\"\n\n        S = kwargs.get('S', None)\n        So = kwargs.get('So', None)\n        V = kwargs.get('V', None)\n        T = kwargs.get('T', None)\n\n        if (S is None and\n            So is not None and\n            V is not None and\n            T is not None):\n\n            S = So + V*T\n\n        elif (So is None and\n              S is not None and\n              V is not None and\n              T is not None):\n\n            So = S - V*T\n\n        elif (V is None and\n              S is not None and\n              So is not None and\n              T is not None):\n\n            V = (S - So)/T\n\n        elif (T is None and\n              S is not None and\n              So is not None and\n              V is not None):\n\n            # the original repeated the V-branch condition here, so T was never computed\n            T = (S - So)/V\n\n        else:\n            return self.ERROR\n\n        resultado = {\n            'S': S,\n            'So': So,\n            'V': V,\n            'T': T\n        }\n\n        return resultado\n\n    def tempo_de_encontro(self, kwargs):\n        \"\"\"\n        Meeting time of two objects.\n\n        Parameters:\n\n        So1 = initial position of the first object.\n        V1 = velocity of the first object\n        So2 = initial position of the second object\n        V2 = velocity of the second object\n\n        Returns:\n\n        T = moment at which object 1 and object 2 meet\n        \"\"\"\n\n        So1 = kwargs.get('So1', None)\n        V1 = kwargs.get('V1', None)\n        So2 = kwargs.get('So2', None)\n        V2 = kwargs.get('V2', None)\n\n        if (So1 is None or\n            V1 is None or\n            So2 is None or\n            V2 is None):\n            return self.ERROR\n\n        T = (So2 - So1)/(V1 - V2)\n\n        return fabs(T)\n\n    def instante_de_encontro(self, kwargs):\n        \"\"\"\n        Instant or position of the meeting.\n\n        Parameters:\n\n        So1 = initial position of the first object.\n        V1 = velocity of the first object\n        So2 = initial position of the second object\n        V2 = velocity of the second object\n\n        Returns:\n\n        S = meeting position of S1 and S2\n        \"\"\"\n\n        T = self.tempo_de_encontro(kwargs)\n\n\n        So1 = kwargs.get('So1', None)\n        V1 = kwargs.get('V1', None)\n\n        kwargs = {\n            'So': So1,\n            'V': V1,\n            'T': T\n        }\n\n        # To find the meeting position it suffices to plug the meeting time into\n        # either object's position equation; here object 1's equation is used\n        resultado = self.calcular(self.POSICAO, kwargs)\n\n        return resultado['S']\n","repo_name":"PhilipeFerreira/Aulas","sub_path":"08_Fisica/Mecanica/Cinematica/Exercicios/biblioteca/fisica/mu.py","file_name":"mu.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"32756559716","text":"\r\n# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. 
You may assume all four edges of the grid are all surrounded by water.\r\n#\r\n# Example 1:\r\n#\r\n# Input:\r\n# 11110\r\n# 11010\r\n# 11000\r\n# 00000\r\n#\r\n# Output: 1\r\n#\r\n# Example 2:\r\n#\r\n# Input:\r\n# 11000\r\n# 11000\r\n# 00100\r\n# 00011\r\n#\r\n# Output: 3\r\n# ------------------------------------------------------------------------\r\n\r\n# Iterate through each cell; when we hit a '1' (an unvisited island cell), run a\r\n# DFS to mark the whole island, then increase the counter by 1.\r\n\r\nclass Solution():\r\n\r\n    def numIslands(self, grid):\r\n        if not grid:\r\n            return 0\r\n\r\n        count = 0\r\n        for i in range(len(grid)):\r\n            for j in range(len(grid[0])):\r\n                if grid[i][j] == '1': # this only fires on the first cell of each island; the DFS tags its neighbours with '#', so they never increment the count again\r\n                    self.dfs(grid, i, j)\r\n                    count += 1\r\n        return count\r\n\r\n    def dfs(self, grid, i, j):\r\n        if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]) or grid[i][j] != '1':\r\n            return\r\n        grid[i][j] = '#' # tag the visited cell so we neither revisit it nor count it twice\r\n        self.dfs(grid, i + 1, j)\r\n        self.dfs(grid, i - 1, j)\r\n        self.dfs(grid, i, j + 1)\r\n        self.dfs(grid, i, j - 1)\r\n
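\r\n# Illustrative usage sketch (added by the editor; not part of the original file):\r\n#   grid = [list(row) for row in [\"11000\", \"11000\", \"00100\", \"00011\"]]\r\n#   Solution().numIslands(grid)  # -> 3, matching Example 2 (the grid is mutated in place)\r\n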
\r\n############################################################################\r\n\r\n\r\n\r\n\r\n","repo_name":"adityakverma/Interview_Prepration","sub_path":"Leetcode/DFS_BFS/LC-200. Number of Islands.py","file_name":"LC-200. Number of Islands.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"10041645500","text":"class Parent:\n    a=10\n    def __init__(self):\n        self.b=40\nclass child(Parent):\n    a=888\n    def __init__(self):\n        super().__init__()\n        print(super().a) #10\n        #print(super().b)----->Attribute Error\n        print(self.a) #888\n        print(child.a) #888\n        print(Parent.a) #10\n        print(self.b) #40\n\nc=child()\n","repo_name":"ShikhaShrivastava/Python-core","sub_path":"OOP Concept/Super Method/Example-3.py","file_name":"Example-3.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
{"seq_id":"35584449119","text":"\"\"\"\n\nPyHGNC is written for Python 3; testing is not yet comprehensive.\n\n.. warning:: PyHGNC is not thoroughly tested on Windows.\n\nInstallation\n------------\n\n.. code-block:: sh\n\n    $ git clone https://github.com/LeKono/pyhgnc.git\n    $ cd pyhgnc\n    $ pip3 install pyhgnc\n\"\"\"\n\nfrom .manager.query import QueryManager\nfrom .manager.database import update\n\nquery = QueryManager\n\n__all__ = ['update', 'query']\n\n__version__ = '0.2.4'\n\n__title__ = 'PyHGNC'\n__description__ = 'Importing and querying HGNC data'\n__url__ = 'https://github.com/LeKono/pyhgnc.git'\n\n__author__ = 'Christian Ebeling & Andrej Konotopez'\n__email__ = 'Andrej.Konotopez@scai.fraunhofer.de'\n\n__license__ = 'Apache 2.0 License'\n__copyright__ = 'Copyright (c) 2017 Andrej Konotopez, Fraunhofer Institute for Algorithms and Scientific ' \\\n                'Computing SCAI, Schloss Birlinghoven, 53754 Sankt Augustin, Germany'","repo_name":"cebel/pyhgnc","sub_path":"src/pyhgnc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"}
{"seq_id":"74792454635","text":"from flask import Flask,jsonify,request\r\n\r\napp = Flask(__name__)\r\nalunos = []\r\nprofessorres = []\r\ndisciplinas = []\r\nofertadas = []\r\n@app.route('/alunos')\r\ndef retorna_alunos():\r\n    return jsonify(alunos)\r\n@app.route('/alunos', methods=['POST'])\r\ndef add_aluno():\r\n    new = request.json\r\n    if 'nome' not in new.keys():\r\n        return jsonify({'erro':'aluno sem nome'}),400\r\n    for aluno in alunos:\r\n        if aluno['id'] == new['id']:\r\n            return jsonify({'erro':'id ja utilizada'}),400\r\n    \r\n    alunos.append(request.json)\r\n    return jsonify(alunos),200\r\n    \r\n@app.route('/alunos/<int:id>') # the <int:id> converter was evidently lost in text extraction; the view takes id\r\ndef retorna_aluno_id(id):\r\n    for aluno in alunos:\r\n        if aluno['id'] == id:\r\n            return jsonify(aluno) \r\n    return jsonify({'erro':'aluno nao encontrado',}),400\r\n@app.route('/reseta',methods=['POST'])\r\ndef reseta_():\r\n    alunos.clear()\r\n    professorres.clear()\r\n    disciplinas.clear()\r\n    ofertadas.clear()\r\n    return jsonify({'ok':'resetado com sucesso'}),200\r\n@app.route('/alunos/<int:id>',methods=['DELETE'])\r\ndef delete_aluno(id):\r\n    for index,aluno in enumerate(alunos):\r\n        if aluno['id'] == id:\r\n            del alunos[index]\r\n            return jsonify(),200 # the original's bare `return` made the view return None, which crashes the request\r\n    return jsonify({'erro':'aluno nao encontrado'}),400\r\n@app.route(\"/alunos/<int:id>\",methods=['PUT'])\r\ndef edita(id):\r\n    dados = request.json\r\n    if 'nome' not in dados.keys():\r\n        return jsonify({'erro':'aluno sem nome'}),400\r\n    for aluno in alunos:\r\n        if aluno['id'] == id:\r\n            aluno['nome'] = dados['nome']\r\n            return jsonify(alunos)\r\n    \r\n    return jsonify({'erro':'aluno nao encontrado',}),400\r\n@app.route(\"/professores\",methods=['GET'])\r\ndef professor_show():\r\n    return jsonify(professorres)\r\n@app.route(\"/professores\",methods=['POST'])\r\ndef add_prof():\r\n    prof=request.json\r\n    if 'nome' in prof.keys():\r\n        for professor in professorres:\r\n            if professor['id'] == prof['id']:\r\n                return jsonify({'erro':'id ja utilizada'}),400\r\n        professorres.append(prof)\r\n        return jsonify({}),200\r\n    else:\r\n        return jsonify({'erro':'professor sem nome'}),400 \r\n@app.route('/professores/<int:id>')\r\ndef retorna_professor(id):\r\n    for professor in professorres:\r\n        if professor['id'] == id:\r\n            return jsonify(professor)\r\n    return jsonify({'erro':'professor nao encontrado'}),400\r\n@app.route('/professores/<int:id>',methods=['DELETE'])\r\ndef delete_prof(id):\r\n\r\n    for index,prof in enumerate(professorres):\r\n        if prof['id'] == id :\r\n            del professorres[index]\r\n            return jsonify(),200 # was a bare `return `, which returns None and crashes the request\r\n    return jsonify({'erro':'professor nao encontrado'}),400\r\n
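# Illustrative note (added by the editor; not part of the original API): the\r\n# id-based routes above can be exercised against the dev server started at the\r\n# bottom of this file, e.g.\r\n#   curl -X DELETE http://localhost:5002/professores/1\r\n#   curl -X POST -H 'Content-Type: application/json' -d '{\"id\": 1, \"nome\": \"Ana\"}' http://localhost:5002/alunos\r\n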
@app.route('/professores/<int:id>',methods=['PUT'])\r\ndef edita_prof(id):\r\n    g = request.json\r\n    if 'nome' in g.keys():\r\n        for professor in professorres:\r\n            if professor['id'] == id:\r\n                professor['nome'] = g['nome']\r\n                return jsonify(professor)\r\n        return jsonify({'erro':'professor nao encontrado'}),400\r\n    else:\r\n        return jsonify({'erro':'professor sem nome'}),400\r\n@app.route('/disciplinas',methods=['GET'])\r\ndef diciplinas_retorno():\r\n    return jsonify( disciplinas)\r\n@app.route('/disciplinas',methods=['POST'])\r\ndef add_diciplinas():\r\n    dados = request.json\r\n    if 'nome' not in dados.keys() or 'id' not in dados.keys() or 'carga_horaria' not in dados.keys() or 'plano_ensino' not in dados.keys() or 'status' not in dados.keys():\r\n        return jsonify({'erro':'diciplina sem nome'}),400\r\n    for disciplina in disciplinas:\r\n        if disciplina['id'] == dados['id']:\r\n            return jsonify({'erro':'id ja utilizada'}),400\r\n        \r\n    else: # for/else: this branch runs once the loop finds no duplicate id\r\n        disciplinas.append(dados)\r\n        return jsonify(),200\r\n@app.route('/disciplinas/<int:id>',methods=['GET'])\r\ndef diciplina_id(id):\r\n    for diciplina in disciplinas:\r\n        if diciplina['id'] == id:\r\n            return jsonify(diciplina)\r\n    return jsonify({'erro':'disciplina nao encontrada'}),400\r\n@app.route('/disciplinas/<int:id>',methods=['DELETE'])\r\ndef delete_diciplina(id):\r\n    for index,diciplina in enumerate(disciplinas):\r\n        if diciplina['id'] == id:\r\n            del disciplinas[index]\r\n            return 'Diciplina deletada com sucesso'\r\n    return jsonify({'erro':'disciplina nao encontrada'}),400\r\n@app.route('/disciplinas/<int:id>',methods=['PUT'])\r\ndef edita_diciplina(id):\r\n    dados=request.json\r\n    for diciplina in disciplinas:\r\n        if diciplina['id'] == id:\r\n            if 'nome' in dados.keys():\r\n                diciplina['nome'] = dados['nome']\r\n            if 'status' in dados.keys():\r\n                diciplina['status'] = dados['status'] # the original wrote to `disciplina`, an undefined name, here and below\r\n            if 'plano_ensino' in dados.keys():\r\n                diciplina['plano_ensino'] = dados['plano_ensino']\r\n            if 'carga_horaria' in dados.keys():\r\n                diciplina['carga_horaria'] = dados['carga_horaria']\r\n            return 'Diciplina Atualizada com sucesso'\r\n    return jsonify({'erro':'disciplina nao encontrada'}),400\r\n@app.route(\"/ofertadas\",methods=['GET'])\r\ndef retorna_ofertada():\r\n    return jsonify(ofertadas)\r\n@app.route('/ofertadas',methods=['POST'])\r\ndef add_ofertada():\r\n    dados = request.json\r\n\r\n    if 'id_professor' in dados.keys():\r\n        professor_valido = False\r\n        ok_add = True\r\n        for professor in professorres:\r\n            if professor['id'] == dados['id_professor']:\r\n                professor_valido = True\r\n        if professor_valido == False:\r\n            return jsonify({'erro' : 'id professor invalido'}),400\r\n    print(len(dados.keys()))\r\n    if len(dados.keys()) >=5:\r\n        for ofertada in ofertadas:\r\n            if ofertada['id'] == dados['id']:\r\n                return jsonify({'erro':'id ja utilizada'}),400\r\n        ofertadas.append(dados)\r\n        return jsonify(),200\r\n    else:\r\n        return jsonify({'erro':'data faltando'}),400\r\n@app.route('/ofertadas/<int:id>',methods=['GET'])\r\ndef retorna_id_disp(id):\r\n    for ofertada in ofertadas:\r\n        if ofertada['id'] == id:\r\n            return jsonify( ofertada)\r\n    return jsonify({'erro':'ofertada nao encontrada'}),400 \r\n@app.route('/ofertadas/<int:id>',methods=['DELETE'])\r\ndef deleta_ofertada(id):\r\n    for index,ofertada in enumerate(ofertadas):\r\n        if ofertada['id'] == id:\r\n            del ofertadas[index]\r\n            return jsonify(),200\r\n    return jsonify({'erro':'ofertada nao encontrada'}),400 \r\n@app.route('/ofertadas/<int:id>',methods=['PUT'])\r\ndef edita_ofertada(id):\r\n    dados = request.json\r\n    for ofertada in ofertadas:\r\n        if ofertada['id'] == id:\r\n            if 'ano' in dados.keys():\r\n                ofertada['ano'] =dados['ano']\r\n            if 'semestre' in 
dados.keys():\r\n ofertada['semestre'] = dados['semestre']\r\n if 'turma' in dados.keys():\r\n ofertada['turma'] = dados['turma']\r\n if 'data' in dados.keys():\r\n ofertada['data'] = dados['data']\r\n if 'id_professor' in dados.keys():\r\n ofertada['id_professor']=dados['id_professor']\r\n return jsonify(),200\r\n return jsonify({'erro':'ofertada nao encontrada'}),400\r\n\r\n \r\nif __name__ == '__main__':\r\n app.run(port=5002,debug=True,host='localhost')\r\n","repo_name":"gabrielmonzato20/ac6_distr","sub_path":"ac5.py","file_name":"ac5.py","file_ext":"py","file_size_in_byte":7260,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"17972085608","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nimport torch\nimport torch.nn\nimport torchvision\n\nfrom torch.nn import Module, Conv3d, BatchNorm3d, MaxPool3d, ReLU\nfrom torch.nn import functional as F\nfrom scipy import signal\n\nfrom AQA_head.nets.utils import ChannelShuffleLayer, Channelwise_1D, TemporalConv1DLayer, MaxPoolChannel, AvgPoolChannel\n\n\n\nclass AQA(Module):\n \"\"\"\n AQA implementation in pytorch\n \"\"\"\n\n def __init__(self, input_shape_JCA, input_shape_ADA, n_layers_JCA=4,\n n_layers_ADA=3,n_branches_JCA=3, n_branches_ADA=3,\n expansion_factor_JCA = 1.25, expansion_factor_ADA = 1.25, final_expansion_JCA_for_balance = 10,\n figure_skating_flag = 0, Ablated_flag = 'N',\n Spatial_Attention_Method = 'N', Temporal_Attention_Method = 'N',\n Coeff_Spatial_Attention = 1, Coeff_Temporal_Attention=1):\n\n super(AQA, self).__init__()\n is_dilated = False\n\n self.expansion_factor_JCA = expansion_factor_JCA\n self.expansion_factor_ADA = expansion_factor_ADA\n self.n_layers_JCA = n_layers_JCA\n self.n_layers_ADA = n_layers_ADA\n self.is_dilated = is_dilated\n self.n_branches_JCA = n_branches_JCA\n self.n_branches_ADA = n_branches_ADA\n self.final_expansion_JCA_for_balance = final_expansion_JCA_for_balance\n self.figure_skating_flag = figure_skating_flag\n self.n_channels_out = None\n self.Ablated_flag = Ablated_flag\n self.Spatial_Attention_Method = Spatial_Attention_Method\n self.Temporal_Attention_Method = Temporal_Attention_Method\n self.Coeff_Spatial_Attention = Coeff_Spatial_Attention\n self.Coeff_Temporal_Attention = Coeff_Temporal_Attention\n\n\n input_shape_JCA = list(input_shape_JCA)\n input_shape_ADA = list(input_shape_ADA)\n n_channels_in_ADA = input_shape_ADA[1]\n n_channels_in_JCA = input_shape_JCA[1]\n\n n_channels_out_JCA = self.__define_JCA_layers_overall(input_shape_JCA, n_layers_JCA, n_branches_JCA,\n expansion_factor_JCA, is_dilated)\n n_channels_out_ADA = self.__define_ADA_layers_overall(input_shape_ADA, n_layers_ADA, n_branches_ADA,\n expansion_factor_ADA, is_dilated)\n\n self.n_channels_out_JCA = n_channels_out_JCA\n self.n_channels_out_ADA = n_channels_out_ADA\n\n if self.Ablated_flag == 'ADA':\n self.n_channels_out = self.n_channels_out_ADA\n elif self.Ablated_flag == 'JCA':\n self.n_channels_out = self.n_channels_out_JCA * final_expansion_JCA_for_balance\n elif self.Ablated_flag == 'Appearance':\n self.n_channels_out = n_channels_in_ADA\n elif self.Ablated_flag == 'Pose':\n self.n_channels_out = n_channels_in_JCA\n else:\n self.n_channels_out = (self.n_channels_out_ADA + self.n_channels_out_JCA * final_expansion_JCA_for_balance)\n\n def forward(self, input):\n\n input_ADA = input[0]\n input_JCA = input[1]\n input_ADA = 
self.__attention_module(input_ADA)\n input_JCA = self.__attention_module(input_JCA)\n\n expansion_factor_JCA = self.expansion_factor_JCA\n expansion_factor_ADA = self.expansion_factor_ADA\n n_layers_JCA = self.n_layers_JCA\n n_layers_ADA = self.n_layers_ADA\n n_branches_JCA = self.n_branches_JCA\n n_branches_ADA = self.n_branches_ADA\n\n out = []\n\n if self.Ablated_flag == 'ADA':\n output_ADA = self.__call_ADA_layers(input_ADA, n_layers_ADA, n_branches_ADA, expansion_factor_ADA)\n out.append(output_ADA)\n output = output_ADA\n elif self.Ablated_flag == 'JCA':\n output_JCA = self.__call_JCA_layers(input_JCA, n_layers_JCA, n_branches_JCA, expansion_factor_JCA)\n out.append(output_JCA)\n output = output_JCA\n elif self.Ablated_flag == 'Appearance':\n output = input[0]\n elif self.Ablated_flag == 'Pose':\n output = input[1]\n else:\n output_ADA = self.__call_ADA_layers(input_ADA, n_layers_ADA, n_branches_ADA, expansion_factor_ADA)\n out.append(output_ADA)\n output_JCA = self.__call_JCA_layers(input_JCA, n_layers_JCA, n_branches_JCA, expansion_factor_JCA)\n out.append(output_JCA)\n output = torch.cat(out, dim=1)\n\n return output\n\n def __define_JCA_layers_overall(self, input_shape_JCA, n_layers_JCA, n_branches_JCA, expansion_factor_JCA, is_dilated):\n \"\"\"\n Define Overall JCA layers (Not the details)\n \"\"\"\n\n # how many layers of timeception\n for i in range(n_layers_JCA):\n\n n_channels_in_JCA = input_shape_JCA[1]\n layer_num = i + 1\n\n # get details about grouping\n getchannels = self.__get_n_channels_for_JCA(expansion_factor_JCA, n_branches_JCA, n_channels_in_JCA)\n n_channels_per_branch_in_JCA, n_channels_base_out_JCA, n_channels_out_JCA = getchannels\n\n\n # temporal conv per group\n self.__define_JCA(input_shape_JCA, n_branches_JCA, is_dilated, layer_num)\n\n # activation\n layer_name = 'relu_JCA%d' % (layer_num)\n layer = ReLU()\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n # SpatialConvForHeatmaps\n layer_name = 'conv_spatial_JCA%d' % (layer_num)\n layer = Conv3d(n_channels_out_JCA , n_channels_out_JCA , kernel_size=(1, 3, 3),padding=(0,layer_num!=n_layers_JCA,layer_num!=n_layers_JCA))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n layer_name = 'maxpool_spatial_JCA%d' % (layer_num)\n layer = MaxPool3d(kernel_size=(1, (self.figure_skating_flag != 1) + 1, (self.figure_skating_flag != 1) + 1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n n_channels_in_JCA = n_channels_out_JCA\n input_shape_JCA[1] = n_channels_in_JCA\n input_shape_JCA[2] = int(input_shape_JCA[2]/float(2))\n\n layer_name = 'temp&spatialMaxPool_JCA_last_adapt'\n layer = MaxPool3d(kernel_size=(8, (self.n_layers_JCA != 3 or self.figure_skating_flag==1) + 1, (self.n_layers_JCA != 3 or self.figure_skating_flag==1) + 1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n layer_name = 'channel_expansion_JCA_last_adapt'\n layer = Conv3d(input_shape_JCA[1], input_shape_JCA[1] * self.final_expansion_JCA_for_balance , kernel_size=(1, 1, 1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n return n_channels_in_JCA\n\n def __define_JCA(self, input_shape_JCA,n_branches_JCA, is_dilated, layer_num):\n \"\"\"\n Define detailed covolutions inside the each JCA block\n \"\"\"\n getchannels_JCA = self.__get_n_channels_for_JCA(self.expansion_factor_JCA, n_branches_JCA, input_shape_JCA[1])\n n_channels_per_branch_in_JCA, n_channels_base_branch_out_JCA, n_channels_out_JCA = getchannels_JCA\n\n #assert n_channels_in_JCA % 
n_channels_per_branch_in_JCA == 0\n\n # type of multi-scale kernels to use: either multi_kernel_sizes or multi_dilation_rates\n if is_dilated:\n kernel_sizes_temporal = (3, 3, 3)\n dilation_rates_temporal = (1, 2, 3)\n else:\n kernel_sizes_temporal = (3, 5, 7)\n dilation_rates_temporal = (1, 1, 1)\n\n kernel_size_channel = 3\n dilation_rate_channel = 1\n\n for branch_num in range (n_branches_JCA):\n\n base_AvgPool_size = int(n_channels_per_branch_in_JCA/float(n_channels_base_branch_out_JCA))\n n_channels_current_branch_out_JCA = int(n_channels_per_branch_in_JCA/(base_AvgPool_size + branch_num))\n\n ## Temporal\n layer_name = 'temporal_b%d_JCA%d' % (branch_num, layer_num)\n layer = TemporalConv1DLayer(input_shape_JCA, kernel_sizes_temporal[branch_num],\n dilation_rates_temporal[branch_num], layer_name)\n setattr(self, layer_name, layer)\n\n ## Temporal MaxPool\n layer_name = 'tmpmaxpool_b%d_JCA%d' % (branch_num, layer_num)\n layer = MaxPool3d(kernel_size=(2,1,1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## Channelwise\n layer_name = 'convch_b%d_JCA%d' % (branch_num, layer_num)\n layer = Channelwise_1D(input_shape_JCA, kernel_size_channel,\n dilation_rate_channel, layer_name)\n setattr(self, layer_name, layer)\n\n\n ## Channel-wise AvgPool\n # ChannelPoolsize = int(n_channels_per_branch_in_JCA/n_channels_per_branch_out_JCA)\n\n layer_name = 'chavgpool_b%d_JCA%d' % (branch_num, layer_num)\n layer = AvgPoolChannel(base_AvgPool_size + branch_num, layer_name)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## BatchNorm\n layer_name = 'bn1_b%d_JCA%d' % (branch_num, layer_num)\n layer = BatchNorm3d(n_channels_current_branch_out_JCA)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n\n ## BatchNorm\n # layer_name = 'bn2_b%d_JCA%d' % (branch_num, layer_num)\n # layer = BatchNorm3d(n_channels_current_branch_out_JCA)\n # layer._name = layer_name\n # setattr(self, layer_name, layer)\n\n\n def __define_ADA_layers_overall(self, input_shape_ADA, n_layers_ADA, n_branches_ADA, expansion_factor_ADA, is_dilated):\n \"\"\"\n Define layers inside the timeception layers.\n \"\"\"\n\n # how many layers of timeception\n for i in range(n_layers_ADA):\n\n n_channels_in_ADA = input_shape_ADA[1]\n\n layer_num = i + 1\n\n # get details about grouping\n getchannels_ADA = self.__get_n_channels_for_ADA(expansion_factor_ADA, n_branches_ADA, n_channels_in_ADA=\n n_channels_in_ADA)\n n_channels_out_sep_per_branch_in_out, n_channels_out_ADA = getchannels_ADA\n\n # channel reduction\n layer_name = 'chreduce_ADA%d' % (layer_num)\n layer = Conv3d(n_channels_in_ADA , n_channels_out_sep_per_branch_in_out , kernel_size=(1, 1, 1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n # temporal conv per group\n self.__define_ADA(input_shape_ADA, n_branches_ADA, is_dilated, layer_num)\n\n # activation\n layer_name = 'relu_ADA%d' % (layer_num)\n layer = ReLU()\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n # SpatialConv\n if self.Spatial_Attention_Method != 'N':\n\n layer_name = 'conv_spatial_ADA%d' % (layer_num)\n layer = Conv3d(n_channels_out_ADA , n_channels_out_ADA , kernel_size=(1, 3, 3), padding= (0,1,1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n\n n_channels_in_ADA = n_channels_out_ADA\n input_shape_ADA[1] = n_channels_in_ADA\n\n\n return n_channels_in_ADA\n\n\n def __define_ADA(self, input_shape_ADA, n_branches_ADA, is_dilated, layer_num):\n \"\"\"\n Define layers inside grouped convolutional block.\n \"\"\"\n\n 
getchannels_ADA = self.__get_n_channels_for_ADA(self.expansion_factor_ADA, n_branches_ADA, input_shape_ADA[1])\n n_channels_sep_per_branch_in_out_ADA, n_channels_out_ADA = getchannels_ADA\n\n #assert n_channels_in_ADA % n_channels_per_branch_in_ADA == 0\n\n # type of multi-scale kernels to use: either multi_kernel_sizes or multi_dilation_rates\n if is_dilated:\n kernel_sizes_temporal = (3, 3, 3)\n dilation_rates_temporal = (1, 2, 3)\n else:\n kernel_sizes_temporal = (3, 5, 7)\n dilation_rates_temporal = (1, 1, 1)\n\n\n for branch_num in range (n_branches_ADA):\n\n ## Temporal\n temp_inp_branch_ADA = input_shape_ADA\n temp_inp_branch_ADA[1] = n_channels_sep_per_branch_in_out_ADA\n layer_name = 'temporal_b%d_ADA%d' % (branch_num, layer_num)\n layer = TemporalConv1DLayer(temp_inp_branch_ADA, kernel_sizes_temporal[branch_num],\n dilation_rates_temporal[branch_num], layer_name)\n\n setattr(self, layer_name, layer)\n\n ## BatchNorm\n layer_name = 'bn1_b%d_ADA%d' % (branch_num, layer_num)\n layer = BatchNorm3d(n_channels_sep_per_branch_in_out_ADA)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## Temporal MaxPool\n layer_name = 'tmpmaxpool_b%d_ADA%d' % (branch_num, layer_num)\n layer = MaxPool3d(kernel_size=(2,1,1))\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n ## BatchNorm\n layer_name = 'bn2_b%d_ADA%d' % (branch_num, layer_num)\n layer = BatchNorm3d(n_channels_sep_per_branch_in_out_ADA)\n layer._name = layer_name\n setattr(self, layer_name, layer)\n\n def __call_JCA_layers(self, tensor, n_layers_JCA, n_branches_JCA, expansion_factor_JCA):\n\n\n input_shape = tensor.size()\n n_channels_in_JCA = input_shape[1]\n\n # how many layers of timeception\n\n for i in range(n_layers_JCA):\n layer_num = i + 1\n\n # get details about grouping\n getchannels_JCA = self.__get_n_channels_for_JCA(expansion_factor_JCA, n_branches_JCA, n_channels_in_JCA)\n n_channels_per_branch_in, n_channels_base_branch_out, n_channels_out = getchannels_JCA\n\n # temporal conv per group\n tensor = self.__call_JCA(tensor, layer_num, n_branches_JCA)\n\n n_channels_in_JCA = n_channels_out\n\n tensor = getattr(self, 'temp&spatialMaxPool_JCA_last_adapt')(tensor)\n tensor = getattr(self, 'channel_expansion_JCA_last_adapt')(tensor)\n\n\n return tensor\n\n\n def __call_JCA(self, tensor, layer_num, n_branches):\n\n t = []\n\n for branches in range (n_branches):\n\n t_1 = getattr(self, 'temporal_b%d_JCA%d' % (branches, layer_num))(tensor)\n t_2 = getattr(self, 'convch_b%d_JCA%d' % (branches, layer_num))(t_1)\n t_3 = getattr(self, 'chavgpool_b%d_JCA%d' % (branches, layer_num))(t_2)\n t_4 = getattr(self, 'tmpmaxpool_b%d_JCA%d' % (branches, layer_num))(t_3)\n t_5 = getattr(self, 'bn1_b%d_JCA%d' % (branches, layer_num))(t_4)\n t.append(t_5)\n\n tensor = torch.cat(t, dim=1)\n tensor = getattr(self, 'relu_JCA%d' % (layer_num))(tensor)\n\n tensor = getattr(self, 'conv_spatial_JCA%d' % (layer_num))(tensor)\n tensor = getattr(self, 'maxpool_spatial_JCA%d' % (layer_num))(tensor)\n\n\n\n # concatenate channels of branches\n\n return tensor\n\n def __call_ADA_layers(self, tensor, n_layers_ADA, n_branches_ADA, expansion_factor_ADA):\n\n\n input_shape = tensor.size()\n n_channels_in_ADA = input_shape[1]\n\n # how many layers of timeception\n\n for i in range(n_layers_ADA):\n layer_num = i + 1\n\n # get details about grouping\n getchannels_ADA = self.__get_n_channels_for_ADA(expansion_factor_ADA, n_branches_ADA, n_channels_in_ADA )\n n_channels_sep_per_branch_in_out, n_channels_out = getchannels_ADA\n\n # temporal 
conv per group\n tensor = self.__call_ADA(tensor, layer_num, n_branches_ADA)\n\n n_channels_in_ADA = n_channels_out\n\n\n return tensor\n\n\n def __call_ADA(self, tensor, layer_num, n_branches):\n\n t_1 = getattr(self, 'chreduce_ADA%d' % (layer_num))(tensor)\n t = []\n\n for branches in range (n_branches):\n\n t_2 = getattr(self, 'temporal_b%d_ADA%d' % (branches, layer_num))(t_1)\n # t_3 = getattr(self, 'bn1_b%d_ADA%d' % (branches, layer_num))(t_2)\n t_3 = getattr(self, 'tmpmaxpool_b%d_ADA%d' % (branches, layer_num))(t_2)\n t_5 = getattr(self, 'bn2_b%d_ADA%d' % (branches, layer_num))(t_3)\n t.append(t_5)\n\n t = torch.cat(t, dim=1)\n tensor = getattr(self, 'relu_ADA%d' % (layer_num))(t)\n\n if self.Spatial_Attention_Method != 'N':\n tensor = getattr(self, 'conv_spatial_ADA%d' % (layer_num))(tensor)\n\n # concatenate channels of branches\n\n return tensor\n\n def __get_n_channels_for_JCA(self, expansion_factor_JCA, n_branches_JCA, n_channels_in_JCA):\n\n if n_branches_JCA == 3:\n if expansion_factor_JCA == 13/12:\n channelAvgPool_size = 2\n elif expansion_factor_JCA == 47/60:\n channelAvgPool_size = 3\n else:\n raise ValueError('Current setting of expansion factor is not practical. Please try again')\n elif n_branches_JCA == 2:\n if expansion_factor_JCA == 5 / 6:\n channelAvgPool_size = 2\n elif expansion_factor_JCA == 7/12:\n channelAvgPool_size = 3\n else:\n raise ValueError('Current setting of expansion factor is not practical. Please try again')\n else:\n raise ValueError('Deploying more than 3 branches or a single branch is not valid. Please try again')\n\n n_channels_per_branch_in = int(n_channels_in_JCA)\n # n_channels_base_branch_out = int(n_channels_in_JCA * expansion_factor_JCA / float(n_branches_JCA))\n n_channels_base_branch_out = int (n_channels_in_JCA/float(channelAvgPool_size))\n\n n_channels_out = 0\n for branch in range(n_branches_JCA):\n n_channels_out = n_channels_out + int(n_channels_in_JCA/float(channelAvgPool_size + branch))\n\n return n_channels_per_branch_in, n_channels_base_branch_out, n_channels_out\n\n\n def __get_n_channels_for_ADA(self, expansion_factor_ADA, n_branches_ADA, n_channels_in_ADA):\n\n n_channels_out_sep_per_branch_in_out = int(n_channels_in_ADA * expansion_factor_ADA/ float(n_branches_ADA))\n n_channels_out = int(n_channels_out_sep_per_branch_in_out * n_branches_ADA)\n\n return n_channels_out_sep_per_branch_in_out, n_channels_out\n\n\n def __attention_module(self, tensor):\n\n Num_timesteps = tensor.size()[2]\n Spatial_size = tensor.size()[3]\n a = self.Coeff_Temporal_Attention\n \n if self.Temporal_Attention_Method == 'Linear':\n timeArray = torch.arange(Num_timesteps).to(device='cuda', dtype=torch.float)\n timeArray = a + (1 - a) * (timeArray) / Num_timesteps # linear ramp from a to 1; floor division '//' here would collapse every weight to a\n new_tensor = tensor * timeArray.view(1, 1, Num_timesteps, 1, 1)\n else:\n new_tensor = tensor\n\n if self.Temporal_Attention_Method == 'Gaussian':\n spatialArray = torch.from_numpy(signal.gaussian(Spatial_size, std = self.Coeff_Spatial_Attention)).to(device='cuda', dtype=torch.float)\n new_tensor = new_tensor * spatialArray.view(1, 1, 1, Spatial_size, 1)\n new_tensor = new_tensor * spatialArray.view(1, 1, 1, 1, Spatial_size)\n\n return new_tensor\n\n","repo_name":"MahdiNek/EAGLE-Eye","sub_path":"AQA_head/nets/AQA.py","file_name":"AQA.py","file_ext":"py","file_size_in_byte":19484,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"71460475756","text":"import re\n\nemails = set()\n\nwith open(\"emails.txt\", \"r\") as file:\n\tfor 
line in file:\n\t\tif line in emails:\n\t\t\tcontinue\n\t\telse:\n\t\t\tverify_email = re.search(r\"\\w[\\w\\-\\.]+\\@\\w+\\.\\w{1,4}\", line)\n\t\t\t\n\t\t\tif verify_email is not None:\n\t\t\t\tfound = verify_email.group() \n\t\t\t\temails.add(found)\n\t\t\t\nwith open(\"emails_cleaned.txt\", \"w\") as exit_file:\n\tfor email in sorted(emails):\n\t\texit_file.write(email + \"\\n\")\n\t\t\n# Execution must happen in the same folder!\n","repo_name":"edubaschool/Growth-Hacking-101","sub_path":"clean_emails_from_txt_file_with_regex.py","file_name":"clean_emails_from_txt_file_with_regex.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"73"} +{"seq_id":"73165733036","text":"\n# # # ##################################################################################################\n# # # #\n# # #\n# # #\n# # # def money_change(pounds):\n# # # pounds = 1.09 * euro\n# # # return pounds\n# # #\n# # # pound1 = float(input(\"Please enter the first pound value. \"))\n# # # print(pound1, type(pound1))\n# # #\n# # # pound2 = float(input(\"dearest sir or madam, please specify the number or quantity of pound you'd like to exchange for euro. \"))\n# # # print(pound2, type(pound2))\n# # #\n# # #\n# # # outputMessage = \" The equivalent value in euro is : \"\n# # # print(f\"You're exchanging {pound1} for {money_change(pound1)}\")\n# # # print (outputMessage + str(money_change(pound2))\n# # #\n# # # # print (euros, type(euro)))\n# #\n# #\n\n# #\n# #\n\n\n\n# #\n# #\n# # # 1. Money change\n# # # Write a function called money_change\n# # # that takes an amount of money in English pounds as its parameter,\n# # # converts it into euros and returns the result (currency exchange rate: 1 GBP = 1.09 EUR).\n# # # Call this function at least twice in the program; at least one of the calls must be based on the data asked from the user). Output the results in euros.\n# #\n# # # Example of the function call:\n# #\n# # # >>>money_change(150.6)\n# # # 164.154\n# # # Example of the program output:\n# #\n# # # Please enter the amount of money in English pounds: 80\n# # # This is 87.2 EUR\n# # # 500 GBP is 545.0 EUR\n# #\n# # # general format for a function definition:\n# # # def functionName(input parameters):\n# # # DON'T FORGET THE COLON\n# #\n# # # function definition\n# # def money_change(pounds):\n# # # thanks, Python\n# # euros = 1.09 * pounds\n# # return euros\n# #\n# #\n# # pounds1 = float(input(\"Please enter the first pound value.\"))\n# # print(pounds1, type(pounds1))\n# # pounds2 = float(input(\n# # \"Dearest sir or madam, please specify the number or quantity of pounds you'd like to exchange for euros. 
Thank you.\"))\n\n# # print(pounds2, type(pounds2))\n# # outputMessage = \"The equivalent value in euros is: \"\n# # print(f\"You're exchanging {pounds1} for {money_change(pounds1)}\")\n# # print(outputMessage + str(money_change(pounds2)))\n# #\n# #\n# # # function calls\n# # # money_change(someValue1)\n# # # money_change(someValue2)\n# #\n# # #################################################################\n# #\n# #\n# # def money_change_2(pounds):\n# # # thanks, Python\n# # euros = 1.09 * pounds\n# # print(f\"The number of pounds you gave me is: {pounds}\")\n# # print(\"The number of euros is: \", euros, type(euros))\n# # return euros\n# #\n# #\n# # money_change_2(34345.5656)\n# #\n# # # the variablese euros and pounds only have function scope and cannot be called or referenced outside of the function\n# # # print(euros, type(euros))\n# # # print(pounds, type(pounds))\n# #\n#\n# # print(money_change_2(34345.5656), type(money_change_2(34345.5656)))\n#\n#\n#\n# def name_month(number):\n# if int(number) == 1:\n# print(\"January\")\n# if int(number) == 2:\n# print(\"February\")\n# if int(number) == 3:\n# print(\"March\")\n# if int(number) == 4:\n# print(\"April\")\n# if int(number) == 5:\n# print(\"May\")\n# if int(number) == 6:\n# print(\"June\")\n# if int(number) == 7:\n# print(\"July\")\n# if int(number) == 8:\n# print(\"August\")\n# if int(number) == 9:\n# print(\"September\")\n# if int(number) == 10:\n# print(\"October\")\n# if int(number) == 11:\n# print(\"November\")\n# if int(number) == 12:\n# print(\"December\")\n#\n# number = input(\"Enter the number of the month: \")\n# #name_month(number)\n#\n#\n# # function definition here\n# # assume the user enters integer values for date, month and year\n# def date_as_string(day, month, year): # date, month and year are parameters; also known as arguments\n# if month == 1:\n# monthName = \"January\"\n# if month == 2:\n# monthName = \"February\"\n# if month == 3:\n# monthName = \"March\"\n# if month == 4:\n# monthName = \"April\"\n# if month == 5:\n# monthName = \"May\"\n# if month == 6:\n# monthName = \"June\"\n# if month == 7:\n# monthName = \"July\"\n# if month == 8:\n# monthName = \"August\"\n# if month == 9:\n# monthName = \"September\"\n# if month == 10:\n# monthName = \"October\"\n# if month == 11:\n# monthName = \"November\"\n# if month == 12:\n# monthName = \"December\"\n# print(f\"{monthName} {day}, {year}\")\n# # function call below\n# # date_as_string(1, 1, 2021)\n# ######################################\n# # Then write a program that asks the user to enter a day, a month and a year in numbers,\n# # and the program outputs the corresponding date as a string.\n# # want output in the form January 1, 2021\n# day = input(\"Enter the number of the day: \")\n# month = int(input(\"Enter the number of the month: \"))\n# year = input(\"Enter the number of the year: \")\n# date_as_string(day, month, year)\n\n\n##########################################################################################################\n# def hello_world(words):\n# size = max(len(word) for word in words)\n# print(\"*\"*(size+4))\n#\n# for i in words:\n# print(\"*\"+\" \"+i+\" \"*((size+4)-len(i)-3)+\"*\")\n#\n# print(\"*\"*(size+4))\n#\n# hello_world([\"Hello\",\"world\",\"in\",\"a\",\"frame\"])\n\n\n\n#########################################################################################################\n# Python program to check if year is a leap year or not\nyear = 2000\n\n # To get year (integer input) from the user\nyear = int(input(\"Enter a year: 
\"))\nif (year % 4) == 0:\n if (year % 100) == 0:\n if (year % 400) == 0:\n print(\"{0} is a leap year\".format(year))\n else:\n print(\"{0} is not a leap year\".format(year))\n else:\n print(\"{0} is a leap year\".format(year))\nelse:\n print(\"{0} is not a leap year\".format(year))\n","repo_name":"cammyong/SDA_Project","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":5887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31320925392","text":"import math\nfrom pickle import APPENDS\n\nwhile True:\n try:\n 輸入=input().split('')\n if 輸入[0]>輸入[1]:\n print(\"最小值:\"+輸入[1])\n else:\n if 輸入[1]>輸入[0]:\n print(\"最小值:\"+輸入[0])\n else:\n print(\"兩數相同\")\n except(EOFError):\n break","repo_name":"108534149/108534149PY","sub_path":"python20230106/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9778380578","text":"import sys, os, yaml, re, json\nimport numpy as np, torch as th\nfrom lib import reorder\nimport lib\nimport argparse, collections\nfrom termcolor import cprint, colored\nimport statistics\nfrom PIL import Image\nfrom torchvision.transforms import functional as transfunc\nimport rich\nc = rich.get_console()\n\n\ndef PracticalAttack(argv):\n '''\n Attack a pre-trained model\n '''\n ag = argparse.ArgumentParser()\n ag.add_argument('-A', '--attack', type=str, default='SPSA')\n ag.add_argument('-e', '--epsilon', default=1./255., type=float)\n '''\n A NOTE ON SELECTION OF EPSILON (For Attacking JDModel)\n\n 0.062 (16/255) -> top5 go out of sight with little exception\n 0.031 ( 8/255) -> 1-of-top5 does not go out of sight\n 0.015 ( 4/255) -> 3-of-top5 does not go out of sight\n 0.008 ( 2/255) -> top5 within sight but not close to each other\n 0.004 ( 1/255) -> quite good. 
(and cannot be lower)\n\n For BingModel\n 1/255 -> topk very persistent\n 2/255 -> top3 very persistent\n 4/255 -> top1 starts to vary\n 8/255 -> looks appropriate.\n '''\n ag.add_argument('-M', '--model', type=str, choices=['JDModel', 'BingModel'])\n ag.add_argument('-v', '--verbose', action='store_true', help='verbose?')\n ag.add_argument('-Q', '--qbudget', type=int, default=500, help='query budget')\n ag.add_argument('-k', '--topk', type=int, default=5, help='generate permutation for topk')\n ag.add_argument('-c', '--canseek', type=int, default=50, help='length of returned ranking list')\n ag.add_argument('-l', '--payload', type=str, required=True, help='path to the payload image')\n ag.add_argument('-V', '--visualize', action='store_true')\n ag.add_argument('-O', '--oneshot', action='store_true')\n ag.add_argument('--randperm', action='store_true', help='use a random permutation instead')\n ag = ag.parse_args(argv)\n cprint(json.dumps(vars(ag), indent=4), 'yellow')\n\n # Process the arguments\n if ag.epsilon > 1.0:\n ag.epsilon = ag.epsilon / 255.\n assert(ag.topk > 1)\n\n # Load the payload image\n image = Image.open(ag.payload, mode='r').resize((224,224), Image.ANTIALIAS)\n query = transfunc.to_tensor(image).clone().unsqueeze(0)\n print(f'* Payload Image Info: shape={query.shape}')\n #tmp = transfunc.to_pil_image(query.squeeze(), mode='RGB')\n #tmp.show()\n #input('2')\n\n # Load the target model\n cprint(f'Setting up the \"{ag.model}\" Model')\n if ag.model == 'JDModel':\n model = getattr(lib.snapshop, ag.model)(canseek=ag.canseek)\n elif ag.model == 'BingModel':\n model = getattr(lib.bing, ag.model)(canseek=ag.canseek)\n else:\n raise ValueError('unsupported model')\n print(model)\n\n # Start attacking\n cprint(f'>_< Starting {ag.attack} Attack with Epsilon = {ag.epsilon:.3f}',\n 'red', None, ['bold', 'underline'])\n argsort, _ = model(query, id='init')\n orig_argsort = argsort.clone().detach()\n if not ag.randperm:\n rperm = th.LongTensor([1, 5, 4, 3, 2]) - 1 # manually specified order\n else:\n rperm = np.arange(ag.topk)\n np.random.shuffle(rperm)\n rperm = th.from_numpy(rperm)\n otopk = argsort[:len(rperm)]\n rtopk = otopk[rperm]\n cprint(f'> Original CanSee\\n {argsort.tolist()}', 'cyan')\n cprint(f'> Original TopK {otopk}', 'green')\n cprint(f'> Attacker Rperm {rperm}', 'yellow')\n cprint(f'> Expected TopK {rtopk}', 'red')\n if ag.oneshot:\n print('Exiting as requested oneshot mode.')\n exit(0)\n\n qr, r, score, mrank, aux = getattr(reorder, ag.attack)(model, query, rperm,\n eps=ag.epsilon, parallel=1, maxprobe=ag.qbudget, verbose=True)\n #argsort, _ = model(query, id='final')\n #cprint(f'> FINAL TopK', 'red')\n #cprint(argsort.tolist(), 'cyan')\n c.print('Final score:', score)\n\n\nif __name__ == '__main__':\n PracticalAttack(sys.argv[1:])\n","repo_name":"cdluminate/advorder","sub_path":"PracticalOA.py","file_name":"PracticalOA.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"26856437298","text":"from cloudvolume import CloudVolume, Storage\nfrom shapely.geometry import Polygon, box\nimport numpy as np\nfrom io import BytesIO\nfrom PIL import Image, ImageDraw\n\nmip = 6\nmask = Storage('gs://neuroglancer/pinky100_v0/edge_mask')\norder = Storage('gs://neuroglancer/pinky100_v0/z_order_corrected')\nout = CloudVolume('gs://neuroglancer/pinky100_v0/image_single_slices/roi', \n\t\t\t\t\t\t\t\t\t\t\t\t\tcdn_cache=True, mip=mip)\n\n# Get bounding box of a total slice\noffset = 
out.voxel_offset\nsize = tuple(out.shape[:2])\nx_slice = slice(offset[0], offset[0]+size[0])\ny_slice = slice(offset[1], offset[1]+size[1])\n\n# Build z remap dict\nf = order.get_file('dst_to_src.csv')\norder_arr = np.genfromtxt(BytesIO(f), dtype=np.int, \n\t\t\t\t\t\t\t\t\t\t\tdelimiter=',', skip_header=1)\ndst_to_src = {order_arr[i,0]:order_arr[i,1] \n\t\t\t\t\t\t\t\t\tfor i in range(order_arr.shape[0])}\n\n# Compile all ROI mask polygons (indexed by src_z)\n# ROI is translated by offset\nsrc_z_range = dst_to_src.values()\nsrc_z_filenames = list(map(str, src_z_range))\nmask_files = mask.get_files(src_z_filenames)\nmask_polygons = {}\nfor f in mask_files:\n\tif f['content'] is not None:\n\t\tpts = np.genfromtxt(BytesIO(f['content']), \n\t\t\t\t\t\t\t\tdtype=np.float, delimiter=',')\n\t\tpoly = Polygon(map(tuple, pts[:,:2] / 2**mip))\n\t\tpts = [(int(round(a[0]))-offset[0], int(round(a[1]))-offset[1]) for a \n\t\t\t\t\t\t\t\t\t\t\t\tin list(poly.exterior.coords)]\n\t\tmask_polygons[int(f['filename'])] = pts\n\n# Create mask image for each dst_z\ndst_z_range = dst_to_src.keys()\nfor dst_z in dst_z_range:\n\tsrc_z = dst_to_src[dst_z]\n\tpts = mask_polygons[src_z]\n\timg_mask = Image.new('1', size, 0)\n\tImageDraw.Draw(img_mask).polygon(pts, outline=1, fill=1)\n\timg_mask = np.transpose(np.array(img_mask)).astype(np.uint8)\n\tout[x_slice, y_slice, dst_z] = np.reshape(img_mask, img_mask.shape+(1,))\n","repo_name":"seung-lab/Alembic","sub_path":"src/tasks/python/preprocessing/ingest_masks.py","file_name":"ingest_masks.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"73"} +{"seq_id":"43964646389","text":"import pygame\nfrom objects.ball import Ball\nfrom objects.square import Square\nfrom funcs import *\nimport random\nballs_sp = []\n\n\ndef draw_line(pos):\n if pos[1] >= screen_height - 100 or pos == [10000000000, 10000000]:\n return\n k, b = get_k_and_b((x0, y0), pos)\n final = (-b // k, 0)\n pygame.draw.line(screen, pygame.Color(\"white\"), (x0, y0), final, width=1)\n\n\ndef spawn_balls(pos):\n balls_sp.append(Ball((x0, y0), pos, screen, screen_size))\n\n\ndef draw_lines_among_balls():\n if len(balls_sp) >= 2:\n for ball in balls_sp[1:]:\n for ball2 in balls_sp:\n pygame.draw.line(screen, pygame.Color('white'), ball2.get_coords(), ball.get_coords())\n\n\nif __name__ == '__main__':\n pygame.init()\n screen_size = screen_width, screen_height = 1000, 1000\n screen = pygame.display.set_mode(screen_size)\n screen.fill(pygame.Color('black'))\n running = True\n clock = pygame.time.Clock()\n pos = [10000000000, 10000000]\n x0 = screen_width // 2\n y0 = screen_height - 100\n need_draw_lines_among_balls = False\n need_draw_poligon = False\n need_draw_blocks = False\n squares = []\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEMOTION:\n pos = event.pos\n if event.type == pygame.MOUSEBUTTONDOWN and pos[1] < y0:\n spawn_balls(pos)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n y0 -= 100\n y0 -= 100\n if y0 <= 0:\n y0 = 0\n balls_sp.clear()\n elif event.key == pygame.K_DOWN or event.key == pygame.K_s:\n y0 += 100\n if y0 >= screen_height:\n y0 = screen_height\n elif event.key == pygame.K_c:\n balls_sp.clear()\n elif event.key == pygame.K_g:\n need_draw_lines_among_balls = True if need_draw_lines_among_balls is False else False\n elif event.key == pygame.K_t:\n need_draw_poligon = True 
if need_draw_poligon is False else False\n elif event.key == pygame.K_b:\n squares = []\n need_draw_blocks = True if need_draw_blocks is False else False\n\n clock.tick(600)\n pygame.display.flip()\n screen.fill(pygame.Color('black'))\n pygame.draw.line(screen, pygame.Color('white'), (0, y0), (screen_width, y0), width=3)\n if need_draw_blocks and len(squares) == 0:\n squares = []\n for _ in range(10):\n squares.append(Square((random.randint(0, 700), random.randint(0, 600)), 100))\n if need_draw_lines_among_balls:\n draw_lines_among_balls()\n if need_draw_poligon and len(balls_sp) >= 3:\n pygame.draw.polygon(screen, pygame.Color('white'), [ball.get_coords() for ball in balls_sp])\n for square in squares:\n pygame.draw.rect(screen, pygame.Color('white'), square.rect)\n if pos[1] < y0:\n draw_line(pos)\n if balls_sp:\n for ball in balls_sp:\n ball.move(y0, squares)\n ball.render()\n pygame.quit()","repo_name":"DulmievMusa/Balls_shooter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70909504555","text":"from django import template\n\nregister = template.Library()\n\n@register.filter(name='dynamic_index')\ndef dynamic_index(list_instance, index):\n \"\"\"\n Accepts a list of weeks materials and return a specific index according to week_number value.\n \"\"\"\n try:\n return list_instance[index]\n except:\n return ''","repo_name":"Ibrahem3amer/bala7","sub_path":"cms/templatetags/admin_interface_extras.py","file_name":"admin_interface_extras.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40538310000","text":"import api, points, base64, math, codecs\nfrom api.bottle import *\nfrom shutil import copyfile\n\ndef get_page(n):\n return math.ceil(n / 50)\n\ndef echoes(subscription):\n allechoareas = []\n for echoarea in subscription:\n temp = echoarea\n if not request.get_cookie(echoarea[0]):\n response.set_cookie(echoarea[0], api.get_last_msgid(echoarea[0]), path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n current = request.get_cookie(echoarea[0], secret='some-secret-key')\n if not current:\n current = api.get_last_msgid(echoarea[0])\n echoarea_msglist = api.get_echoarea(echoarea[0])\n\n new = 0\n last = False\n if len(echoarea_msglist) > 0:\n if current in echoarea_msglist:\n new = int(api.get_echoarea_count(echoarea[0])) - echoarea_msglist.index(current) - 1\n\n if new > 0:\n last = echoarea_msglist[-new];\n else:\n last = echoarea_msglist[-1];\n\n temp.append(new)\n temp.append(last)\n if last and len(last) > 0:\n temp.append(get_page(api.get_echoarea(echoarea[0]).index(last)))\n else:\n temp.append(get_page(len(api.get_echoarea(echoarea[0]))))\n allechoareas.append(temp)\n return allechoareas\n\n@route(\"/\")\ndef index():\n api.load_config()\n echoareas = []\n s = request.get_cookie(\"subscription\", secret='some-secret-key')\n if not s:\n subscription = []\n for ea in api.echoareas:\n subscription.append(ea[0])\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n s = subscription\n if api.nosubscription:\n subscription = api.echoareas\n else:\n subscription = []\n for ea in s:\n flag = False\n for e in api.echoareas:\n if ea in e:\n flag = True\n subscription.append(e)\n if not flag:\n subscription.append([ea, \"\"])\n ea = [[echoarea[0], echoarea[1], 
api.get_time(echoarea[0])] for echoarea in subscription]\n for echoarea in sorted(ea, key=lambda ea: ea[2], reverse=True)[0:5]:\n last = request.get_cookie(echoarea[0], secret='some-secret-key')\n if not last in api.get_echo_msgids(echoarea[0]):\n last = False\n if not last or len(last) == 0:\n last = api.get_last_msgid(echoarea[0])\n if last and len(last) > 0:\n page = get_page(api.get_echoarea(echoarea[0]).index(last))\n else:\n page = get_page(len(api.get_echoarea(echoarea[0])))\n echoareas.append({\"echoname\": echoarea[0], \"count\": api.get_echoarea_count(echoarea[0]), \"dsc\": echoarea[1], \"msg\": api.get_last_msg(echoarea[0]), \"last\": last, \"page\": page})\n allechoareas = echoes(subscription)\n auth = request.get_cookie(\"authstr\")\n msgfrom, addr = points.check_point(auth)\n return template(\"tpl/index.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoareas=echoareas, allechoareas=allechoareas, addr=addr, auth=auth, background=api.background, nosubscription=api.nosubscription)\n\n@route(\"/echolist\")\ndef echolist():\n api.load_config()\n echoareas = []\n s = request.get_cookie(\"subscription\", secret='some-secret-key')\n if not s:\n subscription = []\n for ea in api.echoareas:\n subscription.append(ea[0])\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n s = subscription\n if api.nosubscription:\n subscription = api.echoareas\n else:\n subscription = []\n for ea in s:\n flag = False\n for e in api.echoareas:\n if ea in e:\n flag = True\n subscription.append(e)\n if not flag:\n subscription.append([ea, \"\"])\n allechoareas = echoes(subscription)\n auth = request.get_cookie(\"authstr\")\n msgfrom, addr = points.check_point(auth)\n return template(\"tpl/echolist.tpl\", nodename=api.nodename, dsc=api.nodedsc, allechoareas=allechoareas, addr=addr, auth=auth, background=api.background, nosubscription=api.nosubscription)\n\ndef ffeed(echoarea, msgid, page):\n api.load_config()\n msglist = api.get_echoarea(echoarea)\n result = []\n last = request.get_cookie(echoarea, secret='some-secret-key')\n if not last in api.get_echoarea(echoarea):\n last = False\n if not last or len(last) == 0:\n last = api.get_last_msgid(echoarea)\n if not page:\n if not last:\n page = get_page(len(msglist))\n if page == 0:\n page = 1\n else:\n page = get_page(msglist.index(last))\n page = int(page)\n start = page * 50 - 50\n end = start + 50\n for mid in msglist[start:end]:\n msg = api.get_msg(mid).split(\"\\n\")\n if len(msg) > 1:\n result.append([mid, msg])\n ea = [ea for ea in api.echoareas if ea[0] == echoarea]\n if len(ea) != 1:\n ea = [echoarea, \"\"]\n else:\n ea = ea[0]\n auth = request.get_cookie(\"authstr\")\n if len(msglist) <= end:\n end = api.get_last_msgid(echoarea)\n else:\n end = msglist[end]\n response.set_cookie(echoarea, end, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n return template(\"tpl/feed.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoarea=ea, page=page, msgs=result, msgid=msgid, background=api.background, auth=auth)\n\n@route(\"/.\")\n@route(\"/./\")\n@route(\"/.//\")\ndef echoreas(e1, e2, msgid=False, page=False):\n echoarea=e1 + \".\" + e2\n if not request.get_cookie(echoarea):\n response.set_cookie(echoarea, api.get_last_msgid(echoarea), max_age=180*24*60*60, secret='some-secret-key')\n last = msgid or request.get_cookie(echoarea, secret='some-secret-key')\n if not last in api.get_echoarea(echoarea):\n last = False\n if not last or len(last) == 0:\n last = api.get_last_msgid(echoarea)\n 
index = api.get_echoarea(echoarea)\n if len(index) > 0 and index[-1] != last and last in index:\n last = index[index.index(last) + 1]\n if len(index) == 0:\n last = False\n if echoarea != \"favicon.ico\":\n if last:\n redirect(\"/\" + last)\n else:\n redirect(\"/new/\" + echoarea)\n\n@route(\"/\")\ndef showmsg(msgid):\n api.load_config()\n if api.msg_filter(msgid):\n body = api.get_msg(msgid).split(\"\\n\")\n if body != [\"\"]:\n msgfrom, addr = points.check_point(request.get_cookie(\"authstr\"))\n kludges = body[0].split(\"/\")\n if \"repto\" in kludges:\n repto = kludges[kludges.index(\"repto\") + 1]\n else:\n repto = False\n if len(body) > 0:\n echoarea = [ea for ea in api.echoareas if ea[0] == body[1]]\n if len(echoarea) == 0:\n echoarea = [body[1], \"\"]\n else:\n echoarea = echoarea[0]\n else:\n echoarea = [\"\", \"\"]\n t = api.formatted_time(body[2])\n point = body[3]\n address = body[4]\n to = body[5]\n subj = body[6]\n body = body[8:]\n index = api.get_echoarea(echoarea[0])\n current = index.index(msgid)\n response.set_cookie(echoarea[0], msgid, max_age=180*24*60*60, secret='some-secret-key')\n auth = request.get_cookie(\"authstr\")\n return template(\"tpl/message.tpl\", nodename=api.nodename, echoarea=echoarea, index=index, msgid=msgid, repto=repto, current=current, time=t, point=point, address=address, to=to, subj=subj, body=body, msgfrom=msgfrom, background=api.background, auth=auth)\n else:\n redirect(\"/\")\n else:\n redirect(\"/\")\n\n@route(\"/msglist/\")\n@route(\"/msglist//\")\n@route(\"/msglist///\")\ndef msg_list(echoarea, page=False, msgid=False):\n api.load_config()\n msglist = api.get_echoarea(echoarea)\n result = []\n for mid in msglist:\n msg = api.get_msg(mid).split(\"\\n\")\n try:\n subject = msg[6]\n f = msg[3]\n t = msg[5]\n result.append({\"msgid\": mid, \"subject\": subject, \"from\": f, \"to\": t})\n except:\n None\n ea = [ea for ea in api.echoareas if ea[0] == echoarea]\n if len(ea) == 0:\n ea = [echoarea, '']\n else:\n ea = ea[0]\n if not page:\n if not msgid:\n page = get_page(len(msglist))\n else:\n page = get_page(msglist.index(msgid))\n if page == 0:\n page = 1\n return template(\"tpl/msglist.tpl\", nodename=api.nodename, dsc=api.nodedsc, page=int(page), echoarea=ea, msgid=msgid, msglist=result, topiclist=False, background=api.background)\n\n@route(\"/new/.\")\n@route(\"/reply/.\")\n@route(\"/reply/./\")\ndef reply(e1, e2, msgid = False):\n echoarea = e1 + \".\" + e2\n auth = request.get_cookie(\"authstr\")\n if msgid:\n msg = api.get_msg(msgid).split(\"\\n\")\n else:\n msg = False\n return template(\"tpl/reply.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoarea=echoarea, msgid=msgid, msg=msg, auth=auth, hidehome=False, topiclist=False, background=api.background)\n\n@post(\"/a/savemsg/\")\n@post(\"/a/savemsg//\")\ndef save_messsage(echoarea, msgid = False):\n if api.echo_filter(echoarea):\n subj = request.forms.get(\"subj\")\n msgbody = request.forms.get(\"msgbody\")\n if len(subj) > 0 and len(msgbody) > 0:\n pauth = request.forms.get(\"authstr\")\n msgfrom, addr = points.check_point(pauth)\n if not addr:\n return \"auth error!\"\n response.set_cookie(\"authstr\", pauth, path=\"/\", max_age=3600000000)\n msg = \"\"\n msg = msg + echoarea + \"\\n\"\n msg = msg + request.forms.get(\"to\") + \"\\n\"\n msg = msg + subj + \"\\n\\n\"\n if msgid:\n msg = msg + \"@repto:\" + msgid + \"\\n\"\n msg = msg + msgbody\n msg = base64.b64encode(msg.encode(\"utf8\"))\n message=api.toss_msg(msgfrom, addr, msg)\n if message.startswith(\"msg ok\"):\n redirect(\"/%s\" 
% message[7:])\n else:\n redirect(\"/\")\n\n@post(\"/a/savefile\")\ndef savefile():\n auth = request.get_cookie(\"authstr\")\n username, addr = points.check_point(auth)\n if addr:\n dest = request.forms.get(\"dest\")\n fileecho = request.forms.get(\"fileecho\")\n tfileecho = request.forms.get(\"tfileecho\")\n f = request.files.get(\"file\")\n dsc = request.forms.get(\"dsc\")\n if fileecho == \"\":\n fecho = tfileecho\n else:\n fecho = fileecho\n path = \"files/\" + fecho\n if not api.file_filter(f.raw_filename):\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Некорректное имя файла\")\n if api.fecho_filter(fecho):\n f.save(\"temp\")\n if not os.path.exists(\"files/%s\" % fecho):\n os.makedirs(\"files/%s\" % fecho)\n hsh = api.fhsh(open(\"./temp\", \"rb\").read())\n hshs = []\n try:\n for row in open(\"fecho/%s\" % fecho, \"r\").read().split(\"\\n\"):\n hshs.append(row.split(\":\")[0])\n except:\n None\n blacklist = open(\"fblacklist.txt\", \"r\").read().split(\"\\n\")\n if not hsh in hshs and not hsh in blacklist:\n name = f.raw_filename\n while os.path.exists(\"files/%s/%s\" % (fecho, name)):\n tmp = name.split(\".\")\n name = \".\".join(tmp[:-1])\n suffix = name.split(\"_\")[-1]\n if suffix == name:\n suffix = \"0\"\n try:\n s = int(suffix)\n s += 1\n post = \"_\" + str(s)\n except:\n post = \"_1\"\n if suffix != \"0\":\n name = name.replace(\"_\" + suffix, post) + \".\" + tmp[-1]\n else:\n name = name + post + \".\" + tmp[-1]\n try:\n size = str(os.stat(\"temp\").st_size)\n except:\n size = \"0\"\n copyfile(\"temp\", \"files/%s/%s\" % (fecho, name))\n os.remove(\"temp\")\n codecs.open(\"fecho/%s\" % fecho, \"a\", \"utf8\").write(\"%s:%s:%s:%s,%s:%s\\n\" % (hsh, name, size, api.nodename, addr, dsc.replace(\"\\n\", \" \").replace(\"\\r\", \"\")))\n codecs.open(\"files/indexes/files.txt\", \"a\", \"utf8\").write(\"%s/%s:%s\\n\" % (fecho, name, dsc.replace(\"\\n\", \" \").replace(\"\\r\", \"\")))\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Файл успешно загружен\")\n else:\n os.remove(\"./temp\")\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Такой файл уже существует\")\n os.remove(\"./temp\")\n else:\n return template(\"tpl/upload_message.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, message=\"Некорректное имя файлэхоконференции\")\n else:\n redirect(\"/\")\n\n@post(\"/s/subscription\")\n@route(\"/s/subscription\")\ndef subscription():\n api.load_config()\n s = request.forms.get(\"subscription\")\n subscription = []\n if request.forms.get(\"default\"):\n for ea in api.echoareas:\n subscription.append(ea[0])\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n redirect(\"/\")\n if s:\n for ea in s.strip().replace(\"\\r\", \"\").split(\"\\n\"):\n if api.echo_filter(ea):\n subscription.append(ea)\n response.set_cookie(\"subscription\", subscription, path=\"/\", max_age=180*24*60*60, secret='some-secret-key')\n redirect(\"/\")\n subscription = request.get_cookie(\"subscription\", secret='some-secret-key')\n echoareas = []\n for echoarea in api.echoareas:\n echoareas.append([echoarea[0], api.get_echoarea_count(echoarea[0]), echoarea[1]])\n return template(\"tpl/subscription.tpl\", nodename=api.nodename, dsc=api.nodedsc, echoareas=echoareas, subscription=subscription, 
background=api.background)\n\ndef sort_files(files):\n filelist = []\n for f in sorted(files):\n if f[0].endswith(\"/\") and not f in filelist:\n filelist.append(f)\n for f in sorted(files):\n if not f in filelist:\n filelist.append(f)\n return filelist\n\n@route(\"/s/filelist\")\n@route(\"/s/filelist/\")\ndef filelist(d = False):\n auth = request.get_cookie(\"authstr\")\n msgfrom, addr = points.check_point(auth)\n files = api.get_public_file_index(d)\n if not addr:\n return template(\"tpl/filelist.tpl\", nodename=api.nodename, dsc=api.nodedsc, files=sort_files(files), auth=False, background=api.background, d=d)\n files = files + api.get_file_index(d)\n try:\n files = files + api.get_private_file_index(msgfrom, d)\n except:\n None\n return template(\"tpl/filelist.tpl\", nodename=api.nodename, dsc=api.nodedsc, files=sort_files(files), auth=auth, background=api.background, d=d)\n\n@route(\"/s/download/\")\ndef download(filename):\n filename = filename.split(\"/\")\n return static_file(filename[-1], \"files/%s\" % \"/\".join(filename[:-1]))\n\n@route(\"/s/blacklisted/\")\ndef blacklist(msgid):\n if api.msg_filter(msgid):\n auth = request.get_cookie(\"authstr\")\n if points.is_operator(auth):\n api.delete_msg(msgid)\n open(\"blacklist.txt\", \"a\").write(msgid + \"\\n\")\n redirect(\"/\")\n\n@route(\"/login\")\n@post(\"/login\")\ndef login():\n username = request.forms.get(\"username\")\n password = request.forms.get(\"password\")\n auth = points.login(username, password)\n if auth:\n if auth != \"error\":\n response.set_cookie(\"authstr\", auth, path=\"/\", max_age=3600000000)\n redirect(\"/\")\n else:\n return template(\"tpl/login.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, username=username, auth=auth, registration=api.registration, alarm=\"Неверные учётные данные!\")\n return template(\"tpl/login.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, registration=api.registration, username=False, auth=False, alarm=False)\n\n@route(\"/profile\")\ndef profile():\n auth = request.get_cookie(\"authstr\")\n username, addr = points.check_point(auth)\n return template(\"tpl/profile.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, username=username, auth=auth, addr=addr)\n\n@route(\"/logout\")\ndef logout():\n response.set_cookie(\"authstr\", \"\", path=\"/\", max_age=-1, expires=0)\n redirect(\"/\")\n\n@route(\"/registration\")\n@post(\"/registration\")\ndef registration():\n if api.registration:\n username = request.forms.get(\"username\")\n password = request.forms.get(\"password\")\n if username and password:\n if points.check_username(username):\n return template(\"tpl/registration.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, alarm=\"Имя пользователя уже существует.\")\n else:\n hsh, phash = points.make_point(username, password)\n points.save_point(phash, username, hsh)\n response.set_cookie(\"authstr\", phash, path=\"/\", max_age=3600000000)\n redirect(\"/\")\n return template(\"tpl/registration.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, alarm=False)\n else:\n redirect(\"/\")\n\n@route(\"/s/upload\")\ndef upload_form():\n auth = request.get_cookie(\"authstr\")\n username, addr = points.check_point(auth)\n if addr:\n return template(\"tpl/upload.tpl\", nodename=api.nodename, dsc=api.nodedsc, background=api.background, fechoareas=api.fechoareas)\n else:\n redirect(\"/\")\n\n@route(\"/rss/\")\ndef rss(echoarea):\n response.set_header(\"content-type\", 
\"application/rss+xml; charset=utf-8\")\n api.load_config()\n msglist = api.get_echoarea(echoarea)\n msgs = []\n for msgid in msglist[-50:]:\n msgs.append([msgid, api.get_msg(msgid).split(\"\\n\")])\n return template(\"tpl/rss.tpl\", nodename=api.nodename, dsc=api.nodedsc, nodeurl=api.nodeurl, msgs=reversed(msgs), echoarea=echoarea)\n\n@route(\"/lib/css/\")\ndef pcss(filename):\n return static_file(filename, root=\"lib/css/\")\n\n@route(\"/lib/fonts/\")\ndef pcss(filename):\n return static_file(filename, root=\"lib/fonts/\")\n\n@route(\"/lib/\")\ndef plib(filename):\n return static_file(filename, root=\"lib/\")\n","repo_name":"idec-net/iing","sub_path":"api/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":19134,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"38214306096","text":"# coding: utf-8\n'''\ndescribe:指标计算\nauthor:lxs\nversion:V2\ndate:2020\n'''\nimport time\nfrom mid_database_operate import dbase\nimport os\nfrom mid_ds_dataset import *\n#测试数据\ndata_re = {\n \"training_time\":time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),\n \"accuracy_rate\":90.0,\n \"model_version\":\"V14\",\n \"model_fileaddr\":\"http:/*?/\",\n \"dataset_list\":\"1,2,3\",\n \"training_model_id\":2,\n \"tags_index\":[\n {\"F1score\":2.0,\"Gscore\":3.0,\"precision_rate\":4.0,\"recall_rate\":90.0,\"label_id\":1},\n {\"F1score\":3.0,\"Gscore\":4.0,\"precision_rate\":5.0,\"recall_rate\":93.0,\"label_id\":2},\n {\"F1score\":4.0,\"Gscore\":5.0,\"precision_rate\":6.0,\"recall_rate\":94.0,\"label_id\":3},\n {\"F1score\":5.0,\"Gscore\":6.0,\"precision_rate\":7.0,\"recall_rate\":95.0,\"label_id\":4},\n ],\n \"error_images\":[\n {\"pic_address\":\"dasdad0\",\"old_label\":\"严重\",\"new_label\":\"重度\"},\n {\"pic_address\":\"dasdad1\",\"old_label\":\"严重\",\"new_label\":\"重度\"},\n {\"pic_address\":\"dasdad2\",\"old_label\":\"严重\",\"new_label\":\"重度\"},\n {\"pic_address\":\"dasdad3\",\"old_label\":\"严重\",\"new_label\":\"重度\"},\n {\"pic_address\":\"dasdad4\",\"old_label\":\"严重\",\"new_label\":\"重度\"},\n {\"pic_address\":\"dasdad5\",\"old_label\":\"严重\",\"new_label\":\"重度\"},\n ]\n }\n\n\nclass Calculation(object):\n def get_result(self,data,task_type):\n if task_type is 2:\n return data_re\n # index_calculation(model_file_path,dataset_path)\n if task_type is 0:\n ver = None\n if task_type is 1:\n ver = data[\"model_version\"]\n data_re[\"training_time\"] = data[\"training_time\"]\n data_re[\"model_version\"] = self.version_get(data[\"train_model_id\"],ver)\n data_re[\"training_model_id\"] = data[\"train_model_id\"]\n data_re[\"dataset_list\"] = str(data[\"train_dataset_id\"]).replace(\"[\",\"\").replace(\"]\",\"\")\n\n if not task_type is 2:\n data_re[\"model_fileaddr\"] = \"/{}/{}.h5\".format(os.path.split(data[\"new_model_path\"])[1],data[\"train_model_name\"])#存放得路径\n return data_re\n def version_get(self,model_id,version):\n res = dbase.model_info_get(model_id,version)\n if res[2] == None:\n return \"V1\"\n else:\n return (\"V{}\".format(int(res[2].model_version.replace(\"V\",\"\"))+1))\n \n\ncal_result = Calculation()\n\nif __name__ == \"__main__\":\n a = os.path.split(\"a:/n/c/c/d/a\")\n print(a[1])","repo_name":"liangxs0/Identification_engine_pollution_level","sub_path":"ModelingSystem_Mid/mid_calculation.py","file_name":"mid_calculation.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"6389042523","text":"# 2020 J3 Art - Max 
Michet\r\nN = int(input())\r\nxCoors = []\r\nyCoors = []\r\n\r\nfor m in range(N):\r\n x, y = input().split(\",\")\r\n xCoors.append(int(x))\r\n yCoors.append(int(y))\r\n\r\n#bottom:\r\nbotX = min(xCoors) - 1\r\nbotY = min(yCoors) - 1\r\n#top:\r\ntopX = max(xCoors) + 1\r\ntopY = max(yCoors) + 1\r\n\r\nprint(str(botX) + \",\" + str(botY))\r\nprint(str(topX) + \",\" + str(topY))\r\n","repo_name":"carmenmanoil/cs_notes","sub_path":"Contests/Waterloo/Waterloo/2020/Junior/j3_2020_Art.py","file_name":"j3_2020_Art.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"6123109049","text":"import pandas as pd\nimport math\nimport matplotlib.pyplot as mp\nimport matplotlib.dates as dates\nimport numpy as np\n\n# Use PYVENV in Development\n\n# Also read dist files. \n\nmocap_filename = \"mocap_valentijn/beach_repr_2b_velocity_M.tsv\"\n\n'''\n(PYVENV) pberck@ip30-163 MoCap % head beach_repr_2b_velocity_M.tsv\nNO_OF_FRAMES\t20887\nNO_OF_DATA_TYPES\t28\nFREQUENCY\t200\nTIME_STAMP\t2022-11-22, 21:34:11\nDATA_INCLUDED\tVelocity\nDATA_TYPES\tx_LWristOut_vel_M\tX_LWristIn_vel_M\tx_LHandOut_vel_M\tx_LHandIn_vel_M\tx_RWristOut_vel_M\tx_RWristIn_vel_M\tx_RHandOut_vel_M\tx_RHandIn_vel_M\tx_RThumb1_vel_M\tx_RThumbTip_vel_M\tx_RIndex2_vel_M\tx_RIndexTip_vel_M\tx_RMiddle2_vel_M\tx_RMiddleTip_vel_M\tx_RRing2_vel_M\tx_RRingTip_vel_M\tx_RPinky2_vel_M\tx_RPinkyTip_vel_M\tx_LThumb1_vel_M\tx_LThumbTip_vel_M\tx_LIndex2_vel_M\tx_LIndexTip_vel_M\tx_LMiddle2_vel_M\tx_LMiddleTip_vel_M\tx_LRing2_vel_M\tx_LRingTip_vel_M\tx_LPinky2_vel_M\tx_LPinkyTip_vel_M\n\n\n1\t0.00000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\t0.000\n2\t0.00500\t12.438\t14.483\t73.739\t109.118\t12.927\t11.315\t11.895\t12.532\t14.295\t14.165\t73.827\t265.273\t191.557\t22.159\t16.281\t16.797\t19.674\t26.238\t161.599\t184.912\t109.435\t119.367\t142.256\t161.760\t164.861\t208.077\t124.859\t144.613\n'''\n\n# Data is index plus timestamp plus 64*3 data points?\n\ndf = None\ndf_rows = []\nlnum = 0\nfreq = 200 # available in file header\nwith open(mocap_filename, \"r\") as f:\n for line in f:\n bits = line.split()\n #print( lnum, len(bits) )\n if len(bits) > 1:\n if bits[0] == \"FREQUENCY\":\n freq = int(bits[1])\n if bits[0] == \"DATA_TYPES\":\n column_names = bits # We add a Timestamp later to this one too\n print( column_names )\n if len(bits) > 15 and lnum > 7:\n bits = [ float(x) for x in bits ]\n df_rows.append( bits[1:] ) #skip index number\n lnum += 1\n\n#for x in column_names:\n# print( x )\n# check for \"finger movement only\", \"hand movement\", \"arm movement\" (not in this data, use distances?)\n'''\nx_LWristOut_vel_M\nx_LWristIn_vel_M\n\nx_LHandOut_vel_M\nx_LHandIn_vel_M\n\nx_LThumb1_vel_M\nx_LThumbTip_vel_M\n\nx_LIndex2_vel_M\nx_LIndexTip_vel_M\n\nx_LMiddle2_vel_M\nx_LMiddleTip_vel_M\n\nx_LRing2_vel_M\nx_LRingTip_vel_M\n\nx_LPinky2_vel_M\nx_LPinkyTip_vel_M\n\n# ---\n\nx_RWristOut_vel_M\nx_RWristIn_vel_M\n\nx_RHandOut_vel_M\nx_RHandIn_vel_M\n\nx_RThumb1_vel_M\nx_RThumbTip_vel_M\n\nx_RIndex2_vel_M\nx_RIndexTip_vel_M\n\nx_RMiddle2_vel_M\nx_RMiddleTip_vel_M\n\nx_RRing2_vel_M\nx_RRingTip_vel_M\n\nx_RPinky2_vel_M\nx_RPinkyTip_vel_M\n'''\n\ngroup_LHand = [\"x_LWristOut_vel_M\", \"x_LWristIn_vel_M\", \"x_LHandOut_vel_M\", \"x_LHandIn_vel_M\"]\ngroup_LFingers = [\"x_LThumb1_vel_M\", \"x_LThumbTip_vel_M\", 
\"x_LIndex2_vel_M\", \"x_LIndexTip_vel_M\",\n \"x_LMiddle2_vel_M\", \"x_LMiddleTip_vel_M\", \"x_LRing2_vel_M\", \"x_LRingTip_vel_M\",\n \"x_LPinky2_vel_M\", \"x_LPinkyTip_vel_M\"]\n\ngroup_RHand = [\"x_RWristOut_vel_M\", \"x_RWristIn_vel_M\", \"x_RHandOut_vel_M\", \"x_RHandIn_vel_M\"]\ngroup_RFingers = [\"x_RThumb1_vel_M\", \"x_RThumbTip_vel_M\", \"x_RIndex2_vel_M\", \"x_RIndexTip_vel_M\",\n \"x_RMiddle2_vel_M\", \"x_RMiddleTip_vel_M\", \"x_RRing2_vel_M\", \"x_RRingTip_vel_M\",\n \"x_RPinky2_vel_M\", \"x_RPinkyTip_vel_M\"]\n\n\ncolumn_names[0] = \"Timestamp\"\ndf = pd.DataFrame(df_rows, columns = column_names)\n#df['Time'] = pd.to_datetime(df['Timestamp']) # not used\ndf['x_LWristOut_vel_M_T'] = np.where( df[\"x_LWristOut_vel_M\"] > 240, 240, 0 )\nprint( df )\n\n# plot, with \"x=0\" interesting plot\ndf.plot(\n x=0, #df[\"Time\"],\n #y=[1,2,3,4,5,6,7,8,9,10],\n y=[1,2, 5,6], #[\"x_LWristOut_vel_M\"],\n kind=\"line\",\n figsize=(16, 8)\n)\n\n# ----------------------------\n\n# Read the dist data\ndf_dists = pd.read_csv(\"beach_repr_2b_dists.tsv\", sep=\"\\t\")\nprint( df_dists )\n\n#print( \",\".join(sorted(df_dists.columns)) )\n'''\nx_BackL,x_BackR,x_Chest,x_HeadFront,x_HeadL,x_HeadR,x_HeadTop,\n\nx_LAnkleOut,x_LArm,x_LElbowOut,x_LForefootIn,x_LForefootOut,x_LHandIn,x_LHandOut,x_LHeelBack,x_LIndex2,x_LIndexTip,x_LKneeOut,x_LMiddle2,x_LMiddleTip,x_LPinky2,x_LPinkyTip,x_LRing2,x_LRingTip,x_LShin,x_LShoulderBack,x_LShoulderTop,x_LThigh,x_LThumb1,x_LThumbTip,x_LToeTip,x_LWristIn,x_LWristOut,\n\nx_RAnkleOut,x_RArm,x_RElbowOut,x_RForefootIn,x_RForefootOut,x_RHandIn,x_RHandOut,x_RHeelBack,x_RIndex2,x_RIndexTip,x_RKneeOut,x_RMiddle2,x_RMiddleTip,x_RPinky2,x_RPinkyTip,x_RRing2,x_RRingTip,x_RShin,x_RShoulderBack,x_RShoulderTop,x_RThigh,x_RThumb1,x_RThumbTip,x_RToeTip,x_RWristIn,x_RWristOut,\n\nx_SpineTop,x_WaistLBack,x_WaistLFront,x_WaistRBack,x_WaistRFront\n'''\n\n# Create a dataframe with \"distance moved across threshold\" indicators.\ndf_dists_t = pd.DataFrame()\ndf_dists_t[\"Timestamp\"] = df_dists[\"Timestamp\"]\n\nfor sensor in [\"x_LArm\", \"x_LElbowOut\", \"x_LHandIn\",\"x_LHandOut\", \"x_LShoulderBack\",\"x_LShoulderTop\",\n \"x_LThigh\",\"x_LThumb1\",\"x_LThumbTip\",\"x_LToeTip\",\"x_LWristIn\",\"x_LWristOut\"]:\n df_dists_t[sensor+'_T'] = np.where( df_dists[sensor] > 1, 3, 0 )\n\nprint( df_dists_t )\n\nfig, axes = mp.subplots(nrows=2, ncols=1, figsize=(12,6), sharex=True, sharey=True)\nfig.suptitle( \"distances\" )\n\ncol = np.where(df_dists_t[\"x_LElbowOut_T\"] > 1, 'r', 'b')\nsiz = np.where(df_dists_t[\"x_LElbowOut_T\"] > 1, 1, 0)\n\naxes[0].plot(\n df_dists[\"Timestamp\"].values,\n df_dists[\"x_LElbowOut\"].values\n)\naxes[0].set_title(\"x_LElbowOut and marker\")\naxes[0].scatter(\n df_dists[\"Timestamp\"].values,\n df_dists_t[\"x_LElbowOut_T\"].values,\n s=siz, c=col\n)\naxes[1].set_title(\"x_RElbowOut\")\naxes[1].plot(\n df_dists[\"Timestamp\"].values,\n df_dists[\"x_RElbowOut\"].values\n)\n\n\ndf_dists.plot(\n x=\"Timestamp\",\n #y=[1,2,3,4,5,6,7,8,9,10],\n y=[\"x_LArm\", \"x_RArm\", \"x_LHandOut\", \"x_RHandOut\"],\n kind=\"line\",\n figsize=(16, 8)\n)\n#axes[0].scatter(\n# \"Timestamp\",\n# \"x_LWristOut_vel_M_T\",\n# marker='o',\n# s=siz, c=col, #\"red\",\n# data=df,\n# label=\"\"\n#)\n\n# ----------------------------\n\n# Plot\nfig, axes = mp.subplots(nrows=2, ncols=1, figsize=(16,8), sharex=True, sharey=True)\n\n''' see mocap_vel00.py\naxes[0].plot(\n \"Timestamp\",\n \"x_LWristOut_vel_M\",\n data=df,\n label=\"LW\"\n)\ncol = np.where(df[\"x_LWristOut_vel_M_T\"]<200,'b', 
'r')\nsiz = np.where(df[\"x_LWristOut_vel_M_T\"]<200,0,1)\naxes[0].scatter(\n \"Timestamp\",\n \"x_LWristOut_vel_M_T\",\n marker='o',\n s=siz, c=col, #\"red\",\n data=df,\n label=\"\"\n)\naxes[0].legend(loc=\"upper right\")\n'''\n\nfor sensor in [\"x_RThumb1_vel_M\", \"x_RThumbTip_vel_M\", \"x_RIndex2_vel_M\", \"x_RIndexTip_vel_M\", \"x_RMiddle2_vel_M\", \"x_RMiddleTip_vel_M\", \"x_RRing2_vel_M\", \"x_RRingTip_vel_M\", \"x_RPinky2_vel_M\", \"x_RPinkyTip_vel_M\"]:\n axes[0].plot(\n \"Timestamp\",\n sensor,\n data=df\n )\n#axes[1].legend(loc=\"upper right\")\nbox = axes[0].get_position()\naxes[0].set_position([box.x0, box.y0 + box.height * 0.12, box.width, box.height * 0.88])\naxes[0].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=6)\n\n#axes[1].plot(\n# \"Timestamp\",\n# \"x_LHandOut_vel_M\",\n# data=df\n#)\nfor sensor in [\"x_LThumb1_vel_M\", \"x_LThumbTip_vel_M\", \"x_LIndex2_vel_M\", \"x_LIndexTip_vel_M\", \"x_LMiddle2_vel_M\", \"x_LMiddleTip_vel_M\", \"x_LRing2_vel_M\", \"x_LRingTip_vel_M\", \"x_LPinky2_vel_M\", \"x_LPinkyTip_vel_M\"]:\n axes[1].plot(\n \"Timestamp\",\n sensor,\n data=df\n )\n#axes[1].legend(loc=\"upper right\")\nbox = axes[1].get_position()\naxes[1].set_position([box.x0, box.y0 + box.height * 0.12, box.width, box.height * 0.88])\naxes[1].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=6)\n\n# Use \"np.condition\" to determine hand/finger/arm movements? (1/0 columns)\n\nmp.show()\n'''\n# For creating new column with multiple conditions\nconditions = [\n (df['Base Column 1'] == 'A') & (df['Base Column 2'] == 'B'),\n (df['Base Column 3'] == 'C')]\nchoices = ['Conditional Value 1', 'Conditional Value 2']\ndf['New Column'] = np.select(conditions, choices, default='Conditional Value 1')\n\nsiz = np.where(df[\"x_LWristOut_vel_M_T\"]<200,0,1)\n\nconditions = [\n df['gender'].eq('male') & df['pet1'].eq(df['pet2']),\n df['gender'].eq('female') & df['pet1'].isin(['cat', 'dog'])\n]\nchoices = [5,5]\ndf['points'] = np.select(conditions, choices, default=0)\nprint(df)\n'''\n","repo_name":"pberck/MoCap","sub_path":"mocap_cmb_00.py","file_name":"mocap_cmb_00.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73001022315","text":"'''\nCreated on Sep 23, 2013\n\n@author: mmartin\n'''\n\nfrom base64 import (\n b64encode\n)\n\nimport json\nimport requests\n\nfrom mezeo_cdmi.objectid import ObjectId\nfrom mezeo_dds.access import (\n decodeKey,\n)\n\n\nCDMICONTAINER = 'application/cdmi-container'\nCDMIOBJECT = 'application/cdmi-object'\nCHILDREN = 'children'\nCOUNT = 10\nOBJECTTYPE = 'objectType'\nROOT = '/cdmi'\nSTORAGEROOT = '/cdmi/storage_root'\nSYSTEMDOMAIN = 'system_domain/'\nTOPLEVELDOMAIN = '/cdmi/cdmi_domains'\nVERSION = '1.0.0'\n\n\nclass CDMIExports(object):\n '''\n Return a list of all exported containers\n '''\n\n def __init__(self, host, userid, passwd, verify, debug=False):\n '''\n Initialize this object\n '''\n self.debug = debug\n self.domains = []\n self.exports = []\n self.headers = {'X-CDMI-Specification-Version': '1.0.1'}\n self.storage_root = None\n self.verify = verify\n # prepend url with http if it's not already there\n if (host.startswith('http:') or\n host.startswith('https:')):\n self.host = host\n else:\n self.host = 'http://%s' % host\n\n self.auth_basic = \"Basic %s\" % b64encode(\"%s:%s\" %\n (userid, passwd))\n\n self.headers['Authorization'] = self.auth_basic\n\n def get_DDS_key(self, objectid):\n '''\n Convert a CDMI object id 
into a CSP object id\n '''\n return decodeKey(ObjectId.decode(objectid))\n\n def _exports(self, path, returnCSP=False):\n domain = self.GET(path, self.headers)\n if CHILDREN not in domain:\n return\n if not domain['objectName'].endswith('/'):\n domain['objectName'] = '%s/' % domain['objectName']\n children = domain[CHILDREN]\n for child in children:\n child_uri = '%s%s' % (path, child)\n data = self.GET(child_uri, self.headers)\n if not data:\n return\n if not data['objectName'].endswith('/'):\n data['objectName'] = '%s/' % data['objectName']\n if 'exports' not in data:\n if CHILDREN in data:\n for gchild in data[CHILDREN]:\n self._exports('%s%s' % (child_uri, gchild),\n returnCSP=returnCSP)\n continue\n if returnCSP:\n key = self.get_DDS_key(data['objectID'])\n self.exports.append(b64encode(key))\n else:\n self.exports.append(child_uri)\n\n def get_exports(self, path, returnCSP=False):\n self.exports = []\n self._exports(path, returnCSP=returnCSP)\n return self.exports\n\n def GET(self, path, headers=None):\n '''\n Get data from CDMI\n '''\n if not headers:\n headers = self.headers\n url = '%s%s' % (self.host, path)\n res = requests.get(url=url,\n allow_redirects=True,\n headers=headers,\n verify=self.verify)\n if res.status_code in [200]:\n return json.loads(res.text)\n else:\n print ('Could not connect to server. Response status %d'\n % res.status_code)\n\n def set_header(self, name, value):\n self.headers[name] = value\n\n def set_headers(self, headers):\n self.headers = headers\n if 'Authorization' not in self.headers:\n self.headers['Authorization'] = self.auth_basic\n","repo_name":"building39/meztools","sub_path":"CDMIExports.py","file_name":"CDMIExports.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12605589328","text":"#!/usr/bin/env python3\n\"\"\"Contains the function 'lenet5'\"\"\"\n\nimport tensorflow.keras as K\n\n\ndef lenet5(X):\n \"\"\"Builds a modified version of the LeNet-5 architecture using keras\n\n Args:\n x: Keras containing the input images for the network\n \"\"\"\n init = K.initializers.HeNormal()\n conv1 = K.layers.Conv2D(filters=6,\n kernel_size=(5, 5),\n padding='same',\n activation='relu',\n kernel_initializer=init)(X)\n pool1 = K.layers.MaxPooling2D(pool_size=(2, 2),\n strides=(2, 2))(conv1)\n conv2 = K.layers.Conv2D(filters=16,\n kernel_size=(5, 5),\n padding='valid',\n activation='relu',\n kernel_initializer=init)(pool1)\n pool2 = K.layers.MaxPooling2D(pool_size=(2, 2),\n strides=(2, 2))(conv2)\n flat = K.layers.Flatten()(pool2)\n fc1 = K.layers.Dense(units=120,\n kernel_initializer=init,\n activation='relu')(flat)\n fc2 = K.layers.Dense(units=84,\n kernel_initializer=init,\n activation='relu')(fc1)\n fc3 = K.layers.Dense(units=10,\n kernel_initializer=init,\n activation='softmax')(fc2)\n model = K.Model(inputs=X, outputs=fc3)\n model.compile(optimizer=K.optimizers.Adam(),\n loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n","repo_name":"yonroa/holbertonschool-machine_learning","sub_path":"supervised_learning/0x07-cnn/5-lenet5.py","file_name":"5-lenet5.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42579129606","text":"from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom django.http import HttpResponse\nimport base64\nfrom PIL import 
Image\nfrom io import BytesIO\nimport os\n\n# Image Classification Imports\nimport numpy as np\nfrom PIL import Image\nfrom imageio import imread\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport tf_slim as slim\nfrom tf_slim.nets import inception\nimport tf_slim as slim\nimport cv2\n\ncurrent_path = os.path.join(os.getcwd(), 'inception_api')\nckpt_path = os.path.join(current_path, \"input/inception_v3.ckpt\")\nimages_path = os.path.join(current_path, \"images/*\")\nimg_width = 299\nimg_height = 299\nbatch_size = 16\nbatch_shape = [batch_size, img_height, img_width, 3]\nnum_classes = 1001\npredict_output = []\nclass_names_path = os.path.join(current_path, \"input/imagenet_class_names.txt\")\nwith open(class_names_path) as f:\n class_names = f.readlines()\n\n\n# To Load Image\ndef load_images(input_dir):\n global batch_shape\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n files = tf.gfile.Glob(input_dir)[:20]\n files.sort()\n for filepath in files:\n with tf.gfile.Open(filepath, \"rb\") as f:\n imgRaw = np.array(Image.fromarray(imread(f, as_gray=False, pilmode=\"RGB\")).resize((299, 299))).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = imgRaw * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images\n\n\ndef start_prediction():\n predict_output=[]\n return_arr = []\n\n X = tf.placeholder(tf.float32, shape=batch_shape)\n\n with slim.arg_scope(inception.inception_v3_arg_scope()):\n logits, end_points = inception.inception_v3(\n X, num_classes=num_classes, is_training=False, reuse=tf.AUTO_REUSE\n )\n\n predictions = end_points[\"Predictions\"]\n saver = tf.train.Saver(slim.get_model_variables())\n\n\n session_creator = tf.train.ChiefSessionCreator(\n scaffold=tf.train.Scaffold(saver=saver),\n checkpoint_filename_with_path=ckpt_path,\n master='')\n\n with tf.train.MonitoredSession(session_creator=session_creator) as sess:\n for filenames, images in load_images(images_path):\n labels = sess.run(predictions, feed_dict={X: images})\n for filename, label, image in zip(filenames, labels, images):\n predict_output.append([filename, label, image])\n \n for x in predict_output:\n out_list = list(x[1])\n topPredict = sorted(range(len(out_list)), key=lambda i: out_list[i], reverse=True)[:5]\n for p in topPredict:\n return_arr.append(class_names[p-1].strip())\n\n return return_arr\n\n\n\ndef base64_to_image(base64_string):\n # Convert base64 string to PIL Image\n path=os.path.join(os.getcwd(), 'inception_api', 'images', 'example.jpg')\n imgdata = base64.b64decode(base64_string)\n image = Image.open(BytesIO(imgdata))\n image.save(path)\n\n# Create your views here.\n@api_view(['POST'])\ndef get_classification(request):\n rarr = []\n # print(\"Got request- \", request.data)\n image = base64_to_image(request.data['image'])\n prediction_output = start_prediction()\n # print(\"prediction_output: \", prediction_output)\n \n for val in prediction_output:\n rarr.append(' '.join(val.split(' ')[1:]))\n\n # print(\"Rarray- \", rarr)\n return Response({'status': 'success', 'prediction': rarr})\n \ndef index(request):\n return HttpResponse(\"Hello, world. 
You're at the inception_api index.\")","repo_name":"omkarshinde254/Inception_v3-Image-Classification","sub_path":"mysite/inception_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"43262502666","text":"#%%\n\nimport os\n\nos.makedirs(os.path.join('..', 'data'), exist_ok=True)\ndata_file = os.path.join('..', 'data', 'house_tiny.csv')\nwith open(data_file, 'w') as f:\n    f.write('NumRooms,Alley,Price\\n') # column names\n    f.write('NA,Pave,127500\\n') # each row is one data sample\n    f.write('2,NA,106000\\n')\n    f.write('4,NA,178100\\n')\n    f.write('NA,NA,140000\\n')\n\n#%%\n\n# If pandas is not installed, just uncomment the following line to install it\n# !pip install pandas\nimport pandas as pd\n\ndata = pd.read_csv(data_file)\nprint(data)\n\n#%%\n\ninputs, outputs = data.iloc[:, 0:2], data.iloc[:, 2]\n\n#%%\n\ninputs,outputs\n\n#%%\n\nc=inputs.iloc[:,0:1]\nc\n\n#%%\n\nc=c.fillna(c.mean())\nc\n\n#%%\n\ninputs.iloc[:,0:1]=c\n\n#%%\n\ninputs\n\n#%%\n\nc=inputs.iloc[:,0]\n\n#%%\n\nc\n\n#%%\n\nc.mean()\ninputs.iloc[:,0]=c.fillna(c.mean())\n\n#%%\n\ninputs.iloc[:,0]\n\n#%%\n\ninputs\n\n#%%\n\ninputs=pd.get_dummies(inputs,dummy_na=True)*1\ninputs\n\n#%%\n\n\n\n#%%\n\nimport torch\n\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)\nX, y\n\n#%%\n\n\n","repo_name":"mozhumz/machine_learning_py","sub_path":"com/hyj/nlp/dnn_study/data_pre_handle.py","file_name":"data_pre_handle.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40257974414","text":"import streamlit as st \nfrom multiapp import MultiApp\nfrom apps import habits, home, chatbot, song, habits_log\n\ndef local_css():\n    with open(\"style.css\") as f:\n        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)\n\nlocal_css()\n\napp = MultiApp()\napp.add_app(\"homepage\", home.app)\napp.add_app(\"dashboard\", habits.app)\napp.add_app(\"habits log\", habits_log.app)\napp.add_app(\"chat with a friend!\", chatbot.app)\n\n\napp.run()","repo_name":"mhl343/AI-Hackathon","sub_path":"streamlit-dash/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"8505666255","text":"import pandas as pd\n\n#read in the image and tag files, sort by image_id\nimages = pd.read_csv('imgur_images.csv', encoding = 'ISO-8859-1')\ntags = pd.read_csv('imgur_tags.csv', encoding = 'ISO-8859-1')\n\nsorted_images = images.sort_values(by = ['image_id', 'extract_date'], ascending = [True, False])\nsorted_tags = tags.sort_values(by = ['image_id'], ascending = [True])\n\n#eliminate duplicates, keep latest row\ndeduped_images = sorted_images.drop_duplicates(['image_id'], keep='first')\ndeduped_tags = sorted_tags.drop_duplicates()\n\n#count the tags and rename the output field\ntagcount = deduped_tags.groupby('image_id', as_index = False).count()\ntagcount.columns = ['image_id', 'tag_count']\n\n#add the count of tags to the image data\nimagetags = pd.merge(deduped_images, tagcount, on = 'image_id', how = 'inner')\n\n#write to file\nimagetags.to_csv('imgur_imagetags.csv', index = False, encoding = 'ISO-8859-1')\n","repo_name":"RollingHillsAnalytics/imgur-analytics","sub_path":"imgur_tagcount_full.py","file_name":"imgur_tagcount_full.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
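A minimal, self-contained sketch of the deduplicate / count / merge pattern used in the imgur_tagcount record above; the toy DataFrames and their values below are illustrative assumptions, not part of the original dataset.

import pandas as pd

# Toy stand-ins for imgur_images.csv and imgur_tags.csv (hypothetical values).
images = pd.DataFrame({'image_id': [1, 1, 2],
                       'extract_date': ['2020-01-02', '2020-01-01', '2020-01-01']})
tags = pd.DataFrame({'image_id': [1, 1, 2], 'tag': ['cat', 'funny', 'dog']})

# Keep only the most recent extract per image_id.
deduped_images = (images.sort_values(['image_id', 'extract_date'], ascending=[True, False])
                        .drop_duplicates(['image_id'], keep='first'))

# Count tags per image and give the count column a clear name.
tag_count = tags.groupby('image_id', as_index=False).count()
tag_count.columns = ['image_id', 'tag_count']

# Attach the tag counts to the deduplicated image rows.
image_tags = pd.merge(deduped_images, tag_count, on='image_id', how='inner')
print(image_tags)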
+{"seq_id":"13289164217","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('apuser', '0006_alterpriceuser_user_type'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='alterpriceuser',\n name='created',\n field=models.DateTimeField(verbose_name='Дата регистрации', default=datetime.datetime(2015, 6, 4, 11, 43, 14, 207197), auto_now_add=True),\n preserve_default=False,\n ),\n ]\n","repo_name":"BlackPie/alterprice","sub_path":"src/apps/apuser/migrations/0007_alterpriceuser_created.py","file_name":"0007_alterpriceuser_created.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"37741742882","text":"import socket\r\nimport threading\r\nimport multiprocessing\r\nimport time\r\n\r\nserveraddr1 = ('127.0.0.1', 8080)#定义server的ip和地址\r\nserveraddr2 = ('127.0.0.1', 8081)#定义server的ip和地址\r\n\r\ndef client_thread(client,port):#客户端建立socket\r\n client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n client.bind(('127.0.0.1', port))\r\n client.listen(1)\r\n while(True):\r\n clientsocket, address = client.accept()\r\n print(clientsocket.recv(1024).decode('utf-8'))\r\n\r\ndef client_connect(serveraddr,client_name,input_text):\r\n #login指令\r\n target = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n target.connect(serveraddr)\r\n id = client_name #input('请输入你的用户名: ')\r\n target.send(('none login '+id).encode('utf-8'))\r\n port = int(target.recv(1024).decode('utf-8'))\r\n print('链接成功')\r\n target.close()\r\n #启动客户端\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n threading.Thread(target=client_thread, args=(client,port)).start()\r\n #开始发送指令\r\n while(True):\r\n time.sleep(1)\r\n input_text=input_method()\r\n put = id+' '+ input_text #input()\r\n target = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n target.connect(serveraddr)\r\n target.send(put.encode('utf-8'))\r\n callback = target.recv(1024).decode('utf-8')\r\n if(callback!='success'):\r\n print(callback)\r\n target.close()\r\n if put.split(' ')[1] == 'close':#关闭客户端\r\n break\r\n client.close()\r\n\r\ndef input_method():\r\n input_text = input()\r\n return input_text\r\n\r\ndef main():\r\n input_text=input_method()\r\n p1 = multiprocessing.Process(target=client_connect,args=(serveraddr1,\"daniel\",input_text))\r\n p1.start()\r\n #p2 = multiprocessing.Process(target=client_connect,args=(serveraddr2,\"daniel\"))\r\n #p2.start()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Daniel-Why/daylifecode","sub_path":"TWOIM/test/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5533778387","text":"import os\nimport sys\n\nimport psycopg2\n\nif len(sys.argv) not in (3 , 4):\n\traise Exception(\"Must pass input filename and target table name as arguments\")\n\nif len(sys.argv) == 3:\n\t_, infile_name, target_table = sys.argv\n\tcolumns = None\nelse:\n\t_, infile_name, target_table, columns = sys.argv\n\tcolumns = columns.split(',')\n\n\nwith open(infile_name, 'r') as data:\n\twith psycopg2.connect(os.environ['DB_URI']) as conn:\n\n\t\tcopy_params = {\n\t\t\t'file': data,\n\t\t\t'table': target_table,\n\t\t\t'sep': '|',\n\t\t}\n\t\tif columns:\n\t\t\tcopy_params.update({'columns': columns})\n\n\t\tcur = 
conn.cursor()\n\t\tcur.copy_from(**copy_params)\n\t\tconn.commit()\n","repo_name":"davidshere/irs-aws-990s","sub_path":"etl/write_to_db.py","file_name":"write_to_db.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"35861120999","text":"# -*- coding: utf-8 -*-\n\nfrom lib import Logger\nfrom BaseSinker import BaseSinker\nfrom lib import HtmlFactory\nfrom lib import MailSender\n\n\nclass MultiSectionEmailSinker(BaseSinker):\n    def __init__(self, tag, address, title, contexts):\n        BaseSinker.__init__(self)\n        self.name = \"MultiSectionEmailSinker\"\n        self.tag = tag\n        self.contexts = contexts\n        self.address = address\n        self.title = title\n\n    def sink(self, sections):\n        if sections is None or len(sections) == 0:\n            Logger.e(self.id() + \"no data, abort\")\n            return\n        content = \"\"\n        for context in self.contexts:\n            key = context[\"id\"]\n            section = sections[key]\n            content += \"\r\n\r\n
    \"\n content += HtmlFactory.getTable(context[\"title\"], context['cols'].split(','), section)\n\n content = HtmlFactory.getHeader(content)\n\n title = \"[%s]\" % self.title\n mail_to = [self.address]\n #MailSender.sendMail2(mail_to, title, content)\n MailSender.sendMail(mail_to, title, content)\n\nif __name__ == \"__main__\":\n testTag = \"test\"\n testTitle = \"test email sinker\"\n testContexts = [ {\"id\":\"s1\", \"title\": \"section 1\", \"cols\": \"col1, col2\"}, {\"id\":\"s2\", \"title\": \"section 2\", \"cols\": \"colA, colB\"} ]\n testAddress = \"ulyx.yang@ndpmedia.com\"\n testSinker = MultiSectionEmailSinker(testTag, testAddress, testTitle, testContexts)\n testData = { \"s1\":[[\"123\", \"456\"], [\"789\", \"100\"]], \"s2\":[[\"1123\", \"4156\"], [\"7189\", \"1100\"]], }\n testSinker.sink(testData)\n\n\n","repo_name":"yangxu02/datax","sub_path":"MultiSectionEmailSinker.py","file_name":"MultiSectionEmailSinker.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"11118110791","text":"import numpy as np\nimport pandas as pd\n\nfrom torch.utils import data\n\nfrom sklearn import preprocessing\n\n\nclass JASMIN(data.Dataset):\n def __init__(\n self,\n root: str,\n split: str,\n x_vars: list = None,\n t_var: str = \"tot_aod\",\n y_vars: list = None,\n t_bins: int = 2,\n ) -> None:\n super(JASMIN, self).__init__()\n # Handle default values\n if x_vars is None:\n x_vars = [\n \"RH900\",\n \"RH850\",\n \"RH700\",\n \"LTS\",\n \"EIS\",\n \"w500\",\n \"whoi_sst\",\n ]\n if y_vars is None:\n y_vars = [\"l_re\", \"liq_pc\", \"cod\", \"cwp\"]\n # Read csv\n df = pd.read_csv(root, index_col=0)\n # Filter AOD and Precip values\n df = df[df.tot_aod.between(0.07, 1.0)]\n df = df[df.precip < 0.5]\n # Make train test valid split\n days = df[\"timestamp\"].unique()\n days_valid = set(days[5::7])\n days_test = set(days[6::7])\n days_train = set(days).difference(days_valid.union(days_test))\n # Fit preprocessing transforms\n df_train = df[df[\"timestamp\"].isin(days_train)]\n self.data_xfm = preprocessing.StandardScaler()\n self.data_xfm.fit(df_train[x_vars].to_numpy())\n self.treatments_xfm = preprocessing.KBinsDiscretizer(\n n_bins=t_bins, encode=\"onehot-dense\"\n )\n self.treatments_xfm.fit(df_train[t_var].to_numpy().reshape(-1, 1))\n self.targets_xfm = preprocessing.StandardScaler()\n self.targets_xfm.fit(df_train[y_vars].to_numpy())\n # Split the data\n if split == \"train\":\n _df = df[df[\"timestamp\"].isin(days_train)]\n elif split == \"valid\":\n _df = df[df[\"timestamp\"].isin(days_valid)]\n elif split == \"test\":\n _df = df[df[\"timestamp\"].isin(days_test)]\n # Set variables\n self.data = self.data_xfm.transform(_df[x_vars].to_numpy(dtype=\"float32\"))\n self.treatments = self.treatments_xfm.transform(\n _df[t_var].to_numpy(dtype=\"float32\").reshape(-1, 1)\n )[:, 1:]\n self.targets = self.targets_xfm.transform(_df[y_vars].to_numpy(dtype=\"float32\"))\n # Variable properties\n self.dim_input = self.data.shape[-1]\n self.dim_targets = self.targets.shape[-1]\n self.dim_treatments = t_bins - 1\n self.data_names = x_vars\n self.target_names = y_vars\n self.treatment_names = [t_var]\n\n @property\n def data_frame(self):\n data = np.hstack(\n [\n self.data_xfm.inverse_transform(self.data),\n self.treatments,\n self.targets_xfm.inverse_transform(self.targets),\n ],\n )\n return pd.DataFrame(\n data=data,\n columns=self.data_names + self.treatment_names + self.target_names,\n )\n\n def 
__len__(self) -> int:\n return len(self.targets)\n\n def __getitem__(self, index) -> data.dataset.T_co:\n return self.data[index], self.treatments[index], self.targets[index]\n","repo_name":"OATML/clouds","sub_path":"clouds/datasets/jasmin.py","file_name":"jasmin.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74552704557","text":"import os\nimport xlsxwriter\n\ndef readFileName(wb, ws, path):\n\tfilenames = os.listdir(path)\n\tbold = wb.add_format({'bold': True})\n\tws.write('A1', 'Path', bold)\n\tws.write('B1', 'FileName', bold)\n\trow = 1\n\tcol = 0\n\n\tfor filename in filenames:\n\t\tfname, ext = os.path.splitext(filename)\n\t\tif ext.lower() in ['.jpg', '.jpeg','.png','.gif']:\n\t\t\tws.write(row, col, path)\n\t\t\tws.write(row, col+1, filename)\n\t\t\trow += 1\n\t\t#full_filename = os.path.join(path, filename)\n\t\t#print (full_filename)\n\tws.write(row, 0, 'Total', bold)\n\tws.write(row, 1, '=counta(B2:B'+str(row)+')', bold)\n\ndef main():\n\tfor i in os.listdir(os.getcwd()):\n\t\tif os.path.isdir(i):\n\t\t\trf = i + '.xlsx'\n\t\t\tprint(rf)\n\t\t\tworkbook = xlsxwriter.Workbook(rf)\n\t\t\tworksheet = workbook.add_worksheet()\n\t\t\treadFileName(workbook, worksheet, i)\n\t\t\tworkbook.close()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"bullseye73/python","sub_path":"fs/recognitionResult.py","file_name":"recognitionResult.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"3850365787","text":"import komand\nfrom .schema import RunInput, RunOutput\n\n# Custom imports below\nimport json\nimport base64\nimport requests\nfrom komand_try_bro.util import utils\n\n\nclass Run(komand.Action):\n def __init__(self):\n super(self.__class__, self).__init__(\n name=\"run\", description=\"Upload PCAP file\", input=RunInput(), output=RunOutput()\n )\n\n def run(self, params={}):\n server = self.connection.server\n pcap = base64.b64decode(params.get(\"pcap\"))\n scripts = params.get(\"scripts\")\n version = params.get(\"version\", \"master\")\n if pcap:\n checksum = utils.maybe_upload_pcap(server, pcap, self.logger)\n else:\n raise Exception(cause=\"Error: No PCAP supplied\")\n sources = utils.load_scripts(scripts, self.logger)\n self.logger.info(\"Run: Supplied Scripts: %s\", sources)\n req = {\"sources\": sources, \"version\": version, \"pcap\": checksum}\n data = json.dumps(req)\n headers = {\"Content-type\": \"application/json\"}\n res = requests.post(server + \"/run\", data=data, headers=headers).json()\n if res[\"stdout\"] != \"\":\n self.logger.info(res[\"stdout\"])\n return {\n \"id\": res[\"job\"],\n \"url\": \"{server}/#/trybro/saved/{job}\".format(server=server, job=res[\"job\"]),\n }\n\n def test(self):\n server = self.connection.server\n res = requests.get(server)\n if res.status_code != 200:\n raise Exception(cause=\"Test: Unsuccessful HTTP status code returned\")\n return {}\n","repo_name":"rapid7/insightconnect-plugins","sub_path":"plugins/try_bro/komand_try_bro/actions/run/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"73"} +{"seq_id":"26547108976","text":"\"\"\"\nUtility functions to get language specific formats.\n\nThese functions are taken from the original django implementation and updated\nto fit our needs.\n\nThe original code can be 
found here:\nhttps://github.com/django/django/blob/master/django/utils/formats.py\n\n\"\"\"\nfrom django.conf import settings\n# when working with django versions prior to 1.5, we need to use smart_str\n# instead of force_str\ntry:\n from django.utils.encoding import force_str as str_encode\nexcept ImportError:\n from django.utils.encoding import smart_str as str_encode\n\ntry:\n from importlib import import_module\nexcept ImportError:\n from django.utils.importlib import import_module\nfrom django.utils.translation import (\n check_for_language,\n get_language,\n to_locale\n)\n\nCUSTOM_FORMAT_MODULE_PATHS = getattr(settings, 'CUSTOM_FORMAT_MODULE_PATHS',\n ['localized_names.formats'])\n\n# format_cache is a mapping from (format_type, lang) to the format string.\n# By using the cache, it is possible to avoid running get_format_modules\n# repeatedly.\n_format_cache = {}\n_format_modules_cache = {}\n\nISO_INPUT_FORMATS = {\n 'DATE_INPUT_FORMATS': ('%Y-%m-%d',),\n 'TIME_INPUT_FORMATS': ('%H:%M:%S', '%H:%M:%S.%f', '%H:%M'),\n 'DATETIME_INPUT_FORMATS': (\n '%Y-%m-%d %H:%M:%S',\n '%Y-%m-%d %H:%M:%S.%f',\n '%Y-%m-%d %H:%M',\n '%Y-%m-%d'\n ),\n}\n\n\ndef iter_format_modules(lang):\n \"\"\"\n Does the heavy lifting of finding format modules.\n\n \"\"\"\n if check_for_language(lang):\n format_locations = []\n for path in CUSTOM_FORMAT_MODULE_PATHS:\n format_locations.append(path + '.%s')\n format_locations.append('django.conf.locale.%s')\n locale = to_locale(lang)\n locales = [locale]\n if '_' in locale:\n locales.append(locale.split('_')[0])\n for location in format_locations:\n for loc in locales:\n try:\n yield import_module('.formats', location % loc)\n except ImportError:\n pass\n\n\ndef get_format_modules(lang=None, reverse=False):\n \"\"\"\n Returns a list of the format modules found\n\n \"\"\"\n if lang is None:\n lang = get_language()\n modules = _format_modules_cache.setdefault(lang, list(\n iter_format_modules(lang)))\n if reverse:\n return list(reversed(modules))\n return modules\n\n\ndef get_format(format_type, lang=None, use_l10n=None):\n \"\"\"\n For a specific format type, returns the format for the current\n language (locale), defaults to the format in the settings.\n format_type is the name of the format, e.g. 
'DATE_FORMAT'\n\n If use_l10n is provided and is not None, that will force the value to\n be localized (or not), overriding the value of settings.USE_L10N.\n\n \"\"\"\n format_type = str_encode(format_type)\n if use_l10n or (use_l10n is None and settings.USE_L10N):\n if lang is None:\n lang = get_language()\n cache_key = (format_type, lang)\n try:\n cached = _format_cache[cache_key]\n if cached is not None:\n return cached\n else:\n # Return the general setting by default\n return getattr(settings, format_type)\n except KeyError:\n for module in get_format_modules(lang):\n try:\n val = getattr(module, format_type)\n for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):\n if iso_input not in val:\n if isinstance(val, tuple):\n val = list(val)\n val.append(iso_input)\n _format_cache[cache_key] = val\n return val\n except AttributeError:\n pass\n _format_cache[cache_key] = None\n return getattr(settings, format_type)\n","repo_name":"bitlabstudio/django-libs","sub_path":"django_libs/format_utils.py","file_name":"format_utils.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"73"} +{"seq_id":"74060605036","text":"#!/usr/bin/env python\n\nfrom mininet.net import Mininet\nfrom mininet.node import OVSController\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel, info\nfrom mininet.link import TCLink\nimport time\nimport os\n\nUID=os.environ.get('UID',None)\nGID=os.environ.get('GID',None)\n\nAPPLICATION=\"/root/ccn-lite-extensions\"\n#APPLICATION=\"/root/RIOT/examples/ccn-lite-relay\"\n\ndef setupCCNNode(node, start_ccn_lite=True):\n node.cmd('ip tuntap add dev tap0 mode tap')\n node.cmd('brctl addbr tapbr0')\n node.cmd('brctl addif tapbr0 tap0')\n node.cmd('brctl addif tapbr0 '+str(node.intf()))\n node.cmd('ifconfig tapbr0 up')\n node.cmd('ifconfig tap0 up')\n if (start_ccn_lite):\n node.cmd(\"rm /tmp/hosts/*\"+str(node.intf()))\n node.popen([\"socat\" ,\"-d\", \"-d\", \"-v\", \"pty,rawer,link=/tmp/hosts/v_\"+str(node.intf()), \"EXEC:\\\"make term BOARD=native\\\",pty,rawer\"],cwd=APPLICATION)\n node.popen([\"socat\" ,\"-d\", \"-d\", \"-v\", \"UNIX-LISTEN:/tmp/hosts/\"+str(node.intf())+\",fork\", \"/tmp/hosts/v_\"+str(node.intf())],cwd=APPLICATION)\n if (UID and GID):\n node.cmd(\"sleep 0.3\")\n node.cmd([\"chown\", \"{}:{}\".format(UID,GID), \"/tmp/hosts/\"+str(node.intf())])\n node.cmd([\"chown\", \"{}:{}\".format(UID,GID), \"/tmp/hosts/v_\"+str(node.intf())])\n\ndef emptyNet():\n\n \"Create an empty network and add nodes to it.\"\n\n net = Mininet( controller=OVSController, link=TCLink )\n\n info( '*** Adding controller\\n' )\n net.addController( 'c0' )\n\n info( '*** Adding hosts\\n' )\n root = net.addHost( 'root', ip='10.123.123.1/32', inNamespace=False)\n h1 = net.addHost( 'h1', ip='10.0.0.2' )\n h2 = net.addHost( 'h2', ip='10.0.0.3' )\n\n info( '*** Adding switch\\n' )\n s1 = net.addSwitch( 's1' )\n s2 = net.addSwitch( 's2' )\n s3 = net.addSwitch( 's3' )\n\n info( '*** Creating links\\n' )\n intf = net.addLink( root, s1 ).intf1\n\n net.addLink(s1, s2, delay='20ms')\n net.addLink(s2, s3, delay='20ms')\n\n # enable this to set link losses\n # net.addLink( s1, s2, delay='20ms', loss=5 )\n # net.addLink( s2, s3, delay='20ms', loss=5 )\n\n net.addLink( root, s1 )\n net.addLink( h1, s2 )\n net.addLink( h2, s3 )\n\n info( '*** Starting network\\n')\n net.start()\n\n root.cmd( 'route add -net 10.0.0.0/24 dev ' + str( intf ) )\n setupCCNNode(root,start_ccn_lite=False)\n 
setupCCNNode(h1,start_ccn_lite=True)\n    setupCCNNode(h2,start_ccn_lite=True)\n\n    info( '*** Running CLI\\n' )\n    CLI( net )\n\n    info( '*** Stopping network' )\n    net.stop()\n\nif __name__ == '__main__':\n    setLogLevel( 'info' )\n    emptyNet()\n","repo_name":"inetrg/ACM-ICN-LoRa-ICN-2022","sub_path":"mininet/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22999103732","text":"import datetime\n\nmahasiswa1 = {\n    'nama': 'Asep Sikasep',\n    'nim': '30012023',\n    'sks_lulus': 130,\n    'beasiswa': False,\n    'lahir': datetime.datetime(1999, 5, 10)\n}\n\nmahasiswa2 = {\n    'nama': 'Dimas Majid',\n    'nim': '30012024',\n    'sks_lulus': 140,\n    'beasiswa': True,\n    'lahir': datetime.datetime(2002, 7, 10)\n}\n\nmahasiswa3 = {\n    'nama': 'Udok Nyolodok',\n    'nim': '30012025',\n    'sks_lulus': 110,\n    'beasiswa': True,\n    'lahir': datetime.datetime(2000, 10, 10)\n}\n\ndata_mhs = {\n    'MAH001': mahasiswa1,\n    'MAH002': mahasiswa2,\n    'MAH003': mahasiswa3,\n}\n\nprint(f\"{'KEY':<6} {'Nama':<15} {'NIM':<9} {'SKS':<7} {'Beasiswa':<10} {'Lahir'} \")\nprint('='*60)\n\nfor mahasiswa in data_mhs:\n    KEY = mahasiswa\n    NAMA = data_mhs[KEY]['nama']\n    NIM = data_mhs[KEY]['nim']\n    SKS = data_mhs[KEY]['sks_lulus']\n    BEASISWA = data_mhs[KEY]['beasiswa']\n    LAHIR = data_mhs[KEY]['lahir'].strftime(\"%x\")\n\n    print(f\"{KEY:<6} {NAMA:<15} {NIM:<9} {SKS:<7} {BEASISWA:^10} {LAHIR} \")\n","repo_name":"FaisalMuaris/Belajar-Python","sub_path":"37.multi-keys-nesting-dictionary.py","file_name":"37.multi-keys-nesting-dictionary.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23182354599","text":"# -*- coding: utf-8 -*-\r\n#!/usr/bin/python\r\n\r\nimport logging\r\nimport sys\r\n\r\nimport paramiko\r\nimport redis\r\n\r\nfrom ZapretInfoXMLParser import ZapretInfoXMLParser\r\nfrom QuaggaConfig import QuaggaConfig\r\n\r\n\r\nclass ZapretInfoDB(object):\r\n\r\n    def __init__(self):\r\n        self.host = '194.54.64.53'\r\n        self.user = 'icmrsu'\r\n        self.secret = 'gr@peb1ke'\r\n        self.port = 22\r\n        self.dump_file_path = '/gost-ssl/rzs/dump/dump.xml'\r\n        self.r = redis.StrictRedis(host='localhost', port=6379, db=0)\r\n        self.quagga = QuaggaConfig()\r\n\r\n        # Set up the logging parameters\r\n        self.logger = logging.getLogger(__name__)\r\n        self.logger.setLevel(logging.INFO)\r\n        formatter = logging.Formatter(u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s')\r\n        console_handler = logging.StreamHandler(sys.stdout)\r\n        console_handler.setLevel(logging.INFO)\r\n        console_handler.setFormatter(formatter)\r\n        file_handler = logging.FileHandler('zapret-info-db.log')\r\n        file_handler.setLevel(logging.INFO)\r\n        file_handler.setFormatter(formatter)\r\n        self.logger.addHandler(console_handler)\r\n        self.logger.addHandler(file_handler)\r\n\r\n    def update_domains(self):\r\n        # Fetch the updated dump of the banned-sites registry from the remote server over SFTP\r\n        # and load it into the Redis database\r\n        client = paramiko.SSHClient()\r\n        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n        client.connect(hostname=self.host, username=self.user, password=self.secret, port=self.port)\r\n        sftp = client.open_sftp()\r\n\r\n        self.logger.info('Start updating Redis ZapretInfo database from new xml dump...')\r\n        with sftp.open(self.dump_file_path, 'r') as f:\r\n            zapret_info_xml = ZapretInfoXMLParser(f)\r\n            zapret_info_xml.get_domains()\r\n            for domain in zapret_info_xml.domains:\r\n                self.r.sadd('new_domains', domain)\r\n        self.logger.info('New ZapretInfo dump contain %s domains' % len(zapret_info_xml.domains))\r\n\r\n        # Get the list of domains that were removed from the banned-sites registry\r\n        self.r.sdiffstore('domains_for_delete', 'domains', 'new_domains')\r\n        self.logger.info('After update required for delete %s domains' % self.r.scard('domains_for_delete'))\r\n        # Swap the current domain list for the updated one\r\n        self.r.rename('new_domains', 'domains')\r\n        # Remove stale domains and their IP addresses from the Redis database and the Quagga BGP configuration\r\n        self.delete_domains()\r\n\r\n    def delete_domains(self):\r\n        # Remove stale domains and their IP addresses from the Redis database and the Quagga BGP configuration\r\n        ip_for_delete = set()\r\n        # Remove every IP-address set for domains that were excluded from the banned-sites registry\r\n        #for d in range(self.r.scard('domains_for_delete')):\r\n        #    domain = self.r.spop('domains_for_delete')\r\n        for domain in self.r.smembers('domains_for_delete'):\r\n            # First collect the set of IP addresses of all domains that must be removed from Quagga\r\n            ip_for_delete = ip_for_delete.union(self.r.smembers(domain))\r\n            # Then delete the set itself from the Redis database\r\n            self.logger.info('deleted domain ' + domain.encode('utf-8') + ' from Redis database')\r\n            self.logger.info('domain %s had following addresses: %s' % (domain.encode('utf-8'), self.r.smembers(domain)))\r\n            self.r.delete(domain)\r\n        # Remove from Quagga the IP addresses collected above, whose domains were excluded from the registry\r\n        if ip_for_delete:\r\n            self.quagga.delete_bgp_networks(ip_for_delete)\r\n\r\n    def add_domains(self):\r\n        # Add to the Quagga configuration the new IP addresses accumulated in Redis\r\n        # for domains from the banned-sites registry\r\n        current_ip_set = set()\r\n        for domain in self.r.smembers('domains'):\r\n            current_ip_set = current_ip_set.union(self.r.smembers(domain))\r\n        self.logger.info('Current Redis database contain %s IP addresses' % len(current_ip_set))\r\n\r\n        # From the set of IP addresses stored in the Redis database, subtract\r\n        # the set of IP addresses already in the Quagga configuration.\r\n        # The result is the set of addresses that must be added to Quagga\r\n        ip_for_add = current_ip_set.difference(self.quagga.read_current_networks())\r\n        if ip_for_add:\r\n            self.quagga.add_bgp_networks(ip_for_add)\r\n        else:\r\n            self.logger.info('Nothing to add in quagga')\r\n\r\nif __name__ == \"__main__\":\r\n    z = ZapretInfoDB()\r\n    z.update_domains()\r\n    z.add_domains()","repo_name":"krtvand/dns-sniffer","sub_path":"ZapretInfoDB.py","file_name":"ZapretInfoDB.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"74966029354","text":"import os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import *\nfrom icecream.icecream import ic\nfrom services.resource_connectors import *\nfrom services.config_manager_temp import Config_Manager\n\n\nclass Activity_Logger(object):\n    _instance = None\n\n    def __new__(cls, config_manager: Config_Manager, sshfs_connector: SSHFS_Connector, clear: bool = False):\n        if cls._instance == None or clear:\n            cls._instance = object.__new__(cls)\n        return cls._instance\n\n    def __init__(self, config_manager: Config_Manager, sshfs_connector: SSHFileSystem, clear: bool = False):\n        self.config_manager = 
config_manager\n        self.sshfs_connector = sshfs_connector\n        self.setup_log_file()\n\n    def log_activity(self, identifier):\n        file_entries = self.sshfs_connector.read_file(self.log_file)\n        file_entries.insert(0, f\"{identifier}\\n\")\n        self.sshfs_connector.write_file(self.log_file, file_entries)\n\n    def setup_log_file(self) -> None:\n        self.log_file = f\"{self.config_manager.log_directory}/{datetime.now().date()}.log\"\n        if not self.sshfs_connector.exists(self.log_file):\n            print(f\"Log FILE not found. Log file created: {self.log_file}\")\n            self.sshfs_connector.create_file(self.log_file)\n        else:\n            print(f\"Log File: {self.log_file}\")\n\n    @property\n    def recent_log_file(self) -> Any:\n        ls_list = self.sshfs_connector.ls(\n            self.config_manager.log_directory, detail=True)\n        log_files = [entry for entry in ls_list if \"file\" in entry[\"type\"]\n                     if entry[\"name\"].endswith(\".log\")]\n        return max(log_files, key=lambda dictio: dictio[\"mtime\"])[\"name\"]\n\n    @property\n    def latest_activity(self):\n        with self.sshfs_connector.open(self.recent_log_file, \"r\") as stream:\n            activity = stream.readline()\n        if len(activity) == 0:\n            return None\n        return activity.strip(\"\\n\")\n","repo_name":"normanclt/github_actions","sub_path":"services/activity_logger.py","file_name":"activity_logger.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29821917588","text":"def plot_loss_curves(history):\n    \"\"\"\n    Plots model performance of loss and accuracy,\n    both training and validation scores\n\n    Args:\n        history (keras.src.callbacks.History): tensorflow history object\n\n    Returns:\n        plots training/validation loss and accuracy metrics.\n    \"\"\"\n    import matplotlib.pyplot as plt\n\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n\n    accuracy = history.history['accuracy']\n    val_accuracy = history.history['val_accuracy']\n\n    epochs = range(len(history.history['loss']))\n\n    # Create a 1x2 grid (one row, two columns) for the plots\n    fig, axs = plt.subplots(1, 2, figsize=(8, 4))\n\n    # Loss plot\n    axs[0].plot(epochs, loss, label='training')\n    axs[0].plot(epochs, val_loss, label='validate')\n    axs[0].set_title('Loss')\n    axs[0].set_xlabel('Epochs')\n    axs[0].set_ylabel('Score')\n    axs[0].legend()\n\n    # Accuracy plot\n    axs[1].plot(epochs, accuracy, label='training')\n    axs[1].plot(epochs, val_accuracy, label='validate')\n    axs[1].set_title('Accuracy')\n    axs[1].set_xlabel('Epochs')\n    axs[1].set_ylabel('%')\n    axs[1].legend()\n\n    plt.tight_layout() # Ensures proper spacing of the plots\n    plt.show()\n","repo_name":"neon-symeon/tensorflow_hacks","sub_path":"tf_hacks_v_231007.py","file_name":"tf_hacks_v_231007.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20097352389","text":"import fast_phot as fap\nimport numpy as np\nimport sys\nimport os\n\nif len(sys.argv) == 3:\n    countsFile = sys.argv[1]\n    photFile = sys.argv[2]\nelse:\n    sys.exit('Usage: calc_all_phot.py [input] [output]')\n\ninCounts = np.genfromtxt(countsFile,names=True,dtype=None)\nphotWrite = open(photFile, 'w')\nphotWrite.write('GRB\\tch\\ttype\\tflux\\tflux_unc\\tmab\\tmab_unc\\tap\\tsig\\n')\n\nfor index in np.arange(len(inCounts['GRB'])):\n    GRB = inCounts['GRB'][index]\n    #GRB = '0'+str(inCounts['GRB'][index])\n    correction = inCounts['corrCorr'][index]\n    bkgSubCounts = inCounts['flxSubCnts'][index]\n    srcUnc = 
inCounts['uncCnts'][index]\n srcPx = inCounts['apPix'][index]\n bkgUnc = inCounts['uncBkg'][index]\n bkgPx = inCounts['anPix'][index]\n ch = inCounts['ch'][index]\n ftype = inCounts['type'][index]\n ap = inCounts['ap'][index]\n sig = inCounts['sig'][index]\n\n if inCounts['type'][index] == 'unc':\n correction = inCounts['apCorr'][index]\n flx = fap.calc_phot(0.4,correction,0,srcUnc,srcPx,bkgUnc,bkgPx)\n mab = fap.uJy2AB_unc(flx[1]*3,0)\n flx = np.asarray(flx)\n flx[1]*=3\n elif inCounts['type'][index] == 'flx' or inCounts['type'][index] == 'sub':\n flx = fap.calc_phot(0.4,correction,float(bkgSubCounts),srcUnc,srcPx,bkgUnc,bkgPx)\n mab = fap.uJy2AB_unc(flx[0],flx[1])\n else:\n sys.exit('What happened? Type not recognized')\n \n photWrite.write('{GRB}\\t{ch}\\t{ftype}\\t{flx}\\t{flxunc}\\t{m}\\t{munc}\\t{ap}\\t{sig}\\n'.\\\n format(GRB=GRB,ch=ch,ftype=ftype,flx=flx[0],flxunc=flx[1],\\\n m=mab[0],munc=mab[1],ap=ap,sig=sig))\nphotWrite.close()\n","repo_name":"mmyers5/GammIT","sub_path":"deprecated/calc_all_phot.py","file_name":"calc_all_phot.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72209746157","text":"# define function to find prime numbers\ndef find_prime(number, primes):\n if number == 0:\n return False\n elif number == 1:\n return False\n elif number == 2:\n return True\n else:\n if number % 2 == 0:\n return False\n else:\n for i in primes:\n if number % i == 0:\n return False\n if i * i > number:\n break\n return True\n\n\n# define and initialise data structures\nprimes = [2,3]\n\n# define and initialise variables\ninput_number = int(input(\"Input your number.\"))\nfound = False\n\n# determine the prime numbers from 2 to input_number\nfor i in range(input_number):\n if find_prime(i, primes):\n if i not in primes:\n primes.append(i)\n else:\n continue\n\n# determine two prime numbers that add up to input_number\nwhile found is False:\n for i in primes:\n for j in primes:\n if i + j == input_number:\n # output\n print(i,\"+\",j,\"=\",input_number)\n found = True\n if found is False:\n # output\n print(\"Not found.\")\n break\n \n","repo_name":"Leeyp/Euler","sub_path":"Gi1.py","file_name":"Gi1.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24137127133","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('list', views.listfunc, name='list'),\n path('create', views.ScrapeCreate.as_view(), name='create'),\n path('update/', views.DiaryUpdate.as_view(), name='update'),\n path('analysis', views.analysis, name='analysis'),\n path('signup', views.signupfunc, name='signup'),\n path('login', views.loginfunc, name='login'),\n path('logout', views.logoutfunc, name='logout'),\n path('graph', views.graph, name='graph'),\n]","repo_name":"shuya106/healthcare_webapp","sub_path":"HEALTHCHECK/myhealth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"38611032677","text":"#!/usr/bin/env python\n\n\"\"\"\nHeightSettingsChecker: checks that passed in settings are \n above minimum manuever height.\n\n\"\"\"\n\nimport rospy\n\nclass HeightSettingsChecker(object):\n def __init__(self):\n try:\n self._MIN_MANEUVER_HEIGHT = rospy.get_param('~min_maneuver_height')\n except KeyError as e:\n rospy.logerr('Could not lookup a parameter Height Settings Checker')\n raise\n\n def above_min_maneuver_height(self, current_height):\n if (current_height < self._MIN_MANEUVER_HEIGHT):\n return False\n else:\n return True\n","repo_name":"Pitt-RAS/iarc7_motion","sub_path":"src/iarc7_motion/iarc_tasks/task_utilities/height_settings_checker.py","file_name":"height_settings_checker.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"12970520116","text":"from django.shortcuts import render\nimport requests\n\n# Create your views here.\n\ndef index(request):\n\n try:\n context = None\n if 'city' in request.POST:\n city = request.POST['city']\n else:\n city = 'dubai'\n \n appid = 'c6b66c97f9e2dc63cea06554d6e294b3'\n url = 'https://api.openweathermap.org/data/2.5/weather'\n params = {\n 'q': city,\n 'appid': appid,\n 'units': 'metric'\n }\n req = requests.get(url=url, params=params)\n res = req.json()\n \n try:\n name = res['name']\n country = res['sys']['country']\n temp = res['main']['temp']\n temp_min = res['main']['temp_min']\n temp_max = res['main']['temp_max']\n humid = res['main']['humidity']\n status = res['weather'][0]['main']\n icon = res['weather'][0]['icon']\n wind = res['wind']['speed']\n except:\n return render(request, 'core/error.html')\n \n\n context = {\n 'name': name,\n 'country': country,\n 'temp': temp,\n 'temp_min': temp_min,\n 'temp_max': temp_max,\n 'humid': humid,\n 'status': status,\n 'icon': icon,\n 'wind': wind,\n }\n\n return render(request, 'core/index.html', context)\n \n except:\n return render(request, 'core/error.html')","repo_name":"nooto-code/weather-app","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20278125768","text":"\nimport mne\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('Agg')\nimport numpy as np\nimport os\n\n\n#%matplotlib qt\n \n#USER_LIST = 'Res/Users.npy'\n#DB_PATH = 'Res/dataset.npy'\n\ndef SetPaths():\n global USER_LIST\n global DB_PATH\n global USER_PLOT_PATH \n dirname = os.path.dirname(__file__) ### This defines dirctory path\n USER_LIST = os.path.join(dirname, 'Res/Users.npy') ### this puts absolute path in filename\n DB_PATH = os.path.join(dirname, 'Res/dataset.npy') \n USER_PLOT_PATH = 
os.path.join(dirname,'Res/Plots')\n \n\n\n\ndef createDir(s):\n SetPaths()\n global PLOT_PATH\n PLOT_PATH = os.path.join(USER_PLOT_PATH,s)\n os.mkdir(PLOT_PATH)\n\n\ndef Input(fname):\n SetPaths()\n raw = mne.io.read_raw_edf(fname,preload=True)\n fig = raw.plot()\n fig.savefig(PLOT_PATH+'/'+'raw-data.png')\n fig = raw.plot_psd()\n #print(USER_PLOT_PATH)\n #print(USER_PLOT_PATH+'/'+'power_spectral_density.png')\n fig.savefig(PLOT_PATH+'/'+'power_spectral_density.png')\n return raw\n \n \n\ndef SetMontage(raw):\n SetPaths()\n raw.filter(None, 50., h_trans_bandwidth='auto', filter_length='auto',\n phase='zero')\n for x in raw.ch_names:\n str = x.replace(\".\", \"\")\n raw.rename_channels(mapping={x:str})\n raw.filter(1, 40, n_jobs=2) \n montage = mne.channels.make_standard_montage('standard_1005')\n fig = montage.plot()\n fig.savefig(PLOT_PATH+'/'+'Montage.png')\n raw.set_montage(montage,match_case=False)\n\n \n\ndef ApplyPCA(raw,n): \n SetPaths()\n dictionary = {\"T2\" : 100}\n eves = mne.events_from_annotations(raw,dictionary)\n events = eves[0] \n events_ids = {\"target/stimulus\":100}\n epochs = mne.Epochs(raw,events,event_id=events_ids,preload=True)\n fig = epochs.plot()\n fig.savefig(PLOT_PATH+'/'+'raw_epochs.png')\n fig = epochs.plot_psd()\n fig.savefig(PLOT_PATH+'/'+'epochs_psd.png') \n from mne.decoding import UnsupervisedSpatialFilter\n from sklearn.decomposition import PCA\n X = epochs.get_data()\n pca = UnsupervisedSpatialFilter(PCA(n), average=False)\n pca_data = pca.fit_transform(X)\n tmin, tmax = -0.1, 0.3\n ev = mne.EvokedArray(np.mean(pca_data, axis=0),\n mne.create_info(n,epochs.info['sfreq'],ch_types='eeg'),tmin=tmin)\n fig = ev.plot(show=False, window_title=\"PCA\", time_unit='s')\n fig.savefig(PLOT_PATH+'/'+'PCA_15_Channels.png')\n fig = ev.plot_image()\n fig.savefig(PLOT_PATH+'/'+'EvokedData_As_Image.png')\n \n epoch_avg = np.mean(pca_data, axis=0) \n return pca_data,epoch_avg\n\n\ndef ModifyDatabase(epochs,label):\n try:\n print(\"ModifyDatabase begins....\")\n SetPaths()\n X = epochs\n y = label\n db = np.load(DB_PATH,allow_pickle='TRUE')\n flatX = X.flatten()\n flatX = np.append(flatX,y)\n db = np.append(db,[flatX],axis=0)\n np.save(DB_PATH,db) \n print(\"ModifyDatabase successful\")\n #db.close()\n except Exception as e:\n print(e)\n print(\"ModifyDB haga\")\n \n\n \n\n#For Model\ndef PredInput(fname):\n raw = mne.io.read_raw_edf(fname,preload=True)\n return raw\n \n \n\ndef PredSetMontage(raw):\n raw.filter(None, 50., h_trans_bandwidth='auto', filter_length='auto',\n phase='zero')\n for x in raw.ch_names:\n str = x.replace(\".\", \"\")\n raw.rename_channels(mapping={x:str})\n raw.filter(1, 40, n_jobs=2) \n montage = mne.channels.make_standard_montage('standard_1005')\n raw.set_montage(montage,match_case=False)\n\n \n\ndef PredApplyPCA(raw,n): \n dictionary = {\"T2\" : 100}\n eves = mne.events_from_annotations(raw,dictionary)\n events = eves[0] \n events_ids = {\"target/stimulus\":100}\n epochs = mne.Epochs(raw,events,event_id=events_ids,preload=True)\n from mne.decoding import UnsupervisedSpatialFilter\n from sklearn.decomposition import PCA\n X = epochs.get_data()\n pca = UnsupervisedSpatialFilter(PCA(n), average=False)\n pca_data = pca.fit_transform(X)\n epoch_avg = np.mean(pca_data, axis=0) \n return 
pca_data,epoch_avg\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jtn-b/Mind-ID","sub_path":"BackEnd/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35831638511","text":"# encoding = 'utf-8'\nfrom keras.models import Sequential, Model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Input, LSTM, Dense, Embedding, RepeatVector, TimeDistributed, Dropout, LeakyReLU\nfrom bert_serving.client import BertClient\nfrom data_loader import Loader\nimport numpy as np\nimport re\nimport random\nimport json\n\nCORPUS_PATH = 'data\\\\證券交易法標記輸出檔_v1'\n\nbc = BertClient()\nprint('bert is ready')\ndl = Loader()\ndl.load_corpus(CORPUS_PATH)\n\n\ndef baseline_model(category_out):\n # create model\n model = Sequential()\n model.add(Dense(768, input_dim=768, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(256, input_dim=768, activation='tanh'))\n model.add(Dropout(0.5))\n model.add(Dense(category_out, activation='sigmoid'))\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\n# prepare training data\ndef generate_sentence2pos_dict(text):\n dic = {}\n idx = 0\n for i in range(len(text)):\n if text[i] == ',' or text[i] == '。':\n idx += 1\n dic[i] = idx\n return dic\n\n\ndef generate_meaning_label(label, dic):\n tmp = {}\n for l in label:\n if dic[l['start_offset']] == dic[l['end_offset']-1]:\n tmp[dic[l['start_offset']]] = l['label']\n else:\n for i in range(dic[l['start_offset']], dic[l['end_offset']-1]+1):\n tmp[i] = l['label']\n return tmp\n\n\ndef one_hot_label(ids, dic):\n ret = []\n for id in ids:\n res = [0 for _ in range(len(dic))]\n res[dic[id]] = 1\n ret.append(res)\n return ret\n\n\ntraining_data = []\n# label_data = []\n# dl.text = [dl.text[0]]\nfor i in range(len(dl.text)):\n text = dl.text[i]\n label = dl.label[i]\n sentences = re.split('[,。]', text)\n dic = generate_sentence2pos_dict(text)\n l_dic = generate_meaning_label(label, dic)\n for idx, s in enumerate(sentences):\n if(s):\n if idx in l_dic:\n training_data.append([s, l_dic[idx]])\n # label_data.append(l_dic[idx])\n else:\n training_data.append([s, 0])\n # label_data.append(0) # no label\n\nlabel2id = {}\nid = 0\nfor l in (training_data):\n if l[1] not in label2id:\n label2id[l[1]] = id\n id += 1\n else:\n pass\n\nwith open('label2id.txt', 'w', encoding='utf-8') as f:\n for k in label2id.keys():\n f.write('{} : {}\\n'.format(label2id[k], k))\n\n# encode first\ntmp_data = bc.encode([data[0] for data in training_data])\nfor data, encode in zip(training_data, tmp_data):\n # print(bc.encode([data[0]]))\n data[0] = encode\n\n# write training_data\nwith open('training_data', 'w', encoding='utf-8') as f:\n j = {}\n j['data'] = []\n for d in training_data:\n tmp = {}\n tmp['code'] = d[0].tolist()\n tmp['label'] = d[1]\n j['data'].append(tmp)\n json.dump(j, f)\n\n# show stat\nstat_dict = {}\nfor d in training_data:\n if d[1] in stat_dict:\n stat_dict[d[1]] += 1\n else:\n stat_dict[d[1]] = 1\nfor k in stat_dict:\n print('{} {}'.format(k, stat_dict[k]))\n\n# save label\n# label2id = {}\n# for id, l in enumerate(set(training_data)):\n# label2id[l[1]] = id\n# with open('label2id.txt', 'w', encoding='utf-8') as f:\n# for k in label2id.keys():\n# f.write('{} : {}\\n'.format(label2id[k], k))\n#\n# label_count = {}\n# for l in training_data:\n# if l[1] in label_count:\n# label_count[l[1]] += 1\n# else:\n# 
label_count[l[1]] = 1\n# print(label_count)\n# max_num = max([label_count[k] for k in label_count.keys()])\n#\n#\n# using bert to generate article vector and use simple NN to predict the law category\n# print('start bert encoding')\n# t_X = bc.encode(training_data).tolist()\n#\n# balance data by copy data randomly for specific label until number is equal\n# =====\n# tmp_X = []\n# tmp_Y = []\n#\n# label2data= {}\n# tmp_label2data = {}\n#\n#\n# def random_generate(data, num):\n# res = []\n# for _ in range(num):\n# res.append(data[random.randint(0, len(data)-1)])\n# return res\n#\n#\n# for i, x in enumerate(t_X):\n# if training_data[i][1] in label2data:\n# label2data[training_data[i][1]] += [x]\n# else:\n# label2data[training_data[i][1]] = [x]\n# for k in label2data.keys():\n# tmp_X += random_generate(label2data[k], max_num)\n# tmp_Y += [k for _ in range(max_num)]\n# tmp_Y = one_hot_label(tmp_Y, label2id)\n# train_X = np.asarray(tmp_X)\n# train_Y = np.asarray(tmp_Y)\n# print(train_X.shape)\n# print(train_Y.shape)\n\n\n# ====\n# class DataGenerator(object):\n# def __init__(self, rescale=None):\n# self.train = []\n# self.target = []\n# self.train_sentences = []\n# self.reset()\n#\n# def reset(self):\n# self.train = []\n# self.target = []\n# self.train_sentences = []\n#\n# def flow_from_directory(self, data, label2id, batch_size=32):\n# input_data = np.zeros(\n# (batch_size, 768),\n# dtype='float32')\n# target_data = np.zeros(\n# (batch_size, len(label2id)),\n# dtype='float32')\n# while True:\n# time = 0\n# for i, d in enumerate(data):\n# input_data[time] = d[0]\n# # self.train_sentences.append(d[0])\n# target_data[time, label2id[d[1]]] = 1.\n# time += 1\n# # print(input_data)\n# # print(target_data)\n# if time == batch_size:\n# self.train = input_data\n# self.target = target_data\n# inputs = np.asarray(self.train, dtype='float32')\n# targets = np.asarray(self.target, dtype='float32')\n# self.reset()\n# time = 0\n# # print('in: '+str(inputs[0]))\n# # print('out: '+str(targets[0]))\n# yield inputs, targets\n# self.train = input_data\n# self.target = target_data\n# inputs = np.asarray(self.train, dtype='float32')\n# targets = np.asarray(self.target, dtype='float32')\n# self.reset()\n# yield inputs, targets\n\n\n# EPOCHS = 20\n# BATCH = 128\n# STEPS_PER_EPOCH = (len(training_data)/BATCH)\n# datagen = DataGenerator()\n# model = baseline_model(len(label2id))\n# model.summary()\n# # cp = ModelCheckpoint('law2fact.h', monitor='val_loss', save_best_only=True)\n# # model.fit(train_X, train_Y, epochs=20, batch_size=32, shuffle=True)\n# model.fit_generator(\n# generator=datagen.flow_from_directory(training_data, label2id, BATCH),\n# epochs=EPOCHS,\n# steps_per_epoch=STEPS_PER_EPOCH,\n# )\n#\n# model.save('law2fact.h')","repo_name":"huangcap/Law2Fact","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"30409010081","text":"import logging\nimport os\nimport re\nimport shutil\nimport subprocess\n\nfrom abstract_publisher import AbstractPublisher\nfrom json_helpers import json_load, json_dump\nfrom package_version_rewriter import update_package_json_versions, rewrite_shrinkwrap_file\n\nclass NpmPublisher(AbstractPublisher):\n ''' Reads and publishes npm packages assuming an incrementing revision number rather than using\n full-fledged semver. 
Nuclide packages are developed as a consistent set, with 0.0.0 as the\n version that is always stored in source control, but with a 0.0.x scheme that is used for\n the versions published to npm.\n '''\n\n _version_regex = re.compile('^0\\.0\\.(\\d+)$')\n\n def __init__(self, config, npm, tmpdir, transpiler, boilerplate_files):\n self._config = config\n self._npm = npm\n self._tmpdir = os.path.join(tmpdir, 'npm')\n self._tmp_package = os.path.join(self._tmpdir, self.get_package_name())\n self._transpiler = transpiler\n self._boilerplate_files = boilerplate_files\n\n def get_package_name(self):\n return self._config.package_name\n\n def is_already_published(self, target_version):\n semver = '0.0.%s' % target_version\n return self._npm.is_published(self.get_package_name(), semver)\n\n def get_published_version(self):\n ''' Reads the `npm info` of the package, gets the current version (of the form 0.0.x)\n and then returns the incrementing version, integer x.\n '''\n logging.info('Attempting to determine version of %s in npm', self.get_package_name())\n\n # We often call this multiple times to check publication progress, so force non-memoization.\n semver = self._npm.info(self._config.package_directory, force=True).get('version', '')\n\n match = self._version_regex.match(semver)\n if match:\n version = int(match.group(1))\n logging.info('Version of %s is %d', self.get_package_name(), version)\n else:\n version = 0\n logging.warning('Version of %s is not available; defaulting to 0' %\n self.get_package_name())\n return version\n\n def is_published_version(self, target_version):\n return self.get_published_version() == target_version\n\n def prepublish(self, new_version, atom_semver):\n logging.info('Publishing %s to npm at version %s', self.get_package_name(), new_version)\n\n # Create temporary directory and copy package into it (without dependencies).\n package = self._config.package_directory\n logging.info('Copying %s to tmpdir', self.get_package_name())\n shutil.copytree(package, self._tmp_package, ignore=shutil.ignore_patterns('node_modules'))\n\n # Make sure that standard boilerplate files are included in the repo.\n for name, src in self._boilerplate_files.items():\n shutil.copyfile(\n src,\n os.path.join(self._tmp_package, name))\n\n # Load package.json and rewrite version number within it.\n package_file = os.path.join(self._tmp_package, 'package.json')\n package = json_load(package_file)\n package = update_package_json_versions(self.get_package_name(), package,\n self._config.nuclide_npm_package_names, new_version)\n\n # Delete \"_atomModuleCache\" field from package.json.\n # TODO (chenshen): delete following line once '_atomModuleCache' is not fake.\n if '_atomModuleCache' in package:\n del package['_atomModuleCache']\n\n # Specify the license if it is not already specified.\n if 'license' not in package:\n package['license'] = 'SEE LICENSE IN LICENSE'\n\n # Write the adjusted package file back to the temporary directory and publish it.\n json_dump(package, package_file)\n\n # Pre-transpile Babel files, as appropriate.\n self._transpiler.transpile_in_place(self.get_package_name(), self._tmp_package)\n\n rewrite_shrinkwrap_file(self._tmp_package,\n package, self._config.nuclide_npm_package_names, new_version)\n\n def publish(self, new_version, atom_semver):\n try:\n self._npm.publish(self._tmp_package)\n except subprocess.CalledProcessError:\n logging.error('FAILED to publish package %s at version %d; it may already be published',\n self.get_package_name(), 
new_version)\n","repo_name":"zgao/as-nuclide","sub_path":"scripts/lib/publishers/npm_publisher.py","file_name":"npm_publisher.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"7269589737","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns('src.shop.views',\n\n url(r'^$', 'index', name='index'),\n url(r'^category/(?P[^/]+)/$', 'category', name='category'),\n url(r'^product/(?P[^/]+)/$', 'product_detail', name='product_detail'),\n\n\n)\n","repo_name":"D1onisko/market","sub_path":"src/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70819564717","text":"import json as Json\n\nfrom xml_provider.xml_provider import XmlProvider\n\n\nclass DataFilesToRunConvertor:\n\n def __init__(self):\n self._import_path = None\n self._isa_json = None\n self._current_data_file = None\n self._data_files = None\n\n self.experiment_reference = None\n pass\n\n\n def get_data_files(self):\n data_files = []\n studies = self._isa_json[\"investigation\"][\"studies\"]\n for study in studies:\n assays = study[\"assays\"]\n for assay in assays:\n data_files.extend(assay[\"dataFiles\"])\n \n return data_files\n\n\n @property\n def import_path(self):\n return self._import_path\n\n @import_path.setter\n def import_path(self, path):\n self._import_path = path\n with open(path) as isa_file:\n self._isa_json = Json.load(isa_file)\n self._data_files = self.get_data_files()\n pass\n\n\n def validate(self):\n if self._is_empty(self.experiment_reference):\n raise ValueError(\"Experiment reference number is needed in dataFiles\")\n\n for data_file in self._data_files:\n if self._is_empty(data_file[\"name\"]):\n raise ValueError(\"name is needed in dataFiles\")\n \n requirment_comments = [\"file checksum\"]\n not_fulfilled_requirement_comments = requirment_comments.copy()\n for comment in data_file[\"comments\"]:\n key = comment[\"name\"]\n value = comment[\"value\"]\n if key in requirment_comments and not self._is_empty(value):\n not_fulfilled_requirement_comments.remove(key)\n if len(not_fulfilled_requirement_comments) > 0:\n raise ValueError(f\"{not_fulfilled_requirement_comments} is/are needed in dataFiles comments\")\n pass\n\n\n def convert(self):\n self.validate()\n\n run_set = self.run_set_xml()\n for data_file in self._data_files:\n self._current_data_file = data_file\n run_set.add(\n self.run_xml() \\\n .add(self.title_xml()) \\\n .add(self.experiment_ref_xml()) \\\n .add(\n self.data_block_xml() \\\n .add(self.files_xml())\n ) \n ) \n\n return run_set\n\n\n def run_set_xml(self):\n run_set = XmlProvider(\"RUN_SET\")\n\n return run_set\n\n\n def run_xml(self):\n run = XmlProvider(\"RUN\")\n run.set_attribute(\"alias\", self._current_data_file[\"@id\"])\n\n return run\n\n\n def title_xml(self):\n title = XmlProvider(\"TITLE\")\n title.set_text(self._current_data_file[\"name\"])\n\n return title\n\n\n def experiment_ref_xml(self):\n experiment_ref = XmlProvider(\"EXPERIMENT_REF\")\n experiment_ref.set_attribute(\"refname\", self.experiment_reference)\n\n return experiment_ref\n\n\n def data_block_xml(self):\n data_block = XmlProvider(\"DATA_BLOCK\")\n\n return data_block\n\n\n def files_xml(self):\n files = XmlProvider(\"FILES\")\n\n file = XmlProvider(\"FILE\")\n file.parent = files\n file.set_attribute(\"filename\", 
self._current_data_file[\"name\"])\n for comment in self._current_data_file[\"comments\"]:\n name = comment[\"name\"]\n value = comment[\"value\"]\n if name == \"file type\":\n file.set_attribute(\"filetype\", value)\n elif name == \"file checksum\":\n file.set_attribute(\"checksum\", value)\n\n return files\n\n\n def _is_empty(self, text: str):\n return not (text and text.strip())","repo_name":"elixir-europe/biohackathon-projects-2022","sub_path":"27/isa_ena/converter/data_files.py","file_name":"data_files.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"73"} +{"seq_id":"71542306795","text":"import utils as ut\n\ndef parse1(line):\n\treturn line\n\ndef day05p1():\n print('day 05 part 1')\n lines = ut.get_file('day05_input.txt', parse1)\n max_seat_id = 0\n for line in lines:\n # s = ''\n # for letter in line[:-3]:\n # if letter == 'F': s += '0'\n # if letter == 'B': s += '1'\n # row_num = int(s, 2)\n #\n # s = ''\n # for letter in line[-3:]:\n # if letter == 'L': s += '0'\n # if letter == 'R': s += '1'\n # col_num = int(s, 2)\n #\n # max_seat_id = max(max_seat_id, row_num * 8 + col_num)\n\n mapping = {\n 'F': '0',\n 'B': '1',\n 'L': '0',\n 'R': '1'\n }\n for k,v in mapping.items():\n line = line.replace(k,v)\n\n max_seat_id = max(max_seat_id, int(line, 2))\n return max_seat_id\n\nprint(day05p1()) # 7:35\n\ndef parse2(line):\n\treturn line\n\ndef day05p2():\n print('day 05 part 2')\n lines = ut.get_file('day05_input.txt', parse2)\n max_seat_id = 0\n seat_id_list = []\n for line in lines:\n s = ''\n for letter in line[:-3]:\n if letter == 'F': s += '0'\n if letter == 'B': s += '1'\n row_num = int(s, 2)\n s = ''\n for letter in line[-3:]:\n if letter == 'L': s += '0'\n if letter == 'R': s += '1'\n col_num = int(s, 2)\n seat_id = row_num * 8 + col_num\n seat_id_list.append(seat_id)\n # max_seat_id = max(max_seat_id, row_num * 8 + col_num)\n seat_id_list.sort()\n\n seat_id = seat_id_list[0]\n for val in seat_id_list:\n if val == seat_id:\n seat_id += 1\n else:\n print(val)\n seat_id = val+1\n\n\n return seat_id_list\n\nprint(day05p2()) # 12:34 cumulative\n","repo_name":"yufengg/adventofcode","sub_path":"day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"20647540700","text":"from beaker.middleware import SessionMiddleware\nfrom app import vn, auth\nimport bottle\nimport os\n\n#Beaker session middleware\nsession_opts = {\n 'session.type': 'file',\n 'session.cookie_expires': True,\n 'session.auto' : True,\n 'session.data_dir': 'cache'\n}\napp = bottle.default_app()\napp = SessionMiddleware(app, session_opts)\nport = int(os.environ.get(\"PORT\", 5000))\n\nimport socket\nhostname = socket.gethostname()\n#if on heroku host name will not contain rizvan\nif 'Rizvan' in hostname:\n bottle.run(host='0.0.0.0', port=port, app=app)\n\n\n","repo_name":"rizvn/videonotes","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23713489879","text":"import sys, os\n\nfrom PyQt5.QtWidgets import *\nimport numpy as np\n\nGUI_FILE_NAME = 'gui'\nos.system('python -m PyQt5.uic.pyuic -x ' + GUI_FILE_NAME + '.ui -o ' + GUI_FILE_NAME + '.py')\n\nfrom gui import Ui_MainWindow\nfrom pandasModel import *\n\n\nclass Form(QMainWindow, Ui_MainWindow):\n def 
__init__(self):\n super().__init__()\n self.setupUi(self)\n self.df = pd.DataFrame()\n\n # signal\n self.btnOpen.clicked.connect(self.openFile)\n self.btnStudent.clicked.connect(self.countStudent)\n self.btnAve.clicked.connect(self.calAverage)\n self.cboGrade.currentTextChanged.connect(self.viewGrade)\n\n def viewGrade(self, txt):\n if txt:\n if txt == 'ALL':\n df = self.df\n else:\n df = self.df[self.df.grade == int(txt)].copy()\n self.drawDf(df)\n\n def drawDf(self, df):\n model = pandasModel(df)\n self.tableView.setModel(model)\n\n self.tableView.resizeColumnsToContents()\n\n def openFile(self):\n file_name = 'score.csv'\n self.df = pd.read_csv(file_name)\n arr = np.sort(self.df.grade.unique())\n self.cboGrade.clear()\n self.cboGrade.addItem('ALL')\n self.cboGrade.addItems([str(x) for x in arr])\n self.drawDf(self.df)\n\n def calAverage(self):\n df = self.df.copy()\n df = df.set_index('names')\n df = df.loc[:, 'kor':'mat']\n df['합계'] = df.sum(axis=1)\n df['평균'] = df.loc[:, 'kor':'mat'].mean(axis=1).round()\n self.drawDf(df)\n\n def countStudent(self):\n df = self.df.copy()\n grade = df['grade'].value_counts()\n df2 = grade.to_frame(name='학생수').sort_index()\n df2.rename(index=lambda x: str(x) + '학년', inplace=True)\n self.drawDf(df2)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n w = Form()\n w.show()\n sys.exit(app.exec_())\n","repo_name":"XyRo1234/xyro","sub_path":"02_실습자료_PyQt5/실습예제/9부. Pandas와 Matplot/1.Pandas/ex9_1_01/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"32799467830","text":"\"\"\" \nTask: Given a natural number n, (n>99).\nWrite a program that determines its third (from the beginning) digit.\n\"\"\"\n\nn = int(input())\n\nwhile n > 999:\n n //= 10\n\nprint(n % 10)","repo_name":"malikinss/portfolio","sub_path":"Python/Just Python/beegeek/Beegeek Python For Beginners/8_2_5.py","file_name":"8_2_5.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"43681437801","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport random\nimport string\n\ntry:\n browser = webdriver.Chrome()\n browser.get(\"http://suninjuly.github.io/huge_form.html\")\n elements = browser.find_elements(By.CSS_SELECTOR, \"input[type='text']\")\n\n for element in elements:\n random_word = ''.join(random.choice(string.ascii_lowercase) for _ in range(8))\n element.send_keys(random_word)\n\n checkbox = browser.find_element(By.CSS_SELECTOR, \"input[type='checkbox']\")\n checkbox.click()\n\n radio = browser.find_element(By.CSS_SELECTOR, \"input[value='robots']\")\n radio.click()\n\n button = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n button.click()\n\nfinally:\n time.sleep(3)\n browser.quit()","repo_name":"KopchukVolod/SeleniumWebDriver-on-Python","sub_path":"The code navigates to a webpage that contains a huge form with numerous input fields of various types.py","file_name":"The code navigates to a webpage that contains a huge form with numerous input fields of various types.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"37033893661","text":"\"\"\"\nGiven a positive integer target, output all sequences of consecutive positive integers that sum to target (each sequence must contain at least two numbers).\nThe numbers within a sequence are in ascending order, and the sequences are ordered by their first number, ascending.\nExample 1:\n\nInput: target = 9\nOutput: [[2,3,4],[4,5]]\n\nExample 2:\n\nInput: target = 
15\nOutput: [[1,2,3,4,5],[4,5,6],[7,8]]\n\"\"\"\nclass Solution:\n def FindContinuousSequence(self, tsum):\n \"\"\"\n time O(target)\n space O(1)\n :param tsum:\n :return:\n \"\"\"\n l, r = 1, 2\n ans = []\n while l < r:\n sum = (l + r) * (r - l + 1) / 2\n if sum == tsum:\n ans.append([x for x in range(l, r + 1, 1)])\n l = l + 1\n elif sum < tsum:\n r += 1\n else:\n l += 1\n return ans\n\n def FindContinuousSequence_root(self, tsum):\n res = []\n # y cannot exceed the midpoint of target, i.e. y <= target//2 + 1; range() excludes its stop value, hence the +2\n for y in range(1, tsum // 2 + 2):\n # apply the quadratic root formula\n x = (1/4 + y**2 + y - 2 * tsum) ** (1/2) + 0.5\n # make sure x is not a complex number and that x is an integer\n if type(x) != complex and x - int(x) == 0:\n res.append(list(range(int(x), y + 1)))\n return res\n\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.FindContinuousSequence(15))","repo_name":"XyK0907/for_work","sub_path":"LeetCode/Offer/和为s的连续整数序列.py","file_name":"和为s的连续整数序列.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42861934607","text":"# !/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nfrom datetime import datetime\r\n\r\n\r\ndef get_worker():\r\n surname = input(\"Фамилия: \")\r\n name = input(\"Имя: \")\r\n zodiac = input(\"Знак зодиака: \")\r\n date = input(\"Дата: \")\r\n\r\n return {\r\n 'surname': surname,\r\n 'name': name,\r\n 'zodiac': zodiac,\r\n 'date': datetime.strptime(date, \"%Y-%m-%d\")\r\n }\r\n\r\n\r\ndef display_workers(staff):\r\n if staff:\r\n\r\n line = '+-{}-+-{}-+-{}-+-{}-+-{}-+'.format(\r\n '-' * 4,\r\n '-' * 30,\r\n '-' * 20,\r\n '-' * 15,\r\n '-' * 15\r\n )\r\n print(line)\r\n print(\r\n '| {:^4} | {:^30} | {:^20} | {:^15} | {:^15} |'.format(\r\n \"№\",\r\n \"Фамилия\",\r\n \"Имя\",\r\n \"Знак зодиака\",\r\n \"Дата рождения\"\r\n )\r\n )\r\n print(line)\r\n\r\n # Display data for all workers.\r\n for idx, worker in enumerate(staff, 1):\r\n print(\r\n '| {:^4} | {:^30} | {:^20} | {:^15} | {:^15} |'.format(\r\n idx,\r\n worker.get('surname', ''),\r\n worker.get('name', ''),\r\n worker.get('zodiac', ''),\r\n str(worker.get('date', '').date())\r\n )\r\n )\r\n print(line)\r\n\r\n else:\r\n print(\"Список пуст.\")\r\n\r\n\r\ndef select_workers(staff):\r\n month1 = int(input(\"Введите месяц: \"))\r\n result = []\r\n for worker in staff:\r\n if worker.get('date', '').month == month1:\r\n result.append(worker)\r\n return result\r\n\r\n\r\ndef main():\r\n workers = []\r\n\r\n while True:\r\n\r\n command = input(\">>> \").lower()\r\n\r\n if command == 'exit':\r\n break\r\n\r\n elif command == 'add':\r\n worker = get_worker()\r\n\r\n workers.append(worker)\r\n if len(workers) > 1:\r\n workers.sort(key=lambda item: item.get('name', ''))\r\n\r\n elif command == 'list':\r\n display_workers(workers)\r\n\r\n elif command.startswith('select'):\r\n selected = select_workers(workers)\r\n display_workers(selected)\r\n\r\n elif command == 'help':\r\n print(\"Список команд:\\n\")\r\n print(\"add - добавить запись;\")\r\n print(\"list - вывести список;\")\r\n print(\"select - список родившихся в один месяц;\")\r\n print(\"help - отобразить справку;\")\r\n print(\"exit - завершить работу с программой.\")\r\n\r\n else:\r\n print(\"\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"tamaranesterenko/Python.LR_11","sub_path":"IDZ.py","file_name":"IDZ.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"36138473364","text":"import struct\n\n\nclass CBits:\n \"\"\"\n Changes bits from a byte register\n \"\"\"\n\n def __init__(\n self,\n num_bits: int,\n register_address: int,\n start_bit: int,\n register_width=1,\n lsb_first=True,\n ) -> None:\n self.bit_mask = ((1 << num_bits) - 1) << start_bit\n self.register = register_address\n self.star_bit = start_bit\n self.lenght = register_width\n self.lsb_first = lsb_first\n\n def __get__(\n self,\n obj,\n objtype=None,\n ) -> int:\n mem_value = obj._i2c.readfrom_mem(obj._address, self.register, self.lenght)\n\n reg = 0\n order = range(len(mem_value) - 1, -1, -1)\n if not self.lsb_first:\n order = reversed(order)\n for i in order:\n reg = (reg << 8) | mem_value[i]\n\n reg = (reg & self.bit_mask) >> self.star_bit\n\n return reg\n\n def __set__(self, obj, value: int) -> None:\n memory_value = obj._i2c.readfrom_mem(obj._address, self.register, self.lenght)\n\n reg = 0\n order = range(len(memory_value) - 1, -1, -1)\n if not self.lsb_first:\n order = range(0, len(memory_value))\n for i in order:\n reg = (reg << 8) | memory_value[i]\n reg &= ~self.bit_mask\n\n value <<= self.star_bit\n reg |= value\n reg = reg.to_bytes(self.lenght, \"big\")\n\n obj._i2c.writeto_mem(obj._address, self.register, reg)\n\n\nclass RegisterStruct:\n \"\"\"\n Register Struct\n \"\"\"\n\n def __init__(self, register_address: int, form: str) -> None:\n self.format = form\n self.register = register_address\n self.lenght = struct.calcsize(form)\n\n def __get__(\n self,\n obj,\n objtype=None,\n ):\n if self.lenght <= 2:\n value = struct.unpack(\n self.format,\n memoryview(\n obj._i2c.readfrom_mem(obj._address, self.register, self.lenght)\n ),\n )[0]\n else:\n value = struct.unpack(\n self.format,\n memoryview(\n obj._i2c.readfrom_mem(obj._address, self.register, self.lenght)\n ),\n )\n return value\n\n def __set__(self, obj, value):\n mem_value = struct.pack(self.format, value)\n obj._i2c.writeto_mem(obj._address, self.register, mem_value)\n\n\nclass _BoundStructArray:\n \"\"\"\n Array object that `StructArray` constructs on demand.\n\n :param object obj: The device object to bind to. It must have a `i2c` attribute\n :param int register_address: The register address to read the bit from\n :param str struct_format: The struct format string for each register element\n :param int count: Number of elements in the array\n \"\"\"\n\n def __init__(\n self,\n obj,\n register_address,\n struct_format,\n count,\n ):\n self.format = struct_format\n self.first_register = register_address\n self.obj = obj\n self.count = count\n self.length = struct.calcsize(struct_format)\n\n def __getitem__(self, index):\n if not 0 <= index < self.count:\n raise IndexError()\n reg_to_get = self.first_register + self.length * index\n value = struct.unpack(\n self.format,\n memoryview(\n self.obj._i2c.readfrom_mem(self.obj._address, reg_to_get, self.length)\n ),\n )\n return value\n\n def __setitem__(self, index, value) -> None:\n reg_to_write = self.first_register + self.length * index\n mem_value = struct.pack(self.format, *value)\n self.obj._i2c.writeto_mem(self.obj._address, reg_to_write, mem_value)\n\n def __len__(self) -> int:\n return self.count\n\n\nclass StructArray:\n \"\"\"\n Repeated array of structured registers that are readable and writeable.\n\n Based on the index, values are offset by the size of the structure.\n\n Values are tuples that map to the values in the defined struct. See struct\n module documentation for struct format string and its possible value types.\n\n .. 
note:: This assumes the device addresses correspond to 8-bit bytes. This is not suitable for\n devices with registers of other widths such as 16-bit.\n\n :param int register_address: The register address to begin reading the array from\n :param str struct_format: The struct format string for this register.\n :param int count: Number of elements in the array\n \"\"\"\n\n def __init__(self, register_address: int, struct_format: str, count: int) -> None:\n self.format = struct_format\n self.address = register_address\n self.count = count\n self.array_id = \"_structarray{}\".format(register_address)\n\n def __get__(\n self,\n obj,\n objtype=None,\n ) -> _BoundStructArray:\n # We actually can't handle the indexing ourselves due to\n # data descriptor limits. So, we return\n # an object that can instead. This object is bound to the\n # object passed in here by its\n # initializer and then cached on the object itself. That way its lifetime is tied to the\n # lifetime of the object itself.\n if not hasattr(obj, self.array_id):\n setattr(\n obj,\n self.array_id,\n _BoundStructArray(obj, self.address, self.format, self.count),\n )\n return getattr(obj, self.array_id)\n","repo_name":"jposada202020/MicroPython_PCA9685","sub_path":"micropython_pca9685/i2c_helpers.py","file_name":"i2c_helpers.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"21269864207","text":"import string\n\nclass Tile:\n rows = 32\n columns = 32\n field = [[0 for j in range(32)] for i in range(32)]\n\n def __init__(self, character, row, column):\n self.character = character\n self.row = row\n self.column = column\n\n def tile_character(self, char_index= 0):\n character_string = string.printable\n self.character = character_string[char_index]\n # return the printable-character lookup table so print_field can index into it\n return character_string\n\n def print_field(self):\n for j in range(len(self.field)):\n for i in range(len(self.field[j])):\n print(self.tile_character()[self.field[j][i]], ' ', end='')\n print() # starts a new row\n\ntest_tile = Tile(0,16,16)\nTile.print_field(test_tile)\n\n","repo_name":"PdxCodeGuild/20180116-FullStack-Day","sub_path":"Code/Maggie/180130_ascii.py","file_name":"180130_ascii.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"73"} +{"seq_id":"22574172579","text":"name = input('Name: ')\n\ntry:\n f = open(name, 'r')\n\n lines = 0\n comments = 0\n\n for s in f:\n lines += 1\n if '#' in s:\n comments += 1\n f.close()\n\n print(f'{100 * comments / lines:.2f}%')\nexcept FileNotFoundError:\n print(f'Cannot open \"{name}\".')\n","repo_name":"StefanBjornander/Languages","sub_path":"Python/Python2022/Uppgift11_2.py","file_name":"Uppgift11_2.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35118576007","text":"import copy\nfrom collections import Counter\n\n\ndef input_file(filename):\n nodes = {}\n with open(filename, 'rt') as file:\n for line in file: # loop over each line\n line_string = (line.strip().split('-'))\n\n if line_string[0] in nodes:\n nodes[line_string[0]].append(line_string[1])\n else:\n nodes[line_string[0]] = []\n nodes[line_string[0]].append(line_string[1])\n\n if line_string[1] in nodes:\n nodes[line_string[1]].append(line_string[0])\n else:\n nodes[line_string[1]] = []\n nodes[line_string[1]].append(line_string[0])\n print(nodes)\n return nodes\n\n\ndef seek_paths(nodes, current_route, node_key, 
possible_routes):\n for node in nodes[node_key]:\n if node == 'start':\n continue\n if node == 'end':\n current_route.append(node)\n possible_routes.append(current_route)\n continue\n if (node.islower() and node not in current_route) or node.isupper():\n copied_current_route = copy.deepcopy(current_route)\n copied_current_route.append(node)\n seek_paths(nodes, copied_current_route, node, possible_routes)\n\n\ndef seek_paths_improved(nodes, current_route, node_key, possible_routes):\n for node in nodes[node_key]:\n counter = Counter(current_route)\n may_visit = small_cave_counter(dict(counter))\n if node == 'start':\n continue\n if node == 'end':\n current_route.append(node)\n possible_routes.append(current_route)\n continue\n if node.isupper():\n copy_and_continue(current_route, node, nodes, possible_routes)\n if node.islower():\n if node not in current_route:\n copy_and_continue(current_route, node, nodes, possible_routes)\n elif counter[node] < 2 and may_visit:\n copy_and_continue(current_route, node, nodes, possible_routes)\n\n\ndef copy_and_continue(current_route, node, nodes, possible_routes):\n copied_current_route = copy.deepcopy(current_route)\n copied_current_route.append(node)\n seek_paths_improved(nodes, copied_current_route, node, possible_routes)\n\n\ndef small_cave_counter(visited_amount):\n filtered_visited_amount_by_lower = {k: v for (k, v) in visited_amount.items() if k.islower() and (k != 'end' and k != 'start')}\n filtered_visited_amount_by_value = {k: v for (k, v) in filtered_visited_amount_by_lower.items() if v > 1}\n return True if len(filtered_visited_amount_by_value) == 0 else False\n\n\ndef part_one(nodes):\n possible_routes = []\n current_route = ['start']\n seek_paths(nodes, current_route, 'start', possible_routes)\n # report the number of routes found, mirroring part_two\n print(len(possible_routes))\n\n\ndef part_two(nodes):\n possible_routes = []\n current_route = ['start']\n seek_paths_improved(nodes, current_route, 'start', possible_routes)\n print(len(possible_routes))\n\n\ndef main():\n nodes = input_file('input.txt')\n part_one(nodes)\n part_two(nodes)\n\n\nmain()\n","repo_name":"JunDP9/aoc","sub_path":"day12/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16059277644","text":"from aiogram.types import ReplyKeyboardMarkup, KeyboardButton, WebAppInfo, ReplyKeyboardRemove\n\nweb_app_markup = ReplyKeyboardMarkup(\n row_width=1,\n resize_keyboard=True,\n one_time_keyboard=True,\n keyboard=[[KeyboardButton(text='Выбор персонажа', web_app=WebAppInfo(url='https://ewynona.github.io/'))]]\n)\n\nremove_markup = ReplyKeyboardRemove()\n","repo_name":"ewynona/telegram_ai","sub_path":"app/telegram_ai/bot/keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5432040057","text":"import time\n\n\ndef setCoveringProblem(universe,subSets,costs):\n\n #set used to save the selected subsets\n selected = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n #sets that represent the elements that are not used yet\n notUsed = universe.copy()\n subSetsCopy = subSets.copy()\n costCopy = costs.copy()\n \n #value to store the total cost\n totalCost = 0\n \n #verifies if notUsed is empty or not, so the cycle can run until then\n while notUsed:\n #variables definition\n bestSubset = None\n bestRatio = -0.1\n bestPos = -1\n print(\"largo notUsed: \",len(notUsed))\n print(notUsed)\n 
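# Greedy step: scan the remaining subsets and keep the one with the highest\n # cost ratio that still covers at least one unused element.\n 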
for i in range(len(subSetsCopy)):\n #print(subSetsCopy[i])\n localCost = costCopy[i]\n costRatio = localCost/ len(subSets)\n \n if costRatio > bestRatio:\n for x in subSetsCopy[i]:\n if x in notUsed:\n bestSubset = subSetsCopy[i]\n bestRatio = costRatio\n bestPos = i\n else:\n break\n #save the best subset of the iteration\n selected[bestPos]=1\n #selected.append(bestSubset[0])\n #add the cost of the subset to the total\n totalCost += costs[bestPos]\n '''\n print(\"best subset: \",bestSubset)\n print(\"cost of the subset: $\",costs[bestPos])\n print(\"total cost rn: $\",totalCost)\n print()\n time.sleep(1)\n '''\n #removing the values from the arrays to disregard them in the next iterations\n for i in range(len(bestSubset)):\n for x in notUsed:\n if(x == bestSubset[i]):\n notUsed.remove(bestSubset[i])\n break\n\n for x in range(len(subSetsCopy)):\n if(subSetsCopy[x][0] == bestSubset[0]):\n subSetsCopy.pop(x)\n costCopy.pop(x)\n break\n return selected","repo_name":"mirkowoo/IO-SCP","sub_path":"GreedySCP.py","file_name":"GreedySCP.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10850845844","text":"from threading import Thread\nimport time\nimport sys, getopt\nimport os\nimport os.path\nimport subprocess\nimport re\nimport base64\nfrom datetime import datetime\nfrom collections import Counter\nimport urllib.parse\nfrom sireader2 import SIReader, SIReaderReadout, SIReaderControl, SIReaderException, SIReaderTimeout, SIReaderCardChanged\nfrom LongRunningClass import LongRunningClass\n\nTWELVE_HOURS_IN_SECONDS = (12 * 3600)\n\nclass si_stick_contents:\n def __init__(self):\n self.stick = None\n self.start_timestamp = 0\n self.finish_timestamp = 0\n self.controls_list = None\n self.bad_download = False\n\n def set_bad_download(self):\n self.bad_download = True\n\n def set_stick(self, stick):\n self.stick = stick\n\n def set_stick_info(self, start=0, finish=0, controls_list = None):\n self.start_timestamp = start\n self.finish_timestamp = finish\n self.controls_list = controls_list\n\n\nclass si_processor(LongRunningClass):\n\n def __init__(self, reader, stick_callback, status_update_callback):\n self.si_reader = reader\n self.verbose = False\n self.debug = False\n self.stick_callback = stick_callback\n self.status_update_callback = status_update_callback\n self.force_exit_called = False\n \n def get_and_log_results_string(self, read_stick, event):\n upload_entry_list = [ \"{:d};{:d}\".format(read_stick.stick, read_stick.start_timestamp) ]\n upload_entry_list.append(\"start:{:d}\".format(read_stick.start_timestamp))\n upload_entry_list.append(\"finish:{:d}\".format(read_stick.finish_timestamp))\n upload_entry_list.extend(read_stick.controls_list)\n qr_result_string = \",\".join(upload_entry_list)\n if self.verbose:\n print (f\"Got results {qr_result_string} for si_stick {read_stick.stick}.\")\n \n with open(\"{}-results.log\".format(event), \"a\") as LOGFILE:\n LOGFILE.write(qr_result_string + \"\\n\")\n \n return qr_result_string\n \n \n def start(self):\n sireader_thread = Thread(target=self.sireader_main)\n sireader_thread.start()\n \n def sireader_main(self):\n if self.force_exit_called: return\n \n loop_count = 0\n while True:\n finish_adjusted = False\n if self.force_exit_called: return\n \n read_stick = self.si_reader.read_results()\n \n if ((loop_count % 60) == 0):\n if self.status_update_callback != None:\n self.status_update_callback(self)\n \n 
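# A non-empty stick number means a card was read; hand the parsed result to the callback.\n 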
if read_stick.stick != None:\n self.stick_callback(self, read_stick)\n \n time.sleep(1)\n loop_count += 1\n\n def force_exit(self):\n super().force_exit()\n self.force_exit_called = True\n\nclass generic_si_reader:\n def __init__(self):\n pass\n\n def get(self):\n pass\n\n def read_results(self):\n pass\n\nclass real_si_reader(generic_si_reader):\n\n def __init__(self):\n self.verbose = False\n self.debug = False\n self.serial_port_name = \"\"\n self.si_reader = None\n\n def set_serial_port(self, serial_port_name):\n self.serial_port_name = serial_port_name\n\n ###############################################################\n def get(self):\n # connect to base station, the station is automatically detected,\n # if this does not work, give the path to the port as an argument\n # see the pyserial documentation for further information.\n try:\n if (self.serial_port_name != \"\"):\n si = SIReaderReadout(port=self.serial_port_name)\n else:\n si = SIReaderReadout()\n except SIReaderException as sire:\n si = None\n if self.verbose:\n print (f\"Cannot find si download station, reason: {sire}\")\n \n self.si_reader = si\n return si\n \n \n #################################################################\n def read_results(self):\n # wait for a card to be inserted into the reader\n try:\n if not self.si_reader.poll_sicard():\n return(si_stick_contents())\n except SIReaderException as sire:\n self.si_reader.ack_sicard()\n #print (f\"Bad card download, error {sire}.\")\n return (si_stick_contents())\n \n # some properties are now set\n card_number = self.si_reader.sicard\n card_type = self.si_reader.cardtype\n \n # read out card data\n try:\n card_data = self.si_reader.read_sicard()\n except (SIReaderException, SIReaderTimeout, SIReaderCardChanged) as sire:\n #print (f\"Bad card ({card_number}) download, error {sire}.\")\n bad_stick_values = si_stick_contents()\n bad_stick_values.set_stick(card_number)\n bad_stick_values.set_bad_download()\n return(bad_stick_values)\n \n # beep\n self.si_reader.ack_sicard()\n \n # Wait for the card to be removed from the reader\n while not self.si_reader.poll_sicard():\n time.sleep(1)\n \n \n # Convert to the format expected by the rest of the program\n # Check for old sticks which only use 12 hour time, which have some trouble if\n # the event starts before noon and ends after noon\n if card_data['start'] != None:\n start_timestamp = self.get_24hour_timestamp(card_data['start'])\n else:\n start_timestamp = 0\n \n if card_data['finish'] != None:\n finish_timestamp = self.get_24hour_timestamp(card_data['finish'])\n else:\n finish_timestamp = 0\n if self.debug: print (f\"No finish timestamp on stick {card_number} - please scan finish and then download.\")\n \t\n array_of_punches = []\n if ((finish_timestamp < start_timestamp) and (finish_timestamp < TWELVE_HOURS_IN_SECONDS)):\n # Anomaly detected! 
Adjust any timestamp less than the start forward by 12 hours\n # First convert the tuples of datetime objects to just a value in seconds\n # Then adjust the appropriate entries (those less than the start timestamp) by 12 hours\n # Then format it as : separated string items\n if (finish_timestamp != 0): finish_timestamp += TWELVE_HOURS_IN_SECONDS\n orig_punches = []\n new_punches = []\n orig_punches = map(lambda punch: (punch[0], self.get_24hour_timestamp(punch[1])), card_data['punches'])\n new_punches = map(lambda punch: (punch[0], punch[1] + TWELVE_HOURS_IN_SECONDS if (punch[1] < start_timestamp) else punch[1]), orig_punches)\n array_of_punches = map(lambda punch: \"{}:{}\".format(str(punch[0]), str(punch[1])), new_punches)\n if self.verbose: print (f\"Adjusting some times for {card_number} by twelve hours.\")\n else:\n array_of_punches = map(lambda punch: \"{}:{}\".format(str(punch[0]), str(self.get_24hour_timestamp(punch[1]))), card_data['punches'])\n \n #print \"Here is the array of punches {}.\".format(array_of_punches)\n \n entry_to_return = si_stick_contents()\n entry_to_return.set_stick(card_number);\n entry_to_return.set_stick_info(start = start_timestamp, finish = finish_timestamp, controls_list = list(array_of_punches))\n \n return(entry_to_return)\n\n ###############################################################\n def get_24hour_timestamp(self, punch_time):\n # Take a datetime object, from reading the si card, and convert to seconds since midnight\n #print \"Datetime object looks like: {}\".format(dir(punch_time))\n #return (datetime.timestamp(punch_time))\n return ((punch_time.hour * 3600) + (punch_time.minute * 60) + punch_time.second)\n \n\nclass fake_si_reader(generic_si_reader):\n def __init__(self):\n self.simulated_entries = []\n self.verbose = False\n \n def initialize(self, filename_of_fake_results):\n \n if filename_of_fake_results != \"\":\n filename = filename_of_fake_results\n else:\n filename = \"fake_entries_for_manage_event\"\n \n try:\n with open(filename, \"r\") as FAKE_ENTRIES:\n for line in FAKE_ENTRIES:\n line = line.strip()\n if line.startswith(\"#\"): # Ignore comment lines\n continue\n if line == \"\":\n self.simulated_entries.append(si_stick_contents())\n else:\n value = line.split(\",\")\n #print (f\"The line is --{line}--\")\n #print (f\"It has {len(value)} entries.\")\n first_entry_pieces = value[0].split(\";\")\n if (len(first_entry_pieces) > 1):\n # log entry format from a real SI unit download\n # 503555;0,start:0,finish:0\n # 24680;1000,start:1000,finish:2000,151:1100,152:1500,155:1600,151:1680\n si_stick = int(first_entry_pieces[0])\n start = int(value[1].split(\":\")[1])\n finish = int(value[2].split(\":\")[1])\n else:\n # Entry format easier for a human to enter\n # stick,start,finish,controls\n # 24680,1000,2000,151:1100,152:1500,155:1600,151:1680\n si_stick = int(value[0])\n start = int(value[1])\n finish = int(value[2])\n \n fake_stick = si_stick_contents()\n fake_stick.set_stick(si_stick)\n if (len(value) > 3):\n fake_stick.set_stick_info(start = start, finish = finish, controls_list = value[3:])\n else:\n fake_stick.set_stick_info(start = start, finish = finish, controls_list = [])\n self.simulated_entries.append(fake_stick)\n except FileNotFoundError:\n # Fine if the file is not there, we'll just do nothing\n if self.verbose:\n print (f\"File {filename} not found, no entries available.\")\n \n if self.verbose:\n print (f\"Found {len(self.simulated_entries)} entries to process.\")\n\n\n def get(self):\n return self\n \n def 
read_results(self):\n if len(self.simulated_entries) > 0:\n return self.simulated_entries.pop()\n else:\n return si_stick_contents()\n","repo_name":"markaoconnell/QRienteering","sub_path":"OMeetWithMemberList/MeetSW/si_reader.py","file_name":"si_reader.py","file_ext":"py","file_size_in_byte":10114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"42481358866","text":"import regla\nfrom sys import maxsize\n\n\n\n#Algoritmo BFS (Breadth-First Search)\ndef BFS(inicio, fin, tablero):\n frontera = []\n visitado = set()\n frontera.append(inicio)\n visitado.add(inicio)\n while frontera != []:\n padre = frontera.pop(0)\n if (padre == fin):\n return True\n\n hijos = get_sucesores(padre)\n for hijo in hijos:\n if not (hijo in visitado):\n if not bloqueado(hijo[0], hijo[1], padre[0], padre[1], tablero):\n frontera.append(hijo)\n visitado.add(hijo)\n return False\n\ndef PathMaker(origen, actual):\n tot_camino = [actual]\n while actual in origen.keys():\n actual = origen[actual]\n tot_camino.append(actual)\n return tot_camino\n\ndef camino(inicio, fin, tablero):\n frontera = []\n visitado = set()\n frontera.append(inicio)\n visitado.add(inicio)\n distancia = {}\n distancia[inicio] = 0\n while frontera != []:\n padre = frontera.pop(0)\n if (padre == fin):\n return distancia[fin]\n\n hijos = get_sucesores(padre)\n for hijo in hijos:\n if not (hijo in visitado):\n if not bloqueado(hijo[0], hijo[1], padre[0], padre[1], tablero):\n frontera.append(hijo)\n distancia[hijo] = distancia[padre] + 1\n visitado.add(hijo)\n\n return maxsize\n\ndef bloqueado(x1, y1, x2, y2, tablero):\n for pared in tablero.paredes:\n if (pared.orientacion == \"horizontal\"):\n if (y1 < y2):\n if (pared.top_l.y == y1 and (pared.top_l.x == x1 or (pared.top_l.x + 1) == x1)):\n return True\n if (y1 > y2):\n if (pared.top_l.y == y2 and (pared.top_l.x == x1 or (pared.top_l.x + 1) == x1)):\n return True\n if (pared.orientacion == \"vertical\"):\n if (x1 < x2):\n if (pared.top_l.x == x1 and (pared.top_l.y == y1 or (pared.top_l.y + 1) == y1)):\n return True\n if (x1 > x2):\n if (pared.top_l.x == x2 and (pared.top_l.y == y2 or (pared.top_l.y + 1) == y2)):\n return True\n return False\n\ndef get_sucesores(padre):\n hijos = set()\n p0 = padre[0]\n p1 = padre[1]\n x1 = padre[0] - 1\n x2 = padre[0] + 1\n y1 = padre[1] - 1\n y2 = padre[1] + 1\n if (x1 >= 0):\n hijos.add((x1, p1))\n if (y1 >= 0):\n hijos.add((p0, y1))\n if (x2 <= 8):\n hijos.add((x2, p1))\n if (y2 <= 8):\n hijos.add((p0, y2))\n return hijos","repo_name":"Digit314/TP_Quoridor-2020","sub_path":"Quoridor-CA-master/busqueda.py","file_name":"busqueda.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29829871996","text":"\n\"\"\"\n\nPerfusion Slic demo\n\n\nRun demo:\n\n>>> python perfusion_slic_demo.py\n\nUsing QIN Breast data as an example. 
This data comes in matlab .mat format.\n\n\nTo load a 4D nifti image instead use:\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfile1 = \"dce_mri.nii\"\nimg = nib.load(file1)\n\n# Determining ratio of voxel sizes\nhdr = img.get_header()\nraw1 = hdr.structarr\npixdim = raw1['pixdim']\n\nvox_size = np.abs(np.around([pixdim[1], pixdim[2], pixdim[3]], 2))\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n#Image\nimg1 = np.array(img.get_data())\n\nAuthor: Benjamin Irving (20141124)\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport nibabel as nib\nfrom time import time\nfrom perfusionslic import PerfSLIC\nimport h5py\nstart1 = time()\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Load Nifti data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# Folders\n\n# file1 = \"dce_mri.nii\"\n# img = nib.load(file1)\n#\n# # Determining ratio of voxel sizes\n# hdr = img.get_header()\n# raw1 = hdr.structarr\n# pixdim = raw1['pixdim']\n#\n# vox_size = np.abs(np.around([pixdim[1], pixdim[2], pixdim[3]], 2))\n#\n# #Image\n# img1 = np.array(img.get_data())\n\n# ~~~~~~~~~~~~~~~~~~ Load .mat data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nf = h5py.File('QIN-Breast-DCE-MRI-BC10-V1.mat', 'r')\nimg = f.get('da/data')\nhdr = f.get('da/hdr')\nSliceThickness = hdr[\"SliceThickness\"][0]\nPixelSpacing = hdr[\"PixelSpacing\"][0]\nvox_size = np.around([PixelSpacing[0], PixelSpacing[1], SliceThickness[0]])\nimg1 = np.transpose(img, [3, 2, 1, 0])\n\n# Load reconstructed roi\n# f2 = h5py.File('QIN-Breast-DCE-MRI-BC10-V1roi.mat', 'r')\n# roi = f2.get('roi1')\n# roi1 = np.transpose(roi, [2, 1, 0])\n\n# Select a sub-region containing tumour (for speed and memory reasons)\n\nimg1 = img1[20:160, 35:180, :, :]\n# roi1 = roi1[20:160, 35:180, :]\n\n# ~~~~~~~~~~~~~~~~~~ Running Perfusion SLIC ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nprint(\"Initialise the perf slic class\")\n\nps1 = PerfSLIC(img1, vox_size)\n\nprint(\"Normalising image...\")\n\nps1.normalise_curves()\n\nprint(\"Extracting features...\")\n\nps1.feature_extraction(n_components=3)\n\nprint(\"Extracting supervoxels...\")\n\nsegments = ps1.supervoxel_extraction(compactness=0.02, segment_size=1000)\n\n# Plot the PCA modes\nps1.plot_pca_modes()\n\n# Plot a static version of the supervoxels\nps1.plotstatic()\n\n# plot a dynamic version of the image in the background\nps1.plotdynamic(img_slice=54, save_animation=True)\n\n# ~~~~~~~~~~~~~~~~~~~~~ Saving region ~~~~~~~~~~~~~~~~~~~~~~~~~~~\ntime_complete = time() - start1\n#\nprint(\"Saving a nifti version of the extracted segments \", end=\"\")\n\nfile1 = 'slic_regions.nii'\n\nrb1 = np.array(segments, dtype=np.int)\nimg = nib.Nifti1Image(rb1, np.eye(4))\nimg.update_header()\nimg.to_filename(file1)\n\n\nprint(\"Done\")\n\n\n\n\n","repo_name":"benjaminirving/perfusion-slic","sub_path":"perfusion_slic_demo.py","file_name":"perfusion_slic_demo.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"73"} +{"seq_id":"35424856637","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#!/usr/bin/env python\n# coding: utf-8\n\n'''\nrun ./train.py --sdept ../../trained_sde_model/4fold_3_2_layer_model.pt\n'''\n\n# 0 define backend\nimport sys, os, time\nimport argparse\nimport glob\n\n# %env DDE_BACKEND=tensorflow.compat.v1\n# %env XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/home/cyan3/miniforge/envs/tf\n\nos.environ['DDE_BACKEND'] = \"pytorch\" # v2\nos.environ['XLA_FLAGS'] = 
\"--xla_gpu_cuda_data_dir=/usr/local/home/cyan3/miniforge/envs/tf\"\n\n# https://stackoverflow.com/questions/68614547/tensorflow-libdevice-not-found-why-is-it-not-found-in-the-searched-path\n# this directory has /nvvm/libdevice/libdevice.10.bc\n\nprint(os.environ['DDE_BACKEND'])\n\nimport torch\ntorch.set_printoptions(precision=3)\ntorch.set_printoptions(sci_mode=False)\nprint(torch.cuda.is_available())\nprint(torch.cuda.device_count())\nprint(torch.cuda.get_device_name(0))\nprint(torch.version.cuda)\nprint(torch.cuda.current_device())\ntorch.cuda.set_device(0)\n\n# https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html\ntry:\n torch.jit.enable_onednn_fusion(True)\nexcept:\n print(\"no onednn\")\n\ncuda0 = torch.device('cuda:0')\ncpu = torch.device('cpu')\ndevice = cuda0\n\nimport deepxde as dde\nimport numpy as np\nfrom numpy import linalg as LA\nimport math\nimport matplotlib.pyplot as plt\nimport pylab\nfrom os.path import dirname, join as pjoin\nfrom scipy import stats\nimport scipy.io\nfrom scipy.stats import truncnorm, norm\nfrom scipy.optimize import linprog\nfrom scipy import sparse\nfrom scipy.stats import multivariate_normal\nfrom scipy.spatial.distance import cdist\nif dde.backend.backend_name == \"pytorch\":\n exp = dde.backend.torch.exp\nelse:\n from deepxde.backend import tf\n exp = tf.exp\nimport cvxpy as cp\nimport numpy as np\nfrom scipy.linalg import solve_discrete_are\nfrom scipy.linalg import sqrtm as sqrtm2\n\n######################################\n\nimport torch\nfrom torch.autograd import Function\nimport numpy as np\nimport scipy.linalg\n\nsys.path.insert(0,'..')\nfrom layers import *\n\nsde_path = './sde/T_t200_2D/'\nsys.path.insert(0,sde_path)\nfrom trained_sde_model import *\n\nfrom common import *\n\n\n# In[2]:\n\n\ndef tcst1(x, y, network_f, network_g, args):\n psi, rho, u1, u2 = y[:, 0:1], y[:, 1:2], y[:, 2:3], y[:, 3:4]\n\n # x = c10, c12, t\n\n # psi eq (4a), rho eq (4b), u1 eq (6), u2 eq (6)\n dpsi_c10 = dde.grad.jacobian(psi, x, j=0)\n dpsi_c12 = dde.grad.jacobian(psi, x, j=1)\n dpsi_t = dde.grad.jacobian(psi, x, j=2)\n\n hpsi_c10 = dde.grad.hessian(psi, x, i=0, j=0)\n hpsi_c12 = dde.grad.hessian(psi, x, i=1, j=1)\n\n drho_t = dde.grad.jacobian(rho, x, j=2)\n\n drho_c10 = dde.grad.hessian(rho, x, i=0, j=0)\n drho_c12 = dde.grad.hessian(rho, x, i=1, j=1)\n\n # d1\n leaf_x = x[:, 0:2].detach()\n leaf_u1_u2 = y[:, 2:4].detach()\n leaf_t = x[:, 2].detach().unsqueeze(1)\n\n ###########################################\n\n leaf_vec = torch.cat(\n (\n x[:, 0:2], # leaf_x,\n # i think this makes sense since we\n # take jacobian of it w.r.t x for divergence\n y[:, 2:4],\n x[:, 2].unsqueeze(1),\n ),\n dim=1)\n leaf_vec = leaf_vec.requires_grad_(True)\n\n d1 = network_f.forward(leaf_vec)\n d2 = network_g.forward(leaf_vec)**2 / 2 # elementwise\n # divergence terms\n d_rhod1_c10 = dde.grad.jacobian(rho*d1[:, 0], x, j=0)\n d_rhod1_c12 = dde.grad.jacobian(rho*d1[:, 1], x, j=1)\n\n ###########################################\n\n # divergence = trace of jacobian\n # divergence is a scalar\n\n u_term = torch.mul(dpsi_c10.squeeze(), d1[:, 0])\\\n + torch.mul(dpsi_c12.squeeze(), d1[:, 1])\\\n + torch.mul(d2[:, 0], hpsi_c10.squeeze())\\\n + torch.mul(d2[:, 1], hpsi_c12.squeeze()).unsqueeze(dim=0)\n\n # import ipdb; ipdb.set_trace()\n\n d_uterm_du1_du2 = torch.autograd.grad(\n outputs=u_term,\n inputs=leaf_vec,\n grad_outputs=torch.ones_like(u_term),\n retain_graph=True)[0]\n\n l_u1 = u1 - d_uterm_du1_du2[:, 2]\n l_u2 = u2 - d_uterm_du1_du2[:, 3]\n if args.bound_u > 
0:\n # print(\"bounding u\")\n l_u1_bound = -torch.sum(u1[u1 < -0.005]) +\\\n torch.sum(u1[u1 > 0.005]) \n l_u2_bound = -torch.sum(u2[u2 < -0.005]) +\\\n torch.sum(u2[u2 > 0.005])\n\n l_u1 += args.bound_u * l_u1_bound\n l_u2 += args.bound_u * l_u2_bound\n\n return [\n -dpsi_t + 0.5 * (u1**2 + u2**2)\\\n - (dpsi_c10 * d1[:, 0] + dpsi_c12 * d1[:, 1])\\\n - (d2[:, 0] * hpsi_c10 + d2[:, 1] * hpsi_c12),\n\n -drho_t - (d_rhod1_c10 + d_rhod1_c12)\\\n + (d2[:, 0] * drho_c10 + d2[:, 1] * drho_c12),\n\n l_u1,\n l_u2\n ]\n\ndef get_model(\n d,\n N,\n batchsize,\n model_type,\n activations, # sigmoid, tanh\n mu_0,\n sigma_0,\n mu_T,\n sigma_T,\n T_t,\n args,\n network_f,\n network_g,\n optimizer=\"adam\",\n init=\"Glorot normal\",\n train_distribution=\"Hammersley\",\n timemode=0,\n ni=0,\n epsilon=1e-3\n ):\n M = N**d\n\n linspaces = []\n for i in range(d):\n linspaces.append(np.transpose(\n np.linspace(args.state_bound_min, args.state_bound_max, N))\n )\n\n linspace_tensors = []\n for i in range(d):\n t = torch.from_numpy(\n linspaces[i]).requires_grad_(False)\n t = t.to(device)\n linspace_tensors.append(t)\n\n meshes = np.meshgrid(*linspaces)\n mesh_vectors = []\n for i in range(d):\n mesh_vectors.append(meshes[i].reshape(M,1))\n state = np.hstack(tuple(mesh_vectors))\n\n ######################################\n\n rv0 = multivariate_normal(mu_0, sigma_0 * np.eye(d))\n rvT = multivariate_normal(mu_T, sigma_T * np.eye(d))\n\n rho0=rv0.pdf(state)\n rho0 = np.float32(rho0)\n\n rhoT= rvT.pdf(state)\n rhoT = np.float32(rhoT)\n\n ######################################\n\n time_0=np.hstack((\n state,\n T_0*np.ones((len(mesh_vectors[0]), 1))\n ))\n \n if batchsize is not None:\n rho_0_BC = dde.icbc.PointSetBC(\n time_0,\n rho0[..., np.newaxis],\n component=1,\n batch_size=batchsize,\n shuffle=True\n )\n else:\n rho_0_BC = dde.icbc.PointSetBC(\n time_0,\n rho0[..., np.newaxis],\n component=1,\n )\n\n ######################################\n\n time_t=np.hstack((\n state,\n T_t*np.ones((len(mesh_vectors[0]), 1))\n ))\n \n if batchsize is not None:\n rho_T_BC = dde.icbc.PointSetBC(\n time_t,\n rhoT[..., np.newaxis],\n component=1,\n batch_size=batchsize,\n shuffle=True\n )\n else:\n rho_T_BC = dde.icbc.PointSetBC(\n time_t,\n rhoT[..., np.newaxis],\n component=1,\n )\n\n ######################################\n\n geom=dde.geometry.geometry_3d.Cuboid(\n [args.state_bound_min]*d,\n [args.state_bound_max]*d)\n timedomain = dde.geometry.TimeDomain(0., T_t)\n\n geomtime = dde.geometry.GeometryXTime(geom, timedomain)\n\n bif = samples_between_initial_and_final\n if args.bif > 0:\n bif = args.bif\n\n batchsize2 = None\n if len(args.batchsize2) > 0:\n batchsize2 = int(args.batchsize2)\n\n # dde.data.TimePDE\n data = WASSPDE(\n geomtime,\n lambda x, y: tcst1(\n x, y, network_f, network_g, args),\n [rho_0_BC,rho_T_BC],\n num_domain=bif,\n num_initial=ni, # initial_samples,\n train_distribution=train_distribution,\n domain_batch_size=batchsize2\n )\n\n # d+1 inputs: + t\n # 5 outputs: 2 eq\n net = dde.nn.FNN(\n [d+1] + [70] *4 + [4],\n # \"sigmoid\",\n activations,\n init\n # \"zeros\",\n )\n model = model_types[model_type](data, net)\n\n ######################################\n\n losses=[\n \"MSE\",\"MSE\", \"MSE\", \"MSE\",\n \"MSE\",\n \"MSE\",\n ]\n # loss functions are based on PDE + BC: eq outputs, BCs\n\n model.compile(\"adam\", lr=1e-3,loss=losses)\n\n # import ipdb; ipdb.set_trace()\n\n return model, meshes\n\nif __name__ == '__main__':\n\n # In[3]:\n\n\n sde = SDE()\n # state path to model information file\n # 
load model parameters\n\n files = glob.glob(\n sde_path + \"/*.pt\", \n recursive = False)\n assert(len(files) == 1)\n print(\"using model: \", files[0])\n sde.load_state_dict(torch.load(files[0]))\n\n if torch.cuda.is_available():\n print(\"Using GPU.\")\n sde = sde.to(cuda0)\n # set model to evaluation mode\n sde.eval()\n\n\n # In[4]:\n\n\n d = 2\n N = 15\n batchsize = None\n\n mu_0 = [0.35, 0.35]\n\n sigma = 0.1\n T_t = 200.0\n bcc = np.array([0.41235, 0.37605])\n\n class Container(object):\n state_bound_min = 0.1\n state_bound_max = 0.6\n bound_u = 0\n \n bif = 100000\n batchsize2 = \"5000\"\n batch2_period = 5000\n args = Container()\n\n num_epochs = 15000\n de = 1000\n\n\n # In[5]:\n\n\n model, meshes = get_model(\n d,\n N,\n batchsize,\n 0,\n \"tanh\",\n\n mu_0,\n sigma,\n\n bcc,\n sigma,\n\n T_t,\n args,\n sde.network_f,\n sde.network_g,\n )\n\n print(model)\n\n\n # In[6]:\n\n\n resampler_cb = PDEPointResampler2(\n pde_points=True,\n bc_points=False,\n period=args.batch2_period)\n ck_path = \"./tt200_2d_mse\"\n\n start = time.time()\n losshistory, train_state = model.train(\n iterations=num_epochs,\n display_every=de,\n callbacks=[resampler_cb],\n model_save_path=ck_path)\n end = time.time()\n\n dde.saveplot(losshistory, train_state, issave=True, isplot=True)\n model_path = model.save(ck_path)\n print(model_path)\n\n # \n","repo_name":"cyan-at/gradschool","sub_path":"research/tcst/training/iman/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"41512063177","text":"from django.urls import path\nfrom . import views\n\napp_name = 'resultApp'\n\nurlpatterns = [\n path('danger/', views.danger_result_view, name='danger_result'),\n path('safe/', views.safe_result_view, name='safe_result'),\n path('send_email/', views.send_email, name='send_email'),\n]","repo_name":"oblsoun/VOVsnap","sub_path":"resultApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27651727805","text":"from constant import *\nimport datetime\nfrom tabulate import *\n\n\nborder = \"+-------------------------+\"\n\ndef print_security_algo_supported(enb_cipher_algo_supported, integrityProtAlgorithm_eNB, epc_cipher_algo_supported, epc_integ_algo_supported, preferred_algorithms):\n print(\"\\n\" + border)\n print(\"| eNB Security Algorithms |\")\n print(border)\n print(\"| {:<28} | {:<}\".format(\"eNB_cipheringAlgorithm_supported\", str(enb_cipher_algo_supported)))\n print(\"| {:<28} | {:<}\".format(\"eNB_integrityAlgorithm_supported\", str(integrityProtAlgorithm_eNB)))\n print(\"| {:<28} | {:<}\".format(\"eNB_preferred_cipheringAlgorithm\", str(preferred_algorithms[0])))\n print(\"| {:<28} | {:<}\".format(\"eNB_preferred_integrityAlgorithm\", str(preferred_algorithms[1])))\n print(\"\\n\" + border)\n print(\"| EPC Security Algorithms |\")\n print(border)\n print(\"| {:<28} | {:<}\".format(\"EPC_cipheringAlgorithm_supported\", str(epc_cipher_algo_supported)))\n print(\"| {:<28} | {:<}\".format(\"EPC_integrityAlgorithm_supported\", str(epc_integ_algo_supported)))\n print(\"| {:<28} | {:<}\".format(\"EPC_preferred_cipheringAlgorithm\", str(preferred_algorithms[2])))\n print(\"| {:<28} | {:<}\".format(\"EPC_preferred_integrityAlgorithm\", str(preferred_algorithms[3])))\n print_table()\n\n\n\ndef print_table():\n print(\"\\n\\nThe table below provides a summary of 
the algorithm types and their corresponding descriptions.\")\n parameters = [\"Type\", \"Description\"]\n data = [\n [\"EEA0\", \"Null ciphering algorithm\"],\n [\"EEA1\", \"SNOW 3G\"],\n [\"EEA2\", \"AES\"],\n [\"EEA3\", \"ZUC\"]\n ]\n\n # Create the table using tabulate\n table = tabulate(data, headers=parameters, tablefmt=\"grid\")\n\n # Print the table\n print(table)\n\ndef print_cell_identity(mcc, mnc, cellReservedForOperatorUse, trackingAreaCode, cellIdentity, intraFreqReselection, cellBarred):\n print(\"\\n\\n\")\n print(border)\n print(\"| Cell identity |\")\n print(border)\n print(\"| {:<28} | {:<}\".format(\"mcc\", str(mcc)))\n print(\"| {:<28} | {:<}\".format(\"mnc\", str(mnc)))\n #print(\"| {:<28} | {:<}\".format(\"cellReservedForOperatorUse\", cellReservedForOperatorUse))\n print(\"| {:<28} | {:<}\".format(\"trackingAreaCode\", trackingAreaCode))\n print(\"| {:<28} | {:<}\".format(\"cellIdentity\", cellIdentity))\n #print(\"| {:<28} | {:<}\".format(\"intraFreqReselection\", intraFreqReselection))\n #print(\"| {:<28} | {:<}\".format(\"cellBarred\", cellBarred))\n \n\ndef print_header():\n current_time = datetime.datetime.now() \n print(f\"Starting 5GMap (https://github.com/chiacchius/5gmap) at {current_time.strftime('%Y-%m-%d %H:%M %Z')}\")\n print(\"It may take several minutes.\")\n print(\"\"\"\n╭─────────────────────────────────────────────────╮\n│ .------. ,----. ,--. ,--. ,---. ,------. │\n│ | .--.' ' .-./ | `.' | / O \\\\ | .--. ' │\n│ '---. \\\\ | | .---.| |'.'| || .-. || '--' | │\n│ .---' / ' '--' || | | || | | || | --' │\n│ `----' `------' `--' `--'`--' `--'`--' │\n╰─────────────────────────────────────────────────╯\n\"\"\")\n if REAL_TESTING:\n #manage imsi \n print(\"[5GMAP] Binding to Base Station\")\n\n else:\n\n print(\"[5GMAP] Simulation with srsran\")\n\n\ndef parse_file(file_path):\n with open(file_path, 'r') as file:\n current_section = None\n sections = {}\n lines = []\n rrc_uplink_messages = []\n rrc_downlink_messages = []\n nas_uplink_messages = []\n nas_downlink_messages = []\n\n for line in file:\n\n if line.startswith(\"MIB:\") | line.startswith(\"SIB1:\") | line.startswith(\"SIB2:\") | line.startswith(\"SIB3:\"):\n if current_section is not None:\n sections[current_section] = lines\n\n current_section = line[:-2]\n lines = \"\"\n\n elif line.startswith(\"UPLINK_RRC:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n rrc_uplink_messages.append(line[11:-1])\n\n elif line.startswith(\"DOWNLINK_RRC:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n rrc_downlink_messages.append(line[13:-1])\n \n elif line.startswith(\"UPLINK_NAS:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n nas_uplink_messages.append(line[11:-1])\n\n elif line.startswith(\"DOWNLINK_NAS:\"):\n if current_section is not None:\n sections[current_section] = lines\n current_section = None\n nas_downlink_messages.append(line[13:-1])\n\n else:\n lines = lines + line\n\n # Add the last section found\n if current_section is not None:\n sections[current_section] = lines\n\n sections[\"UPLINK_RRC\"] = rrc_uplink_messages\n sections[\"DOWNLINK_RRC\"] = rrc_downlink_messages\n sections[\"UPLINK_NAS\"] = nas_uplink_messages\n sections[\"DOWNLINK_NAS\"] = nas_downlink_messages\n return sections\n 
\n","repo_name":"chiacchius/5gmap","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3736374345","text":"# Ask the user for a number\r\nuser_input = input(\"Enter a number: \")\r\n\r\n# Convert the user input to an integer\r\nnumber = int(user_input)\r\n\r\n# Check if the number is odd or even\r\nif number % 2 == 0:\r\n print(f\"{number} is an even number.\")\r\nelse:\r\n print(f\"{number} is an odd number.\")","repo_name":"Tamrazrk/week-2-python-day-1-Exercise-xp","sub_path":"Odd Or Even.py","file_name":"Odd Or Even.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72268909675","text":"#! /usr/bin/python3\n\nimport os\nimport pdfrw\nimport PlayerCharacter as pc\nimport json\nimport register\n\ntemplatePath = 'template.pdf'\n\nannotMarker = '/Annots'\nfieldMarker = '/T'\nsubMarker = '/Subtype'\nwidgetMarker = '/Widget'\n\ndef writePDF(inputPath, outputPath, dataDict):\n\t\"\"\"Function to fill in the forms of a PDF with values from Dict.\n\t\n\t:param inputPath: Path of template PDF\n\t:param outputPath: Path to make new PDF at\n\t:param dataDict: Dictionary with keys with same name as form fields.\n\t\"\"\"\n\ttemplatePDF = pdfrw.PdfReader(inputPath)\n\tannotations = templatePDF.pages[0][annotMarker]\n\tfor annotation in annotations:\n\t\tif annotation[subMarker] == widgetMarker:\n\t\t\tif annotation[fieldMarker]:\n\t\t\t\tkey = annotation[fieldMarker][1:-1]\n\t\t\t\tif key in dataDict.keys():\n\t\t\t\t\tannotation.update(pdfrw.PdfDict(V='{}'.format(dataDict[key])))\n\n\tpdfrw.PdfWriter().write(outputPath, templatePDF)\n\n# Make a Character\nplayer = pc.PlayerCharacter()\nregister.registerOptions(player)\n\ndefaultData = {\n\t'CharacterName': player.name,\n\t'STR': player.pstr,\n\t'STRmod': player.strmod,\n\t'DEX': player.dex,\n\t'DEXmod ': player.dexmod,\n\t'CON': player.con,\n\t'CONmod': player.conmod,\n\t'INT': player.pint,\n\t'INTmod': player.intmod,\n\t'WIS': player.wis,\n\t'WISmod': player.wismod,\n\t'CHA': player.cha,\n\t'CHamod': player.chamod,\n\t'ClassLevel': player.classes,\n\t'Background': player.background,\n\t'PlayerName': player.pname,\n\t'Race ': player.race,\n\t'Alignment': player.alignment,\n\t'XP': player.xp,\n\t'AC': player.ac,\n\t'Initiative': player.init,\n\t'Speed': player.speed,\n\t'HPMax': player.hpmax,\n\t'HPCurrent': player.hpcurrent,\n\t'HPTemp': player.hptemp,\n\t'HDTotal': player.hptotal,\n\t'HD': player.hd,\n\t'PersonalityTraits ': player.ptraits,\n\t'Ideals': player.ideals,\n\t'Bonds': player.bonds,\n\t'Flaws': player.flaws,\n\t'ProficienciesLang': player.profs,\n\t'Features and Traits': player.features,\n\t'Equipment': \"\\n\".join(player.equipment),\n\t'CP': player.cp,\n\t'SP': player.sp,\n\t'EP': player.ep,\n\t'GP': player.gp,\n\t'PP': player.pp,\n\t'AttacksSpellcasting': player.attacks,\n\t'Wpn Name': player.wpn1,\n\t'Wpn Name 2': player.wpn2,\n\t'Wpn Name 3': player.wpn3,\n\t'Wpn1 AtkBonus': player.wpn1atk,\n\t'Wpn2 AtkBonus ': player.wpn2atk,\n\t'Wpn3 AtkBonus ': player.wpn3atk,\n\t'Wpn1 Damage': player.wpn1dmg,\n\t'Wpn2 Damage ': player.wpn2dmg,\n\t'Wpn3 Damage ': player.wpn3dmg,\n\t'Inspiration': player.inspir,\n\t'ProfBonus': player.prof,\n\t'ST Strength': player.ststr,\n\t'ST Dexterity': player.stdex,\n\t'ST Constitution': player.stcon,\n\t'ST Intelligence': player.stint,\n\t'ST Wisdom': 
player.stwis,\n\t'ST Charisma': player.stcha,\n\t'Acrobatics': player.skillProfs['acrobatics'][1],\n\t'Animal': player.skillProfs['animal'][1],\n\t'Arcana': player.skillProfs['aracna'][1],\n\t'Athletics': player.skillProfs['athletics'][1],\n\t'Deception ': player.skillProfs['deception'][1],\n\t'History ': player.skillProfs['history'][1],\n\t'Insight': player.skillProfs['insight'][1],\n\t'Intimidation': player.skillProfs['intimidation'][1],\n\t'Investigation ': player.skillProfs['investigation'][1],\n\t'Medicine': player.skillProfs['medicine'][1],\n\t'Nature': player.skillProfs['nature'][1],\n\t'Perception ': player.skillProfs['perception'][1],\n\t'Performance': player.skillProfs['performance'][1],\n\t'Persuasion': player.skillProfs['persuasion'][1],\n\t'Religion': player.skillProfs['religion'][1],\n\t'SleightofHand': player.skillProfs['sleight'][1],\n\t'Stealth ': player.skillProfs['stealth'][1],\n\t'Survival': player.skillProfs['survival'][1],\n\t'Passive': player.passivep #Passive Perception\n}\n\nwritePDF(templatePath, \"test.pdf\", defaultData)","repo_name":"Ethck/dndCharGen","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35043261465","text":"operator = [\"+\",\"-\",\"*\",\"/\",\"%\",\"^\"]\r\n\r\nnumeral = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\r\n\r\nexpression = []\r\n\r\nprint(\"Enter the expression :\")\r\nexp = input()\r\nprint(\"Expression = \",exp)\r\noperand = \"\"\r\n\r\nfor i in exp:\r\n # 22 + 32 - 1\r\n if i in numeral:\r\n operand = operand + i\r\n elif i in operator:\r\n expression.append(operand)\r\n expression.append(i)\r\n operand = \"\"\r\n else:\r\n print(\"Invalid character\")\r\n break\r\n\r\nexpression.append(operand)\r\nprint(\"expression in from of list = \",expression) \r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"kanikavishwakarma/Python-Codes","sub_path":"Infix into list.py","file_name":"Infix into list.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"11316294009","text":"import urllib\nimport base64\n\nfrom app import utils\nfrom app.clients.base import BaseHttpClient\n\nfrom app.config import settings\n\nimport httpx\n \nclass SpotifyClient(BaseHttpClient):\n def __init__(self):\n self.client_id = settings.spotify_client_id\n self.secret = settings.spotify_client_secret\n self.api_base_url = 'https://api.spotify.com/v1'\n self.account_base_url = 'https://accounts.spotify.com'\n self.redirect_uri = 'http://localhost:80/callback'\n \n def make_authorize_url(self):\n url = self.account_base_url + '/authorize'\n verifier, challenge = utils.gen_code_verifier()\n\n params = {\n 'client_id': self.client_id,\n 'response_type': 'code',\n 'redirect_uri': self.redirect_uri,\n 'state': verifier,\n 'scope': 'playlist-modify-public',\n 'code_challenge_method': 'S256',\n 'code_challenge': challenge,\n }\n\n return url + '?' 
+ urllib.parse.urlencode(params)\n    \n    def get_token(self, code: str, state: str):\n        url = self.account_base_url + '/api/token'\n        \n        headers = {\n            'Authorization': f\"Basic {base64.b64encode(f'{self.client_id}:{self.secret}'.encode()).decode()}\",\n            'Content-Type': 'application/x-www-form-urlencoded'\n        }\n        \n        data = {\n            'client_id': self.client_id,\n            'grant_type': 'authorization_code',\n            'redirect_uri': self.redirect_uri,\n            'code': code,\n            'code_verifier': state,\n        }\n        \n        resp = httpx.post(url, headers=headers, data=data)\n        \n        return resp.json()\n\n    \n    \n","repo_name":"rjnocelli/the-music-discoverer","sub_path":"backend/app/clients/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"41958135033","text":"#These helper functions are used in\n#the process of building CNNs and other networks\n\nimport numpy as np\nfrom sklearn.utils import shuffle\n\ndef get_data(balance_class_one = True):\n\t#images are 48 x 48 = 2304 size vectors\n\tX = []\n\tY = []\n\ttitle_row = True\n\tfor line in open('fer2013.csv'):\n\t\tif title_row == True:\n\t\t\ttitle_row = False\n\t\telse:\n\t\t\trow = line.split(',')\n\t\t\ty = int(row[0])\n\t\t\tx = [int(n) for n in row[1].split()]\n\t\t\tY.append(y)\n\t\t\tX.append(x)\n\n\tX,Y = np.array(X)/255.0,np.array(Y)\n\n\t#class 1 is under-represented in this data, so oversample it 9x\n\tif balance_class_one == True:\n\t\tX0, Y0 = X[Y!=1,:],Y[Y!=1]\n\t\tX1 = X[Y==1,:]\n\t\tX1 = np.repeat(X1,9,axis = 0)\n\t\tX = np.vstack((X0,X1))\n\t\tY = np.concatenate((Y0,[1]*len(X1)))\n\n\treturn X,Y\n\ndef get_image_data():\n\tX,Y = get_data()\n\tN,D = X.shape\n\td = int(np.sqrt(D))\n\tX = X.reshape(N,1,d,d)\n\treturn X,Y\n\ndef get_binary_data():\n\t# this function is used to get the binary data of the first two classes\n\tX = []\n\tY = []\n\tfirst_row = True\n\tfor line in open('fer2013.csv'):\n\t\tif first_row == True:\n\t\t\tfirst_row = False\n\t\telse:\n\t\t\trow = line.split(',')\n\t\t\ty = int(row[0])\n\t\t\tif y==0 or y==1:\n\t\t\t\tx = [int(n) for n in row[1].split()]\n\t\t\t\tY.append(y)\n\t\t\t\tX.append(x)\n\treturn np.array(X)/255.0,np.array(Y)\n\ndef Cross_validation(model,X,Y,K=5):\n\t# K-fold cross validation: returns the mean score over the K held-out folds\n\tX,Y = shuffle(X,Y)\n\tscores = []\n\tsize = len(Y)//K\n\tfor k in range(K):\n\t\tX_train = np.vstack((X[:k*size,:],X[(k+1)*size:,:]))\n\t\tY_train = np.concatenate((Y[:k*size],Y[(k+1)*size:]))\n\t\tX_test = X[k*size:(k+1)*size,:]\n\t\tY_test = Y[k*size:(k+1)*size]\n\n\t\tmodel.fit(X_train,Y_train)\n\t\tscore = model.score(X_test,Y_test)\n\t\tscores.append(score)\n\treturn np.mean(scores)\n\n\ndef weights_and_bias_init(D1,D2):\n\t# D1 and D2 are the dimensionality of input and output\n\t# dividing by sqrt(D1) keeps the output scale independent of the input size\n\tW = np.random.randn(D1,D2)/np.sqrt(D1)\n\tb = np.zeros(D2)\n\treturn W.astype(np.float32),b.astype(np.float32)\n\ndef softmax(a):\n\tA = np.exp(a)\n\treturn A/A.sum(axis=1,keepdims=True)\n\ndef sigmoid(a):\n\treturn 1/(1+np.exp(-a))\n\ndef relu(x):\n\treturn x*(x>0)\n\ndef init_filter(shape, poolsz):\n\t# filter initialization for a convolutional neural network (Theano ordering)\n\tw = np.random.randn(*shape) / np.sqrt(np.prod(shape[1:]) + shape[0]*np.prod(shape[2:] / np.prod(poolsz)))\n\treturn w.astype(np.float32)\n\ndef sigmoid_cost(T,Y):\n\treturn -(T*np.log(Y)+(1-T)*np.log(1-Y)).sum()\n\ndef cost2(T,Y):\n\t# unlike cost(), T here is a vector of target class indices, not an indicator 
matrix\n\tN = len(Y)\n\treturn -np.log(Y[np.arange(N),T]).sum()\n\ndef cost(T,Y):\n\t# T is the indicator matrix\n\treturn -(T*np.log(Y)).sum()\n\ndef error_rate(target,prediction):\n\treturn np.mean(target!=prediction)\n\ndef predict(Y_given_X):\n\treturn np.argmax(Y_given_X,axis=1)\n\ndef y2indicator(Y):\n\tN = len(Y)\n\tK = len(set(Y))\n\tT = np.zeros([N,K])\n\tfor n in range(N):\n\t\tT[n,Y[n]] = 1\n\treturn T\n\n","repo_name":"CharlieHao/CNN-facial_expression","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"5463636639","text":"import argparse\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torch.optim as op\r\nimport torch.nn as nn\r\nfrom matplotlib import pyplot as plt\r\nfrom torchvision import datasets, transforms\r\nimport random\r\n\r\n\r\n\r\nclass AE(nn.Module):\r\n    def __init__(self, hidden_layer_size, n_inputs):\r\n        super(AE, self).__init__()\r\n        self.encoder_LSTM = nn.LSTM(n_inputs, hidden_layer_size, batch_first=True)\r\n        self.decoder_LSTM = nn.LSTM(hidden_layer_size, hidden_layer_size, batch_first=True)\r\n        self.hidden_layer_size = hidden_layer_size\r\n        self.n_inputs = n_inputs\r\n        self.func = nn.Linear(hidden_layer_size, n_inputs)\r\n        self.indicator = nn.Linear(hidden_layer_size, 10)\r\n\r\n\r\n    def forward(self, x_t):\r\n        x, (z, y) = self.encoder_LSTM(x_t)\r\n        z = z.view(-1, 1, self.hidden_layer_size).repeat(1, x_t.size(1) , 1)\r\n        h_temp , s = self.decoder_LSTM(z)\r\n        return self.func(h_temp), self.indicator(h_temp)\r\n\r\nclass LSTM_AE_MNIST(Dataset):\r\n    def __init__(self, k, *, flatten=False):\r\n        self.data = []\r\n        self.tags = []\r\n        if flatten:\r\n            for (i,j) in k:\r\n                self.data.extend([i.reshape(-1,1)])\r\n                self.tags.extend([j])\r\n        else:\r\n            for (i,j) in k:\r\n                self.data.extend([i.squeeze(0)])\r\n                self.tags.extend([j])\r\n\r\n\r\n    def __len__(self):\r\n        return len(self.data)\r\n\r\n    def __getitem__(self, item):\r\n        return self.data[item], self.tags[item]\r\n\r\n\r\ndef training(epochs, optim, model, clip, data, values):\r\n    # train the autoencoder and classifier head, collecting per-epoch losses and accuracies\r\n    vals = []\r\n    training = []\r\n    training_success = []\r\n    validation_success = []\r\n    adder = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n    loss_func = nn.MSELoss()\r\n    cross_entropy = nn.CrossEntropyLoss()\r\n    for e in range(1, epochs + 1):\r\n        total_acc = 0\r\n        total_loss = 0\r\n        total_all = 0\r\n\r\n        for datum, tags in data:\r\n            optim.zero_grad()\r\n            tags = tags.to(adder)\r\n            datum = datum.to(adder)\r\n            outputs, new_input = model(datum)\r\n            loss = loss_func(outputs, datum)\r\n            new_loss = cross_entropy(new_input.reshape(len(datum), -1), tags)\r\n            loss = loss + new_loss\r\n            loss.backward()\r\n            nn.utils.clip_grad_norm_(model.parameters(), clip)\r\n            optim.step()\r\n            total_loss = total_loss + loss.item()\r\n            with torch.no_grad():\r\n                _, new_acc = torch.max(new_input.reshape(len(datum),-1) , 1)\r\n                total_acc = total_acc + (new_acc == tags).sum().item()\r\n                total_all = total_all + len(datum)\r\n\r\n        with torch.no_grad():\r\n            total_loss_2 = 0\r\n            total_acc_2 = 0\r\n            total_all_2 = 0\r\n            for datum, tags in values:\r\n                tags = tags.to(adder)\r\n                datum = datum.to(adder)\r\n                outputs, new_input = model(datum)\r\n                loss = loss_func(outputs, datum)\r\n                new_loss = cross_entropy(new_input.view(len(datum), -1), 
tags)\r\n                loss = loss + new_loss\r\n                total_loss_2 = total_loss_2 + loss.item()\r\n                _, new_acc = torch.max(new_input.view(len(datum), -1), 1)\r\n                total_acc_2 = total_acc_2 + (new_acc == tags).sum().item()\r\n                total_all_2 = total_all_2 + len(datum)\r\n\r\n        new_item = total_loss/len(data)\r\n        new_item_3 = total_acc / total_all\r\n        training.append(new_item)\r\n        training_success.append(new_item_3)\r\n\r\n        new_item2 = total_loss_2/len(values)\r\n        new_item_4 = total_acc_2 / total_all_2\r\n        vals.append(new_item2)\r\n        validation_success.append(new_item_4)\r\n\r\n        print(\"~~~~~~~~~~~~~~~~~\")\r\n        print(f\" Epoch {e}\\n train loss: {new_item:.3f}\\n val loss: {new_item2:.3f}\\n\")\r\n        print(f\"train accuracy: {new_item_3:.3f}\\n validation acc: {new_item_4:.3f}\\n\")\r\n\r\n    return training, vals, training_success, validation_success\r\n\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--epochs\", type = int, default = 30)\r\nparser.add_argument(\"--input_size\", type = int, choices=[1,28], default = 1)\r\nparser.add_argument(\"--optim\", choices =[\"sgd\", \"adam\"], default=\"adam\")\r\nparser.add_argument(\"--clip\", type=float, default=0.1)\r\nparser.add_argument(\"--batch_size\", type=int, default=64)\r\nparser.add_argument(\"--hidden_size\", type=int, default=256)\r\nparser.add_argument(\"--lr\", type=float, default=0.001)\r\n\r\narguments = parser.parse_args()\r\n\r\ntran = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\r\nmnist_train = datasets.MNIST(\"./data\", train = True, download = True, transform =tran)\r\nmnist_test = list(datasets.MNIST(\"./data\", train = False, download = True, transform =tran))\r\n\r\nprint(f\"parameters: epochs:{arguments.epochs} input_size:{arguments.input_size} optimizer:{arguments.optim} \"\r\n      f\"learning rate:{arguments.lr} clipping:{arguments.clip} batch_size:{arguments.batch_size} hidden_size:{arguments.hidden_size}\")\r\nif arguments.input_size == 1:\r\n    data = DataLoader(LSTM_AE_MNIST(mnist_train, flatten=True), batch_size=arguments.batch_size, shuffle=True)\r\n    values = DataLoader(LSTM_AE_MNIST(mnist_test[:5000], flatten=True), batch_size=arguments.batch_size,\r\n                        shuffle=False)\r\n    check = DataLoader(LSTM_AE_MNIST(mnist_test[5000:], flatten=True), batch_size=arguments.batch_size,\r\n                       shuffle=False)\r\n\r\nelse:\r\n    data = DataLoader(LSTM_AE_MNIST(mnist_train), batch_size=arguments.batch_size, shuffle=True)\r\n    values = DataLoader(LSTM_AE_MNIST(mnist_test[:5000]), batch_size=arguments.batch_size,\r\n                        shuffle=False)\r\n    check = DataLoader(LSTM_AE_MNIST(mnist_test[5000:]), batch_size=arguments.batch_size,\r\n                       shuffle=False)\r\n\r\nadder = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nmodel = AE(arguments.hidden_size, arguments.input_size).to(adder)\r\n\r\nif arguments.optim == \"sgd\":\r\n    optim = op.SGD(model.parameters(), lr=arguments.lr)\r\nelse:\r\n    optim = op.Adam(model.parameters(), lr=arguments.lr)\r\n\r\ntrain_losses, val_losses, train_accs, val_accs = training(arguments.epochs, optim, model, arguments.clip, data, values)\r\n\r\ncheck_2 = 0\r\ncheck_all = 0\r\n\r\nwith torch.no_grad():\r\n    # evaluate the trained model on the held-out test split\r\n    for datum, tags in check:\r\n        datum = datum.to(adder)\r\n        tags = tags.to(adder)\r\n        outputs, new_input = model(datum)\r\n        _, new_acc = torch.max(new_input.reshape(len(datum), -1), 1)\r\n        check_2 = check_2 + 
(new_acc == tags).sum().item()\r\n        check_all = check_all + len(datum)\r\n    acc = check_2 / check_all\r\n    print(f\"acc : {acc:.3f}\")\r\n\r\nplt.title(\"Success per epoch\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.xlabel(\"Epochs\")\r\nplt.plot(val_accs, label=\"Valid\")\r\nplt.plot(train_accs, label=\"Train\")\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.title(\"Loss per epoch\")\r\nplt.ylabel(\"Loss\")\r\nplt.xlabel(\"Epochs\")\r\nplt.plot(val_losses, label=\"Valid\")\r\nplt.plot(train_losses, label=\"Train\")\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n\r\ndatum = random.choice(next(iter(check))[0])\r\nfig, res = plt.subplots(1,2, constrained_layout=True)\r\nres[0].imshow(datum.numpy(), cmap=\"binary\")\r\nres[0].set_title(\"input\")\r\nres[0].axis(\"off\")\r\n\r\nwith torch.no_grad():\r\n    out = model(datum.unsqueeze(0).to(adder))[0][0]\r\n    res[1].imshow(out.cpu().numpy(), cmap=\"binary\")\r\nres[1].axis(\"off\")\r\nres[1].set_title(\"output\")\r\nplt.show()\r\n\r\n","repo_name":"Ido220694/LSTM-Auto-Encoder","sub_path":"lstm_ae_mnist.py","file_name":"lstm_ae_mnist.py","file_ext":"py","file_size_in_byte":8024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70225426475","text":"# The numeric values of these tokens don't matter. They'll\n# be reassigned to unique values just below.\nBEGIN_DOC = 0\nBEGIN_LINE = 0\nBEGIN_PARA = 0\nBEGIN_TERM = 0\nBEGIN_INSTR = 0\nINDENT = 0\nDEDENT = 0\nNAME = 0\nTEXT = 0\nBEGIN_ANCHOR = 0\nBEGIN_ASIDE = 0\nBEGIN_LINK = 0\nBEGIN_QUOTED = 0\nEND_QUOTED = 0\nEND_HYPERTEXT = 0\nPIVOT_TERM = 0\nARG_SEP = 0\nHYPERTEXT_DIVIDER = 0\nEND_LINE = 0\nEND_DOC = 0\nLEX_ERROR = 0\nUNLEXED = 0\n\ndef load():\n    g = globals()\n    x = []\n    for name, value in g.items():\n        if isinstance(value, int):\n            x.append(name)\n    i = 0\n    for item in x:\n        g[item] = i\n        i += 1\n    del g['load']\n    return x\n\nnames_by_value = load()\n\ndef ends_line(token):\n    return token.ttype in [END_LINE, END_DOC]","repo_name":"dhh1128/intent","sub_path":"intent/lang/tok_types.py","file_name":"tok_types.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"27464398897","text":"nums=[int(i) for i in input().split()]\nscore=nums.count(0)\ndelN=0\nnums=list(filter((delN).__ne__,nums))\nif score==0: \n    print(*nums)\n\nelse:\n    out=nums+([0]*score)\n    print(*out)","repo_name":"richard-tanakov/richard-tanakov-stepik_developer_python","sub_path":"Tuples/Number_product4.py","file_name":"Number_product4.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"17476025400","text":"start_num = int(input(\"Enter the start of the interval: \"))\nend_num = int(input(\"Enter the end of the interval: \"))\n\nprint(\"The prime numbers between these two numbers are: \")\n\nfor num in range(start_num, end_num):\n    if num > 1:\n        prime = True\n        for x in range(2, int(num ** 0.5) + 1):\n            if num % x == 0:\n                prime = False\n                break\n        if prime:\n            print(num)\nprint(\"Program ending\") \n\n    ","repo_name":"Lawando69/PythonWork","sub_path":"Homework - exercise 4/activity03.py","file_name":"activity03.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42763507585","text":"# Least squares solving\n# y = m x + b\n# y0 = m x0 + b\n# y1 = m x1 + b\n# y2 = m x2 + b\n# y3 = m x3 + b\n# y4 = m x4 + 
b\n# equivalent to A p = y\n# with\n# A = [x0 1\n#      x1 1\n#      x2 1\n#      x3 1\n#      x4 1]\n# p = [m\n#      b]\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.random import default_rng\n\nrng = default_rng(1)\n\nN = 100\nx = np.array(range(N))\nm = 0.5\nb = 1\n\nnoise_std = 0.1\ny = x * m + b\nnoise = rng.standard_normal(len(y)) * noise_std\ny_with_noise = y + noise\n\nA = np.array([x, np.ones(len(x))]).T\nsolution = np.linalg.lstsq(a=A, b=y_with_noise, rcond=None)\n# m_est = solution[0][0]\n# b_est = solution[0][1]\nm_est, b_est = solution[0]\n\ny_est = x * m_est + b_est\n\n\nplt.plot(x, y, 'k', label='Original data')\nplt.plot(x, y_with_noise, 'rx', label='Noisy data')\nplt.plot(x, y_est, 'g', label='Fitted line')\nplt.legend()\nplt.xlim([-1, N])\nplt.ylim([0, N/2 + b])\nplt.show()\n","repo_name":"jhbrito/IVC2324","sub_path":"Exercicio11.1Leastsquares.py","file_name":"Exercicio11.1Leastsquares.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2444760010","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom .views import ClientView, MailView, MessagesByMailView\n\n\nrouter = routers.DefaultRouter()\nrouter.register('client', ClientView)\nrouter.register('mail', MailView)\nrouter.register('mailMessages', MessagesByMailView)\n\nurlpatterns = [\n    path('', include(router.urls)),\n]","repo_name":"Sniperat/Notification_service","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"36246390708","text":"import asyncio\nfrom datetime import timedelta\nimport logging\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_PASSWORD, CONF_USERNAME\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers.aiohttp_client import async_create_clientsession\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator\n\nfrom .smarthome import smarthome\nfrom .const import (\n    DATA_CLIENT,\n    DATA_COORDINATOR,\n    DOMAIN,\n    DEFAULT_NAME,\n    PLATFORMS,\n    UPDATE_INTERVAL,\n)\n\n_LOGGER: logging.Logger = logging.getLogger(__package__)\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n    \"\"\"Set up this integration using UI.\"\"\"\n    if hass.data.get(DOMAIN) is None:\n        hass.data.setdefault(DOMAIN, {})\n\n    username = entry.data.get(CONF_USERNAME)\n    password = entry.data.get(CONF_PASSWORD)\n    session = async_create_clientsession(hass, auto_cleanup=True, verify_ssl=False)\n    client = smarthome(session, username, password)\n    await client.login()\n    coordinator = DataUpdateCoordinator(\n        hass,\n        _LOGGER,\n        name=DEFAULT_NAME,\n        update_method=client.get_devices,\n        update_interval=timedelta(seconds=UPDATE_INTERVAL),\n    )\n\n    await coordinator.async_refresh()\n\n    if not coordinator.last_update_success:\n        raise ConfigEntryNotReady\n\n    hass.data[DOMAIN][entry.entry_id] = {\n        DATA_CLIENT: client,\n        DATA_COORDINATOR: coordinator,\n    }\n\n    for platform in PLATFORMS:\n        hass.async_create_task(\n            hass.config_entries.async_forward_entry_setup(entry, platform)\n        )\n\n    entry.add_update_listener(async_reload_entry)\n    return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n    \"\"\"Handle removal of an 
entry.\"\"\"\n unloaded = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, platform)\n for platform in PLATFORMS\n ]\n )\n )\n if unloaded:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unloaded\n\n\nasync def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Reload config entry.\"\"\"\n await async_unload_entry(hass, entry)\n await async_setup_entry(hass, entry)\n","repo_name":"n71154plus/Hitachi_smart_app","sub_path":"custom_components/Hitachi_smart_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"73"} +{"seq_id":"37854456726","text":"from urllib import request\n\ndef add(x, y, sleep=0):\n if sleep > 0: print(\"Sleep for %s seconds\"%sleep)\n with request.urlopen('http://localhost:5555/add?x=%s&y=%s&sleep=%s'%(x,y,sleep)) as f:\n return int(f.read())\n\ndef multiply(x, y, sleep=0):\n if sleep > 0: print(\"Sleep for %s seconds\"%sleep)\n with request.urlopen('http://localhost:5556/multiply?x=%s&y=%s&sleep=%s'%(x,y,sleep)) as f:\n return int(f.read())\n\nimport concurrent.futures\n\nwith concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n a = executor.submit(add, 3, 4, 0)\n m = executor.submit(multiply, 4, 4)\n while a.running() or m.running():\n import time\n time.sleep(1)\n print(\"waiting...\")\n\nprint(a.result() + m.result())\n","repo_name":"perryism/async_example","sub_path":"async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"41833093154","text":"### BY BOAZ MENARD AND BRANDON ELMORE ###\r\nDISTANCE_THRESHOLD = 3 # distance in inches within which an object is detected\r\nSTARTING_SPEED = 50\r\n\r\nfrom picamera import PiCamera\r\nimport traceback\r\nimport picar\r\nfrom picar.obstacle_sensor import *\r\nfrom picar.front_wheels import *\r\nfrom picar.back_wheels import *\r\nfrom picar.line_sensor import *\r\nimport time\r\nimport Pyro4\r\nimport pickle\r\nfrom PIL import Image\r\n\r\nsteering = Front_Wheels() # create a Front_Wheels object for steering the car\r\nmotors = Back_Wheels() # create a Back_Wheels object to move the car\r\nobjSensor = Obstacle_Sensor() # create an Object_Sensor() object to detect distance to objects\r\nlineSensor = Line_Sensor() # create a Line_Sensor() object to detect lines on the floor\r\ncamera = PiCamera()\r\npicar.setup()\r\nsteering.ready()\r\nmotors.speed = STARTING_SPEED\r\nmotors.ready()\r\nhardRight = 135\r\nslightRight = 100\r\nstr8 = 90\r\nslightLeft = 80\r\nhardLeft = 45\r\n\r\nstarfleetcomm = Pyro4.Proxy('PYRONAME:starfleetcomm')\r\n\r\n\r\n\r\ndef followLine():\r\n '''\r\n Read line sensor and return a two tuple with the first element the speed\r\n and the second element the turn angle ie (speed, angle).\r\n '''\r\n readings = lineSensor.read_digital()\r\n print('Line Sensor: ', readings)\r\n if readings == [0, 0, 0, 0, 0]:\r\n steering.turn(hardRight)\r\n elif readings == [1, 1, 1, 1, 1]:\r\n motors.stop()\r\n steering.turn_straight()\r\n motors.backward()\r\n time.sleep(.5)\r\n motors.stop()\r\n steering.turn_left()\r\n motors.forward()\r\n time.sleep(1)\r\n steering.turn(str8)\r\n elif readings == [0, 0, 1, 1, 1] or readings == [0, 0, 0, 1, 1] or readings == [0, 0, 0, 0, 1]:\r\n steering.turn(str8)\r\n elif readings == [0, 1, 1, 1, 0] or readings == [0, 0, 1, 0, 0] or readings == [0, 0, 1, 1, 
0] or readings == [0, 1, 1, 0, 0]:\r\n        steering.turn(slightLeft)\r\n    elif readings == [1, 0, 0, 0, 0] or readings == [1, 1, 0, 0, 0] or readings == [1, 1, 1, 0, 0]:\r\n        steering.turn(hardLeft)\r\n\r\n\r\n    \r\ndef Obstacle():\r\n    \"\"\"Takes a picture when an object is detected, then waits for a command from the user\"\"\"\r\n    if objSensor.distance() <= DISTANCE_THRESHOLD:\r\n        motors.stop()\r\n        camera.start_preview()\r\n        time.sleep(.5)\r\n        camera.capture('/home/pi/Desktop/TermProject/object.jpg')\r\n        camera.stop_preview()\r\n        img = Image.open('/home/pi/Desktop/TermProject/object.jpg')\r\n        img_bytes = pickle.dumps(img)\r\n        starfleetcomm.sendImage(img_bytes)\r\n        while not starfleetcomm.isNewCommandPosted():\r\n            time.sleep(2)\r\n        Decision = starfleetcomm.getCommand()\r\n        if Decision == 'M':\r\n            steering.turn(45)\r\n            steering.turn(135)\r\n            steering.ready()\r\n            time.sleep(3)\r\n            motors.forward()\r\n        elif Decision == 'I':\r\n            motors.forward()\r\n            steering.turn(45)\r\n            time.sleep(2)\r\n            traverseTheMaze()\r\n        elif Decision == \"S\":\r\n            motors.forward()\r\n            time.sleep(1)\r\n    \r\n    \r\ndef traverseTheMaze():\r\n    '''\r\n    Continuously steers the PiCar-S until no line is detected, whereby\r\n    the function exits.\r\n    '''\r\n    defaultSpeed = s = 20\r\n    motors.speed = 20\r\n    motors.forward()\r\n    while s != 0:\r\n        print(objSensor.distance())\r\n        Obstacle()\r\n        followLine()\r\n        \r\n        motors.speed = s\r\n    motors.stop()\r\n    motors.speed = defaultSpeed\r\n","repo_name":"boazmenard/Design-Thinking-II","sub_path":"A Maze'n Robot - Klingons/TraverseTheMaze.py","file_name":"TraverseTheMaze.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38594273972","text":"import pytest\nfrom influx import Influx\n\n@pytest.fixture\ndef test_database():\n    '''Returns an Influx object'''\n    token = \"ci_zJ9DSnTbO4fSjRVxKjn2956LhXDre0y8DkMNgMmpp1ptQDsNe_u5RMwxGr0XAN2pjyHOuJ5yAd1KfnQGQUg==\"\n    org = \"cheekyagentpotter@gmail.com\"\n    bucket = \"test\"\n    url=\"https://eu-central-1-1.aws.cloud2.influxdata.com\"\n    \n    return Influx(token=token, org=org, bucket=bucket, url=url)\n\n@pytest.fixture\ndef test_faulty_database():\n    '''Returns a faulty Influx object'''\n    token = \"these\"\n    org = \"credentials\"\n    bucket = \"won't\"\n    url=\"work\"\n    \n    return Influx(token=token, org=org, bucket=bucket, url=url)\n\ndef test_write_and_query(test_database):\n    data = \"mem,host=host1 used_percent=23.43234543\"\n    test_database.write(data)\n\n    query = '''\n    from(bucket: \"test\")\n    |> range(start: -30d)\n    |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n    |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n    |> filter(fn: (r) => r[\"host\"] == \"host1\")\n    |> yield(name: \"mean\")\n    '''\n    assert test_database.query(query)[0].records[0].get_value() == 23.43234543\n\ndef test_faulty_db(test_faulty_database):\n    with pytest.raises(Exception):\n        data = \"mem,host=host1 used_percent=23.43234543\"\n        test_faulty_database.write(data)\n\n","repo_name":"jonakr/DBproject","sub_path":"src/tests/test_influx.py","file_name":"test_influx.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}